Columns: repo_name (string, length 8-130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence)
mscheltienne/pyprep
[ "82920228a1bfe46a8f8c04443547cc2726d3d189", "82920228a1bfe46a8f8c04443547cc2726d3d189" ]
[ "pyprep/removeTrend.py", "examples/run_ransac.py" ]
[ "\"\"\"High-pass filter and locally detrend the EEG signal.\"\"\"\nimport logging\n\nimport mne\nimport numpy as np\n\nfrom pyprep.utils import _eeglab_create_highpass, _eeglab_fir_filter\n\n\ndef removeTrend(\n EEG,\n sample_rate,\n detrendType=\"high pass\",\n detrendCutoff=1.0,\n detrendChannels=None,\n matlab_strict=False,\n):\n \"\"\"Remove trends (i.e., slow drifts in baseline) from an array of EEG data.\n\n Parameters\n ----------\n EEG : np.ndarray\n A 2-D array of EEG data to detrend.\n sample_rate : float\n The sample rate (in Hz) of the input EEG data.\n detrendType : str, optional\n Type of detrending to be performed: must be one of 'high pass',\n 'high pass sinc, or 'local detrend'. Defaults to 'high pass'.\n detrendCutoff : float, optional\n The high-pass cutoff frequency (in Hz) to use for detrending. Defaults\n to 1.0 Hz.\n detrendChannels : {list, None}, optional\n List of the indices of all channels that require detrending/filtering.\n If ``None``, all channels are used (default).\n matlab_strict : bool, optional\n Whether or not detrending should strictly follow MATLAB PREP's internal\n math, ignoring any improvements made in PyPREP over the original code\n (see :ref:`matlab-diffs` for more details). Defaults to ``False``.\n\n Returns\n -------\n EEG : np.ndarray\n A 2-D array containing the filtered/detrended EEG data.\n\n Notes\n -----\n High-pass filtering is implemented using the MNE filter function\n :func:``mne.filter.filter_data`` unless `matlab_strict` is ``True``, in\n which case it is performed using a minimal re-implementation of EEGLAB's\n ``pop_eegfiltnew``. Local detrending is performed using a Python\n re-implementation of the ``runline`` function from the Chronux package for\n MATLAB [1]_.\n\n References\n ----------\n .. 
[1] http://chronux.org/\n\n \"\"\"\n if len(EEG.shape) == 1:\n EEG = np.reshape(EEG, (1, EEG.shape[0]))\n\n if detrendType.lower() == \"high pass\":\n if matlab_strict:\n picks = detrendChannels if detrendChannels else range(EEG.shape[0])\n filt = _eeglab_create_highpass(detrendCutoff, sample_rate)\n EEG[picks, :] = _eeglab_fir_filter(EEG[picks, :], filt)\n else:\n EEG = mne.filter.filter_data(\n EEG,\n sfreq=sample_rate,\n l_freq=detrendCutoff,\n h_freq=None,\n picks=detrendChannels,\n )\n\n elif detrendType.lower() == \"high pass sinc\":\n fOrder = np.round(14080 * sample_rate / 512)\n fOrder = int(fOrder + fOrder % 2)\n EEG = mne.filter.filter_data(\n data=EEG,\n sfreq=sample_rate,\n l_freq=1,\n h_freq=None,\n picks=detrendChannels,\n filter_length=fOrder,\n fir_window=\"blackman\",\n )\n\n elif detrendType.lower() == \"local detrend\":\n if detrendChannels is None:\n detrendChannels = np.arange(0, EEG.shape[0])\n windowSize = 1.5 / detrendCutoff\n windowSize = np.minimum(windowSize, EEG.shape[1])\n stepSize = 0.02\n EEG = np.transpose(EEG)\n n = np.round(sample_rate * windowSize)\n dn = np.round(sample_rate * stepSize)\n\n if dn > n or dn < 1:\n logging.error(\n \"Step size should be less than the window size and \"\n \"contain at least 1 sample\"\n )\n if n == EEG.shape[0]:\n # data = scipy.signal.detrend(EEG, axis=0)\n pass\n else:\n for ch in detrendChannels:\n EEG[:, ch] = runline(EEG[:, ch], int(n), int(dn))\n EEG = np.transpose(EEG)\n\n else:\n logging.warning(\n \"No filtering/detreding performed since the detrend type did not match\"\n )\n\n return EEG\n\n\ndef runline(y, n, dn):\n \"\"\"Perform local linear regression on a channel of EEG data.\n\n A re-implementation of the ``runline`` function from the Chronux package\n for MATLAB [1]_.\n\n Parameters\n ----------\n y : np.ndarray\n A 1-D array of data from a single EEG channel.\n n : int\n Length of the detrending window.\n dn : int\n Length of the window step size.\n\n Returns\n -------\n y: np.ndarray\n The detrended signal for the given EEG channel.\n\n References\n ----------\n .. [1] http://chronux.org/\n\n \"\"\"\n nt = y.shape[0]\n y_line = np.zeros((nt, 1))\n norm = np.zeros((nt, 1))\n nwin = int(np.ceil((nt - n) / dn))\n yfit = np.zeros((nwin, n))\n xwt = (np.arange(1, n + 1) - n / 2) / (n / 2)\n wt = np.power(1 - np.power(np.absolute(xwt), 3), 3)\n for j in range(0, nwin):\n tseg = y[dn * j : dn * j + n]\n y1 = np.mean(tseg)\n y2 = np.mean(np.multiply(np.arange(1, n + 1), tseg)) * (2 / (n + 1))\n a = np.multiply(np.subtract(y2, y1), 6 / (n - 1))\n b = np.subtract(y1, a * (n + 1) / 2)\n yfit[j, :] = np.multiply(np.arange(1, n + 1), a) + b\n y_line[j * dn : j * dn + n] = y_line[j * dn : j * dn + n] + np.reshape(\n np.multiply(yfit[j, :], wt), (n, 1)\n )\n norm[j * dn : j * dn + n] = norm[j * dn : j * dn + n] + np.reshape(wt, (n, 1))\n\n for i in range(0, len(norm)):\n if norm[i] > 0:\n y_line[i] = y_line[i] / norm[i]\n indx = (nwin - 1) * dn + n - 1\n npts = len(y) - indx + 1\n y_line[indx - 1 :] = np.reshape(\n (np.multiply(np.arange(n + 1, n + npts + 1), a) + b), (npts, 1)\n )\n for i in range(0, len(y_line)):\n y[i] = y[i] - y_line[i]\n return y\n", "\"\"\"\n=================\nRun RANSAC\n=================\n\n\nIn this example we show how to run the RANSAC of ``pyprep``.\n\n.. 
currentmodule:: pyprep\n\"\"\" # noqa: D205 D400\n\n# Authors: Yorguin Mantilla <[email protected]>\n#\n# License: MIT\n# Based On: use_noisy_module.py\n\n###############################################################################\n# First we import what we need for this example.\nimport numpy as np\nimport mne\nfrom scipy import signal as signal\nfrom time import perf_counter\n\nfrom pyprep.find_noisy_channels import NoisyChannels\n\n###############################################################################\n# Now let's make some arbitrary MNE raw object for demonstration purposes.\n# We will think of good channels as sine waves and bad channels correlated with\n# each other as sawtooths. The RANSAC will be biased towards sines in its\n# prediction (they are the majority) so it will identify the sawtooths as bad.\n# We will need to set a montage because the RANSAC needs to interpolate.\n\nsfreq = 1000.0\n\n# We need a montage, because RANSAC uses spherical splines for interpolation\nmontage = mne.channels.make_standard_montage(\"standard_1020\")\n\nch_names = montage.ch_names\n\nn_chans = len(ch_names)\n\ninfo = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=[\"eeg\"] * n_chans)\n\ntime = np.arange(0, 30, 1.0 / sfreq) # 30 seconds of recording\nn_bad_chans = 3\n\nrng = np.random.default_rng(42)\nbad_channels = rng.choice(np.arange(n_chans), n_bad_chans, replace=False)\nbad_channels = [int(i) for i in bad_channels]\nbad_ch_names = [ch_names[i] for i in bad_channels]\n\n# The frequency components to use in the signal for good and bad channels\nfreq_good = 20\nfreq_bad = 20\n\n# Generate the data\nX = [\n signal.sawtooth(2 * np.pi * freq_bad * time)\n if i in bad_channels\n else np.sin(2 * np.pi * freq_good * time)\n for i in range(n_chans)\n]\n# Scale the signal amplitude and add noise.\nX = 2e-5 * np.array(X) + 1e-5 * rng.random((n_chans, time.shape[0]))\n\nraw = mne.io.RawArray(X, info)\n\nraw.set_montage(montage, verbose=False)\n\n\n###############################################################################\n# Assign the mne object to the :class:`NoisyChannels` class. The resulting object\n# will be the place where all following methods are performed.\n\nnd = NoisyChannels(raw, random_state=1337)\nnd2 = NoisyChannels(raw, random_state=1337)\n\n###############################################################################\n# Find all bad channels using channel-wise RANSAC and print a summary\nstart_time = perf_counter()\nnd.find_bad_by_ransac(channel_wise=True)\nprint(\"--- %s seconds ---\" % (perf_counter() - start_time))\n\n# Repeat channel-wise RANSAC using a single channel at a time. This is slower\n# but needs less memory.\nstart_time = perf_counter()\nnd2.find_bad_by_ransac(channel_wise=True, max_chunk_size=1)\nprint(\"--- %s seconds ---\" % (perf_counter() - start_time))\n\n###############################################################################\n# Now the bad channels are saved in `bads` and we can continue processing our\n# `raw` object. For more information, we can access attributes of the ``nd``\n# instance:\n\n# Check channels that go bad together by correlation (RANSAC)\nprint(nd.bad_by_ransac)\nassert set(bad_ch_names) == set(nd.bad_by_ransac)\n\n# Check that the channel wise RANSAC yields identical results\nprint(nd2.bad_by_ransac)\nassert set(bad_ch_names) == set(nd2.bad_by_ransac)\n" ]
[ [ "numpy.multiply", "numpy.transpose", "numpy.ceil", "numpy.zeros", "numpy.subtract", "numpy.reshape", "numpy.arange", "numpy.absolute", "numpy.round", "numpy.mean", "numpy.minimum" ], [ "numpy.random.default_rng", "numpy.arange", "numpy.array", "numpy.sin", "scipy.signal.sawtooth" ] ]
FlorianBury/talos
[ "30f7af4d1f628364f8e8a2e983f72b2631cee6d9" ]
[ "talos/parameters/ParamGrid.py" ]
[ "from numpy import arange, unique, array, column_stack, concatenate\nfrom itertools import product\n\nfrom ..reducers.sample_reducer import sample_reducer\n\n\nclass ParamGrid:\n\n '''Suite for handling parameters internally within Talos\n\n Takes as input the parameter dictionary from the user, and\n returns a class object which can then be used to pick parameters\n for each round together with other parameter related operations.\n\n '''\n\n def __init__(self, main_self):\n\n self.main_self = main_self\n\n # convert the input to useful format\n self._p = self._param_input_conversion()\n\n # build the parameter permutation grid\n self.param_grid = self._param_grid()\n \n # reduce according to downsample\n if self.main_self.grid_downsample is not None:\n self.param_grid = sample_reducer(self)\n\n # create a index for logging purpose\n self.param_log = list(range(len(self.param_grid)))\n\n # add the log index to param grid\n self.param_grid = column_stack((self.param_grid, self.param_log))\n\n # Repeat model a certain amount of times #\n for i in range(1,self.main_self.repetition):\n self.param_grid = concatenate((self.param_grid,self.param_grid),axis=0)\n\n\n\n\n def _param_grid(self):\n\n '''CREATE THE PARAMETER PERMUTATIONS\n\n This is done once before starting the experiment.\n Takes in the parameter dictionary, and returns\n every possible permutation in an array.\n '''\n\n ls = [list(self._p[key]) for key in self._p.keys()]\n _param_grid_out = array(list(product(*ls)), dtype='object')\n\n return _param_grid_out\n\n def _param_input_conversion(self):\n\n '''DETECT PARAM FORMAT\n\n Checks of the hyperparameter input format is list\n or tupple in the params dictionary and expands accordingly.\n\n '''\n\n out = {}\n\n for param in self.main_self.params.keys():\n\n # for range/step style input\n if isinstance(self.main_self.params[param], tuple):\n out[param] = self._param_range(self.main_self.params[param][0],\n self.main_self.params[param][1],\n self.main_self.params[param][2])\n # all other input styles\n else:\n out[param] = self.main_self.params[param]\n\n return out\n\n def _param_range(self, start, end, n):\n\n '''PARAMETER RANGE\n\n Deals with the format where a start, end\n and steps values are given for a parameter\n in a tuple format.\n\n This is called internally from param_format()\n '''\n\n try:\n out = arange(start, end, (end - start) / n, dtype=float)\n # this is for python2\n except ZeroDivisionError:\n out = arange(start, end, (end - start) / float(n), dtype=float)\n\n if type(start) == int and type(end) == int:\n out = out.astype(int)\n out = unique(out)\n\n return out\n" ]
[ [ "numpy.arange", "numpy.concatenate", "numpy.unique", "numpy.column_stack" ] ]
quasiben/bokeh
[ "738343bd18c851dfd1fdc82cf35fe3eb4cdfd475" ]
[ "bokeh/charts/stats.py" ]
[ "\"\"\" Statistical methods used to define or modify position of glyphs.\n\nReferences:\n Wilkinson L. The Grammer of Graphics, sections 7, 7.1\n\nMethod Types:\n - Bin: Partitions a space before statistical calculation\n - Summary: Produces a single value comprising a statistical summary\n - Region: Produces two values bounding an interval.\n - Smooth: Produces values representing smoothed versions of the input data.\n - Link: Produces edges from pairs of nodes in a graph.\n\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport pandas as pd\n\nfrom bokeh.models.sources import ColumnDataSource\nfrom bokeh.core.properties import (HasProps, Float, Either, String, Date, Datetime, Int,\n Bool, List, Instance)\nfrom .properties import Column, EitherColumn, ColumnLabel\n\n\nclass Stat(HasProps):\n \"\"\"Represents a statistical operation to summarize a column of data.\n\n Can be computed from either a ColumnLabel with a ColumnDataSource, *or*, a\n discrete column of data.\n \"\"\"\n\n # inputs\n column = ColumnLabel(help=\"\"\"A column to use for the stat calculation. Required\n when providing a ColumnDataSource as input.\"\"\")\n source = Instance(ColumnDataSource, help=\"\"\"One option for providing the data\n source for stat calculation.\"\"\")\n values = EitherColumn(Column(Float), Column(Int), Column(String),\n Column(Date), Column(Datetime), Column(Bool), default=None, help=\"\"\"\n Second option for providing values for stat calculation is by\n passing the actual column of data.\"\"\")\n\n # output\n value = Float(help=\"\"\"The value calculated for the stat. Some stats could use\n multiple properties to provide the calculation if required.\"\"\")\n\n def __init__(self, **properties):\n\n source = properties.pop('source', None)\n if source is not None:\n if isinstance(source, pd.DataFrame):\n source = ColumnDataSource(source)\n properties['source'] = source\n\n super(Stat, self).__init__(**properties)\n self._refresh()\n\n def _refresh(self):\n \"\"\"Lazy update of properties, used for initial transform init.\"\"\"\n if self.get_data() is not None:\n self.update()\n self.calculate()\n\n def set_data(self, data, column=None):\n \"\"\"Set data properties and update all dependent properties.\"\"\"\n if isinstance(data, pd.DataFrame):\n data = ColumnDataSource(data)\n\n if isinstance(data, ColumnDataSource):\n self.source = data\n if column is not None:\n self.column = column\n else:\n self.values = data\n\n self.update()\n self.calculate()\n\n def get_data(self, column=None):\n \"\"\"Returns the available columnlabel/source values or column values.\"\"\"\n if self.source is not None and (self.column is not None or column is not None):\n if column is not None:\n col = column\n else:\n col = self.column\n\n return pd.Series(self.source.data[col])\n elif self.values is None and self.source is not None:\n return pd.Series(self.source.to_df().index)\n elif self.values is not None:\n return self.values\n else:\n return None\n\n def calculate(self):\n \"\"\"Return transformed value from column label/source or column-like data.\"\"\"\n raise NotImplementedError('You must implement the calculate method '\n 'for each stat type.')\n\n def update(self):\n \"\"\"Perform any initial work before the actual calculation is performed.\"\"\"\n pass\n\n\nclass Sum(Stat):\n def calculate(self):\n self.value = self.get_data().sum()\n\n\nclass Mean(Stat):\n def calculate(self):\n self.value = self.get_data().mean()\n\n\nclass Count(Stat):\n def calculate(self):\n self.value = 
self.get_data().count()\n\n\nclass CountDistinct(Stat):\n def calculate(self):\n self.value = self.get_data().nunique()\n\n\nclass Median(Stat):\n def calculate(self):\n self.value = self.get_data().median()\n\n\nclass StdDeviation(Stat):\n def calculate(self):\n self.value = self.get_data().std()\n\n\nclass Min(Stat):\n def calculate(self):\n self.value = self.get_data().min()\n\n\nclass Max(Stat):\n def calculate(self):\n self.value = self.get_data().max()\n\n\nclass Quantile(Stat):\n \"\"\"Produces the cutpoint that divides the input data by the interval.\n\n Quartiles are a special case of quartiles that divide a dataset into four\n equal-size groups. (https://en.wikipedia.org/wiki/Quantile)\n \"\"\"\n interval = Float(default=0.5)\n\n def calculate(self):\n self.value = self.get_data().quantile(self.interval)\n\n\nclass Bin(Stat):\n \"\"\"Represents a single bin of data values and attributes of the bin.\"\"\"\n label = Either(String, List(String))\n start = Either(Float, List(Float))\n stop = Either(Float, List(Float))\n\n start_label = String()\n stop_label = String()\n\n center = Either(Float, List(Float))\n\n stat = Instance(Stat, default=Count())\n width = Float()\n\n def __init__(self, bin_label, values=None, source=None, **properties):\n if isinstance(bin_label, tuple):\n bin_label = list(bin_label)\n else:\n bin_label = [bin_label]\n properties['label'] = bin_label\n\n bounds = self.process_bounds(bin_label)\n\n starts, stops = zip(*bounds)\n centers = [(start + stop)/2.0 for start, stop in zip(starts, stops)]\n if len(starts) == 1:\n starts = starts[0]\n stops = stops[0]\n centers = centers[0]\n else:\n starts = list(starts)\n stops = list(stops)\n centers = list(centers)\n\n properties['start'] = starts\n properties['stop'] = stops\n properties['center'] = centers\n properties['values'] = values\n super(Bin, self).__init__(**properties)\n\n @staticmethod\n def binstr_to_list(bins):\n \"\"\"Produce a consistent display of a bin of data.\"\"\"\n value_chunks = bins.split(',')\n value_chunks = [val.replace('[', '').replace(']', '').replace('(', '').replace(')', '') for val in value_chunks]\n bin_values = [float(value) for value in value_chunks]\n\n return bin_values[0], bin_values[1]\n\n def process_bounds(self, bin_label):\n if isinstance(bin_label, list):\n return [self.binstr_to_list(dim) for dim in bin_label]\n else:\n return [self.binstr_to_list(bin_label)]\n\n def update(self):\n self.stat.set_data(self.values)\n\n def calculate(self):\n self.value = self.stat.value\n\n\nclass BinStats(Stat):\n \"\"\"A set of statistical calculations for binning values.\n\n Bin counts using: https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule\n \"\"\"\n bins = Either(Int, Float, List(Float), default=None, help=\"\"\"\n If bins is an int, it defines the number of equal-width bins in the\n given range. 
If bins is a sequence, it defines the\n bin edges, including the rightmost edge, allowing for non-uniform\n bin widths.\n\n (default: None, use Freedman-Diaconis rule)\n \"\"\")\n bin_width = Float(default=None, help='Use Freedman-Diaconis rule if None.')\n q1 = Quantile(interval=0.25)\n q3 = Quantile(interval=0.75)\n labels = List(String)\n\n def __init__(self, values=None, column=None, **properties):\n properties['values'] = values\n properties['column'] = column or 'values'\n\n super(BinStats, self).__init__(**properties)\n\n def update(self):\n values = self.get_data()\n self.q1.set_data(values)\n self.q3.set_data(values)\n if self.bins is None:\n self.calc_num_bins(values)\n\n def calc_num_bins(self, values):\n \"\"\"Calculate optimal number of bins using IQR.\n\n From: http://stats.stackexchange.com/questions/114490/optimal-bin-width-for-two-dimensional-histogram\n\n \"\"\"\n iqr = self.q3.value - self.q1.value\n\n if iqr == 0:\n self.bin_width = np.sqrt(values.size)\n else:\n self.bin_width = 2 * iqr * (len(values) ** -(1. / 3.))\n\n self.bins = int(np.ceil((values.max() - values.min()) / self.bin_width))\n\n if self.bins <= 1:\n self.bins = 3\n\n def calculate(self):\n pass\n\n\nclass BinnedStat(Stat):\n \"\"\" Base class for shared functionality accross bins and aggregates\n dimensions for plotting.\n\n \"\"\"\n bin_stat = Instance(BinStats, help=\"\"\"\n A mapping between each dimension and associated binning calculations.\n \"\"\")\n\n bins = List(Instance(Bin), help=\"\"\"\n A list of the `Bin` instances that were produced as result of the inputs.\n Iterating over `Bins` will iterate over this list. Each `Bin` can be inspected\n for metadata about the bin and the values associated with it.\n \"\"\")\n\n stat = Instance(Stat, default=Count(), help=\"\"\"\n The statistical operation to be used on the values in each bin.\n \"\"\")\n\n bin_column = String()\n centers_column = String()\n\n aggregate = Bool(default=True)\n\n bin_values = Bool(default=False)\n\n bin_width = Float()\n\n def __init__(self, values=None, column=None, bins=None,\n stat='count', source=None, **properties):\n\n if isinstance(stat, str):\n stat = stats[stat]()\n\n properties['column'] = column or 'vals'\n properties['stat'] = stat\n properties['values'] = values\n properties['source'] = source\n self._bins = bins\n super(BinnedStat, self).__init__(**properties)\n\n\n def _get_stat(self):\n stat_kwargs = {}\n\n if self.source is not None:\n stat_kwargs['source'] = self.source\n stat_kwargs['column'] = self.column\n\n elif self.values is not None:\n stat_kwargs['values'] = self.values\n\n stat_kwargs['bins'] = self._bins\n\n return BinStats(**stat_kwargs)\n\n def update(self):\n self.bin_stat = self._get_stat()\n self.bin_stat.update()\n\n\nclass Bins(BinnedStat):\n \"\"\"Bins and aggregates dimensions for plotting.\n\n Takes the inputs and produces a list of bins that can be iterated over and\n inspected for their metadata. 
The bins provide easy access to consistent labeling,\n bounds, and values.\n \"\"\"\n\n def calculate(self):\n\n bin_str = '_bin'\n self.bin_column = self.column + bin_str\n bin_models = []\n\n data = self.bin_stat.get_data()\n bins = self.bin_stat.bins\n\n # Choose bin bounds when data range is ill-defined; pd.cut()\n # does not handle this well for values that are <= 0\n if data.size < 2:\n raise ValueError('Histogram data must have at least two elements.')\n if data.ndim == 1 and data.std() == 0:\n margin = 0.01 * abs(float(data[0])) or 0.01\n bins = np.linspace(data[0] - margin, data[0] + margin, bins+1)\n\n binned, bin_bounds = pd.cut(data, bins,\n retbins=True, include_lowest=True, precision=0)\n\n self.bin_width = np.round(bin_bounds[2] - bin_bounds[1], 1)\n\n if self.source is not None:\n # add bin column to data source\n self.source.add(binned.tolist(), name=self.bin_column)\n df = self.source.to_df()\n else:\n df = pd.DataFrame({self.column: self.values, self.bin_column: binned})\n\n for name, group in df.groupby(self.bin_column):\n bin_models.append(Bin(bin_label=name, values=group[self.column],\n stat=self.stat))\n\n self.bins = bin_models\n\n centers = binned.copy()\n centers = centers.astype(str)\n for bin in self.bins:\n centers[binned == bin.label] = bin.center\n\n self.centers_column = self.column + '_center'\n if self.source is not None:\n self.source.add(centers.tolist(), name=self.centers_column)\n else:\n df[self.centers_column] = centers\n\n def __getitem__(self, item):\n return self.bins[item]\n\n def apply(self, data):\n self.set_data(data.source)\n return self.source.to_df()\n\n def sort(self, ascending=True):\n if self.bins is not None:\n self.bins = list(sorted(self.bins, key=lambda x: x.center,\n reverse=~ascending))\n\n\nclass Histogram(BinnedStat):\n \"\"\"Bins and aggregates dimensions for plotting.\n\n Takes the inputs and produces a list of bins that can be iterated over and\n inspected for their metadata. The bins provide easy access to consistent labeling,\n bounds, and values.\n \"\"\"\n\n density = Bool(False, help=\"\"\"\n Whether to normalize the histogram.\n\n If True, the result is the value of the probability *density* function\n at the bin, normalized such that the *integral* over the range is 1. 
If\n False, the result will contain the number of samples in each bin.\n\n For more info check ``numpy.histogram`` function documentation.\n\n (default: False)\n \"\"\")\n\n def calculate(self):\n bin_str = '_bin'\n self.bin_column = self.column + bin_str\n\n data = self.bin_stat.get_data()\n bins = self.bin_stat.bins\n\n binned, bin_bounds = np.histogram(\n np.array(data), density=self.density, bins=bins\n )\n\n self.bin_width = np.round(bin_bounds[2] - bin_bounds[1], 1)\n self.bins = []\n\n for i, b in enumerate(binned):\n width = bin_bounds[i+1] - bin_bounds[i]\n if i == 0:\n lbl = \"[%.1f, %.1f]\" % (bin_bounds[i], bin_bounds[i+1])\n else:\n lbl = \"(%.1f, %.1f]\" % (bin_bounds[i], bin_bounds[i+1])\n self.bins.append(Bin(bin_label=lbl, values=[binned[i]], stat=Max(),\n width=width))\n\n\ndef bins(data, values=None, column=None, bins=None, labels=None,\n **kwargs):\n \"\"\"Specify binning or bins to be used for column or values.\"\"\"\n\n if isinstance(data, str):\n column = data\n values = None\n else:\n column = None\n\n return Bins(values=values, column=column, bins=bins, **kwargs)\n\n\nstats = {\n 'sum': Sum,\n 'mean': Mean,\n 'count': Count,\n 'nunique': CountDistinct,\n 'median': Median,\n 'stddev': StdDeviation,\n 'min': Min,\n 'max': Max,\n 'quantile': Quantile\n}\n" ]
[ [ "numpy.sqrt", "pandas.Series", "numpy.array", "pandas.DataFrame", "pandas.cut", "numpy.round", "numpy.linspace" ] ]
floft/squeezeDet
[ "e7c0860eb1d141729cf02a2ec9cafc0cfb4a21aa" ]
[ "src/dataset/kitti.py" ]
[ "# Author: Bichen Wu ([email protected]) 08/25/2016\n\n\"\"\"Image data base class for kitti\"\"\"\n\nimport cv2\nimport os \nimport numpy as np\nimport subprocess\n\nfrom dataset.imdb import imdb\nfrom utils.util import bbox_transform_inv, batch_iou\n\nclass kitti(imdb):\n def __init__(self, image_set, data_path, mc):\n imdb.__init__(self, 'kitti_'+image_set, mc)\n self._image_set = image_set\n self._data_root_path = data_path\n self._image_path = os.path.join(self._data_root_path, 'training', 'image_2')\n self._label_path = os.path.join(self._data_root_path, 'training', 'label_2')\n self._classes = self.mc.CLASS_NAMES\n self._class_to_idx = dict(zip(self.classes, xrange(self.num_classes)))\n\n # a list of string indices of images in the directory\n self._image_idx = self._load_image_set_idx() \n # a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by\n # the image width and height\n self._rois = self._load_kitti_annotation()\n\n ## batch reader ##\n self._perm_idx = None\n self._cur_idx = 0\n # TODO(bichen): add a random seed as parameter\n self._shuffle_image_idx()\n\n self._eval_tool = './src/dataset/kitti-eval/cpp/evaluate_object'\n\n def _load_image_set_idx(self):\n image_set_file = os.path.join(\n self._data_root_path, 'ImageSets', self._image_set+'.txt')\n assert os.path.exists(image_set_file), \\\n 'File does not exist: {}'.format(image_set_file)\n\n with open(image_set_file) as f:\n image_idx = [x.strip() for x in f.readlines()]\n return image_idx\n\n def _image_path_at(self, idx):\n image_path = os.path.join(self._image_path, idx+'.png')\n assert os.path.exists(image_path), \\\n 'Image does not exist: {}'.format(image_path)\n return image_path\n\n def _load_kitti_annotation(self):\n def _get_obj_level(obj):\n height = float(obj[7]) - float(obj[5]) + 1\n truncation = float(obj[1])\n occlusion = float(obj[2])\n if height >= 40 and truncation <= 0.15 and occlusion <= 0:\n return 1\n elif height >= 25 and truncation <= 0.3 and occlusion <= 1:\n return 2\n elif height >= 25 and truncation <= 0.5 and occlusion <= 2:\n return 3\n else:\n return 4\n\n idx2annotation = {}\n for index in self._image_idx:\n filename = os.path.join(self._label_path, index+'.txt')\n with open(filename, 'r') as f:\n lines = f.readlines()\n f.close()\n bboxes = []\n for line in lines:\n obj = line.strip().split(' ')\n try:\n cls = self._class_to_idx[obj[0].lower().strip()]\n except:\n continue\n\n if self.mc.EXCLUDE_HARD_EXAMPLES and _get_obj_level(obj) > 3:\n continue\n xmin = float(obj[4])\n ymin = float(obj[5])\n xmax = float(obj[6])\n ymax = float(obj[7])\n assert xmin >= 0.0 and xmin <= xmax, \\\n 'Invalid bounding box x-coord xmin {} or xmax {} at {}.txt' \\\n .format(xmin, xmax, index)\n assert ymin >= 0.0 and ymin <= ymax, \\\n 'Invalid bounding box y-coord ymin {} or ymax {} at {}.txt' \\\n .format(ymin, ymax, index)\n x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])\n bboxes.append([x, y, w, h, cls])\n\n idx2annotation[index] = bboxes\n\n return idx2annotation\n\n def evaluate_detections(self, eval_dir, global_step, all_boxes):\n \"\"\"Evaluate detection results.\n Args:\n eval_dir: directory to write evaluation logs\n global_step: step of the checkpoint\n all_boxes: all_boxes[cls][image] = N x 5 arrays of \n [xmin, ymin, xmax, ymax, score]\n Returns:\n aps: array of average precisions.\n names: class names corresponding to each ap\n \"\"\"\n det_file_dir = os.path.join(\n eval_dir, 'detection_files_{:s}'.format(global_step), 'data')\n if not 
os.path.isdir(det_file_dir):\n os.makedirs(det_file_dir)\n\n for im_idx, index in enumerate(self._image_idx):\n filename = os.path.join(det_file_dir, index+'.txt')\n with open(filename, 'wt') as f:\n for cls_idx, cls in enumerate(self._classes):\n dets = all_boxes[cls_idx][im_idx]\n for k in xrange(len(dets)):\n f.write(\n '{:s} -1 -1 0.0 {:.2f} {:.2f} {:.2f} {:.2f} 0.0 0.0 0.0 0.0 0.0 '\n '0.0 0.0 {:.3f}\\n'.format(\n cls.lower(), dets[k][0], dets[k][1], dets[k][2], dets[k][3],\n dets[k][4])\n )\n\n cmd = self._eval_tool + ' ' \\\n + os.path.join(self._data_root_path, 'training') + ' ' \\\n + os.path.join(self._data_root_path, 'ImageSets',\n self._image_set+'.txt') + ' ' \\\n + os.path.dirname(det_file_dir) + ' ' + str(len(self._image_idx))\n\n print('Running: {}'.format(cmd))\n status = subprocess.call(cmd, shell=True)\n\n aps = []\n names = []\n for cls in self._classes:\n det_file_name = os.path.join(\n os.path.dirname(det_file_dir), 'stats_{:s}_ap.txt'.format(cls))\n if os.path.exists(det_file_name):\n with open(det_file_name, 'r') as f:\n lines = f.readlines()\n assert len(lines) == 3, \\\n 'Line number of {} should be 3'.format(det_file_name)\n\n aps.append(float(lines[0].split('=')[1].strip()))\n aps.append(float(lines[1].split('=')[1].strip()))\n aps.append(float(lines[2].split('=')[1].strip()))\n else:\n aps.extend([0.0, 0.0, 0.0])\n\n names.append(cls+'_easy')\n names.append(cls+'_medium')\n names.append(cls+'_hard')\n\n return aps, names\n\n def do_detection_analysis_in_eval(self, eval_dir, global_step):\n det_file_dir = os.path.join(\n eval_dir, 'detection_files_{:s}'.format(global_step), 'data')\n det_error_dir = os.path.join(\n eval_dir, 'detection_files_{:s}'.format(global_step),\n 'error_analysis')\n if not os.path.exists(det_error_dir):\n os.makedirs(det_error_dir)\n det_error_file = os.path.join(det_error_dir, 'det_error_file.txt')\n\n stats = self.analyze_detections(det_file_dir, det_error_file)\n ims = self.visualize_detections(\n image_dir=self._image_path,\n image_format='.png',\n det_error_file=det_error_file,\n output_image_dir=det_error_dir,\n num_det_per_type=10\n )\n\n return stats, ims\n\n def analyze_detections(self, detection_file_dir, det_error_file):\n def _save_detection(f, idx, error_type, det, score):\n f.write(\n '{:s} {:s} {:.1f} {:.1f} {:.1f} {:.1f} {:s} {:.3f}\\n'.format(\n idx, error_type,\n det[0]-det[2]/2., det[1]-det[3]/2.,\n det[0]+det[2]/2., det[1]+det[3]/2.,\n self._classes[int(det[4])], \n score\n )\n )\n\n # load detections\n self._det_rois = {}\n for idx in self._image_idx:\n det_file_name = os.path.join(detection_file_dir, idx+'.txt')\n with open(det_file_name) as f:\n lines = f.readlines()\n f.close()\n bboxes = []\n for line in lines:\n obj = line.strip().split(' ')\n cls = self._class_to_idx[obj[0].lower().strip()]\n xmin = float(obj[4])\n ymin = float(obj[5])\n xmax = float(obj[6])\n ymax = float(obj[7])\n score = float(obj[-1])\n\n x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])\n bboxes.append([x, y, w, h, cls, score])\n bboxes.sort(key=lambda x: x[-1], reverse=True)\n self._det_rois[idx] = bboxes\n\n # do error analysis\n num_objs = 0.\n num_dets = 0.\n num_correct = 0.\n num_loc_error = 0.\n num_cls_error = 0.\n num_bg_error = 0.\n num_repeated_error = 0.\n num_detected_obj = 0.\n\n with open(det_error_file, 'w') as f:\n for idx in self._image_idx:\n gt_bboxes = np.array(self._rois[idx])\n num_objs += len(gt_bboxes)\n detected = [False]*len(gt_bboxes)\n\n det_bboxes = self._det_rois[idx]\n if len(gt_bboxes) < 1:\n 
continue\n\n for i, det in enumerate(det_bboxes):\n if i < len(gt_bboxes):\n num_dets += 1\n ious = batch_iou(gt_bboxes[:, :4], det[:4])\n max_iou = np.max(ious)\n gt_idx = np.argmax(ious)\n if max_iou > 0.1:\n if gt_bboxes[gt_idx, 4] == det[4]:\n if max_iou >= 0.5:\n if i < len(gt_bboxes):\n if not detected[gt_idx]:\n num_correct += 1\n detected[gt_idx] = True\n else:\n num_repeated_error += 1\n else:\n if i < len(gt_bboxes):\n num_loc_error += 1\n _save_detection(f, idx, 'loc', det, det[5])\n else:\n if i < len(gt_bboxes):\n num_cls_error += 1\n _save_detection(f, idx, 'cls', det, det[5])\n else:\n if i < len(gt_bboxes):\n num_bg_error += 1\n _save_detection(f, idx, 'bg', det, det[5])\n\n for i, gt in enumerate(gt_bboxes):\n if not detected[i]:\n _save_detection(f, idx, 'missed', gt, -1.0)\n num_detected_obj += sum(detected)\n f.close()\n\n print ('Detection Analysis:')\n print (' Number of detections: {}'.format(num_dets))\n print (' Number of objects: {}'.format(num_objs))\n print (' Percentage of correct detections: {}'.format(\n num_correct/num_dets))\n print (' Percentage of localization error: {}'.format(\n num_loc_error/num_dets))\n print (' Percentage of classification error: {}'.format(\n num_cls_error/num_dets))\n print (' Percentage of background error: {}'.format(\n num_bg_error/num_dets))\n print (' Percentage of repeated detections: {}'.format(\n num_repeated_error/num_dets))\n print (' Recall: {}'.format(\n num_detected_obj/num_objs))\n\n out = {}\n out['num of detections'] = num_dets\n out['num of objects'] = num_objs\n out['% correct detections'] = num_correct/num_dets\n out['% localization error'] = num_loc_error/num_dets\n out['% classification error'] = num_cls_error/num_dets\n out['% background error'] = num_bg_error/num_dets\n out['% repeated error'] = num_repeated_error/num_dets\n out['% recall'] = num_detected_obj/num_objs\n\n return out\n" ]
[ [ "numpy.array", "numpy.max", "numpy.argmax" ] ]
ZhangShiqiu1993/CSCI-567-machine-learning
[ "07144b299aeb9f29c304798549ef2d44fe1f4083" ]
[ "Assignment-3/decision_tree_test.py" ]
[ "import numpy as np\nfrom sklearn.metrics import accuracy_score\nimport json\n\nimport data_loader\nimport decision_tree\n\n# load data\nX_train, X_test, y_train, y_test = data_loader.discrete_2D_iris_dataset()\n\n# set classifier\ndTree = decision_tree.DecisionTree()\n\n# training\ndTree.train(X_train, y_train)\ny_est_train = dTree.predict(X_train)\ntrain_accu = accuracy_score(y_est_train, y_train)\nprint('train_accu', train_accu)\n\n# testing\ny_est_test = dTree.predict(X_test)\ntest_accu = accuracy_score(y_est_test, y_test)\nprint('test_accu', test_accu)\n\n\n\n# print\ndTree.print_tree()\n\n# save\njson.dump({'train_accu': train_accu, 'test_accu': test_accu},\n\t\t\topen('decision_tree.json', 'w'))" ]
[ [ "sklearn.metrics.accuracy_score" ] ]
ashwinb/pytorch-lightning
[ "89787947304a0db3a98a1ddd0e818a91a924e43f" ]
[ "tests/models/test_gpu.py" ]
[ "import os\n\nimport pytest\nimport torch\n\nimport tests.base.utils as tutils\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.core import memory\nfrom pytorch_lightning.trainer.distrib_parts import parse_gpu_ids, determine_root_gpu_device\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.base import EvalModelTemplate\n\nPRETEND_N_OF_GPUS = 16\n\n\[email protected]\[email protected](\"backend\", ['dp', 'ddp', 'ddp2'])\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_model(tmpdir, backend):\n \"\"\"Make sure DDP works.\"\"\"\n tutils.set_random_master_port()\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=[0, 1],\n distributed_backend=backend,\n )\n\n model = EvalModelTemplate()\n # tutils.run_model_test(trainer_options, model)\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n assert result\n\n # test memory helper functions\n memory.get_memory_profile('min_max')\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_ddp_all_dataloaders_passed_to_fit(tmpdir):\n \"\"\"Make sure DDP works with dataloaders passed to fit()\"\"\"\n tutils.set_random_master_port()\n\n trainer_options = dict(default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=[0, 1],\n distributed_backend='ddp')\n\n model = EvalModelTemplate()\n fit_options = dict(train_dataloader=model.train_dataloader(),\n val_dataloaders=model.val_dataloader())\n\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model, **fit_options)\n assert result == 1, \"DDP doesn't work with dataloaders passed to fit().\"\n\n\ndef test_cpu_slurm_save_load(tmpdir):\n \"\"\"Verify model save/load/checkpoint on CPU.\"\"\"\n hparams = EvalModelTemplate.get_default_hparams()\n model = EvalModelTemplate(hparams)\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n version = logger.version\n\n # fit model\n trainer = Trainer(\n max_epochs=1,\n logger=logger,\n checkpoint_callback=ModelCheckpoint(tmpdir)\n )\n result = trainer.fit(model)\n real_global_step = trainer.global_step\n\n # traning complete\n assert result == 1, 'cpu model failed to complete'\n\n # predict with trained model before saving\n # make a prediction\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n for batch in dataloader:\n break\n\n x, y = batch\n x = x.view(x.size(0), -1)\n\n model.eval()\n pred_before_saving = model(x)\n\n # test HPC saving\n # simulate snapshot on slurm\n saved_filepath = trainer.hpc_save(tmpdir, logger)\n assert os.path.exists(saved_filepath)\n\n # new logger file to get meta\n logger = tutils.get_default_logger(tmpdir, version=version)\n\n trainer = Trainer(\n max_epochs=1,\n logger=logger,\n checkpoint_callback=ModelCheckpoint(tmpdir),\n )\n model = EvalModelTemplate(hparams)\n\n # set the epoch start hook so we can predict before the model does the full training\n def assert_pred_same():\n assert trainer.global_step == real_global_step and trainer.global_step > 0\n\n # predict with loaded model to make sure answers are the same\n trainer.model.eval()\n new_pred = trainer.model(x)\n assert torch.all(torch.eq(pred_before_saving, 
new_pred)).item() == 1\n\n model.on_epoch_start = assert_pred_same\n\n # by calling fit again, we trigger training, loading weights from the cluster\n # and our hook to predict using current model before any more weight updates\n trainer.fit(model)\n\n\[email protected]\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_none_backend(tmpdir):\n \"\"\"Make sure when using multiple GPUs the user can't use `distributed_backend = None`.\"\"\"\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n train_percent_check=0.1,\n val_percent_check=0.1,\n gpus='-1'\n )\n\n model = EvalModelTemplate()\n with pytest.warns(UserWarning):\n tutils.run_model_test(trainer_options, model)\n\n\[email protected]\ndef mocked_device_count(monkeypatch):\n def device_count():\n return PRETEND_N_OF_GPUS\n\n monkeypatch.setattr(torch.cuda, 'device_count', device_count)\n\n\[email protected]\ndef mocked_device_count_0(monkeypatch):\n def device_count():\n return 0\n\n monkeypatch.setattr(torch.cuda, 'device_count', device_count)\n\n\[email protected]_param_tests\[email protected]([\"gpus\", \"expected_num_gpus\", \"distributed_backend\"], [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(0, 0, None, id=\"Oth gpu, expect 1 gpu to use.\"),\n pytest.param(1, 1, None, id=\"1st gpu, expect 1 gpu to use.\"),\n pytest.param(-1, PRETEND_N_OF_GPUS, \"ddp\", id=\"-1 - use all gpus\"),\n pytest.param('-1', PRETEND_N_OF_GPUS, \"ddp\", id=\"'-1' - use all gpus\"),\n pytest.param(3, 3, \"ddp\", id=\"3rd gpu - 1 gpu to use (backend:ddp)\")\n])\ndef test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus\n\n\[email protected]_param_tests\[email protected]([\"gpus\", \"expected_num_gpus\", \"distributed_backend\"], [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(None, 0, \"ddp\", id=\"None - expect 0 gpu to use.\"),\n])\ndef test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"O gpus, expect gpu root device to be None.\"),\n pytest.param(1, 0, \"ddp\", id=\"1 gpu, expect gpu root device to be 0.\"),\n pytest.param(-1, 0, \"ddp\", id=\"-1 - use all gpus, expect gpu root device to be 0.\"),\n pytest.param('-1', 0, \"ddp\", id=\"'-1' - use all gpus, expect gpu root device to be 0.\"),\n pytest.param(3, 0, \"ddp\", id=\"3 gpus, expect gpu root device to be 0.(backend:ddp)\")\n])\ndef test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(None, None, None, id=\"None is None\"),\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"None is None\"),\n])\ndef test_root_gpu_property_0_passing(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):\n assert Trainer(gpus=gpus, 
distributed_backend=distributed_backend).root_gpu == expected_root_gpu\n\n\n# Asking for a gpu when non are available will result in a MisconfigurationException\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(1, None, \"ddp\"),\n pytest.param(3, None, \"ddp\"),\n pytest.param(3, None, \"ddp\"),\n pytest.param([1, 2], None, \"ddp\"),\n pytest.param([0, 1], None, \"ddp\"),\n pytest.param(-1, None, \"ddp\"),\n pytest.param('-1', None, \"ddp\")\n])\ndef test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):\n with pytest.raises(MisconfigurationException):\n Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu'], [\n pytest.param(None, None, id=\"No gpus, expect gpu root device to be None\"),\n pytest.param([0], 0, id=\"Oth gpu, expect gpu root device to be 0.\"),\n pytest.param([1], 1, id=\"1st gpu, expect gpu root device to be 1.\"),\n pytest.param([3], 3, id=\"3rd gpu, expect gpu root device to be 3.\"),\n pytest.param([1, 2], 1, id=\"[1, 2] gpus, expect gpu root device to be 1.\"),\n])\ndef test_determine_root_gpu_device(gpus, expected_root_gpu):\n assert determine_root_gpu_device(gpus) == expected_root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_gpu_ids'], [\n pytest.param(None, None),\n pytest.param(0, None),\n pytest.param(1, [0]),\n pytest.param(3, [0, 1, 2]),\n pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id=\"-1 - use all gpus\"),\n pytest.param([0], [0]),\n pytest.param([1, 3], [1, 3]),\n pytest.param('0', [0]),\n pytest.param('3', [3]),\n pytest.param('1, 3', [1, 3]),\n pytest.param('2,', [2]),\n pytest.param('-1', list(range(PRETEND_N_OF_GPUS)), id=\"'-1' - use all gpus\"),\n])\ndef test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):\n assert parse_gpu_ids(gpus) == expected_gpu_ids\n\n\[email protected]_param_tests\[email protected](['gpus'], [\n pytest.param(0.1),\n pytest.param(-2),\n pytest.param(False),\n pytest.param([]),\n pytest.param([-1]),\n pytest.param([None]),\n pytest.param(['0']),\n pytest.param((0, 1)),\n])\ndef test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\[email protected](\"gpus\", [[1, 2, 19], -1, '-1'])\ndef test_parse_gpu_fail_on_non_existent_id(mocked_device_count_0, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\ndef test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids([1, 2, 19])\n\n\[email protected]_param_tests\[email protected](\"gpus\", [-1, '-1'])\ndef test_parse_gpu_returns_None_when_no_devices_are_available(mocked_device_count_0, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n" ]
[ [ "torch.eq", "torch.cuda.device_count" ] ]
VITA-Group/Peek-a-Boo
[ "9290d4e5e3aee0dff994e1a664ec91bd6ec93176" ]
[ "main_imagenet.py" ]
[ "import argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n# import torchvision.models as models\n\nimport logging\nfrom logger import set_logging_config\nimport models\nfrom bop import Bop\n\nfrom models.seed_conv import SeedConv2d\nfrom models.masked_psg_seed_conv import PredictiveSeedConv2d\nimport pruners\nfrom generator import masked_parameters\nfrom prune import prune_loop\n\n\nprint = print\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--optimizer', default='SGD', type=str,\n help='choose among [`SGD`, `BOP`, `Counter`]')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--savedir', default='results', type=str,\n help='root dir to save exp checkpoints and logs')\nparser.add_argument('--exp-name', default='SeedNet', type=str,\n help='path to location to save logs and checkpoints')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=100, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for 
initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n# SeedNet options\nparser.add_argument('--sign-grouped-dim', default=\"\", type=str,\n help='dimensions that will be grouped for sign parameters')\nparser.add_argument('--init-method', default='standard', type=str,\n help='initialization method for conv weights')\nparser.add_argument('--hidden-act', type=str, default='standard',\n help='choose among [`pruning`, `flipping`, `ternery`, `none`]')\nparser.add_argument('--scaling-input', action='store_true',\n help='whether scale the input in SeedNet models')\n# BOP options\nparser.add_argument('--ar', type=float,\n help='list of layer-wise inital adaptivity rates in BOP')\nparser.add_argument('--tau', type=float,\n help='list of layer-wise thresholds in BOP')\nparser.add_argument('--ar-decay-freq', type=int, default=100,\n help='freqency to decay the ar hyperparameter in BOP')\nparser.add_argument('--ar-decay-ratio', type=float, default=0.1,\n help='decay ratio when decay ar')\n# PSG options\nparser.add_argument('--psg-no-backward', action='store_true',\n help='Do predictive gradient calculation in backward')\nparser.add_argument('--msb-bits', type=int, default=4,\n help='MSB bits for the input')\nparser.add_argument('--msb-bits-weight', type=int, default=4,\n help='MSB bits for the weight')\nparser.add_argument('--msb-bits-grad', type=int, default=8,\n help='MSB bits for the grad')\nparser.add_argument('--psg-threshold', type=float, default=0.0,\n help='Threshold used in PSG')\nparser.add_argument('--psg-sparsify', action='store_true',\n help='Sparsify by ignoring small gradients')\nparser.add_argument('--psg-no-take-sign', action='store_true',\n help='Do not take sign for PSG')\n# Pruning options\nparser.add_argument('--pruner', type=str, default=None, choices=['Mag', 'SNIP', 'GraSP', 'SynFlow'],\n help='pruning strategy')\nparser.add_argument('--prune-epoch', type=int, default=0,\n help='epoch number to finish sparsifying by')\nparser.add_argument('--prune-ratio', type=float, default=1.0,\n help='fraction of non-zero parameters after pruning')\nparser.add_argument('--prune-iters', type=int, default=1,\n help='number of iterations for scoring (should be 1 for Mag, SNIP, and GraSP)')\nparser.add_argument('--prune-batch-size', type=int, default=256,\n help='size of sample mini-batch for pruning methods')\nparser.add_argument('--prune-schedule', type=str, default='exponential', choices=['linear', 'exponential'],\n help='scheduling method for iterative pruning (SynFlow)')\nparser.add_argument('--prune-scope', type=str, default='global', choices=['global', 'local'],\n help='masking scope')\nparser.add_argument('--prune-shots', type=int, default=1,\n help='number of shots for pruning')\n\nbest_acc1 = 0\n\n\ndef main():\n args = parser.parse_args()\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! 
'\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n args.gpu = gpu\n\n args.savedir = os.path.join(args.savedir, args.exp_name)\n if not os.path.isdir(args.savedir):\n os.makedirs(args.savedir)\n args.logger = set_logging_config(args.savedir)\n\n if args.gpu is not None:\n args.logger.info(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n # create model\n if args.pretrained:\n args.logger.info(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True)\n else:\n args.logger.info(\"=> creating model '{}'\".format(args.arch))\n if args.arch.startswith('seed_resnet'):\n pass\n if args.arch.startswith('psg'):\n model = models.__dict__[args.arch](\n init_method=args.init_method,\n predictive_backward = not args.psg_no_backward,\n msb_bits = args.msb_bits,\n msb_bits_weight = args.msb_bits_weight,\n msb_bits_grad = args.msb_bits_grad,\n threshold = args.psg_threshold,\n sparsify = args.psg_sparsify,\n sign = not args.psg_no_take_sign\n )\n temp_arch = args.arch[9:] if 'seed' in args.arch else args.arch[4:]\n model_for_pruning = models.__dict__[temp_arch](init_method=args.init_method)\n else:\n model = models.__dict__[args.arch](init_method=args.init_method)\n model_for_pruning = None\n\n if not torch.cuda.is_available():\n print('using CPU, this will be slow')\n elif args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n if model_for_pruning is not None:\n model_for_pruning.cuda(args.gpu)\n model_for_pruning = 
torch.nn.parallel.DistributedDataParallel(model_for_pruning, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n model_without_ddp = model.module\n if model_for_pruning is not None:\n model_for_pruning.cuda()\n model_for_pruning = torch.nn.parallel.DistributedDataParallel(model_for_pruning)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n\n if args.optimizer == 'SGD':\n parameters = [p for p in model_without_ddp.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(parameters, args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n bop_optimizer = None\n elif args.optimizer == 'BOP':\n bop_params, non_bop_params = model_without_ddp.get_bop_params(), model_without_ddp.get_non_bop_params()\n bop_param_masks = model_without_ddp.get_bop_param_masks()\n bop_dict = [{'params': bop_params, 'adaptivity_rate': args.ar, 'threshold': args.tau}]\n # optimizer = optim.SGD(non_bop_params, lr=args.lr, momentum=0.9, weight_decay=5e-4)\n optimizer = torch.optim.SGD(non_bop_params, args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n # bop_optimizer = Bop(bop_params, None, ar=args.ar, threshold=args.tau)\n bop_optimizer = Bop(bop_params, None, bop_param_masks, ar=args.ar, threshold=args.tau, device=args.gpu)\n # schedulers = (optim.lr_scheduler.MultiStepLR(non_bop_optimizer, milestones=[80, 120], gamma=0.1),)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n args.logger.info(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model_without_ddp.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n args.logger.info(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n args.logger.info(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n 
num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n sample_batch_indices = torch.randperm(len(train_dataset))[:100]\n sample_batch = torch.utils.data.Subset(train_dataset, sample_batch_indices)\n pruneloader = torch.utils.data.DataLoader(sample_batch, args.prune_batch_size, shuffle=True, num_workers=4)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion, args)\n return\n\n # Create pruner\n num_classes = 1000\n # if args.pruner:\n # pruner = pruners.__dict__[args.pruner](masked_parameters(model, False, False, False), num_classes)\n\n seed_convs = list(filter(lambda m: isinstance(m, (SeedConv2d, PredictiveSeedConv2d,)), model.modules()))\n cur_shot = 0\n prune_interval = int(args.prune_epoch / args.prune_shots)\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n if args.optimizer == 'BOP' and (epoch + 1) % args.ar_decay_freq == 0:\n bop_optimizer.decay_ar(args.ar_decay_ratio)\n\n # Enable gradients for pruning in SeedNet\n for seed_conv in seed_convs:\n seed_conv.enable_weight_grad()\n if args.pruner and epoch == (cur_shot + 1) * prune_interval and cur_shot < args.prune_shots:\n target_sparsity = 1 - (1 - args.prune_ratio) * (cur_shot + 1) / args.prune_shots\n if args.arch.lower().startswith('psg'):\n model_for_pruning.load_state_dict(model.state_dict(), strict=False)\n # pruner = pruners.__dict__[args.pruner](masked_parameters(model_for_pruning, False, False, False), num_classes)\n # prune_loop(model_for_pruning, criterion, pruner, pruneloader, num_classes, args.gpu, target_sparsity,\n # args.prune_schedule, args.prune_scope, args.prune_iters)\n prune_loop(model_for_pruning, criterion, args.pruner,\n pruneloader, num_classes, args.gpu, target_sparsity,\n args.prune_schedule, args.prune_scope, args.prune_iters,\n prune_bias=False, prune_batchnorm=False, prune_residual=False,\n weight_flips=None, score_threshold=None)\n model.load_state_dict(model_for_pruning.state_dict(), strict=False)\n else:\n # prune_loop(model, criterion, pruner, pruneloader, num_classes, args.gpu, target_sparsity,\n # args.prune_schedule, args.prune_scope, args.prune_iters)\n prune_loop(model, criterion, args.pruner,\n pruneloader, num_classes, args.gpu, target_sparsity,\n args.prune_schedule, args.prune_scope, args.prune_iters,\n prune_bias=False, prune_batchnorm=False, prune_residual=False,\n weight_flips=None, score_threshold=None)\n # Really copy the mask to the model\n # with torch.no_grad():\n # pruned_masks = [m for m, _ in pruner.masked_parameters]\n # model_masks = [m for m, _ in masked_parameters(model, False, False, False)]\n # for model_mask, pruned_mask in zip(model_masks, pruned_masks):\n # model_mask.copy_(pruned_mask.data.detach().clone())\n # Disable gradients when resuming training for SeedNet\n for seed_conv in seed_convs:\n seed_conv.disable_weight_grad()\n cur_shot += 1\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args, bop_optimizer=bop_optimizer)\n\n # evaluate on validation set\n acc1, acc5 = validate(val_loader, model, criterion, args)\n if args.gpu == 0:\n args.logger.info('epoch {} \\t Top-1 acc {} \\t Top-5 acc {}'.format(epoch + 1, acc1, 
acc5))\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n args.logger.info(f'Max accuracy: {best_acc1}')\n best_acc1_acc5 = acc5\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model_without_ddp.state_dict(),\n 'best_acc1': best_acc1,\n 'acc5': best_acc1_acc5,\n 'optimizer' : optimizer.state_dict(),\n }, is_best)\n\n args.logger.info('best Top-1 acc {} \\t corresponding Top-5 acc {}'.format(best_acc1, best_acc1_acc5))\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args, bop_optimizer=None):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n if bop_optimizer is not None:\n bop_optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if bop_optimizer is not None:\n bop_optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if args.gpu == 0 and i % args.print_freq == 0:\n progress.display(i)\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if args.gpu == 0 and i % args.print_freq == 0:\n progress.display(i)\n\n # TODO: this should also be done with the ProgressMeter\n top1.synchronize()\n top5.synchronize()\n # args.logger.info(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n # .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n 
shutil.copyfile(filename, 'model_best.pth.tar')\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def synchronize(self):\n \"\"\"\n Warning: does not synchronize `val`\n \"\"\"\n t = torch.tensor([self.sum, self.count], dtype=torch.float64, device='cuda')\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.sum = float(t[0])\n self.count = int(t[1])\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n\n" ]
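Illustrative aside (a minimal sketch, not part of the dataset record above): the training script's adjust_learning_rate() steps the LR down by 10x every 30 epochs, and accuracy() counts how often the target appears among the k highest-scoring predictions. A standalone Python sketch of those two helpers, using hypothetical batch values, is:

import torch

def step_decay_lr(base_lr, epoch):
    # LR decayed by 10x every 30 epochs, as in adjust_learning_rate() above
    return base_lr * (0.1 ** (epoch // 30))

def topk_accuracy(output, target, topk=(1, 5)):
    # Same top-k scheme as accuracy() above: fraction of targets found among
    # the k highest-scoring predictions, expressed as a percentage.
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [correct[:k].reshape(-1).float().sum(0) * (100.0 / batch_size) for k in topk]

if __name__ == "__main__":
    print([step_decay_lr(0.1, e) for e in (0, 29, 30, 60)])  # ~[0.1, 0.1, 0.01, 0.001]
    logits = torch.randn(8, 10)              # hypothetical batch: 8 samples, 10 classes
    labels = torch.randint(0, 10, (8,))
    print(topk_accuracy(logits, labels))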
[ [ "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.is_available", "torch.distributed.init_process_group", "torch.save", "torch.cuda.device_count", "torch.nn.DataParallel", "torch.cuda.set_device", "torch.optim.SGD", "torch.multiprocessing.spawn", "torch.load", "torch.utils.data.Subset", "torch.manual_seed", "torch.tensor", "torch.distributed.barrier", "torch.nn.parallel.DistributedDataParallel", "torch.distributed.all_reduce", "torch.utils.data.distributed.DistributedSampler", "torch.nn.CrossEntropyLoss" ] ]
annagitel/ocs-ci
[ "284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5" ]
[ "ocs_ci/utility/utils.py" ]
[ "from functools import reduce\nimport io\nimport json\nimport logging\nimport os\nimport platform\nimport random\nimport re\nimport shlex\nimport smtplib\nimport string\nimport subprocess\nimport time\nimport traceback\nimport stat\nfrom copy import deepcopy\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom scipy.stats import tmean, scoreatpercentile\nfrom shutil import which, move, rmtree\n\nimport hcl2\nimport requests\nimport yaml\nimport git\nfrom bs4 import BeautifulSoup\nfrom paramiko import SSHClient, AutoAddPolicy\nfrom paramiko.auth_handler import AuthenticationException, SSHException\nfrom semantic_version import Version\nfrom tempfile import NamedTemporaryFile, mkdtemp\n\nfrom ocs_ci.framework import config\nfrom ocs_ci.ocs import constants, defaults\nfrom ocs_ci.ocs.exceptions import (\n CephHealthException,\n ClientDownloadError,\n CommandFailed,\n TagNotFoundException,\n TimeoutException,\n TimeoutExpiredError,\n UnavailableBuildException,\n UnexpectedImage,\n UnsupportedOSType,\n)\nfrom ocs_ci.utility import version as version_module\nfrom ocs_ci.utility.flexy import load_cluster_info\nfrom ocs_ci.utility.retry import retry\n\n\nlog = logging.getLogger(__name__)\n\n# variables\nmounting_dir = \"/mnt/cephfs/\"\nclients = []\nmd5sum_list1 = []\nmd5sum_list2 = []\nfuse_clients = []\nkernel_clients = []\nmon_node = \"\"\nmon_node_ip = \"\"\nmds_nodes = []\nmd5sum_file_lock = []\nactive_mdss = []\nRC = []\nfailure = {}\noutput = []\nunique_test_names = []\n\n\n# function for getting the clients\ndef get_client_info(ceph_nodes, clients):\n log.info(\"Getting Clients\")\n for node in ceph_nodes:\n if node.role == \"client\":\n clients.append(node)\n # Identifying MON node\n for node in ceph_nodes:\n if node.role == \"mon\":\n mon_node = node\n out, err = mon_node.exec_command(cmd=\"sudo hostname -I\")\n mon_node_ip = out.read().decode().rstrip(\"\\n\")\n break\n for node in ceph_nodes:\n if node.role == \"mds\":\n mds_nodes.append(node)\n for node in clients:\n node.exec_command(cmd=\"sudo yum install -y attr\")\n\n fuse_clients = clients[0:2] # seperating clients for fuse and kernel\n kernel_clients = clients[2:4]\n return (\n fuse_clients,\n kernel_clients,\n mon_node,\n mounting_dir,\n mds_nodes,\n md5sum_file_lock,\n mon_node_ip,\n )\n\n\n# function for providing authorization to the clients from MON ndoe\ndef auth_list(clients, mon_node):\n for node in clients:\n log.info(\"Giving required permissions for clients from MON node:\")\n mon_node.exec_command(\n cmd=\"sudo ceph auth get-or-create client.%s mon 'allow *' mds 'allow *, allow rw path=/' \"\n \"osd 'allow rw pool=cephfs_data' -o /etc/ceph/ceph.client.%s.keyring\"\n % (node.hostname, node.hostname)\n )\n out, err = mon_node.exec_command(\n sudo=True, cmd=\"cat /etc/ceph/ceph.client.%s.keyring\" % (node.hostname)\n )\n keyring = out.read().decode()\n key_file = node.write_file(\n sudo=True,\n file_name=\"/etc/ceph/ceph.client.%s.keyring\" % (node.hostname),\n file_mode=\"w\",\n )\n key_file.write(keyring)\n\n key_file.flush()\n\n node.exec_command(\n cmd=\"sudo chmod 644 /etc/ceph/ceph.client.%s.keyring\" % (node.hostname)\n )\n # creating mounting directory\n node.exec_command(cmd=\"sudo mkdir %s\" % (mounting_dir))\n\n\n# MOunting single FS with ceph-fuse\ndef fuse_mount(fuse_clients, mounting_dir):\n try:\n for client in fuse_clients:\n log.info(\"Creating mounting dir:\")\n log.info(\"Mounting fs with ceph-fuse on client %s:\" % (client.hostname))\n client.exec_command(\n 
cmd=\"sudo ceph-fuse -n client.%s %s\" % (client.hostname, mounting_dir)\n )\n out, err = client.exec_command(cmd=\"mount\")\n mount_output = out.read().decode()\n mount_output.split()\n log.info(\"Checking if fuse mount is is passed of failed:\")\n if \"fuse\" in mount_output:\n log.info(\"ceph-fuse mounting passed\")\n else:\n log.error(\"ceph-fuse mounting failed\")\n return md5sum_list1\n except Exception as e:\n log.error(e)\n\n\ndef kernel_mount(mounting_dir, mon_node_ip, kernel_clients):\n try:\n for client in kernel_clients:\n out, err = client.exec_command(\n cmd=\"sudo ceph auth get-key client.%s\" % (client.hostname)\n )\n secret_key = out.read().decode().rstrip(\"\\n\")\n mon_node_ip = mon_node_ip.replace(\" \", \"\")\n client.exec_command(\n cmd=\"sudo mount -t ceph %s:6789:/ %s -o name=%s,secret=%s\"\n % (mon_node_ip, mounting_dir, client.hostname, secret_key)\n )\n out, err = client.exec_command(cmd=\"mount\")\n mount_output = out.read().decode()\n mount_output.split()\n log.info(\"Checking if kernel mount is is passed of failed:\")\n if \"%s:6789:/\" % (mon_node_ip) in mount_output:\n log.info(\"kernel mount passed\")\n else:\n log.error(\"kernel mount failed\")\n return md5sum_list2\n except Exception as e:\n log.error(e)\n\n\ndef fuse_client_io(client, mounting_dir):\n try:\n rand_count = random.randint(1, 5)\n rand_bs = random.randint(100, 300)\n log.info(\"Performing IOs on fuse-clients\")\n client.exec_command(\n cmd=\"sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d\"\n % (mounting_dir, client.hostname, rand_bs, rand_count),\n long_running=True,\n )\n except Exception as e:\n log.error(e)\n\n\ndef kernel_client_io(client, mounting_dir):\n try:\n rand_count = random.randint(1, 6)\n rand_bs = random.randint(100, 500)\n log.info(\"Performing IOs on kernel-clients\")\n client.exec_command(\n cmd=\"sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d\"\n % (mounting_dir, client.hostname, rand_bs, rand_count),\n long_running=True,\n )\n except Exception as e:\n log.error(e)\n\n\ndef fuse_client_md5(fuse_clients, md5sum_list1):\n try:\n log.info(\"Calculating MD5 sums of files in fuse-clients:\")\n for client in fuse_clients:\n md5sum_list1.append(\n client.exec_command(\n cmd=\"sudo md5sum %s* | awk '{print $1}' \" % (mounting_dir),\n long_running=True,\n )\n )\n\n except Exception as e:\n log.error(e)\n\n\ndef kernel_client_md5(kernel_clients, md5sum_list2):\n try:\n log.info(\"Calculating MD5 sums of files in kernel-clients:\")\n for client in kernel_clients:\n md5sum_list2.append(\n client.exec_command(\n cmd=\"sudo md5sum %s* | awk '{print $1}' \" % (mounting_dir),\n long_running=True,\n )\n )\n except Exception as e:\n log.error(e)\n\n\n# checking file locking mechanism\ndef file_locking(client):\n try:\n to_lock_file = \"\"\"\nimport fcntl\nimport subprocess\nimport time\ntry:\n f = open('/mnt/cephfs/to_test_file_lock', 'w+')\n fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n print \"locking file:--------------------------------\"\n subprocess.check_output([\"sudo\",\"dd\",\"if=/dev/zero\",\"of=/mnt/cephfs/to_test_file_lock\",\"bs=1M\",\"count=2\"])\nexcept IOError as e:\n print e\nfinally:\n print \"Unlocking file:------------------------------\"\n fcntl.lockf(f,fcntl.LOCK_UN)\n \"\"\"\n to_lock_code = client.write_file(\n sudo=True, file_name=\"/home/cephuser/file_lock.py\", file_mode=\"w\"\n )\n to_lock_code.write(to_lock_file)\n to_lock_code.flush()\n out, err = client.exec_command(cmd=\"sudo python /home/cephuser/file_lock.py\")\n output = 
out.read().decode()\n output.split()\n if \"Errno 11\" in output:\n log.info(\"File locking achieved, data is not corrupted\")\n elif \"locking\" in output:\n log.info(\"File locking achieved, data is not corrupted\")\n else:\n log.error(\"Data is corrupted\")\n\n out, err = client.exec_command(\n cmd=\"sudo md5sum %sto_test_file_lock | awk '{print $1}'\" % (mounting_dir)\n )\n\n md5sum_file_lock.append(out.read().decode())\n\n except Exception as e:\n log.error(e)\n\n\ndef activate_multiple_mdss(mds_nodes):\n try:\n log.info(\"Activating Multiple MDSs\")\n for node in mds_nodes:\n out1, err = node.exec_command(\n cmd=\"sudo ceph fs set cephfs allow_multimds true --yes-i-really-mean-it\"\n )\n out2, err = node.exec_command(cmd=\"sudo ceph fs set cephfs max_mds 2\")\n break\n\n except Exception as e:\n log.error(e)\n\n\ndef mkdir_pinning(clients, range1, range2, dir_name, pin_val):\n try:\n log.info(\"Creating Directories and Pinning to MDS %s\" % (pin_val))\n for client in clients:\n for num in range(range1, range2):\n out, err = client.exec_command(\n cmd=\"sudo mkdir %s%s_%d\" % (mounting_dir, dir_name, num)\n )\n if pin_val != \"\":\n client.exec_command(\n cmd=\"sudo setfattr -n ceph.dir.pin -v %s %s%s_%d\"\n % (pin_val, mounting_dir, dir_name, num)\n )\n else:\n print(\"Pin val not given\")\n print(out.read().decode())\n print(time.time())\n break\n except Exception as e:\n log.error(e)\n\n\ndef allow_dir_fragmentation(mds_nodes):\n try:\n log.info(\"Allowing directorty fragmenation for splitting\")\n for node in mds_nodes:\n node.exec_command(cmd=\"sudo ceph fs set cephfs allow_dirfrags 1\")\n break\n except Exception as e:\n log.error(e)\n\n\ndef mds_fail_over(mds_nodes):\n try:\n rand = random.randint(0, 1)\n for node in mds_nodes:\n log.info(\"Failing MDS %d\" % (rand))\n node.exec_command(cmd=\"sudo ceph mds fail %d\" % (rand))\n break\n\n except Exception as e:\n log.error(e)\n\n\ndef pinned_dir_io(clients, mds_fail_over, num_of_files, range1, range2):\n try:\n log.info(\"Performing IOs and MDSfailovers on clients\")\n for client in clients:\n client.exec_command(cmd=\"sudo pip install crefi\")\n for num in range(range1, range2):\n if mds_fail_over != \"\":\n mds_fail_over(mds_nodes)\n out, err = client.exec_command(\n cmd=\"sudo crefi -n %d %sdir_%d\" % (num_of_files, mounting_dir, num)\n )\n rc = out.channel.recv_exit_status()\n print(out.read().decode())\n RC.append(rc)\n print(time.time())\n if rc == 0:\n log.info(\"Client IO is going on,success\")\n else:\n log.error(\"Client IO got interrupted\")\n failure.update({client: out})\n break\n break\n\n except Exception as e:\n log.error(e)\n\n\ndef custom_ceph_config(suite_config, custom_config, custom_config_file):\n \"\"\"\n Combines and returns custom configuration overrides for ceph.\n Hierarchy is as follows::\n\n custom_config > custom_config_file > suite_config\n\n Args:\n suite_config: ceph_conf_overrides that currently exist in the test suite\n custom_config: custom config args provided by the cli (these all go to the global scope)\n custom_config_file: path to custom config yaml file provided by the cli\n\n Returns\n New value to be used for ceph_conf_overrides in test config\n \"\"\"\n log.debug(\"Suite config: {}\".format(suite_config))\n log.debug(\"Custom config: {}\".format(custom_config))\n log.debug(\"Custom config file: {}\".format(custom_config_file))\n\n full_custom_config = suite_config or {}\n cli_config_dict = {}\n custom_config_dict = {}\n\n # retrieve custom config from file\n if custom_config_file:\n 
with open(custom_config_file) as f:\n custom_config_dict = yaml.safe_load(f)\n log.info(\"File contents: {}\".format(custom_config_dict))\n\n # format cli configs into dict\n if custom_config:\n cli_config_dict = dict(item.split(\"=\") for item in custom_config)\n\n # combine file and cli configs\n if cli_config_dict:\n if not custom_config_dict.get(\"global\"):\n custom_config_dict[\"global\"] = {}\n for key, value in cli_config_dict.items():\n custom_config_dict[\"global\"][key] = value\n\n # combine file and suite configs\n for key, value in custom_config_dict.items():\n subsection = {}\n if full_custom_config.get(key):\n subsection.update(full_custom_config[key])\n subsection.update(value)\n full_custom_config[key] = subsection\n\n log.info(\"Full custom config: {}\".format(full_custom_config))\n return full_custom_config\n\n\ndef mask_secrets(plaintext, secrets):\n \"\"\"\n Replace secrets in plaintext with asterisks\n\n Args:\n plaintext (str or list): The plaintext to remove the secrets from or\n list of strings to remove secrets from\n secrets (list): List of secret strings to replace in the plaintext\n\n Returns:\n str: The censored version of plaintext\n\n \"\"\"\n if secrets:\n for secret in secrets:\n if isinstance(plaintext, list):\n plaintext = [string.replace(secret, \"*\" * 5) for string in plaintext]\n else:\n plaintext = plaintext.replace(secret, \"*\" * 5)\n return plaintext\n\n\ndef run_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):\n \"\"\"\n *The deprecated form of exec_cmd.*\n Run an arbitrary command locally\n\n Args:\n cmd (str): command to run\n secrets (list): A list of secrets to be masked with asterisks\n This kwarg is popped in order to not interfere with\n subprocess.run(``**kwargs``)\n timeout (int): Timeout for the command, defaults to 600 seconds.\n ignore_error (bool): True if ignore non zero return code and do not\n raise the exception.\n\n Raises:\n CommandFailed: In case the command execution fails\n\n Returns:\n (str) Decoded stdout of command\n \"\"\"\n completed_process = exec_cmd(cmd, secrets, timeout, ignore_error, **kwargs)\n return mask_secrets(completed_process.stdout.decode(), secrets)\n\n\ndef exec_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):\n \"\"\"\n Run an arbitrary command locally\n\n Args:\n cmd (str): command to run\n secrets (list): A list of secrets to be masked with asterisks\n This kwarg is popped in order to not interfere with\n subprocess.run(``**kwargs``)\n timeout (int): Timeout for the command, defaults to 600 seconds.\n ignore_error (bool): True if ignore non zero return code and do not\n raise the exception.\n\n Raises:\n CommandFailed: In case the command execution fails\n\n Returns:\n (CompletedProcess) A CompletedProcess object of the command that was executed\n CompletedProcess attributes:\n args: The list or str args passed to run().\n returncode (str): The exit code of the process, negative for signals.\n stdout (str): The standard output (None if not captured).\n stderr (str): The standard error (None if not captured).\n\n \"\"\"\n masked_cmd = mask_secrets(cmd, secrets)\n log.info(f\"Executing command: {masked_cmd}\")\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n completed_process = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n timeout=timeout,\n **kwargs,\n )\n masked_stdout = mask_secrets(completed_process.stdout.decode(), secrets)\n if len(completed_process.stdout) > 0:\n log.debug(f\"Command stdout: 
{masked_stdout}\")\n else:\n log.debug(\"Command stdout is empty\")\n\n masked_stderr = mask_secrets(completed_process.stderr.decode(), secrets)\n if len(completed_process.stderr) > 0:\n log.warning(f\"Command stderr: {masked_stderr}\")\n else:\n log.debug(\"Command stderr is empty\")\n log.debug(f\"Command return code: {completed_process.returncode}\")\n if completed_process.returncode and not ignore_error:\n raise CommandFailed(\n f\"Error during execution of command: {masked_cmd}.\"\n f\"\\nError is {masked_stderr}\"\n )\n return completed_process\n\n\ndef download_file(url, filename, **kwargs):\n \"\"\"\n Download a file from a specified url\n\n Args:\n url (str): URL of the file to download\n filename (str): Name of the file to write the download to\n kwargs (dict): additional keyword arguments passed to requests.get(...)\n\n \"\"\"\n log.debug(f\"Download '{url}' to '{filename}'.\")\n with open(filename, \"wb\") as f:\n r = requests.get(url, **kwargs)\n assert r.ok, f\"The URL {url} is not available! Status: {r.status_code}.\"\n f.write(r.content)\n\n\ndef get_url_content(url, **kwargs):\n \"\"\"\n Return URL content\n\n Args:\n url (str): URL address to return\n kwargs (dict): additional keyword arguments passed to requests.get(...)\n Returns:\n str: Content of URL\n\n Raises:\n AssertionError: When couldn't load URL\n\n \"\"\"\n log.debug(f\"Download '{url}' content.\")\n r = requests.get(url, **kwargs)\n assert r.ok, f\"Couldn't load URL: {url} content! Status: {r.status_code}.\"\n return r.content\n\n\ndef expose_ocp_version(version):\n \"\"\"\n This helper function exposes latest nightly version or GA version of OCP.\n When the version string ends with .nightly (e.g. 4.2.0-0.nightly) it will\n expose the version to latest accepted OCP build\n (e.g. 4.2.0-0.nightly-2019-08-08-103722)\n If the version ends with -ga than it will find the latest GA OCP version\n and will expose 4.2-ga to for example 4.2.22.\n\n Args:\n version (str): Verison of OCP\n\n Returns:\n str: Version of OCP exposed to full version if latest nighly passed\n\n \"\"\"\n if version.endswith(\".nightly\"):\n latest_nightly_url = (\n f\"https://amd64.ocp.releases.ci.openshift.org/api/v1/\"\n f\"releasestream/{version}/latest\"\n )\n version_url_content = get_url_content(latest_nightly_url)\n version_json = json.loads(version_url_content)\n return version_json[\"name\"]\n if version.endswith(\"-ga\"):\n channel = config.DEPLOYMENT.get(\"ocp_channel\", \"stable\")\n ocp_version = version.rstrip(\"-ga\")\n index = config.DEPLOYMENT.get(\"ocp_version_index\", -1)\n return get_latest_ocp_version(f\"{channel}-{ocp_version}\", index)\n else:\n return version\n\n\ndef get_openshift_installer(\n version=None,\n bin_dir=None,\n force_download=False,\n):\n \"\"\"\n Download the OpenShift installer binary, if not already present.\n Update env. 
PATH and get path of the openshift installer binary.\n\n Args:\n version (str): Version of the installer to download\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force installer download even if already present\n\n Returns:\n str: Path to the installer binary\n\n \"\"\"\n version = version or config.DEPLOYMENT[\"installer_version\"]\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n installer_filename = \"openshift-install\"\n installer_binary_path = os.path.join(bin_dir, installer_filename)\n if os.path.isfile(installer_binary_path) and force_download:\n delete_file(installer_binary_path)\n if os.path.isfile(installer_binary_path):\n log.debug(f\"Installer exists ({installer_binary_path}), skipping download.\")\n # TODO: check installer version\n else:\n version = expose_ocp_version(version)\n log.info(f\"Downloading openshift installer ({version}).\")\n prepare_bin_dir()\n # record current working directory and switch to BIN_DIR\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n tarball = f\"{installer_filename}.tar.gz\"\n url = get_openshift_mirror_url(installer_filename, version)\n download_file(url, tarball)\n run_cmd(f\"tar xzvf {tarball} {installer_filename}\")\n delete_file(tarball)\n # return to the previous working directory\n os.chdir(previous_dir)\n\n installer_version = run_cmd(f\"{installer_binary_path} version\")\n log.info(f\"OpenShift Installer version: {installer_version}\")\n return installer_binary_path\n\n\ndef get_ocm_cli(\n version=None,\n bin_dir=None,\n force_download=False,\n):\n \"\"\"\n Download the OCM binary, if not already present.\n Update env. PATH and get path of the OCM binary.\n\n Args:\n version (str): Version of the OCM to download\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force OCM download even if already present\n\n Returns:\n str: Path to the OCM binary\n\n \"\"\"\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n ocm_filename = \"ocm\"\n ocm_binary_path = os.path.join(bin_dir, ocm_filename)\n if os.path.isfile(ocm_binary_path) and force_download:\n delete_file(ocm_binary_path)\n if os.path.isfile(ocm_binary_path):\n log.debug(f\"ocm exists ({ocm_binary_path}), skipping download.\")\n else:\n log.info(f\"Downloading ocm cli ({version}).\")\n prepare_bin_dir()\n # record current working directory and switch to BIN_DIR\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = f\"https://github.com/openshift-online/ocm-cli/releases/download/v{version}/ocm-linux-amd64\"\n download_file(url, ocm_filename)\n # return to the previous working directory\n os.chdir(previous_dir)\n\n current_file_permissions = os.stat(ocm_binary_path)\n os.chmod(\n ocm_binary_path,\n current_file_permissions.st_mode | stat.S_IEXEC,\n )\n ocm_version = run_cmd(f\"{ocm_binary_path} version\")\n log.info(f\"OCM version: {ocm_version}\")\n\n return ocm_binary_path\n\n\ndef get_rosa_cli(\n version=None,\n bin_dir=None,\n force_download=False,\n):\n \"\"\"\n Download the ROSA binary, if not already present.\n Update env. 
PATH and get path of the ROSA binary.\n\n Args:\n version (str): Version of the ROSA to download\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force ROSA download even if already present\n\n Returns:\n str: Path to the rosa binary\n\n \"\"\"\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n rosa_filename = \"rosa\"\n rosa_binary_path = os.path.join(bin_dir, rosa_filename)\n if os.path.isfile(rosa_binary_path) and force_download:\n delete_file(rosa_binary_path)\n if os.path.isfile(rosa_binary_path):\n log.debug(f\"rosa exists ({rosa_binary_path}), skipping download.\")\n else:\n log.info(f\"Downloading rosa cli ({version}).\")\n prepare_bin_dir()\n # record current working directory and switch to BIN_DIR\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = f\"https://github.com/openshift/rosa/releases/download/v{version}/rosa-linux-amd64\"\n download_file(url, rosa_filename)\n # return to the previous working directory\n os.chdir(previous_dir)\n\n current_file_permissions = os.stat(rosa_binary_path)\n os.chmod(\n rosa_binary_path,\n current_file_permissions.st_mode | stat.S_IEXEC,\n )\n rosa_version = run_cmd(f\"{rosa_binary_path} version\")\n log.info(f\"rosa version: {rosa_version}\")\n\n return rosa_binary_path\n\n\ndef get_openshift_client(\n version=None, bin_dir=None, force_download=False, skip_comparison=False\n):\n \"\"\"\n Download the OpenShift client binary, if not already present.\n Update env. PATH and get path of the oc binary.\n\n Args:\n version (str): Version of the client to download\n (default: config.RUN['client_version'])\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force client download even if already present\n skip_comparison (bool): Skip the comparison between the existing OCP client\n version and the configured one.\n\n Returns:\n str: Path to the client binary\n\n \"\"\"\n version = version or config.RUN[\"client_version\"]\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n client_binary_path = os.path.join(bin_dir, \"oc\")\n kubectl_binary_path = os.path.join(bin_dir, \"kubectl\")\n download_client = True\n client_version = None\n try:\n version = expose_ocp_version(version)\n except Exception:\n log.exception(\"Unable to expose OCP version, skipping client download.\")\n skip_comparison = True\n download_client = False\n force_download = False\n\n if force_download:\n log.info(\"Forcing client download.\")\n elif os.path.isfile(client_binary_path) and not skip_comparison:\n current_client_version = get_client_version(client_binary_path)\n if current_client_version != version:\n log.info(\n f\"Existing client version ({current_client_version}) does not match \"\n f\"configured version ({version}).\"\n )\n else:\n log.debug(\n f\"Client exists ({client_binary_path}) and matches configured version, \"\n f\"skipping download.\"\n )\n download_client = False\n\n if download_client:\n # Move existing client binaries to backup location\n client_binary_backup = f\"{client_binary_path}.bak\"\n kubectl_binary_backup = f\"{kubectl_binary_path}.bak\"\n\n try:\n os.rename(client_binary_path, client_binary_backup)\n os.rename(kubectl_binary_path, kubectl_binary_backup)\n except FileNotFoundError:\n pass\n\n # Download the client\n log.info(f\"Downloading openshift client ({version}).\")\n prepare_bin_dir()\n # record current working directory and switch to BIN_DIR\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = 
get_openshift_mirror_url(\"openshift-client\", version)\n tarball = \"openshift-client.tar.gz\"\n download_file(url, tarball)\n run_cmd(f\"tar xzvf {tarball} oc kubectl\")\n delete_file(tarball)\n\n try:\n client_version = run_cmd(f\"{client_binary_path} version --client\")\n except CommandFailed:\n log.error(\"Unable to get version from downloaded client.\")\n\n if client_version:\n try:\n delete_file(client_binary_backup)\n delete_file(kubectl_binary_backup)\n log.info(\"Deleted backup binaries.\")\n except FileNotFoundError:\n pass\n else:\n try:\n os.rename(client_binary_backup, client_binary_path)\n os.rename(kubectl_binary_backup, kubectl_binary_path)\n log.info(\"Restored backup binaries to their original location.\")\n except FileNotFoundError:\n raise ClientDownloadError(\n \"No backups exist and new binary was unable to be verified.\"\n )\n\n # return to the previous working directory\n os.chdir(previous_dir)\n\n log.info(f\"OpenShift Client version: {client_version}\")\n return client_binary_path\n\n\ndef get_vault_cli(bind_dir=None, force_download=False):\n \"\"\"\n Download vault based on platform\n basically for CLI purpose. Binary will be directly\n put into ocs_ci/bin/ directory\n\n Args:\n bind_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force vault cli download even if already present\n\n \"\"\"\n res = requests.get(constants.VAULT_VERSION_INFO_URL)\n version = res.url.split(\"/\")[-1].lstrip(\"v\")\n bin_dir = os.path.expanduser(bind_dir or config.RUN[\"bin_dir\"])\n system = platform.system()\n if \"Darwin\" not in system and \"Linux\" not in system:\n raise UnsupportedOSType(\"Not a supported platform for vault\")\n\n system = system.lower()\n zip_file = f\"vault_{version}_{system}_amd64.zip\"\n vault_cli_filename = \"vault\"\n vault_binary_path = os.path.join(bin_dir, vault_cli_filename)\n if os.path.isfile(vault_binary_path) and force_download:\n delete_file(vault_binary_path)\n if os.path.isfile(vault_binary_path):\n log.debug(\n f\"Vault CLI binary already exists {vault_binary_path}, skipping download.\"\n )\n else:\n log.info(f\"Downloading vault cli {version}\")\n prepare_bin_dir()\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = f\"{constants.VAULT_DOWNLOAD_BASE_URL}/{version}/{zip_file}\"\n download_file(url, zip_file)\n run_cmd(f\"unzip {zip_file}\")\n delete_file(zip_file)\n os.chdir(previous_dir)\n vault_ver = run_cmd(f\"{vault_binary_path} version\")\n log.info(f\"Vault cli version:{vault_ver}\")\n\n\ndef ensure_nightly_build_availability(build_url):\n base_build_url = build_url.rsplit(\"/\", 1)[0]\n r = requests.get(base_build_url)\n extracting_condition = b\"Extracting\" in r.content\n if extracting_condition:\n log.info(\"Build is extracting now, may take up to a minute.\")\n return r.ok and not extracting_condition\n\n\ndef get_openshift_mirror_url(file_name, version):\n \"\"\"\n Format url to OpenShift mirror (for client and installer download).\n\n Args:\n file_name (str): Name of file\n version (str): Version of the installer or client to download\n\n Returns:\n str: Url of the desired file (installer or client)\n\n Raises:\n UnsupportedOSType: In case the OS type is not supported\n UnavailableBuildException: In case the build url is not reachable\n \"\"\"\n if platform.system() == \"Darwin\":\n os_type = \"mac\"\n elif platform.system() == \"Linux\":\n os_type = \"linux\"\n else:\n raise UnsupportedOSType\n url_template = config.DEPLOYMENT.get(\n \"ocp_url_template\",\n 
\"https://openshift-release-artifacts.apps.ci.l2s4.p1.openshiftapps.com/\"\n \"{version}/{file_name}-{os_type}-{version}.tar.gz\",\n )\n url = url_template.format(\n version=version,\n file_name=file_name,\n os_type=os_type,\n )\n sample = TimeoutSampler(\n timeout=540,\n sleep=5,\n func=ensure_nightly_build_availability,\n build_url=url,\n )\n if not sample.wait_for_func_status(result=True):\n raise UnavailableBuildException(f\"The build url {url} is not reachable\")\n return url\n\n\ndef prepare_bin_dir(bin_dir=None):\n \"\"\"\n Prepare bin directory for OpenShift client and installer\n\n Args:\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n \"\"\"\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n try:\n os.mkdir(bin_dir)\n log.info(f\"Directory '{bin_dir}' successfully created.\")\n except FileExistsError:\n log.debug(f\"Directory '{bin_dir}' already exists.\")\n\n\ndef add_path_to_env_path(path):\n \"\"\"\n Add path to the PATH environment variable (if not already there).\n\n Args:\n path (str): Path which should be added to the PATH env. variable\n\n \"\"\"\n env_path = os.environ[\"PATH\"].split(os.pathsep)\n if path not in env_path:\n os.environ[\"PATH\"] = os.pathsep.join([path] + env_path)\n log.info(f\"Path '{path}' added to the PATH environment variable.\")\n log.debug(f\"PATH: {os.environ['PATH']}\")\n\n\ndef delete_file(file_name):\n \"\"\"\n Delete file_name\n\n Args:\n file_name (str): Path to the file you want to delete\n \"\"\"\n os.remove(file_name)\n\n\ndef delete_dir(dir_name):\n \"\"\"\n Deletes the directory\n\n Args:\n dir_name (str): Directory path to delete\n\n \"\"\"\n try:\n rmtree(dir_name)\n except OSError as e:\n log.error(f\"Failed to delete the directory {dir_name}. Error: {e.strerror}\")\n\n\nclass TimeoutSampler(object):\n \"\"\"\n Samples the function output.\n\n This is a generator object that at first yields the output of function\n `func`. After the yield, it either raises instance of `timeout_exc_cls` or\n sleeps `sleep` seconds.\n\n Yielding the output allows you to handle every value as you wish.\n\n Feel free to set the instance variables.\n\n\n Args:\n timeout (int): Timeout in seconds\n sleep (int): Sleep interval in seconds\n func (function): The function to sample\n func_args: Arguments for the function\n func_kwargs: Keyword arguments for the function\n \"\"\"\n\n def __init__(self, timeout, sleep, func, *func_args, **func_kwargs):\n self.timeout = timeout\n self.sleep = sleep\n # check that given timeout and sleep values makes sense\n if self.timeout < self.sleep:\n raise ValueError(\"timeout should be larger than sleep time\")\n\n self.func = func\n self.func_args = func_args\n self.func_kwargs = func_kwargs\n\n # Timestamps of the first and most recent samples\n self.start_time = None\n self.last_sample_time = None\n # The exception to raise\n self.timeout_exc_cls = TimeoutExpiredError\n # Arguments that will be passed to the exception\n self.timeout_exc_args = [self.timeout]\n try:\n self.timeout_exc_args.append(\n f\"Timed out after {timeout}s running {self._build_call_string()}\"\n )\n except Exception:\n log.exception(\n \"Failed to assemble call string. 
Not necessarily a test failure.\"\n )\n\n def _build_call_string(self):\n def stringify(value):\n if isinstance(value, str):\n return f'\"{value}\"'\n return str(value)\n\n args = list(map(stringify, self.func_args))\n kwargs = [f\"{stringify(k)}={stringify(v)}\" for k, v in self.func_kwargs.items()]\n all_args_string = \", \".join(args + kwargs)\n return f\"{self.func.__name__}({all_args_string})\"\n\n def __iter__(self):\n if self.start_time is None:\n self.start_time = time.time()\n while True:\n self.last_sample_time = time.time()\n if self.timeout <= (self.last_sample_time - self.start_time):\n raise self.timeout_exc_cls(*self.timeout_exc_args)\n try:\n yield self.func(*self.func_args, **self.func_kwargs)\n except Exception as ex:\n msg = f\"Exception raised during iteration: {ex}\"\n log.exception(msg)\n if self.timeout <= (time.time() - self.start_time):\n raise self.timeout_exc_cls(*self.timeout_exc_args)\n log.info(\"Going to sleep for %d seconds before next iteration\", self.sleep)\n time.sleep(self.sleep)\n\n def wait_for_func_value(self, value):\n \"\"\"\n Implements common usecase of TimeoutSampler: waiting until func (given\n function) returns a given value.\n\n Args:\n value: Expected return value of func we are waiting for.\n \"\"\"\n try:\n for i_value in self:\n if i_value == value:\n break\n except self.timeout_exc_cls:\n log.error(\n \"function %s failed to return expected value %s \"\n \"after multiple retries during %d second timeout\",\n self.func.__name__,\n value,\n self.timeout,\n )\n raise\n\n def wait_for_func_status(self, result):\n \"\"\"\n Get function and run it for given time until success or timeout.\n (using __iter__ function)\n\n Args:\n result (bool): Expected result from func.\n\n Examples::\n\n sample = TimeoutSampler(\n timeout=60, sleep=1, func=some_func, func_arg1=\"1\",\n func_arg2=\"2\"\n )\n if not sample.wait_for_func_status(result=True):\n raise Exception\n\n \"\"\"\n try:\n self.wait_for_func_value(result)\n return True\n except self.timeout_exc_cls:\n return False\n\n\nclass TimeoutIterator(TimeoutSampler):\n \"\"\"\n Wrapper of TimeoutSampler which separates parameters of the class itself\n and func arguments in __init__ method. 
Such way of passing function with\n parameters is used in python standard library.\n\n This allows more explicit usage, which improves readability, eg.::\n\n t1 = TimeoutIterator(timeout=60, sleep=5, func=foo, func_args=[bar])\n t2 = TimeoutIterator(3600, sleep=10, func=foo, func_args=[bar])\n \"\"\"\n\n def __init__(self, timeout, sleep, func, func_args=None, func_kwargs=None):\n if func_args is None:\n func_args = []\n if func_kwargs is None:\n func_kwargs = {}\n super().__init__(timeout, sleep, func, *func_args, **func_kwargs)\n\n\ndef get_random_str(size=13):\n \"\"\"\n generates the random string of given size\n\n Args:\n size (int): number of random characters to generate\n\n Returns:\n str : string of random characters of given size\n\n \"\"\"\n chars = string.ascii_lowercase + string.digits\n return \"\".join(random.choice(chars) for _ in range(size))\n\n\ndef run_async(command):\n \"\"\"\n Run command locally and return without waiting for completion\n\n Args:\n command (str): The command to run.\n\n Returns:\n An open descriptor to be used by the calling function.\n\n Example:\n command = 'oc delete pvc pvc1'\n proc = run_async(command)\n ret, out, err = proc.async_communicate()\n \"\"\"\n log.info(f\"Executing command: {command}\")\n popen_obj = subprocess.Popen(\n command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True,\n encoding=\"utf-8\",\n )\n\n def async_communicate():\n \"\"\"\n Wait for command to complete and fetch the result\n\n Returns:\n retcode, stdout, stderr of the command\n \"\"\"\n stdout, stderr = popen_obj.communicate()\n retcode = popen_obj.returncode\n return retcode, stdout, stderr\n\n popen_obj.async_communicate = async_communicate\n return popen_obj\n\n\ndef is_cluster_running(cluster_path):\n from ocs_ci.ocs.openshift_ops import OCP\n\n return config.RUN[\"cli_params\"].get(\"cluster_path\") and OCP.set_kubeconfig(\n os.path.join(cluster_path, config.RUN.get(\"kubeconfig_location\"))\n )\n\n\ndef decompose_html_attributes(soup, attributes):\n \"\"\"\n Decomposes the given html attributes\n\n Args:\n soup (obj): BeautifulSoup object\n attributes (list): attributes to decompose\n\n Returns: None\n\n \"\"\"\n for attribute in attributes:\n tg = soup.find_all(attrs={\"class\": attribute})\n for each in tg:\n each.decompose()\n\n\ndef parse_html_for_email(soup):\n \"\"\"\n Parses the html and filters out the unnecessary data/tags/attributes\n for email reporting\n\n Args:\n soup (obj): BeautifulSoup object\n\n \"\"\"\n attributes_to_decompose = [\"extra\"]\n if not config.RUN.get(\"logs_url\"):\n attributes_to_decompose.append(\"col-links\")\n decompose_html_attributes(soup, attributes_to_decompose)\n soup.find(id=\"not-found-message\").decompose()\n\n if not config.RUN.get(\"logs_url\"):\n for tr in soup.find_all(\"tr\"):\n for th in tr.find_all(\"th\"):\n if \"Links\" in th.text:\n th.decompose()\n\n for p in soup.find_all(\"p\"):\n if \"(Un)check the boxes to filter the results.\" in p.text:\n p.decompose()\n if \"pytest-html\" in p.text:\n data = p.text.split(\"by\")[0]\n p.string = data\n\n for ip in soup.find_all(\"input\"):\n if not ip.has_attr(\"disabled\"):\n ip[\"disabled\"] = \"true\"\n\n for td in soup.find_all(\"td\"):\n if \"pytest\" in td.text or \"html\" in td.text:\n data = td.text.replace(\"&apos\", \"\")\n td.string = data\n\n main_header = soup.find(\"h1\")\n main_header.string.replace_with(\"OCS-CI RESULTS\")\n\n\ndef add_squad_analysis_to_email(session, soup):\n \"\"\"\n Add squad analysis to the html test results used 
in email reporting\n\n Args:\n session (obj): Pytest session object\n soup (obj): BeautifulSoup object of HTML Report data\n\n \"\"\"\n failed = {}\n skipped = {}\n # sort out failed and skipped test cases to failed and skipped dicts\n for result in session.results.values():\n if result.failed or result.skipped:\n unassigned = True\n for squad, res in constants.SQUADS.items():\n for item in res:\n if item in result.nodeid:\n if result.failed:\n if squad not in failed:\n failed[squad] = []\n failed[squad].append(result.nodeid)\n unassigned = False\n\n if result.skipped:\n if squad not in skipped:\n skipped[squad] = []\n try:\n skipped_message = result.longrepr[2][8:]\n except TypeError:\n skipped_message = \"--unknown--\"\n skipped[squad].append((result.nodeid, skipped_message))\n unassigned = False\n if unassigned:\n if result.failed:\n if \"UNASSIGNED\" not in failed:\n failed[\"UNASSIGNED\"] = []\n failed[\"UNASSIGNED\"].append(result.nodeid)\n if result.skipped:\n if \"UNASSIGNED\" not in skipped:\n skipped[\"UNASSIGNED\"] = []\n try:\n skipped_message = result.longrepr[2][8:]\n except TypeError:\n skipped_message = \"--unknown--\"\n skipped[\"UNASSIGNED\"].append((result.nodeid, skipped_message))\n\n # no failed or skipped tests - exit the function\n if not failed and not skipped:\n return\n\n # add CSS for the Squad Analysis report\n style = soup.find(\"style\")\n # use colors for squad names from squad names\n style.string += \"\\n\".join(\n [\n f\"h4.squad-{color.lower()} {{\\n color: {color.lower()};\\n}}\"\n for color in constants.SQUADS\n ]\n )\n # few additional styles\n style.string += \"\"\"\n .squad-analysis {\n color: black;\n font-family: monospace;\n background-color: #eee;\n padding: 5px;\n margin-top: 10px;\n }\n .squad-analysis h2 {\n margin: 0px;\n }\n .squad-analysis h3 {\n margin: 0px;\n margin-top: 10px;\n }\n .squad-analysis h4 {\n margin: 0px;\n }\n .squad-analysis ul {\n margin: 0px;\n }\n .squad-analysis ul li em {\n margin-left: 1em;\n }\n .squad-unassigned {\n background-color: #FFBA88;\n }\n h4.squad-yellow {\n color: black;\n background-color: yellow;\n display: inline;\n }\n \"\"\"\n # prepare place for the Squad Analysis in the email\n squad_analysis_div = soup.new_tag(\"div\")\n squad_analysis_div[\"class\"] = \"squad-analysis\"\n main_header = soup.find(\"h1\")\n main_header.insert_after(squad_analysis_div)\n failed_h2_tag = soup.new_tag(\"h2\")\n failed_h2_tag.string = \"Squad Analysis - please analyze:\"\n squad_analysis_div.append(failed_h2_tag)\n if failed:\n # print failed testcases peer squad\n failed_div_tag = soup.new_tag(\"div\")\n squad_analysis_div.append(failed_div_tag)\n failed_h3_tag = soup.new_tag(\"h3\")\n failed_h3_tag.string = \"Failures:\"\n failed_div_tag.append(failed_h3_tag)\n for squad in failed:\n failed_h4_tag = soup.new_tag(\"h4\")\n failed_h4_tag.string = f\"{squad} squad\"\n failed_h4_tag[\"class\"] = f\"squad-{squad.lower()}\"\n failed_div_tag.append(failed_h4_tag)\n failed_ul_tag = soup.new_tag(\"ul\")\n failed_ul_tag[\"class\"] = f\"squad-{squad.lower()}\"\n failed_div_tag.append(failed_ul_tag)\n for test in failed[squad]:\n failed_li_tag = soup.new_tag(\"li\")\n failed_li_tag.string = test\n failed_ul_tag.append(failed_li_tag)\n if skipped:\n # print skipped testcases with reason peer squad\n skips_div_tag = soup.new_tag(\"div\")\n squad_analysis_div.append(skips_div_tag)\n skips_h3_tag = soup.new_tag(\"h3\")\n skips_h3_tag.string = \"Skips:\"\n skips_div_tag.append(skips_h3_tag)\n for squad in skipped:\n skips_h4_tag = 
soup.new_tag(\"h4\")\n skips_h4_tag.string = f\"{squad} squad\"\n skips_h4_tag[\"class\"] = f\"squad-{squad.lower()}\"\n skips_div_tag.append(skips_h4_tag)\n skips_ul_tag = soup.new_tag(\"ul\")\n skips_ul_tag[\"class\"] = f\"squad-{squad.lower()}\"\n skips_div_tag.append(skips_ul_tag)\n for test in skipped[squad]:\n skips_li_tag = soup.new_tag(\"li\")\n skips_test_span_tag = soup.new_tag(\"span\")\n skips_test_span_tag.string = test[0]\n skips_li_tag.append(skips_test_span_tag)\n skips_li_tag.append(soup.new_tag(\"br\"))\n skips_reason_em_tag = soup.new_tag(\"em\")\n skips_reason_em_tag.string = f\"Reason: {test[1]}\"\n skips_li_tag.append(skips_reason_em_tag)\n skips_ul_tag.append(skips_li_tag)\n\n\ndef move_summary_to_top(soup):\n \"\"\"\n Move summary to the top of the eamil report\n\n \"\"\"\n summary = []\n summary.append(soup.find(\"h2\", text=\"Summary\"))\n for tag in summary[0].next_siblings:\n if tag.name == \"h2\":\n break\n else:\n summary.append(tag)\n for tag in summary:\n tag.extract()\n main_header = soup.find(\"h1\")\n # because we are inserting the tags just after the header one by one, we\n # have to insert them in reverse order\n summary.reverse()\n for tag in summary:\n main_header.insert_after(tag)\n\n\ndef email_reports(session):\n \"\"\"\n Email results of test run\n\n \"\"\"\n # calculate percentage pass\n # reporter = session.config.pluginmanager.get_plugin(\"terminalreporter\")\n # passed = len(reporter.stats.get(\"passed\", []))\n # failed = len(reporter.stats.get(\"failed\", []))\n # error = len(reporter.stats.get(\"error\", []))\n # total = passed + failed + error\n # percentage_passed = (passed / total) * 100\n\n try:\n build_id = get_ocs_build_number()\n except Exception:\n build_id = \"\"\n log.exception(\"Getting OCS operator build number failed!\")\n build_str = f\"BUILD ID: {build_id} \" if build_id else \"\"\n mailids = config.RUN[\"cli_params\"][\"email\"]\n recipients = []\n [recipients.append(mailid) for mailid in mailids.split(\",\")]\n sender = \"[email protected]\"\n msg = MIMEMultipart(\"alternative\")\n msg[\"Subject\"] = (\n f\"ocs-ci results for {get_testrun_name()} \"\n f\"({build_str}\"\n f\"RUN ID: {config.RUN['run_id']}) \"\n # f\"Passed: {percentage_passed:.0f}%\"\n )\n msg[\"From\"] = sender\n msg[\"To\"] = \", \".join(recipients)\n\n html = config.RUN[\"cli_params\"][\"--html\"]\n with open(os.path.expanduser(html)) as fd:\n html_data = fd.read()\n soup = BeautifulSoup(html_data, \"html.parser\")\n\n parse_html_for_email(soup)\n if config.RUN[\"cli_params\"].get(\"squad_analysis\"):\n add_squad_analysis_to_email(session, soup)\n move_summary_to_top(soup)\n part1 = MIMEText(soup, \"html\")\n msg.attach(part1)\n try:\n s = smtplib.SMTP(config.REPORTING[\"email\"][\"smtp_server\"])\n s.sendmail(sender, recipients, msg.as_string())\n s.quit()\n log.info(f\"Results have been emailed to {recipients}\")\n except Exception:\n log.exception(\"Sending email with results failed!\")\n\n\ndef get_cluster_version_info():\n \"\"\"\n Gets the complete cluster version information\n\n Returns:\n dict: cluster version information\n\n \"\"\"\n # importing here to avoid circular imports\n from ocs_ci.ocs.ocp import OCP\n\n ocp = OCP(kind=\"clusterversion\")\n cluster_version_info = ocp.get(\"version\")\n return cluster_version_info\n\n\ndef get_ocs_build_number():\n \"\"\"\n Gets the build number for ocs operator\n\n Return:\n str: build number for ocs operator version\n\n \"\"\"\n # Importing here to avoid circular dependency\n from 
ocs_ci.ocs.resources.csv import get_csvs_start_with_prefix\n from ocs_ci.ocs.resources.catalog_source import CatalogSource\n from ocs_ci.ocs.resources.packagemanifest import get_selector_for_ocs_operator\n\n build_num = \"\"\n if (\n version_module.get_semantic_ocs_version_from_config()\n >= version_module.VERSION_4_9\n ):\n operator_name = defaults.ODF_OPERATOR_NAME\n else:\n operator_name = defaults.OCS_OPERATOR_NAME\n ocs_csvs = get_csvs_start_with_prefix(\n operator_name,\n defaults.ROOK_CLUSTER_NAMESPACE,\n )\n try:\n ocs_csv = ocs_csvs[0]\n csv_labels = ocs_csv[\"metadata\"][\"labels\"]\n if \"full_version\" in csv_labels:\n return csv_labels[\"full_version\"]\n build_num = ocs_csv[\"spec\"][\"version\"]\n operator_selector = get_selector_for_ocs_operator()\n # This is a temporary solution how to get the build id from the registry image.\n # Because we are now missing build ID in the CSV. If catalog source with our\n # internal label exists, we will be getting build id from the tag of the image\n # in catalog source. Boris is working on better way how to populate the internal\n # build version in the CSV.\n if operator_selector:\n catalog_source = CatalogSource(\n resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,\n namespace=constants.MARKETPLACE_NAMESPACE,\n selector=operator_selector,\n )\n cs_data = catalog_source.get()[\"items\"][0]\n cs_image = cs_data[\"spec\"][\"image\"]\n image_tag = cs_image.split(\":\")[1]\n if \"-\" in image_tag:\n build_id = image_tag.split(\"-\")[1]\n build_num += f\"-{build_id}\"\n\n except (IndexError, AttributeError, CommandFailed, KeyError):\n log.exception(\"No version info found for OCS operator\")\n return build_num\n\n\ndef get_cluster_version():\n \"\"\"\n Gets the cluster version\n\n Returns:\n str: cluster version\n\n \"\"\"\n return get_cluster_version_info()[\"status\"][\"desired\"][\"version\"]\n\n\ndef get_cluster_image():\n \"\"\"\n Gets the cluster image\n\n Returns:\n str: cluster image\n\n \"\"\"\n return get_cluster_version_info()[\"status\"][\"desired\"][\"image\"]\n\n\ndef get_ceph_version():\n \"\"\"\n Gets the ceph version\n\n Returns:\n str: ceph version\n\n \"\"\"\n # importing here to avoid circular imports\n from ocs_ci.ocs.resources import pod\n\n ct_pod = pod.get_ceph_tools_pod()\n ceph_version = ct_pod.exec_ceph_cmd(\"ceph version\")\n return re.split(r\"ceph version \", ceph_version[\"version\"])[1]\n\n\ndef get_rook_version():\n \"\"\"\n Gets the rook version\n\n Returns:\n str: rook version\n\n \"\"\"\n # importing here to avoid circular imports\n from ocs_ci.ocs.resources import pod\n\n ct_pod = pod.get_ceph_tools_pod()\n rook_versions = ct_pod.exec_ceph_cmd(\"rook version\", format=\"\")\n return rook_versions[\"rook\"]\n\n\ndef get_csi_versions():\n \"\"\"\n Gets the CSI related version information\n\n Returns:\n dict: CSI related version information\n\n \"\"\"\n csi_versions = {}\n # importing here to avoid circular imports\n from ocs_ci.ocs.ocp import OCP\n\n ocp_pod_obj = OCP(\n kind=constants.POD, namespace=config.ENV_DATA[\"cluster_namespace\"]\n )\n csi_provisioners = [\"csi-cephfsplugin-provisioner\", \"csi-rbdplugin-provisioner\"]\n for provisioner in csi_provisioners:\n csi_provisioner_pod = run_cmd(\n f\"oc -n {config.ENV_DATA['cluster_namespace']} get pod -l \"\n f\"'app={provisioner}' -o jsonpath='{{.items[0].metadata.name}}'\"\n )\n desc = ocp_pod_obj.get(csi_provisioner_pod)\n for container in desc[\"spec\"][\"containers\"]:\n name = container[\"name\"]\n version = 
container[\"image\"].split(\"/\")[-1].split(\":\")[1]\n csi_versions[name] = version\n return csi_versions\n\n\ndef get_ocp_version(seperator=None):\n \"\"\"\n Get current ocp version\n\n Args:\n seperator (str): String that would seperate major and\n minor version nubers\n\n Returns:\n string : If seperator is 'None', version string will be returned as is\n eg: '4.2', '4.3'.\n If seperator is provided then '.' in the version string would be\n replaced by seperator and resulting string will be returned.\n eg: If seperator is '_' then string returned would be '4_2'\n\n \"\"\"\n char = seperator if seperator else \".\"\n if config.ENV_DATA.get(\"skip_ocp_deployment\"):\n raw_version = json.loads(run_cmd(\"oc version -o json\"))[\"openshiftVersion\"]\n else:\n raw_version = config.DEPLOYMENT[\"installer_version\"]\n version = Version.coerce(raw_version)\n return char.join([str(version.major), str(version.minor)])\n\n\ndef get_running_ocp_version(separator=None):\n \"\"\"\n Get current running ocp version\n\n Args:\n separator (str): String that would separate major and\n minor version numbers\n\n Returns:\n string : If separator is 'None', version string will be returned as is\n eg: '4.2', '4.3'.\n If separator is provided then '.' in the version string would be\n replaced by separator and resulting string will be returned.\n eg: If separator is '_' then string returned would be '4_2'\n\n \"\"\"\n char = separator if separator else \".\"\n namespace = config.ENV_DATA[\"cluster_namespace\"]\n try:\n # if the cluster exist, this part will be run\n results = run_cmd(f\"oc get clusterversion -n {namespace} -o yaml\")\n build = yaml.safe_load(results)[\"items\"][0][\"status\"][\"desired\"][\"version\"]\n return char.join(build.split(\".\")[0:2])\n except Exception:\n # this part will return version from the config file in case\n # cluster is not exists.\n return get_ocp_version(seperator=char)\n\n\ndef get_ocp_repo():\n \"\"\"\n Get ocp repo file, name will be generated dynamically based on\n ocp version.\n\n Returns:\n string : Path to ocp repo file\n\n \"\"\"\n repo_path = os.path.join(constants.REPO_DIR, f\"ocp_{get_ocp_version('_')}.repo\")\n path = os.path.expanduser(repo_path)\n assert os.path.exists(path), f\"OCP repo file {path} doesn't exists!\"\n return path\n\n\ndef parse_pgsql_logs(data):\n \"\"\"\n Parse the pgsql benchmark data from ripsaw and return\n the data in list format\n\n Args:\n data (str): log data from pgsql bench run\n\n Returns:\n list_data (list): data digestable by scripts with below format\n e.g.:\n\n [\n {1: {'num_clients': '2','num_threads': '7','latency_avg': '7',\n 'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},\n {2: {'num_clients': '2','num_threads': '7','latency_avg': '7',\n 'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},\n {3: {'num_clients': '2','num_threads': '7','latency_avg': '7',\n 'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},\n ]\n where keys{1,2,3} are run-IDs\n\n \"\"\"\n match = data.split(\"PGBench Results\")\n list_data = []\n for i in range(2, len(match)):\n log = \"\".join(match[i].split(\"\\n\"))\n pgsql_data = dict()\n pgsql_data[i - 1] = {}\n clients = re.search(r\"scaling_factor\\':\\s+(\\d+),\", log)\n if clients and clients.group(1):\n pgsql_data[i - 1][\"scaling_factor\"] = clients.group(1)\n clients = re.search(r\"number_of_clients\\':\\s+(\\d+),\", log)\n if clients and clients.group(1):\n pgsql_data[i - 1][\"num_clients\"] = clients.group(1)\n threads = re.search(r\"number_of_threads\\':\\s+(\\d+)\", 
log)\n if threads and threads.group(1):\n pgsql_data[i - 1][\"num_threads\"] = threads.group(1)\n clients = re.search(r\"number_of_transactions_per_client\\':\\s+(\\d+),\", log)\n if clients and clients.group(1):\n pgsql_data[i - 1][\"number_of_transactions_per_client\"] = clients.group(1)\n clients = re.search(\n r\"number_of_transactions_actually_processed\\':\\s+(\\d+),\", log\n )\n if clients and clients.group(1):\n pgsql_data[i - 1][\n \"number_of_transactions_actually_processed\"\n ] = clients.group(1)\n lat_avg = re.search(r\"latency_average_ms\\':\\s+(\\d+)\", log)\n if lat_avg and lat_avg.group(1):\n pgsql_data[i - 1][\"latency_avg\"] = lat_avg.group(1)\n lat_stddev = re.search(r\"latency_stddev_ms\\':\\s+(\\d+)\", log)\n if lat_stddev and lat_stddev.group(1):\n pgsql_data[i - 1][\"lat_stddev\"] = lat_stddev.group(1)\n tps_incl = re.search(r\"tps_incl_con_est\\':\\s+(\\w+)\", log)\n if tps_incl and tps_incl.group(1):\n pgsql_data[i - 1][\"tps_incl\"] = tps_incl.group(1)\n tps_excl = re.search(r\"tps_excl_con_est\\':\\s+(\\w+)\", log)\n if tps_excl and tps_excl.group(1):\n pgsql_data[i - 1][\"tps_excl\"] = tps_excl.group(1)\n list_data.append(pgsql_data)\n\n return list_data\n\n\ndef create_directory_path(path):\n \"\"\"\n Creates directory if path doesn't exists\n \"\"\"\n path = os.path.expanduser(path)\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n log.debug(f\"{path} already exists\")\n\n\ndef ocsci_log_path():\n \"\"\"\n Construct the full path for the log directory.\n\n Returns:\n str: full path for ocs-ci log directory\n\n \"\"\"\n return os.path.expanduser(\n os.path.join(config.RUN[\"log_dir\"], f\"ocs-ci-logs-{config.RUN['run_id']}\")\n )\n\n\ndef get_testrun_name():\n \"\"\"\n Prepare testrun ID for Polarion (and other reports).\n\n Returns:\n str: String containing testrun name\n\n \"\"\"\n markers = config.RUN[\"cli_params\"].get(\"-m\", \"\").replace(\" \", \"-\")\n us_ds = config.REPORTING.get(\"us_ds\")\n if us_ds.upper() == \"US\":\n us_ds = \"Upstream\"\n elif us_ds.upper() == \"DS\":\n us_ds = \"Downstream\"\n ocp_version = \".\".join(config.DEPLOYMENT.get(\"installer_version\").split(\".\")[:-2])\n ocp_version_string = f\"OCP{ocp_version}\" if ocp_version else \"\"\n ocs_version = config.ENV_DATA.get(\"ocs_version\")\n ocs_version_string = f\"OCS{ocs_version}\" if ocs_version else \"\"\n worker_os = \"RHEL\" if config.ENV_DATA.get(\"rhel_workers\") else \"RHCOS\"\n build_user = None\n baremetal_config = None\n if config.ENV_DATA.get(\"mon_type\"):\n baremetal_config = (\n f\"MON {config.ENV_DATA.get('mon_type').upper()} \"\n f\"OSD {config.ENV_DATA.get('osd_type').upper()}\"\n )\n\n lso_deployment = \"\"\n if not baremetal_config and config.DEPLOYMENT.get(\"local_storage\"):\n lso_deployment = \"LSO \"\n\n if config.REPORTING.get(\"display_name\"):\n testrun_name = config.REPORTING.get(\"display_name\")\n else:\n build_user = config.REPORTING.get(\"build_user\")\n testrun_name = (\n f\"{config.ENV_DATA.get('platform', '').upper()} \"\n f\"{config.ENV_DATA.get('deployment_type', '').upper()} \"\n )\n if baremetal_config:\n testrun_name = f\"LSO {baremetal_config} {testrun_name}\"\n\n testrun_name = (\n f\"{testrun_name}\"\n f\"{get_az_count()}AZ \"\n f\"{worker_os} \"\n f\"{lso_deployment}\"\n f\"{config.ENV_DATA.get('master_replicas')}M \"\n f\"{config.ENV_DATA.get('worker_replicas')}W \"\n f\"{markers}\"\n )\n testrun_name = (\n f\"{ocs_version_string} {us_ds} {ocp_version_string} \" f\"{testrun_name}\"\n )\n if build_user:\n testrun_name = 
f\"{build_user} {testrun_name}\"\n # replace invalid character(s) by '-'\n testrun_name = testrun_name.translate(\n str.maketrans({key: \"-\" for key in \"\"\" \\\\/.:*\"<>|~!@#$?%^&'*(){}+`,=\\t\"\"\"})\n )\n log.info(\"testrun_name: %s\", testrun_name)\n return testrun_name\n\n\ndef get_az_count():\n \"\"\"\n Using a number of different configuration attributes, determine how many\n availability zones the cluster is configured for.\n\n Returns:\n int: number of availability zones\n\n \"\"\"\n if config.ENV_DATA.get(\"availability_zone_count\"):\n return int(config.ENV_DATA.get(\"availability_zone_count\"))\n elif config.ENV_DATA.get(\"worker_availability_zones\"):\n return len(config.ENV_DATA.get(\"worker_availability_zones\"))\n elif config.ENV_DATA.get(\"platform\") == \"vsphere\":\n return 1\n else:\n return 1\n\n\ndef ceph_health_check(namespace=None, tries=20, delay=30):\n \"\"\"\n Args:\n namespace (str): Namespace of OCS\n (default: config.ENV_DATA['cluster_namespace'])\n tries (int): Number of retries\n delay (int): Delay in seconds between retries\n\n Returns:\n bool: ceph_health_check_base return value with default retries of 20,\n delay of 30 seconds if default values are not changed via args.\n\n \"\"\"\n if config.ENV_DATA[\"platform\"].lower() == constants.IBM_POWER_PLATFORM:\n delay = 60\n return retry(\n (CephHealthException, CommandFailed, subprocess.TimeoutExpired),\n tries=tries,\n delay=delay,\n backoff=1,\n )(ceph_health_check_base)(namespace)\n\n\ndef ceph_health_check_base(namespace=None):\n \"\"\"\n Exec `ceph health` cmd on tools pod to determine health of cluster.\n\n Args:\n namespace (str): Namespace of OCS\n (default: config.ENV_DATA['cluster_namespace'])\n\n Raises:\n CephHealthException: If the ceph health returned is not HEALTH_OK\n CommandFailed: If the command to retrieve the tools pod name or the\n command to get ceph health returns a non-zero exit code\n Returns:\n boolean: True if HEALTH_OK\n\n \"\"\"\n namespace = namespace or config.ENV_DATA[\"cluster_namespace\"]\n run_cmd(\n f\"oc wait --for condition=ready pod \"\n f\"-l app=rook-ceph-tools \"\n f\"-n {namespace} \"\n f\"--timeout=120s\"\n )\n tools_pod = run_cmd(\n f\"oc -n {namespace} get pod -l 'app=rook-ceph-tools' \"\n f\"-o jsonpath='{{.items[0].metadata.name}}'\",\n timeout=60,\n )\n health = run_cmd(f\"oc -n {namespace} exec {tools_pod} -- ceph health\")\n if health.strip() == \"HEALTH_OK\":\n log.info(\"Ceph cluster health is HEALTH_OK.\")\n return True\n else:\n raise CephHealthException(f\"Ceph cluster health is not OK. 
Health: {health}\")\n\n\ndef get_rook_repo(branch=\"master\", to_checkout=None):\n \"\"\"\n Clone and checkout the rook repository to specific branch/commit.\n\n Args:\n branch (str): Branch name to checkout\n to_checkout (str): Commit id or tag to checkout\n\n \"\"\"\n cwd = constants.ROOK_REPO_DIR\n if not os.path.isdir(cwd):\n log.info(f\"Cloning rook repository into {cwd}.\")\n run_cmd(f\"git clone {constants.ROOK_REPOSITORY} {cwd}\")\n else:\n log.info(\n f\"The rook directory {cwd} already exists, ocs-ci will skip the \"\n f\"clone of rook repository.\"\n )\n log.info(\"Fetching latest changes from rook repository.\")\n run_cmd(\"git fetch --all\", cwd=cwd)\n log.info(f\"Checkout rook repository to specific branch: {branch}\")\n run_cmd(f\"git checkout {branch}\", cwd=cwd)\n log.info(f\"Reset branch: {branch} with latest changes\")\n run_cmd(f\"git reset --hard origin/{branch}\", cwd=cwd)\n if to_checkout:\n run_cmd(f\"git checkout {to_checkout}\", cwd=cwd)\n\n\ndef clone_repo(url, location, branch=\"master\", to_checkout=None):\n \"\"\"\n Clone a repository or checkout latest changes if it already exists at\n specified location.\n\n Args:\n url (str): location of the repository to clone\n location (str): path where the repository will be cloned to\n branch (str): branch name to checkout\n to_checkout (str): commit id or tag to checkout\n \"\"\"\n if not os.path.isdir(location):\n log.info(\"Cloning repository into %s\", location)\n run_cmd(f\"git clone {url} {location}\")\n else:\n log.info(\"Repository already cloned at %s, skipping clone\", location)\n log.info(\"Fetching latest changes from repository\")\n run_cmd(\"git fetch --all\", cwd=location)\n log.info(\"Checking out repository to specific branch: %s\", branch)\n run_cmd(f\"git checkout {branch}\", cwd=location)\n log.info(\"Reset branch: %s with latest changes\", branch)\n run_cmd(f\"git reset --hard origin/{branch}\", cwd=location)\n if to_checkout:\n run_cmd(f\"git checkout {to_checkout}\", cwd=location)\n\n\ndef get_latest_ds_olm_tag(upgrade=False, latest_tag=None):\n \"\"\"\n This function returns latest tag of OCS downstream registry or one before\n latest if upgrade parameter is True\n\n Args:\n upgrade (str): If True then it returns one version of the build before\n the latest.\n latest_tag (str): Tag of the latest build. 
If not specified\n config.DEPLOYMENT['default_latest_tag'] or 'latest' will be used.\n\n Returns:\n str: latest tag for downstream image from quay registry\n\n Raises:\n TagNotFoundException: In case no tag found\n\n \"\"\"\n latest_tag = latest_tag or config.DEPLOYMENT.get(\"default_latest_tag\", \"latest\")\n tags = get_ocs_olm_operator_tags()\n latest_image = None\n ocs_version = config.ENV_DATA[\"ocs_version\"]\n upgrade_ocs_version = config.UPGRADE.get(\"upgrade_ocs_version\")\n use_rc_build = config.UPGRADE.get(\"use_rc_build\")\n previous_rc_build = config.UPGRADE.get(\"previous_rc_build\")\n upgrade_version_change = upgrade_ocs_version and ocs_version != upgrade_ocs_version\n if upgrade and use_rc_build and previous_rc_build and not upgrade_version_change:\n latest_tag = previous_rc_build\n if upgrade_version_change:\n upgrade = False\n for tag in tags:\n if tag[\"name\"] == latest_tag:\n latest_image = tag[\"manifest_digest\"]\n break\n if not latest_image:\n raise TagNotFoundException(\"Couldn't find latest tag!\")\n latest_tag_found = False\n for tag in tags:\n if not upgrade:\n if (\n not any(t in tag[\"name\"] for t in constants.LATEST_TAGS)\n and tag[\"manifest_digest\"] == latest_image\n ):\n return tag[\"name\"]\n if upgrade:\n if not latest_tag_found and tag[\"name\"] == latest_tag:\n latest_tag_found = True\n continue\n if not latest_tag_found:\n continue\n if (\n not any(t in tag[\"name\"] for t in constants.LATEST_TAGS)\n and tag[\"manifest_digest\"] != latest_image\n and ocs_version in tag[\"name\"]\n ):\n if config.UPGRADE.get(\"use_rc_build\") and \"rc\" not in tag[\"name\"]:\n continue\n return tag[\"name\"]\n raise TagNotFoundException(\"Couldn't find any desired tag!\")\n\n\ndef get_next_version_available_for_upgrade(current_tag):\n \"\"\"\n This function returns the tag built after the current_version\n\n Args:\n current_tag (str): Current build tag from which to search the next one\n build tag.\n\n Returns:\n str: tag for downstream image from quay registry built after\n the current_tag.\n\n Raises:\n TagNotFoundException: In case no tag suitable for upgrade found\n\n \"\"\"\n tags = get_ocs_olm_operator_tags()\n if any(t in current_tag for t in constants.LATEST_TAGS):\n return current_tag\n current_tag_index = None\n for index, tag in enumerate(tags):\n if tag[\"name\"] == current_tag:\n if index < 2:\n raise TagNotFoundException(\"Couldn't find tag for upgrade!\")\n current_tag_index = index\n break\n sliced_reversed_tags = tags[:current_tag_index]\n sliced_reversed_tags.reverse()\n ocs_version = config.ENV_DATA[\"ocs_version\"]\n for tag in sliced_reversed_tags:\n if (\n not any(t in tag[\"name\"] for t in constants.LATEST_TAGS)\n and ocs_version in tag[\"name\"]\n ):\n if config.UPGRADE.get(\"use_rc_build\") and \"rc\" not in tag[\"name\"]:\n continue\n return tag[\"name\"]\n raise TagNotFoundException(\"Couldn't find any tag!\")\n\n\ndef load_auth_config():\n \"\"\"\n Load the authentication config YAML from /data/auth.yaml\n\n Raises:\n FileNotFoundError: if the auth config is not found\n\n Returns:\n dict: A dictionary reprensenting the YAML file\n\n \"\"\"\n log.info(\"Retrieving the authentication config dictionary\")\n auth_file = os.path.join(constants.TOP_DIR, \"data\", constants.AUTHYAML)\n try:\n with open(auth_file) as f:\n return yaml.safe_load(f)\n except FileNotFoundError:\n log.warning(\n f\"Unable to find the authentication configuration at {auth_file}, \"\n f\"please refer to the getting started guide ({constants.AUTH_CONFIG_DOCS})\"\n )\n 
return {}\n\n\ndef get_ocs_olm_operator_tags(limit=100):\n \"\"\"\n Query the OCS OLM Operator repo and retrieve a list of tags. Since we are limited\n to 100 tags per page, we end up making several API calls and combining the results\n into a single list of tags.\n\n Args:\n limit: the number of tags to limit the request to\n\n Raises:\n KeyError: if the auth config isn't setup properly\n requests.RequestException: if the response return code is not ok\n\n Returns:\n list: OCS OLM Operator tags\n\n \"\"\"\n try:\n quay_access_token = load_auth_config()[\"quay\"][\"access_token\"]\n except (KeyError, TypeError):\n log.error(\n \"Unable to retrieve the access token for quay, please refer to \"\n f\"the getting started guide ({constants.AUTH_CONFIG_DOCS}) \"\n \"to properly setup your authentication configuration\"\n )\n raise\n headers = {\"Authorization\": f\"Bearer {quay_access_token}\"}\n image = \"ocs-registry\"\n try:\n ocs_version = float(config.ENV_DATA.get(\"ocs_version\"))\n if ocs_version < 4.5:\n image = \"ocs-olm-operator\"\n except (ValueError, TypeError):\n log.warning(\"Invalid ocs_version given, defaulting to ocs-registry image\")\n pass\n all_tags = []\n page = 1\n while True:\n log.info(f\"Retrieving OCS OLM Operator tags (limit {limit}, page {page})\")\n resp = requests.get(\n constants.OPERATOR_CS_QUAY_API_QUERY.format(\n tag_limit=limit,\n image=image,\n page=page,\n ),\n headers=headers,\n )\n if not resp.ok:\n raise requests.RequestException(resp.json())\n tags = resp.json()[\"tags\"]\n if len(tags) == 0:\n log.info(\"No more tags to retrieve\")\n break\n log.debug(tags)\n all_tags.extend(tags)\n page += 1\n return all_tags\n\n\ndef check_if_executable_in_path(exec_name):\n \"\"\"\n Checks whether an executable can be found in the $PATH\n\n Args:\n exec_name: Name of executable to look for\n\n Returns:\n Boolean: Whether the executable was found\n\n \"\"\"\n return which(exec_name) is not None\n\n\ndef upload_file(server, localpath, remotepath, user=None, password=None, key_file=None):\n \"\"\"\n Upload a file to remote server\n\n Args:\n server (str): Name of the server to upload\n localpath (str): Local file to upload\n remotepath (str): Target path on the remote server. 
filename should be included\n user (str): User to use for the remote connection\n\n \"\"\"\n if not user:\n user = \"root\"\n try:\n ssh = SSHClient()\n ssh.set_missing_host_key_policy(AutoAddPolicy())\n if password:\n ssh.connect(hostname=server, username=user, password=password)\n else:\n log.info(key_file)\n ssh.connect(hostname=server, username=user, key_filename=key_file)\n sftp = ssh.open_sftp()\n log.info(f\"uploading {localpath} to {user}@{server}:{remotepath}\")\n sftp.put(localpath, remotepath)\n sftp.close()\n ssh.close()\n except AuthenticationException as authException:\n log.error(f\"Authentication failed: {authException}\")\n raise authException\n except SSHException as sshException:\n log.error(f\"SSH connection failed: {sshException}\")\n raise sshException\n\n\ndef read_file_as_str(filepath):\n \"\"\"\n Reads the file content\n\n Args:\n filepath (str): File to read\n\n Returns:\n str : File contents in string\n\n \"\"\"\n with open(rf\"{filepath}\") as fd:\n content = fd.read()\n return content\n\n\ndef replace_content_in_file(file, old, new, match_and_replace_line=False):\n \"\"\"\n Replaces contents in file, if old value is not found, it adds\n new value to the file\n\n Args:\n file (str): Name of the file in which contents will be replaced\n old (str): Data to search for\n new (str): Data to replace the old value\n match_and_replace_line (bool): If True, it will match a line if\n `old` pattern is found in the line. The whole line will be replaced\n with `new` content.\n Otherwise it will replace only `old` string with `new` string but\n the rest of the line will be intact. This is the default option.\n\n \"\"\"\n # Read the file\n with open(rf\"{file}\", \"r\") as fd:\n file_data = [line.rstrip(\"\\n\") for line in fd.readlines()]\n\n if match_and_replace_line:\n # Replace the whole line with `new` string if the line contains `old`\n # string pattern.\n file_data = [new if old in line else line for line in file_data]\n else:\n # Replace the old string by new\n file_data = [\n line.replace(old, new) if old in line else line for line in file_data\n ]\n updated_data = [line for line in file_data if new in line]\n # In case the old pattern wasn't found it will be added as first line\n if not updated_data:\n file_data.insert(0, new)\n file_data = [f\"{line}\\n\" for line in file_data]\n # Write the file out again\n with open(rf\"{file}\", \"w\") as fd:\n fd.writelines(file_data)\n\n\n@retry((CommandFailed), tries=100, delay=10, backoff=1)\ndef wait_for_co(operator):\n \"\"\"\n Waits for ClusterOperator to created\n\n Args:\n operator (str): Name of the ClusterOperator\n\n \"\"\"\n from ocs_ci.ocs.ocp import OCP\n\n ocp = OCP(kind=\"ClusterOperator\")\n ocp.get(operator)\n\n\ndef censor_values(data_to_censor):\n \"\"\"\n This function censor string and numeric values in dictionary based on\n keys that match pattern defined in config_keys_patterns_to_censor in\n constants. 
It is performed recursively for nested dictionaries.\n\n Args:\n data_to_censor (dict): Data to censor.\n\n Returns:\n dict: filtered data\n\n \"\"\"\n for key in data_to_censor:\n if isinstance(data_to_censor[key], dict):\n censor_values(data_to_censor[key])\n elif isinstance(data_to_censor[key], (str, int, float)):\n for pattern in constants.config_keys_patterns_to_censor:\n if pattern in key.lower():\n data_to_censor[key] = \"*\" * 5\n return data_to_censor\n\n\ndef dump_config_to_file(file_path):\n \"\"\"\n Dump the config to the yaml file with censored secret values.\n\n Args:\n file_path (str): Path to file where to write the configuration.\n\n \"\"\"\n config_copy = deepcopy(config.to_dict())\n censor_values(config_copy)\n with open(file_path, \"w+\") as fs:\n yaml.safe_dump(config_copy, fs)\n\n\ndef create_rhelpod(namespace, pod_name, timeout=300):\n \"\"\"\n Creates the RHEL pod\n\n Args:\n namespace (str): Namespace to create RHEL pod\n pod_name (str): Pod name\n timeout (int): wait time for RHEL pod to be in Running state\n\n Returns:\n pod: Pod instance for RHEL\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.helpers import helpers\n\n rhelpod_obj = helpers.create_pod(\n namespace=namespace,\n pod_name=pod_name,\n pod_dict_path=constants.RHEL_7_7_POD_YAML,\n )\n helpers.wait_for_resource_state(rhelpod_obj, constants.STATUS_RUNNING, timeout)\n return rhelpod_obj\n\n\ndef check_timeout_reached(start_time, timeout, err_msg=None):\n \"\"\"\n Check if timeout reached and if so raise the exception.\n\n Args:\n start_time (time): Star time of the operation.\n timeout (int): Timeout in seconds.\n err_msg (str): Error message for the exception.\n\n Raises:\n TimeoutException: In case the timeout reached.\n\n \"\"\"\n msg = f\"Timeout {timeout} reached!\"\n if err_msg:\n msg += \" Error: {err_msg}\"\n\n if timeout < (time.time() - start_time):\n raise TimeoutException(msg)\n\n\ndef convert_yaml2tfvars(yaml):\n \"\"\"\n Converts yaml file to tfvars. 
It creates the tfvars with the\n same filename in the required format which is used for deployment.\n\n Args:\n yaml (str): File path to yaml\n\n Returns:\n str: File path to tfvars\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.utility.templating import load_yaml\n\n data = load_yaml(yaml)\n tfvars_file = os.path.splitext(yaml)[0]\n log.debug(f\"Converting {yaml} to {tfvars_file}\")\n with open(tfvars_file, \"w+\") as fd:\n for key, val in data.items():\n if key == \"control_plane_ignition\":\n fd.write(\"control_plane_ignition = <<END_OF_MASTER_IGNITION\\n\")\n fd.write(f\"{val}\\n\")\n fd.write(\"END_OF_MASTER_IGNITION\\n\")\n continue\n\n if key == \"compute_ignition\":\n fd.write(\"compute_ignition = <<END_OF_WORKER_IGNITION\\n\")\n fd.write(f\"{val}\\n\")\n fd.write(\"END_OF_WORKER_IGNITION\\n\")\n continue\n\n if key == \"vm_dns_addresses\":\n fd.write(f'vm_dns_addresses = [\"{val}\"]\\n')\n continue\n\n fd.write(key)\n fd.write(\" = \")\n fd.write('\"')\n fd.write(f\"{val}\")\n fd.write('\"\\n')\n\n return tfvars_file\n\n\ndef remove_keys_from_tf_variable_file(tf_file, keys):\n \"\"\"\n Removes the keys from the tf files and convert to json format\n\n Args:\n tf_file (str): path to tf file\n keys (list): list of keys to remove\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.utility.templating import dump_data_to_json\n\n with open(tf_file, \"r\") as fd:\n obj = hcl2.load(fd)\n for key in keys:\n obj[\"variable\"].pop(key)\n\n dump_data_to_json(obj, f\"{tf_file}.json\")\n os.rename(tf_file, f\"{tf_file}.backup\")\n\n\ndef get_kubeadmin_password():\n filename = os.path.join(\n config.ENV_DATA[\"cluster_path\"], config.RUN[\"password_location\"]\n )\n with open(filename) as f:\n return f.read()\n\n\ndef get_infra_id(cluster_path):\n \"\"\"\n Get infraID from metadata.json in given cluster_path\n\n Args:\n cluster_path: path to cluster install directory\n\n Returns:\n str: metadata.json['infraID']\n\n \"\"\"\n metadata_file = os.path.join(cluster_path, \"metadata.json\")\n with open(metadata_file) as f:\n metadata = json.load(f)\n return metadata[\"infraID\"]\n\n\ndef get_cluster_name(cluster_path):\n \"\"\"\n Get clusterName from metadata.json in given cluster_path\n\n Args:\n cluster_path: path to cluster install directory\n\n Returns:\n str: metadata.json['clusterName']\n\n \"\"\"\n metadata_file = os.path.join(cluster_path, \"metadata.json\")\n with open(metadata_file) as f:\n metadata = json.load(f)\n return metadata[\"clusterName\"]\n\n\ndef skipif_ocp_version(expressions):\n \"\"\"\n This function evaluates the condition for test skip\n based on expression\n\n Args:\n expressions (str OR list): condition for which we need to check,\n eg: A single expression string '>=4.2' OR\n A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']\n\n Return:\n 'True' if test needs to be skipped else 'False'\n\n \"\"\"\n skip_this = True\n ocp_version = get_running_ocp_version()\n expr_list = [expressions] if isinstance(expressions, str) else expressions\n for expr in expr_list:\n comparision_str = ocp_version + expr\n skip_this = skip_this and eval(comparision_str)\n # skip_this will be either True or False after eval\n return skip_this\n\n\ndef skipif_ocs_version(expressions):\n \"\"\"\n This function evaluates the condition for test skip\n based on expression\n\n Args:\n expressions (str OR list): condition for which we need to check,\n eg: A single expression string '>=4.2' OR\n A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']\n\n 
Return:\n 'True' if test needs to be skipped else 'False'\n \"\"\"\n expr_list = [expressions] if isinstance(expressions, str) else expressions\n return any(eval(config.ENV_DATA[\"ocs_version\"] + expr) for expr in expr_list)\n\n\ndef skipif_ui_not_support(ui_test):\n \"\"\"\n This function evaluates the condition for ui test skip\n based on ui_test expression\n\n Args:\n ui_test (str): condition for which we need to check,\n\n Return:\n 'True' if test needs to be skipped else 'False'\n\n \"\"\"\n from ocs_ci.ocs.ui.views import locators\n\n ocp_version = get_running_ocp_version()\n if (\n config.ENV_DATA[\"platform\"].lower() == constants.IBMCLOUD_PLATFORM\n or config.ENV_DATA[\"platform\"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM\n or config.ENV_DATA[\"platform\"].lower() == constants.ROSA_PLATFORM\n ):\n return True\n try:\n locators[ocp_version][ui_test]\n except KeyError:\n return True\n return False\n\n\ndef get_ocs_version_from_image(image):\n \"\"\"\n Parse major.minor version from OCS image tag.\n\n Args:\n image (str): image in format url:tag\n\n Returns\n str: Version in x.y format\n\n Raises:\n ValueError: In case of the tag which we cannot parse to version.\n\n \"\"\"\n try:\n version = image.rsplit(\":\", 1)[1].lstrip(\"latest-\").lstrip(\"stable-\")\n version = Version.coerce(version)\n return \"{major}.{minor}\".format(major=version.major, minor=version.minor)\n except ValueError:\n log.error(f\"The version: {version} couldn't be parsed!\")\n raise\n\n\ndef get_available_ocp_versions(channel):\n \"\"\"\n Find all available OCP versions for specific channel.\n\n Args:\n channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)\n\n Returns\n list: Sorted list with OCP versions for specified channel.\n\n \"\"\"\n headers = {\"Accept\": \"application/json\"}\n req = requests.get(\n constants.OPENSHIFT_UPGRADE_INFO_API.format(channel=channel), headers=headers\n )\n data = req.json()\n versions = [Version(node[\"version\"]) for node in data[\"nodes\"]]\n versions.sort()\n return versions\n\n\ndef get_latest_ocp_version(channel, index=-1):\n \"\"\"\n Find latest OCP version for specific channel.\n\n Args:\n channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)\n index (int): Index to get from all available versions list\n e.g. default -1 is latest version (version[-1]). 
If you want to get\n previous version pass index -2 and so on.\n\n Returns\n str: Latest OCP version for specified channel.\n\n \"\"\"\n versions = get_available_ocp_versions(channel)\n return str(versions[index])\n\n\ndef load_config_file(config_file):\n \"\"\"\n Loads config file to the ocs-ci config\n\n Args:\n config_file (str): Path to yaml config file.\n\n Raises:\n FileNotFoundError: In the case the config file not found.\n\n \"\"\"\n config_file = os.path.expanduser(config_file)\n assert os.path.exists(config_file), f\"Config file {config_file} doesn't exist!\"\n with open(os.path.abspath(os.path.expanduser(config_file)), \"r\") as file_stream:\n custom_config_data = yaml.safe_load(file_stream)\n config.update(custom_config_data)\n\n\ndef destroy_cluster(installer, cluster_path, log_level=\"DEBUG\"):\n \"\"\"\n Destroy OCP cluster specific\n\n\n Args:\n installer (str): The path to the installer binary\n cluster_path (str): The path of the cluster\n log_level (str): log level openshift-installer (default: DEBUG)\n\n \"\"\"\n destroy_cmd = (\n f\"{installer} destroy cluster \"\n f\"--dir {cluster_path} \"\n f\"--log-level {log_level}\"\n )\n\n try:\n # Execute destroy cluster using OpenShift installer\n log.info(f\"Destroying cluster defined in {cluster_path}\")\n run_cmd(destroy_cmd, timeout=1200)\n except CommandFailed:\n log.error(traceback.format_exc())\n raise\n except Exception:\n log.error(traceback.format_exc())\n\n\ndef config_to_string(config):\n \"\"\"\n Convert ConfigParser object to string in INI format.\n\n Args:\n config (obj): ConfigParser object\n\n Returns:\n str: Config in one string\n\n \"\"\"\n strio = io.StringIO()\n config.write(strio, space_around_delimiters=False)\n return strio.getvalue()\n\n\nclass AZInfo(object):\n \"\"\"\n A class for getting different az numbers across calls\n \"\"\"\n\n zone_number = 0\n\n def get_zone_number(self):\n \"\"\"\n Increment current zone_number and perform modulus op\n to roll-on to next available number\n\n Returns:\n int: zone number index\n \"\"\"\n prev = AZInfo.zone_number\n AZInfo.zone_number += 1\n AZInfo.zone_number %= get_az_count()\n return prev\n\n\ndef convert_device_size(unformatted_size, units_to_covert_to):\n \"\"\"\n Convert a string representing a size to an int according to the given units\n to convert to\n\n Args:\n unformatted_size (str): The size to convert (i.e, '1Gi'/'100Mi')\n units_to_covert_to (str): The units to convert the size to (i.e, TB/GB/MB)\n\n Returns:\n int: The converted size\n\n \"\"\"\n units = unformatted_size[-2:]\n abso = int(unformatted_size[:-2])\n conversion = {\n \"TB\": {\"Ti\": abso, \"Gi\": abso / 1000, \"Mi\": abso / 1e6, \"Ki\": abso / 1e9},\n \"GB\": {\"Ti\": abso * 1000, \"Gi\": abso, \"Mi\": abso / 1000, \"Ki\": abso / 1e6},\n \"MB\": {\"Ti\": abso * 1e6, \"Gi\": abso * 1000, \"Mi\": abso, \"Ki\": abso / 1000},\n \"KB\": {\"Ti\": abso * 1e9, \"Gi\": abso * 1e6, \"Mi\": abso * 1000, \"Ki\": abso},\n \"B\": {\"Ti\": abso * 1e12, \"Gi\": abso * 1e9, \"Mi\": abso * 1e6, \"Ki\": abso * 1000},\n }\n return conversion[units_to_covert_to][units]\n\n\ndef prepare_customized_pull_secret(images=None):\n \"\"\"\n Prepare customized pull-secret containing auth section related to given\n image(s). 
If image(s) not defined or no related section is found, it will\n use whole content of pull-secret.\n\n Args:\n images (str, list): image (or images) to match with auth section\n\n Returns:\n NamedTemporaryFile: prepared pull-secret\n\n \"\"\"\n log.debug(f\"Prepare customized pull-secret for images: {images}\")\n if type(images) == str:\n images = [images]\n # load pull-secret file to pull_secret dict\n pull_secret_path = os.path.join(constants.TOP_DIR, \"data\", \"pull-secret\")\n with open(pull_secret_path) as pull_secret_fo:\n pull_secret = json.load(pull_secret_fo)\n\n authfile_content = {\"auths\": {}}\n # if images defined, try to find auth section related to specified images\n if images:\n for image in images:\n # find all auths which might be related to the specified image\n tmp_auths = [auth for auth in pull_secret[\"auths\"] if auth in image]\n # get the most specific auth for particular image\n tmp_auths = sorted(tmp_auths, key=len, reverse=True)\n if tmp_auths:\n # if there is match to particular auth, prepare authfile just with the\n # matching auth\n auth = tmp_auths[0]\n # as key use only server name, without namespace\n authfile_content[\"auths\"][auth.split(\"/\", 1)[0]] = pull_secret[\"auths\"][\n auth\n ]\n\n if not authfile_content[\"auths\"]:\n authfile_content = pull_secret\n\n # create temporary auth file\n authfile_fo = NamedTemporaryFile(mode=\"w\", prefix=\"authfile_\")\n json.dump(authfile_content, authfile_fo)\n # ensure the content will be saved into the file\n authfile_fo.flush()\n return authfile_fo\n\n\ndef inspect_image(image, authfile_fo):\n \"\"\"\n Inspect image\n\n Args:\n image (str): image to inspect\n authfile_fo (NamedTemporaryFile): pull-secret required for pulling the given image\n\n Returns:\n dict: json object of the inspected image\n\n \"\"\"\n # pull original image (to be able to inspect it)\n exec_cmd(f\"podman image pull {image} --authfile {authfile_fo.name}\")\n # inspect the image\n cmd_result = exec_cmd(f\"podman image inspect {image}\")\n image_inspect = json.loads(cmd_result.stdout)\n return image_inspect\n\n\ndef get_image_with_digest(image):\n \"\"\"\n Return image with sha256 digest for usage in disconnected environment\n\n Args:\n image (str): image\n\n Raises:\n UnexpectedImage: In case the image information is unexpected\n\n Returns:\n str: image with sha256 digest specification\n\n \"\"\"\n if \"@sha256:\" in image:\n return image\n with prepare_customized_pull_secret(image) as authfile_fo:\n image_inspect = inspect_image(image, authfile_fo)\n\n # we expect, that 'Digest' will match one of the images in 'RepoDigests',\n # if not, raise UnexpectedImage\n for image in image_inspect[0][\"RepoDigests\"]:\n if image_inspect[0][\"Digest\"] in image:\n return image\n else:\n raise UnexpectedImage(\n f\"Image digest ({image_inspect[0]['Digest']}) doesn't match with \"\n f\"any image from RepoDigests ({image_inspect[0]['RepoDigests']}).\"\n )\n\n\ndef login_to_mirror_registry(authfile):\n \"\"\"\n Login to mirror registry\n\n Args:\n authfile (str): authfile (pull-secret) path\n\n \"\"\"\n # load cluster info\n load_cluster_info()\n\n mirror_registry = config.DEPLOYMENT[\"mirror_registry\"]\n mirror_registry_user = config.DEPLOYMENT[\"mirror_registry_user\"]\n mirror_registry_password = config.DEPLOYMENT[\"mirror_registry_password\"]\n login_cmd = (\n f\"podman login --authfile {authfile} \"\n f\"{mirror_registry} -u {mirror_registry_user} \"\n f\"-p {mirror_registry_password} --tls-verify=false\"\n )\n exec_cmd(login_cmd, 
(mirror_registry_user, mirror_registry_password))\n\n\ndef mirror_image(image):\n \"\"\"\n Mirror image to mirror image registry.\n\n Args:\n image (str): image to be mirrored, can be defined just with name or\n with full url, with or without tag or digest\n\n Returns:\n str: the mirrored image link\n\n \"\"\"\n with prepare_customized_pull_secret(image) as authfile_fo:\n # login to mirror registry\n login_to_mirror_registry(authfile_fo.name)\n\n # if there is any tag specified, use it in the full image url,\n # otherwise use url with digest\n image_inspect = inspect_image(image, authfile_fo)\n if image_inspect[0].get(\"RepoTags\"):\n orig_image_full = image_inspect[0][\"RepoTags\"][0]\n else:\n orig_image_full = image_inspect[0][\"RepoDigests\"][0]\n # prepare mirrored image url\n mirror_registry = config.DEPLOYMENT[\"mirror_registry\"]\n mirrored_image = mirror_registry + re.sub(r\"^[^/]*\", \"\", orig_image_full)\n # mirror the image\n log.info(\n f\"Mirroring image '{image}' ('{orig_image_full}') to '{mirrored_image}'\"\n )\n exec_cmd(\n f\"oc image mirror --insecure --registry-config\"\n f\" {authfile_fo.name} {orig_image_full} {mirrored_image}\"\n )\n return mirrored_image\n\n\ndef update_container_with_mirrored_image(job_pod_dict):\n \"\"\"\n Update Job or Pod configuration dict with mirrored image (required for\n disconnected installation).\n\n Args:\n job_pod_dict (dict): dictionary with Job or Pod configuration\n\n Returns:\n dict: for disconnected installation, returns updated Job or Pod dict,\n for normal installation return unchanged job_pod_dict\n\n \"\"\"\n if config.DEPLOYMENT.get(\"disconnected\"):\n if \"containers\" in job_pod_dict[\"spec\"]:\n container = job_pod_dict[\"spec\"][\"containers\"][0]\n else:\n container = job_pod_dict[\"spec\"][\"template\"][\"spec\"][\"containers\"][0]\n container[\"image\"] = mirror_image(container[\"image\"])\n return job_pod_dict\n\n\ndef get_trim_mean(values, percentage=20):\n \"\"\"\n Get the trimmed mean of a list of values.\n Explanation: This function finds the arithmetic mean of given values,\n ignoring values outside the given limits.\n\n Args:\n values (list): The list of values\n percentage (int): The percentage to be trimmed\n\n Returns:\n float: Trimmed mean. In case trimmed mean calculation fails,\n the regular mean average is returned\n\n \"\"\"\n lower_limit = scoreatpercentile(values, percentage)\n upper_limit = scoreatpercentile(values, 100 - percentage)\n try:\n return tmean(values, limits=(lower_limit, upper_limit))\n except ValueError:\n log.warning(\n f\"Failed to calculate the trimmed mean of {values}. 
The \"\n f\"Regular mean average will be calculated instead\"\n )\n return sum(values) / len(values)\n\n\ndef set_selinux_permissions(workers=None):\n \"\"\"\n Workaround for #1777384 - enable container_use_cephfs on RHEL workers\n Ticket: RHSTOR-787, see more details in the issue: #1151\n\n Args:\n workers (list): List of worker nodes to set selinux permissions\n\n \"\"\"\n log.info(\"Running WA for ticket: RHSTOR-787\")\n from ocs_ci.ocs import ocp\n\n ocp_obj = ocp.OCP()\n cmd = [\"/usr/sbin/setsebool -P container_use_cephfs on\"]\n cmd_list = cmd.copy()\n if not workers:\n from ocs_ci.ocs.node import get_typed_worker_nodes\n\n worker_nodes = get_typed_worker_nodes(os_id=\"rhel\")\n else:\n worker_nodes = workers\n\n for worker in worker_nodes:\n node = worker.get().get(\"metadata\").get(\"name\") if not workers else worker\n log.info(f\"{node} is a RHEL based worker - applying '{cmd_list}'\")\n if config.ENV_DATA[\"platform\"] == constants.IBMCLOUD_PLATFORM:\n retry(CommandFailed, tries=10, delay=3, backoff=2)(\n ocp_obj.exec_oc_debug_cmd\n )(node=node, cmd_list=cmd_list)\n else:\n retry(CommandFailed)(ocp_obj.exec_oc_debug_cmd)(\n node=node, cmd_list=cmd_list\n )\n\n\ndef set_registry_to_managed_state():\n \"\"\"\n In order to be able to deploy from stage we need to change\n image registry config to Managed state.\n More described in BZs:\n https://bugzilla.redhat.com/show_bug.cgi?id=1806593\n https://bugzilla.redhat.com/show_bug.cgi?id=1807471#c3\n We need to change to managed state as described here:\n https://github.com/red-hat-storage/ocs-ci/issues/1436\n So this is not suppose to be deleted as WA case we really need to do\n this operation for OCS deployment as was originally done here:\n https://github.com/red-hat-storage/ocs-ci/pull/1437\n Currently it has to be moved here to enable CA certificate to be\n properly propagated for the stage deployment as mentioned in BZ.\n \"\"\"\n # In RHV platform config is already set to Managed and storage pre-configured\n on_prem_platform_to_exclude = [constants.RHV_PLATFORM]\n platform_list_to_exclude = constants.CLOUD_PLATFORMS + on_prem_platform_to_exclude\n if config.ENV_DATA[\"platform\"] not in platform_list_to_exclude:\n cluster_config = yaml.safe_load(\n exec_cmd(f\"oc get {constants.IMAGE_REGISTRY_CONFIG} -o yaml\").stdout\n )\n if \"emptyDir\" not in cluster_config[\"spec\"].get(\"storage\", {}).keys():\n run_cmd(\n f\"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p \"\n f'\\'{{\"spec\":{{\"storage\": {{\"emptyDir\":{{}}}}}}}}\\''\n )\n if cluster_config[\"spec\"].get(\"managementState\") != \"Managed\":\n run_cmd(\n f\"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p \"\n f'\\'{{\"spec\":{{\"managementState\": \"Managed\"}}}}\\''\n )\n\n\ndef add_stage_cert():\n \"\"\"\n Deploy stage certificate to the cluster.\n \"\"\"\n log.info(\"Create configmap stage-registry-config with stage CA.\")\n run_cmd(\n f\"oc -n openshift-config create configmap stage-registry-config\"\n f\" --from-file=registry.stage.redhat.io={constants.STAGE_CA_FILE}\"\n )\n\n log.info(\"Add stage-registry-config to additionalTrustedCA.\")\n additional_trusted_ca_patch = (\n '{\"spec\":{\"additionalTrustedCA\":{\"name\":\"stage-registry-config\"}}}'\n )\n run_cmd(\n f\"oc patch image.config.openshift.io cluster --type=merge\"\n f\" -p '{additional_trusted_ca_patch}'\"\n )\n\n\ndef get_terraform(version=None, bin_dir=None):\n \"\"\"\n Downloads the terraform binary\n\n Args:\n version (str): Version of the terraform to download\n bin_dir (str): 
Path to bin directory (default: config.RUN['bin_dir'])\n\n Returns:\n str: Path to the terraform binary\n\n \"\"\"\n if platform.system() == \"Darwin\":\n os_type = \"darwin\"\n elif platform.system() == \"Linux\":\n os_type = \"linux\"\n else:\n raise UnsupportedOSType\n\n version = version or config.DEPLOYMENT[\"terraform_version\"]\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n terraform_zip_file = f\"terraform_{version}_{os_type}_amd64.zip\"\n terraform_filename = \"terraform\"\n terraform_binary_path = os.path.join(bin_dir, terraform_filename)\n log.info(f\"Downloading terraform version {version}\")\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = f\"https://releases.hashicorp.com/terraform/{version}/\" f\"{terraform_zip_file}\"\n download_file(url, terraform_zip_file)\n run_cmd(f\"unzip -o {terraform_zip_file}\")\n delete_file(terraform_zip_file)\n # return to the previous working directory\n os.chdir(previous_dir)\n\n return terraform_binary_path\n\n\ndef get_terraform_ignition_provider(terraform_dir, version=None):\n \"\"\"\n Downloads the terraform ignition provider\n\n Args:\n terraform_dir (str): Path to terraform working directory\n version (str): Version of the terraform ignition provider to download\n\n \"\"\"\n version = version or constants.TERRAFORM_IGNITION_PROVIDER_VERSION\n terraform_ignition_provider_zip_file = (\n f\"terraform-provider-ignition-{version}-linux-amd64.tar.gz\"\n )\n terraform_ignition_provider_dir = (\n f\"terraform-provider-ignition-{version}-linux-amd64\"\n )\n terraform_plugins_path = \".terraform/plugins/linux_amd64/\"\n log.info(f\"Downloading terraform ignition proivider version {version}\")\n previous_dir = os.getcwd()\n os.chdir(terraform_dir)\n url = (\n \"https://github.com/community-terraform-providers/\"\n f\"terraform-provider-ignition/releases/download/{version}/\"\n f\"{terraform_ignition_provider_zip_file}\"\n )\n\n # Download and untar\n download_file(url, terraform_ignition_provider_zip_file)\n run_cmd(f\"tar xzf {terraform_ignition_provider_zip_file}\")\n\n # move the ignition provider binary to plugins path\n create_directory_path(terraform_plugins_path)\n move(\n f\"{terraform_ignition_provider_dir}/terraform-provider-ignition\",\n terraform_plugins_path,\n )\n\n # delete the downloaded files\n delete_file(terraform_ignition_provider_zip_file)\n delete_dir(terraform_ignition_provider_dir)\n\n # return to the previous working directory\n os.chdir(previous_dir)\n\n\ndef get_module_ip(terraform_state_file, module):\n \"\"\"\n Gets the node IP from terraform.tfstate file\n\n Args:\n terraform_state_file (str): Path to terraform state file\n module (str): Module name in terraform.tfstate file\n e.g: constants.LOAD_BALANCER_MODULE\n\n Returns:\n list: IP of the node\n\n \"\"\"\n ips = []\n with open(terraform_state_file) as fd:\n obj = json.loads(fd.read())\n\n if config.ENV_DATA.get(\"folder_structure\"):\n resources = obj[\"resources\"]\n log.debug(f\"Extracting module information for {module}\")\n log.debug(f\"Resource in {terraform_state_file}: {resources}\")\n for resource in resources:\n if resource.get(\"module\") == module and resource.get(\"mode\") == \"data\":\n for each_resource in resource[\"instances\"]:\n resource_body = each_resource[\"attributes\"][\"body\"]\n ips.append(resource_body.split('\"')[3])\n else:\n modules = obj[\"modules\"]\n target_module = module.split(\"_\")[1]\n log.debug(f\"Extracting module information for {module}\")\n log.debug(f\"Modules in {terraform_state_file}: 
{modules}\")\n for each_module in modules:\n if target_module in each_module[\"path\"]:\n return each_module[\"outputs\"][\"ip_addresses\"][\"value\"]\n\n return ips\n\n\ndef set_aws_region(region=None):\n \"\"\"\n Exports environment variable AWS_REGION\n\n Args:\n region (str): AWS region to export\n\n \"\"\"\n log.debug(\"Exporting environment variable AWS_REGION\")\n region = region or config.ENV_DATA[\"region\"]\n os.environ[\"AWS_REGION\"] = region\n\n\ndef get_system_architecture():\n \"\"\"\n Get output from 'uname -m' command run on first worker node.\n\n Returns:\n str: Architecture of system\n\n \"\"\"\n from ocs_ci.ocs.node import get_nodes\n\n log.info(\"Checking architecture of system\")\n node = get_nodes(node_type=constants.WORKER_MACHINE)[0]\n return node.ocp.exec_oc_debug_cmd(node.data[\"metadata\"][\"name\"], [\"uname -m\"])\n\n\ndef wait_for_machineconfigpool_status(node_type, timeout=900):\n \"\"\"\n Check for Machineconfigpool status\n\n Args:\n node_type (str): The node type to check machineconfigpool\n status is updated.\n e.g: worker, master and all if we want to check for all nodes\n timeout (int): Time in seconds to wait\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.ocs import ocp\n\n node_types = [node_type]\n if node_type == \"all\":\n node_types = [f\"{constants.WORKER_MACHINE}\", f\"{constants.MASTER_MACHINE}\"]\n\n for role in node_types:\n log.info(f\"Checking machineconfigpool status for {role} nodes\")\n ocp_obj = ocp.OCP(kind=constants.MACHINECONFIGPOOL, resource_name=role)\n machine_count = ocp_obj.get()[\"status\"][\"machineCount\"]\n\n assert ocp_obj.wait_for_resource(\n condition=str(machine_count),\n column=\"READYMACHINECOUNT\",\n timeout=timeout,\n sleep=5,\n )\n\n\ndef configure_chrony_and_wait_for_machineconfig_status(\n node_type=constants.WORKER_MACHINE, timeout=900\n):\n \"\"\"\n Configure chrony on the nodes\n\n Args:\n node_type (str): The node type to configure chrony\n e.g: worker, master and all if we want to configure on all nodes\n timeout (int): Time in seconds to wait\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.utility.templating import load_yaml\n from ocs_ci.ocs.resources.ocs import OCS\n\n chrony_data = load_yaml(constants.NTP_CHRONY_CONF)\n\n node_types = [node_type]\n if node_type == \"all\":\n node_types = [f\"{constants.WORKER_MACHINE}\", f\"{constants.MASTER_MACHINE}\"]\n\n for role in node_types:\n log.info(f\"Creating chrony for {role} nodes\")\n chrony_data[\"metadata\"][\"labels\"][\n \"machineconfiguration.openshift.io/role\"\n ] = role\n chrony_data[\"metadata\"][\"name\"] = f\"{role}-chrony-configuration\"\n chrony_obj = OCS(**chrony_data)\n chrony_obj.create()\n\n # sleep here to start update machineconfigpool status\n time.sleep(60)\n wait_for_machineconfigpool_status(role, timeout=timeout)\n\n\ndef modify_csv(csv, replace_from, replace_to):\n \"\"\"\n Modify the CSV\n\n Args:\n csv (str): The CSV name\n replace_from (str): The pattern to replace from in the CSV\n replace_to (str): The pattern to replace to in the CSV\n\n \"\"\"\n data = (\n f\"oc -n openshift-storage get csv {csv} -o yaml | sed\"\n f\" 's,{replace_from},{replace_to},g' | oc replace -f -\"\n )\n log.info(\n f\"CSV {csv} will be modified: {replace_from} will be replaced \"\n f\"with {replace_to}.\\nThe command that will be used for that is:\\n{data}\"\n )\n\n temp_file = NamedTemporaryFile(mode=\"w+\", prefix=\"csv_modification\", suffix=\".sh\")\n\n with open(temp_file.name, \"w\") as t_file:\n 
t_file.writelines(data)\n\n run_cmd(f\"chmod 777 {temp_file.name}\")\n run_cmd(f\"sh {temp_file.name}\")\n\n\ndef check_for_rhcos_images(url):\n \"\"\"\n Check for rhcos images are present in given location\n\n Args:\n url (str): rhcos_images url\n Returns:\n (bool): True if images present if not false\n\n \"\"\"\n r = requests.head(url)\n return r.status_code == requests.codes.ok\n\n\ndef download_file_from_git_repo(git_repo_url, path_to_file_in_git, filename):\n \"\"\"\n Download a file from a specified git repository\n\n Args:\n git_repo_url (str): The git repository url\n path_to_file_in_git (str): Path to the file to download\n in git repository\n filename (str): Name of the file to write the download to\n\n \"\"\"\n log.debug(\n f\"Download file '{path_to_file_in_git}' from \"\n f\"git repository {git_repo_url} to local file '{filename}'.\"\n )\n temp_dir = mkdtemp()\n git.Repo.clone_from(git_repo_url, temp_dir, branch=\"master\", depth=1)\n move(os.path.join(temp_dir, path_to_file_in_git), filename)\n rmtree(temp_dir)\n\n\ndef skipif_upgraded_from(version_list):\n \"\"\"\n This function evaluates the condition to skip a test if the cluster\n is upgraded from a particular OCS version\n\n Args:\n version_list (list): List of versions to check\n\n Return:\n (bool): True if test needs to be skipped else False\n\n \"\"\"\n try:\n from ocs_ci.ocs.resources.ocs import get_ocs_csv\n\n skip_this = False\n version_list = [version_list] if isinstance(version_list, str) else version_list\n ocs_csv = get_ocs_csv()\n csv_info = ocs_csv.get()\n prev_version = csv_info.get(\"spec\").get(\"replaces\", \"\")\n for version in version_list:\n if f\".v{version}\" in prev_version:\n skip_this = True\n break\n return skip_this\n except Exception as err:\n log.error(str(err))\n return False\n\n\ndef get_cluster_id(cluster_path):\n \"\"\"\n Get ClusterID from metadata.json in given cluster_path\n\n Args:\n cluster_path: path to cluster install directory\n\n Returns:\n str: metadata.json['clusterID']\n\n \"\"\"\n metadata_file = os.path.join(cluster_path, \"metadata.json\")\n with open(metadata_file) as f:\n metadata = json.load(f)\n return metadata[\"clusterID\"]\n\n\ndef get_running_cluster_id():\n \"\"\"\n Get cluster UUID\n Not relying on metadata.json as user sometimes want to run\n only with kubeconfig for some tests. For this function to work\n cluster has to be in running state\n\n Returns:\n str: cluster UUID\n\n \"\"\"\n cluster_id = run_cmd(\n \"oc get clusterversion version -o jsonpath='{.spec.clusterID}'\"\n )\n return cluster_id\n\n\ndef get_ocp_upgrade_history():\n \"\"\"\n Gets the OCP upgrade history for the cluster\n\n Returns:\n list: List of OCP upgrade paths. Latest version in the\n beginning of the list\n\n \"\"\"\n # importing here to avoid circular imports\n from ocs_ci.ocs.ocp import OCP\n\n ocp = OCP(kind=\"clusterversion\")\n cluster_version_info = ocp.get(\"version\")\n upgrade_history_info = cluster_version_info[\"status\"][\"history\"]\n upgrade_history = [each_upgrade[\"version\"] for each_upgrade in upgrade_history_info]\n return upgrade_history\n\n\ndef get_attr_chain(obj, attr_chain):\n \"\"\"\n Attempt to retrieve object attributes when uncertain about the existence of the attribute\n or a different attribute in a given attribute chain. If the retrieval fails, None is returned.\n The function can be used to retrieve a direct attribute, or a chain of attributes.\n i.e. 
- obj.attr_a, obj_attr_a.sub_attr\n\n Another example - trying to access \"sub_attr_b\" in object.attr.sub_attr_a.sub_attr_b -\n get_attr_chain(object, \"attr.sub_attr_a.sub_attr_b\")\n\n The function can be used to try and retrieve \"sub_attribute_b\" without an exception,\n even in cases where \"attr\" or \"sub_attr_a\" might not exist.\n In those cases, the function will return None.\n\n Args:\n obj: An object\n attr_chain (str): A string containing one attribute or several sub-attributes\n separated by dots (i.e. - \"attr.sub_attr_a.sub_attr_b\")\n\n Returns:\n The requested attribute if found, otherwise None\n \"\"\"\n return reduce(\n lambda _obj, _attr: getattr(_obj, _attr, None), attr_chain.split(\".\"), obj\n )\n\n\ndef get_default_if_keyval_empty(dictionary, key, default_val):\n \"\"\"\n if Key has an empty value OR key doesn't exist\n then return default value\n\n Args:\n dictionary (dict): Dictionary where we have to lookup\n key (str): key to lookup\n default_val (str): If key doesn't have value then return\n this default_val\n\n Returns:\n dictionary[key] if value is present else default_val\n\n \"\"\"\n if not dictionary.get(key):\n return default_val\n return dictionary.get(key)\n\n\ndef get_client_version(client_binary_path):\n \"\"\"\n Get version reported by `oc version`.\n\n Args:\n client_binary_path (str): path to `oc` binary\n\n Returns:\n str: version reported by `oc version`.\n None if the client does not exist at the provided path.\n\n \"\"\"\n if os.path.isfile(client_binary_path):\n cmd = f\"{client_binary_path} version --client -o json\"\n resp = exec_cmd(cmd)\n stdout = json.loads(resp.stdout.decode())\n return stdout[\"releaseClientVersion\"]\n\n\ndef clone_notify():\n \"\"\"\n Repository contains the source code of notify tool,\n which is a python3 based tool wrapped by a container\n used to configure Ceph Bucket Notifications\n\n Returns:\n notify_path (str): Path location of the notify code\n\n \"\"\"\n notify_dir = mkdtemp(prefix=\"notify_\")\n log.info(f\"cloning repo notify in {notify_dir}\")\n git_clone_cmd = f\"git clone {constants.RGW_KAFKA_NOTIFY}\"\n subprocess.run(git_clone_cmd, shell=True, cwd=notify_dir, check=True)\n notify_path = f\"{notify_dir}/notify/notify.py\"\n return notify_path\n\n\ndef add_chrony_to_ocp_deployment():\n \"\"\"\n Create and Add necessary chrony resources\n\n \"\"\"\n for role in [\"master\", \"worker\"]:\n log.info(f\"Creating and Adding Chrony file for {role}\")\n with open(constants.CHRONY_TEMPLATE) as file_stream:\n chrony_template_obj = yaml.safe_load(file_stream)\n chrony_template_obj[\"metadata\"][\"labels\"][\n \"machineconfiguration.openshift.io/role\"\n ] = role\n chrony_template_obj[\"metadata\"][\"name\"] = f\"99-{role}-chrony-configuration\"\n ignition_version = config.DEPLOYMENT[\"ignition_version\"]\n chrony_template_obj[\"spec\"][\"config\"][\"ignition\"][\"version\"] = ignition_version\n\n if Version.coerce(ignition_version) < Version.coerce(\"3.0\"):\n chrony_template_obj[\"spec\"][\"config\"][\"storage\"][\"files\"][0][\n \"filesystem\"\n ] = \"root\"\n\n chrony_template_str = yaml.safe_dump(chrony_template_obj)\n chrony_file = os.path.join(\n config.ENV_DATA[\"cluster_path\"],\n \"openshift\",\n f\"99-{role}-chrony-configuration.yaml\",\n )\n with open(chrony_file, \"w\") as f:\n f.write(chrony_template_str)\n\n\ndef enable_huge_pages():\n log.info(\"Enabling huge pages.\")\n exec_cmd(f\"oc apply -f {constants.HUGE_PAGES_TEMPLATE}\")\n time.sleep(10)\n log.info(\"Waiting for machine config will be 
applied with huge pages\")\n wait_for_machineconfigpool_status(node_type=constants.WORKER_MACHINE)\n" ]
[ [ "scipy.stats.scoreatpercentile", "scipy.stats.tmean" ] ]
dcronqvist/restberry-api
[ "35a2698ae946fc392e5e7d56dbc22b0719d6f5b6" ]
[ "api_v1/namespaces/ai.py" ]
[ "from datetime import datetime\nfrom flask import request\nfrom flask_restx import Resource, Namespace, fields, reqparse\nfrom api_v1 import privilege_required\nimport pandas as pd\nimport pickle\nfrom db import coll_accounts\n\napi = Namespace(\"ai\", path=\"/ai\", description=\"Endpoints utilizing some of my trained scikit models.\")\n\npost_model = api.model(\"accountant_payload\", {\n \"amount\": fields.Float(example=39.9, required=True, min=0),\n \"date_trans\": fields.Integer(example=round(datetime.now().timestamp()), required=True),\n \"desc\": fields.String(example=\"Transaction for stuff\", required=True),\n \"is_outcome\": fields.Boolean(example=True, required=True),\n \"is_swish\": fields.Boolean(example=False, required=True),\n \"account_names\": fields.Boolean(example=True, default=False, help=\"If true, account names will also be returned.\")\n})\n\naccountant_post_doc = \"\"\"\n### A model for predicting transaction accounts\n\nBy supplying only very little information about a transaction, this model will be able to quite accurately predict both which account the transaction's amount is going FROM, but also TO.\n\"\"\"\n\ndef get_known(desc):\n known_tech_stores = [\n \"webhall\",\n \"elgig\",\n \"clas ohl\",\n \"nintendo\",\n \"steam\",\n \"adobe\",\n \"blizzard\",\n \"komplett\",\n \"inet\",\n \"KJELL & CO\",\n \"Electrokit\",\n \"Billigtekn\",\n \"SLOJD \",\n \"DISCORD\",\n \"Proshop\",\n \"Miss Hosting\"\n ]\n known_grocery_stores = [\n \"coop\",\n \"ica\",\n \"willys\",\n \"hemköp\",\n \"wh götebo\",\n \"SAIGON\",\n \"matse\",\n \"HEMK@P\",\n \"tempo\"\n ]\n known_restaurants = [\n \"sanneg\",\n \"miss faj\",\n \"taco bar\",\n \"tugg\",\n \"max\",\n \"bruncho\",\n \"lucy\",\n \"pizza\",\n \"pizz\",\n \"hamburg\",\n \"foodora\",\n \"UBER *EATS\",\n \"frasses\",\n \"brodernas\",\n \"iZ *DATATEKNOLOG\",\n \"sush\",\n \"plankan\",\n \"dine\",\n \"O LEARYS\",\n \"john sco\",\n \"UBER * EATS\",\n \"taverna\",\n \"W.O.K\",\n \"mat ute\",\n \"restaurang\",\n \"äta ute\",\n \"åt ute\",\n \"restaurant\"\n ]\n known_snacks = [\n \"snacks\",\n \"fika\",\n \"godis\",\n \"glass\",\n \"klubba\",\n \"snickers\",\n \"selecta\",\n \"alltgodis\",\n \"alltigodis\",\n \"pressbyr\",\n \"condeco\",\n \"espresso\",\n \"pomona\",\n \"cafe\",\n \"too good to go\",\n \"7-ELEVEN\",\n \"CIRCLE K\"\n ] \n known_stuff = {\n 1: known_grocery_stores,\n 2: known_snacks,\n 3: known_restaurants,\n 4: known_tech_stores,\n 5: [\"västtrafik\", \"buss\", \"public transport\", \"spårvagn\", \"tunnelbana\", \"tbana\"],\n 6: [\"lyko\", \"salong\", \"levi\", \"zalando\"]\n } \n for known in known_stuff:\n if any([k.lower() in desc.lower() for k in known_stuff[known]]):\n return known\n return 0\n\nwith open(\"scikit-models/from_account_v1.ai\", \"rb\") as f:\n from_account_model = pickle.load(f)\n\nwith open(\"scikit-models/to_account_v1.ai\", \"rb\") as f:\n to_account_model = pickle.load(f)\n\[email protected](\"/accountant\")\nclass TransactionAccountsPredictor(Resource):\n @api.doc(description=accountant_post_doc)\n @api.expect(post_model, validate=True)\n def post(self):\n trans = api.payload\n df = pd.DataFrame()\n\n df[\"Transaktionsdag\"] = [trans[\"date_trans\"]]\n df[\"Belopp\"] = [trans[\"amount\"]]\n df[\"IsOutcome\"] = [trans[\"is_outcome\"]]\n df[\"IsSwish\"] = [trans[\"is_swish\"]]\n df[\"DayOfWeek\"] = [datetime.fromtimestamp(trans[\"date_trans\"]).weekday()]\n df[\"IsWeekend\"] = [datetime.fromtimestamp(trans[\"date_trans\"]) in [5,6]]\n df[\"Known\"] = [get_known(trans[\"desc\"])]\n\n 
predicted_from = from_account_model.predict(df)\n predicted_to = to_account_model.predict(df)\n\n trans[\"from_account\"] = [int(x) for x in list(predicted_from)][0]\n trans[\"to_account\"] = [int(x) for x in list(predicted_to)][0]\n\n if trans[\"account_names\"]:\n trans[\"from_account_info\"] = coll_accounts.find_one({ \"number\": trans[\"from_account\"], \"user\": \"dani\" }, { \"_id\": 0, \"user\": 0, \"number\": 0})\n trans[\"to_account_info\"] = coll_accounts.find_one({ \"number\": trans[\"to_account\"], \"user\": \"dani\" }, { \"_id\": 0, \"user\": 0, \"number\": 0})\n \n\n del trans[\"account_names\"]\n del trans[\"is_outcome\"]\n del trans[\"is_swish\"] \n\n return trans, 200\n" ]
[ [ "pandas.DataFrame" ] ]
mestagtx/deepimpute
[ "a6bb01f6d000d265557f7e681b10b9eaac458fdd" ]
[ "deepimpute/multinet.py" ]
[ "import os\nimport warnings\nimport tempfile\n\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import pearsonr\n\nimport tensorflow.keras as keras\nfrom keras import backend as K\nfrom keras.models import Model,model_from_json\nfrom keras.layers import Dense,Dropout,Input\nfrom keras.callbacks import EarlyStopping\nimport keras.losses\n\nimport tensorflow as tf\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndef get_distance_matrix(raw, n_pred=None):\n\n VMR = raw.std() / raw.mean()\n VMR[np.isinf(VMR)] = 0\n \n if n_pred is None:\n potential_pred = raw.columns[VMR > 0]\n else:\n print(\"Using {} predictors\".format(n_pred))\n potential_pred = VMR.sort_values(ascending=False).index[:n_pred]\n\n covariance_matrix = pd.DataFrame(np.abs(np.corrcoef(raw.T.loc[potential_pred])),\n index=potential_pred,\n columns=potential_pred).fillna(0)\n return covariance_matrix\n\ndef wMSE(y_true, y_pred, binary=False):\n if binary:\n weights = tf.cast(y_true>0, tf.float32)\n else:\n weights = y_true\n return tf.reduce_mean(weights*tf.square(y_true-y_pred))\n\ndef inspect_data(data):\n # Check if there area any duplicated cell/gene labels\n \n if sum(data.index.duplicated()):\n print(\"ERROR: duplicated cell labels. Please provide unique cell labels.\")\n exit(1)\n \n if sum(data.columns.duplicated()):\n print(\"ERROR: duplicated gene labels. Please provide unique gene labels.\")\n exit(1)\n \n max_value = np.max(data.values)\n if max_value < 10:\n print(\"ERROR: max value = {}. Is your data log-transformed? Please provide raw counts\"\n .format(max_value))\n exit(1)\n \n print(\"Input dataset is {} cells (rows) and {} genes (columns)\"\n .format(*data.shape))\n print(\"First 3 rows and columns:\")\n print(data.iloc[:3,:3])\n\nclass MultiNet:\n\n def __init__(self,\n learning_rate=1e-4,\n batch_size=64,\n max_epochs=500,\n patience=5,\n ncores=-1,\n loss=\"wMSE\",\n output_prefix=tempfile.mkdtemp(),\n sub_outputdim=512,\n verbose=1,\n seed=1234,\n architecture=None\n ):\n self.NN_parameters = {\"learning_rate\": learning_rate,\n \"batch_size\": batch_size,\n \"loss\": loss,\n \"architecture\": architecture,\n \"max_epochs\": max_epochs,\n \"patience\": patience}\n self.sub_outputdim = sub_outputdim\n self.outputdir = output_prefix\n self.verbose = verbose\n self.seed = seed\n self.setCores(ncores)\n\n def setCores(self, ncores):\n if ncores > 0:\n self.ncores = ncores\n else:\n self.ncores = os.cpu_count()\n print(\"Using all the cores ({})\".format(self.ncores))\n \n def loadDefaultArchitecture(self):\n self.NN_parameters['architecture'] = [\n {\"type\": \"dense\", \"neurons\": self.sub_outputdim//2, \"activation\": \"relu\"},\n {\"type\": \"dropout\", \"rate\": 0.2},\n ]\n \n def save(self, model):\n os.system(\"mkdir -p {}\".format(self.outputdir))\n \n model_json = model.to_json()\n \n with open(\"{}/model.json\".format(self.outputdir), \"w\") as json_file:\n json_file.write(model_json)\n \n # serialize weights to HDF5\n model.save_weights(\"{}/model.h5\".format(self.outputdir))\n print(\"Saved model to disk in {}\".format(self.outputdir))\n\n def load(self):\n json_file = open('{}/model.json'.format(self.outputdir), 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n model = model_from_json(loaded_model_json)\n model.load_weights('{}/model.h5'.format(self.outputdir))\n\n return model\n \n def build(self, inputdims):\n if self.NN_parameters['architecture'] is None:\n self.loadDefaultArchitecture()\n\n print(self.NN_parameters['architecture'])\n\n inputs = [ 
Input(shape=(inputdim,)) for inputdim in inputdims ]\n outputs = inputs\n\n for layer in self.NN_parameters['architecture']:\n if layer['type'].lower() == 'dense':\n outputs = [ Dense(layer['neurons'], activation=layer['activation'])(output)\n for output in outputs ]\n elif layer['type'].lower() == 'dropout':\n outputs = [ Dropout(layer['rate'], seed=self.seed)(output)\n for output in outputs] \n else:\n print(\"Unknown layer type.\")\n\n outputs = [Dense(self.sub_outputdim, activation=\"softplus\")(output)\n for output in outputs]\n \n model = Model(inputs=inputs, outputs=outputs)\n\n loss = self.NN_parameters['loss']\n\n if loss in [k for k, v in globals().items() if callable(v)]:\n # if loss is a defined function\n loss = eval(self.NN_parameters['loss'])\n \n if not callable(loss):\n # it is defined in Keras\n if hasattr(keras.losses, loss):\n loss = getattr(keras.losses, loss) \n else:\n print('Unknown loss: {}. Aborting.'.format(loss))\n exit(1)\n\n model.compile(optimizer=keras.optimizer_v2.adam.Adam(lr=self.NN_parameters['learning_rate']),\n loss=loss)\n\n return model\n\n def fit(self,\n raw,\n cell_subset=1,\n NN_lim=None,\n genes_to_impute=None,\n n_pred=None,\n ntop=5,\n minVMR=0.5,\n mode='random',\n ):\n \n inspect_data(raw)\n \n if self.seed is not None:\n np.random.seed(self.seed)\n\n if cell_subset != 1:\n if cell_subset < 1:\n raw = raw.sample(frac=cell_subset)\n else:\n raw = raw.sample(cell_subset)\n\n gene_metric = (raw.var()/(1+raw.mean())).sort_values(ascending=False)\n gene_metric = gene_metric[gene_metric > 0]\n\n if genes_to_impute is None:\n genes_to_impute = self.filter_genes(gene_metric, minVMR, NN_lim=NN_lim)\n else:\n # Make the number of genes to impute a multiple of the network output dim\n n_genes = len(genes_to_impute)\n if n_genes % self.sub_outputdim != 0:\n print(\"The number of input genes is not a multiple of {}. Filling with other genes.\".format(n_genes))\n fill_genes = gene_metric.index[:self.sub_outputdim-n_genes]\n\n if len(fill_genes) < self.sub_outputdim-n_genes:\n # Not enough genes in gene_metric. 
Sample with replacement\n rest = self.sub_outputdim - n_genes - len(fill_genes)\n fill_genes = np.concatenate([fill_genes,\n np.random.choice(gene_metric.index, rest, replace=True)])\n\n genes_to_impute = np.concatenate([genes_to_impute, fill_genes])\n\n covariance_matrix = get_distance_matrix(raw, n_pred=n_pred)\n\n self.setTargets(raw.reindex(columns=genes_to_impute), mode=mode)\n self.setPredictors(covariance_matrix, ntop=ntop)\n\n print(\"Normalization\")\n norm_data = np.log1p(raw).astype(np.float32) # normalizer.transform(raw)\n\n np.random.seed(self.seed)\n tf.random.set_seed(self.seed)\n \n tf.config.threading.set_inter_op_parallelism_threads(self.ncores)\n tf.config.threading.set_intra_op_parallelism_threads(self.ncores)\n\n print(\"Building network\")\n model = self.build([len(genes) for genes in self.predictors])\n\n test_cells = np.random.choice(norm_data.index, int(0.05 * norm_data.shape[0]), replace=False)\n train_cells = np.setdiff1d(norm_data.index, test_cells)\n\n X_train = [norm_data.loc[train_cells, inputgenes].values for inputgenes in self.predictors]\n Y_train = [norm_data.loc[train_cells, targetgenes].values for targetgenes in self.targets]\n \n X_test = [norm_data.loc[test_cells, inputgenes].values for inputgenes in self.predictors]\n Y_test = [norm_data.loc[test_cells, targetgenes].values for targetgenes in self.targets]\n\n print(\"Fitting with {} cells\".format(norm_data.shape[0]))\n result = model.fit(X_train, Y_train,\n validation_data=(X_test,Y_test),\n epochs=self.NN_parameters[\"max_epochs\"],\n batch_size=self.NN_parameters[\"batch_size\"],\n callbacks=[EarlyStopping(monitor='val_loss',\n patience=self.NN_parameters[\"patience\"])],\n verbose=self.verbose)\n\n self.trained_epochs = len(result.history['loss'])\n print(\"Stopped fitting after {} epochs\".format(self.trained_epochs))\n\n self.save(model)\n\n # Save some metrics on test data\n Y_test_raw = np.hstack(Y_test).flatten()\n Y_test_imputed = np.hstack(model.predict(X_test)).flatten()\n\n # Keep only positive values (since negative values could be dropouts)\n Y_test_imputed = Y_test_imputed[Y_test_raw>0]\n Y_test_raw = Y_test_raw[Y_test_raw>0]\n\n self.test_metrics = {\n 'correlation': pearsonr(Y_test_raw,Y_test_imputed)[0],\n 'MSE': np.sum((Y_test_raw-Y_test_imputed)**2)/len(Y_test_raw)\n } \n\n return self\n\n def predict(self,\n raw,\n imputed_only=False,\n policy=\"restore\"):\n\n norm_raw = np.log1p(raw)\n\n inputs = [ norm_raw.loc[:,predictors].values.astype(np.float32)\n for predictors in self.predictors ]\n\n model = self.load()\n\n predicted = model.predict(inputs)\n if len(inputs)>1:\n predicted = np.hstack(predicted)\n \n predicted = pd.DataFrame(predicted, index=raw.index, columns=self.targets.flatten())\n\n predicted = predicted.groupby(by=predicted.columns, axis=1).mean()\n not_predicted = norm_raw.drop(self.targets.flatten(), axis=1)\n\n imputed = (pd.concat([predicted,not_predicted],axis=1)\n .loc[raw.index, raw.columns]\n .values)\n \n # To prevent overflow\n imputed[ (imputed > 2*norm_raw.values.max()) | (np.isnan(imputed)) ] = 0\n # Convert back to counts\n imputed = np.expm1(imputed)\n\n if policy == \"restore\":\n print(\"Filling zeros\")\n mask = (raw.values > 0)\n imputed[mask] = raw.values[mask]\n elif policy == \"max\":\n print(\"Imputing data with 'max' policy\")\n mask = (raw.values > imputed)\n imputed[mask] = raw.values[mask]\n\n imputed = pd.DataFrame(imputed, index=raw.index, columns=raw.columns)\n\n if imputed_only:\n return imputed.loc[:, predicted.columns]\n else:\n 
return imputed\n \n def filter_genes(self,\n gene_metric, # assumes gene_metric is sorted\n threshold,\n NN_lim=None\n ):\n if not str(NN_lim).isdigit():\n NN_lim = (gene_metric > threshold).sum()\n\n n_subsets = int(np.ceil(NN_lim / self.sub_outputdim))\n genes_to_impute = gene_metric.index[:n_subsets*self.sub_outputdim]\n\n rest = self.sub_outputdim - (len(genes_to_impute) % self.sub_outputdim)\n\n if rest > 0:\n fill_genes = np.random.choice(gene_metric.index, rest)\n genes_to_impute = np.concatenate([genes_to_impute, fill_genes])\n\n print(\"{} genes selected for imputation\".format(len(genes_to_impute)))\n\n return genes_to_impute\n\n def setTargets(self,data, mode='random'):\n \n n_subsets = int(data.shape[1]/self.sub_outputdim)\n\n if mode == 'progressive':\n self.targets = data.columns.values.reshape([n_subsets, self.sub_outputdim])\n else:\n self.targets = np.random.choice(data.columns,\n [n_subsets, self.sub_outputdim],\n replace=False)\n \n def setPredictors(self, covariance_matrix, ntop=5):\n self.predictors = []\n \n for i,targets in enumerate(self.targets):\n\n genes_not_in_target = np.setdiff1d(covariance_matrix.columns, targets)\n\n if genes_not_in_target.size == 0:\n warnings.warn('Warning: number of target genes lower than output dim. Consider lowering down the sub_outputdim parameter',\n UserWarning)\n genes_not_in_target = covariance_matrix.columns\n \n subMatrix = ( covariance_matrix\n .loc[targets, genes_not_in_target]\n )\n sorted_idx = np.argsort(-subMatrix.values, axis=1)\n predictors = subMatrix.columns[sorted_idx[:,:ntop].flatten()]\n\n self.predictors.append(predictors.unique())\n\n print(\"Net {}: {} predictors, {} targets\"\n .format(i,len(np.unique(predictors)),len(targets)))\n\n def score(self, data, policy=None):\n warnings.warn(\n \"This method is deprecated. Please use model.test_metrics to measure model accuracy instead\",\n DeprecationWarning)\n Y_hat = self.predict(data, policy=policy)\n Y = data.loc[Y_hat.index, Y_hat.columns]\n\n return pearsonr(Y_hat.values.reshape(-1), Y.values.reshape(-1))\n \n\n\n\n \n" ]
[ [ "numpy.sum", "tensorflow.keras.optimizer_v2.adam.Adam", "numpy.random.seed", "numpy.argsort", "tensorflow.config.threading.set_inter_op_parallelism_threads", "numpy.log1p", "numpy.random.choice", "numpy.isnan", "numpy.expm1", "tensorflow.random.set_seed", "numpy.unique", "numpy.corrcoef", "scipy.stats.pearsonr", "numpy.ceil", "numpy.setdiff1d", "tensorflow.config.threading.set_intra_op_parallelism_threads", "tensorflow.cast", "numpy.hstack", "numpy.max", "pandas.concat", "numpy.isinf", "pandas.DataFrame", "tensorflow.square", "numpy.concatenate" ] ]
searchsolved/sentence-transformers-master
[ "50f345322d602ebab9e6d2b5e2a98e7e9d0cf9a3" ]
[ "sentence_transformers/models/Pooling.py" ]
[ "import torch\nfrom torch import Tensor\nfrom torch import nn\nfrom typing import Union, Tuple, List, Iterable, Dict\nimport os\nimport json\n\n\nclass Pooling(nn.Module):\n \"\"\"Performs pooling (max or mean) on the token embeddings.\n\n Using pooling, it generates from a variable sized sentence a fixed sized sentence embedding. This layer also allows to use the CLS token if it is returned by the underlying word embedding model.\n You can concatenate multiple poolings together.\n\n :param word_embedding_dimension: Dimensions for the word embeddings\n :param pooling_mode: Can be a string: mean/max/cls. If set, overwrites the other pooling_mode_* settings\n :param pooling_mode_cls_token: Use the first token (CLS token) as text representations\n :param pooling_mode_max_tokens: Use max in each dimension over all tokens.\n :param pooling_mode_mean_tokens: Perform mean-pooling\n :param pooling_mode_mean_sqrt_len_tokens: Perform mean-pooling, but devide by sqrt(input_length).\n \"\"\"\n def __init__(self,\n word_embedding_dimension: int,\n pooling_mode: str = None,\n pooling_mode_cls_token: bool = False,\n pooling_mode_max_tokens: bool = False,\n pooling_mode_mean_tokens: bool = True,\n pooling_mode_mean_sqrt_len_tokens: bool = False,\n ):\n super(Pooling, self).__init__()\n\n self.config_keys = ['word_embedding_dimension', 'pooling_mode_cls_token', 'pooling_mode_mean_tokens', 'pooling_mode_max_tokens', 'pooling_mode_mean_sqrt_len_tokens']\n\n if pooling_mode is not None: #Set pooling mode by string\n pooling_mode = pooling_mode.lower()\n assert pooling_mode in ['mean', 'max', 'cls']\n pooling_mode_cls_token = (pooling_mode == 'cls')\n pooling_mode_max_tokens = (pooling_mode == 'max')\n pooling_mode_mean_tokens = (pooling_mode == 'mean')\n\n self.word_embedding_dimension = word_embedding_dimension\n self.pooling_mode_cls_token = pooling_mode_cls_token\n self.pooling_mode_mean_tokens = pooling_mode_mean_tokens\n self.pooling_mode_max_tokens = pooling_mode_max_tokens\n self.pooling_mode_mean_sqrt_len_tokens = pooling_mode_mean_sqrt_len_tokens\n\n pooling_mode_multiplier = sum([pooling_mode_cls_token, pooling_mode_max_tokens, pooling_mode_mean_tokens, pooling_mode_mean_sqrt_len_tokens])\n self.pooling_output_dimension = (pooling_mode_multiplier * word_embedding_dimension)\n\n\n def __repr__(self):\n return \"Pooling({})\".format(self.get_config_dict())\n\n def get_pooling_mode_str(self) -> str:\n \"\"\"\n Returns the pooling mode as string\n \"\"\"\n modes = []\n if self.pooling_mode_cls_token:\n modes.append('cls')\n if self.pooling_mode_mean_tokens:\n modes.append('mean')\n if self.pooling_mode_max_tokens:\n modes.append('max')\n if self.pooling_mode_mean_sqrt_len_tokens:\n modes.append('mean_sqrt_len_tokens')\n\n return \"+\".join(modes)\n\n def forward(self, features: Dict[str, Tensor]):\n token_embeddings = features['token_embeddings']\n attention_mask = features['attention_mask']\n\n ## Pooling strategy\n output_vectors = []\n if self.pooling_mode_cls_token:\n cls_token = features.get('cls_token_embeddings', token_embeddings[:, 0]) # Take first token by default\n output_vectors.append(cls_token)\n if self.pooling_mode_max_tokens:\n input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n token_embeddings[input_mask_expanded == 0] = -1e9 # Set padding tokens to large negative value\n max_over_time = torch.max(token_embeddings, 1)[0]\n output_vectors.append(max_over_time)\n if self.pooling_mode_mean_tokens or self.pooling_mode_mean_sqrt_len_tokens:\n 
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)\n\n #If tokens are weighted (by WordWeights layer), feature 'token_weights_sum' will be present\n if 'token_weights_sum' in features:\n sum_mask = features['token_weights_sum'].unsqueeze(-1).expand(sum_embeddings.size())\n else:\n sum_mask = input_mask_expanded.sum(1)\n\n sum_mask = torch.clamp(sum_mask, min=1e-9)\n\n if self.pooling_mode_mean_tokens:\n output_vectors.append(sum_embeddings / sum_mask)\n if self.pooling_mode_mean_sqrt_len_tokens:\n output_vectors.append(sum_embeddings / torch.sqrt(sum_mask))\n\n output_vector = torch.cat(output_vectors, 1)\n features.update({'sentence_embedding': output_vector})\n return features\n\n def get_sentence_embedding_dimension(self):\n return self.pooling_output_dimension\n\n def get_config_dict(self):\n return {key: self.__dict__[key] for key in self.config_keys}\n\n def save(self, output_path):\n with open(os.path.join(output_path, 'config.json'), 'w') as fOut:\n json.dump(self.get_config_dict(), fOut, indent=2)\n\n @staticmethod\n def load(input_path):\n with open(os.path.join(input_path, 'config.json')) as fIn:\n config = json.load(fIn)\n\n return Pooling(**config)\n" ]
[ [ "torch.sum", "torch.sqrt", "torch.max", "torch.cat", "torch.clamp" ] ]
JanSchulz/statsmodels
[ "a160bbc790ef447ec365651ad01da3cf11e75f7f" ]
[ "statsmodels/stats/tests/test_multi.py" ]
[ "'''Tests for multipletests and fdr pvalue corrections\n\nAuthor : Josef Perktold\n\n\n['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n', 'fdr_tsbh']\nare tested against R:multtest\n\n'hommel' is tested against R stats p_adjust (not available in multtest\n\n'fdr_gbs', 'fdr_2sbky' I did not find them in R, currently tested for\n consistency only\n\n'''\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_equal, assert_\n\nfrom statsmodels.stats.multitest import (multipletests, fdrcorrection,\n fdrcorrection_twostage)\nfrom statsmodels.stats.multicomp import tukeyhsd\n\npval0 = np.array([0.838541367553 , 0.642193923795 , 0.680845947633 ,\n 0.967833824309 , 0.71626938238 , 0.177096952723 , 5.23656777208e-005 ,\n 0.0202732688798 , 0.00028140506198 , 0.0149877310796])\n\nres_multtest1 = np.array([[ 5.2365677720800003e-05, 5.2365677720800005e-04,\n 5.2365677720800005e-04, 5.2365677720800005e-04,\n 5.2353339704891422e-04, 5.2353339704891422e-04,\n 5.2365677720800005e-04, 1.5337740764175588e-03],\n [ 2.8140506198000000e-04, 2.8140506197999998e-03,\n 2.5326455578199999e-03, 2.5326455578199999e-03,\n 2.8104897961789277e-03, 2.5297966317768816e-03,\n 1.4070253098999999e-03, 4.1211324652269442e-03],\n [ 1.4987731079600001e-02, 1.4987731079600000e-01,\n 1.1990184863680001e-01, 1.1990184863680001e-01,\n 1.4016246580579017e-01, 1.1379719679449507e-01,\n 4.9959103598666670e-02, 1.4632862843720582e-01],\n [ 2.0273268879800001e-02, 2.0273268879799999e-01,\n 1.4191288215860001e-01, 1.4191288215860001e-01,\n 1.8520270949069695e-01, 1.3356756197485375e-01,\n 5.0683172199499998e-02, 1.4844940238274187e-01],\n [ 1.7709695272300000e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 8.5760763426056130e-01, 6.8947825122356643e-01,\n 3.5419390544599999e-01, 1.0000000000000000e+00],\n [ 6.4219392379499995e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9996560644133570e-01, 9.9413539782557070e-01,\n 8.9533672797500008e-01, 1.0000000000000000e+00],\n [ 6.8084594763299999e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9998903512635740e-01, 9.9413539782557070e-01,\n 8.9533672797500008e-01, 1.0000000000000000e+00],\n [ 7.1626938238000004e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9999661886871472e-01, 9.9413539782557070e-01,\n 8.9533672797500008e-01, 1.0000000000000000e+00],\n [ 8.3854136755300002e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9999998796038225e-01, 9.9413539782557070e-01,\n 9.3171263061444454e-01, 1.0000000000000000e+00],\n [ 9.6783382430900000e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9999999999999878e-01, 9.9413539782557070e-01,\n 9.6783382430900000e-01, 1.0000000000000000e+00]])\n\n\nres_multtest2_columns = ['rawp', 'Bonferroni', 'Holm', 'Hochberg', 'SidakSS', 'SidakSD',\n 'BH', 'BY', 'ABH', 'TSBH_0.05']\n\nrmethods = {'rawp':(0,'pval'), 'Bonferroni':(1,'b'), 'Holm':(2,'h'),\n 'Hochberg':(3,'sh'), 'SidakSS':(4,'s'), 'SidakSD':(5,'hs'),\n 'BH':(6,'fdr_i'), 'BY':(7,'fdr_n'),\n 'TSBH_0.05':(9, 'fdr_tsbh')}\n\nNA = np.nan\n# all rejections, except for Bonferroni and Sidak\nres_multtest2 = np.array([\n 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.012, 0.024, 0.036, 0.048,\n 0.06, 0.072, 0.012, 0.02, 0.024, 0.024, 0.024, 0.024, 0.012, 0.012,\n 0.012, 0.012, 0.012, 0.012, 0.01194015976019192, 0.02376127616613988,\n 0.03546430060660932, 0.04705017875634587, 0.058519850599,\n 
0.06987425045000606, 0.01194015976019192, 0.01984063872102404,\n 0.02378486270400004, 0.023808512, 0.023808512, 0.023808512, 0.012,\n 0.012, 0.012, 0.012, 0.012, 0.012, 0.0294, 0.0294, 0.0294, 0.0294,\n 0.0294, 0.0294, NA, NA, NA, NA, NA, NA, 0, 0, 0, 0, 0, 0\n ]).reshape(6,10, order='F')\n\nres_multtest3 = np.array([\n 0.001, 0.002, 0.003, 0.004, 0.005, 0.05, 0.06, 0.07, 0.08, 0.09, 0.01,\n 0.02, 0.03, 0.04, 0.05, 0.5, 0.6, 0.7, 0.8, 0.9, 0.01, 0.018, 0.024,\n 0.028, 0.03, 0.25, 0.25, 0.25, 0.25, 0.25, 0.01, 0.018, 0.024, 0.028,\n 0.03, 0.09, 0.09, 0.09, 0.09, 0.09, 0.00995511979025177,\n 0.01982095664805061, 0.02959822305108317, 0.03928762649718986,\n 0.04888986953422814, 0.4012630607616213, 0.4613848859051006,\n 0.5160176928207072, 0.5656115457763677, 0.6105838818818925,\n 0.00995511979025177, 0.0178566699880266, 0.02374950634358763,\n 0.02766623106147537, 0.02962749064373438, 0.2262190625000001,\n 0.2262190625000001, 0.2262190625000001, 0.2262190625000001,\n 0.2262190625000001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.08333333333333334,\n 0.0857142857142857, 0.0875, 0.0888888888888889, 0.09,\n 0.02928968253968254, 0.02928968253968254, 0.02928968253968254,\n 0.02928968253968254, 0.02928968253968254, 0.2440806878306878,\n 0.2510544217687075, 0.2562847222222222, 0.2603527336860670,\n 0.2636071428571428, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0.005,\n 0.005, 0.005, 0.005, 0.005, 0.04166666666666667, 0.04285714285714286,\n 0.04375, 0.04444444444444445, 0.045\n ]).reshape(10,10, order='F')\n\nres0_large = np.array([\n 0.00031612, 0.0003965, 0.00048442, 0.00051932, 0.00101436, 0.00121506,\n 0.0014516, 0.00265684, 0.00430043, 0.01743686, 0.02080285, 0.02785414,\n 0.0327198, 0.03494679, 0.04206808, 0.08067095, 0.23882767, 0.28352304,\n 0.36140401, 0.43565145, 0.44866768, 0.45368782, 0.48282088,\n 0.49223781, 0.55451638, 0.6207473, 0.71847853, 0.72424145, 0.85950263,\n 0.89032747, 0.0094836, 0.011895, 0.0145326, 0.0155796, 0.0304308,\n 0.0364518, 0.043548, 0.0797052, 0.1290129, 0.5231058, 0.6240855,\n 0.8356242, 0.981594, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 0.0094836, 0.0114985, 0.01356376, 0.01402164, 0.02637336,\n 0.0303765, 0.0348384, 0.06110732, 0.09460946, 0.36617406, 0.416057,\n 0.52922866, 0.5889564, 0.59409543, 0.67308928, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 0.0094836, 0.0114985, 0.01356376, 0.01402164,\n 0.02637336, 0.0303765, 0.0348384, 0.06110732, 0.09460946, 0.36617406,\n 0.416057, 0.52922866, 0.5889564, 0.59409543, 0.67308928, 0.89032747,\n 0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.89032747,\n 0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.89032747,\n 0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.009440257627368331,\n 0.01182686507401931, 0.01443098172617119, 0.01546285007478554,\n 0.02998742566629453, 0.03581680249125385, 0.04264369065603335,\n 0.0767094173291795, 0.1212818694859857, 0.410051586220387,\n 0.4677640287633493, 0.5715077903157826, 0.631388450393325,\n 0.656016359012282, 0.724552174001554, 0.919808283456286,\n 0.999721715014484, 0.9999547032674126, 0.9999985652190126,\n 0.999999964809746, 0.999999982525548, 0.999999986719131,\n 0.999999997434160, 0.999999998521536, 0.999999999970829,\n 0.999999999999767, 1, 1, 1, 1, 0.009440257627368331,\n 0.01143489901147732, 0.0134754287611275, 0.01392738605848343,\n 0.0260416568490015, 0.02993768724817902, 0.0342629726119179,\n 0.0593542206208364, 0.09045742964699988, 0.308853956167216,\n 0.343245865702423, 0.4153483370083637, 0.4505333180190900,\n 0.453775200643535, 0.497247406680671, 
0.71681858015803,\n 0.978083969553718, 0.986889206426321, 0.995400461639735,\n 0.9981506396214986, 0.9981506396214986, 0.9981506396214986,\n 0.9981506396214986, 0.9981506396214986, 0.9981506396214986,\n 0.9981506396214986, 0.9981506396214986, 0.9981506396214986,\n 0.9981506396214986, 0.9981506396214986, 0.0038949, 0.0038949,\n 0.0038949, 0.0038949, 0.0060753, 0.0060753, 0.006221142857142857,\n 0.00996315, 0.01433476666666667, 0.05231058, 0.05673504545454545,\n 0.06963535, 0.07488597857142856, 0.07488597857142856, 0.08413616,\n 0.15125803125, 0.421460594117647, 0.4725384, 0.570637910526316,\n 0.6152972625, 0.6152972625, 0.6152972625, 0.6152972625, 0.6152972625,\n 0.665419656, 0.7162468846153845, 0.775972982142857, 0.775972982142857,\n 0.889140651724138, 0.89032747, 0.01556007537622183,\n 0.01556007537622183, 0.01556007537622183, 0.01556007537622183,\n 0.02427074531648065, 0.02427074531648065, 0.02485338565390302,\n 0.0398026560334295, 0.0572672083580799, 0.2089800939109816,\n 0.2266557764630925, 0.2781923271071372, 0.2991685206792373,\n 0.2991685206792373, 0.336122876445059, 0.6042738882921044, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.00220711, 0.00220711, 0.00220711,\n 0.00220711, 0.00344267, 0.00344267, 0.003525314285714285, 0.005645785,\n 0.00812303444444444, 0.029642662, 0.0321498590909091,\n 0.03946003166666667, 0.04243538785714285, 0.04243538785714285,\n 0.0476771573333333, 0.085712884375, 0.23882767, 0.26777176,\n 0.323361482631579, 0.34866844875, 0.34866844875, 0.34866844875,\n 0.34866844875, 0.34866844875, 0.3770711384, 0.4058732346153846,\n 0.4397180232142857, 0.4397180232142857, 0.503846369310345,\n 0.504518899666667, 0.00272643, 0.00272643, 0.00272643, 0.00272643,\n 0.00425271, 0.00425271, 0.0043548, 0.006974205, 0.01003433666666667,\n 0.036617406, 0.03971453181818182, 0.048744745, 0.052420185,\n 0.052420185, 0.058895312, 0.105880621875, 0.295022415882353,\n 0.33077688, 0.399446537368421, 0.43070808375, 0.43070808375,\n 0.43070808375, 0.43070808375, 0.43070808375, 0.4657937592,\n 0.5013728192307692, 0.5431810875, 0.5431810875, 0.622398456206897,\n 0.623229229\n ]).reshape(30,10, order='F')\n\n\nclass CheckMultiTestsMixin(object):\n def test_multi_pvalcorrection(self):\n #test against R package multtest mt.rawp2adjp\n\n res_multtest = self.res2\n pval0 = res_multtest[:,0]\n\n for k,v in rmethods.items():\n if v[1] in self.methods:\n reject, pvalscorr = multipletests(pval0,\n alpha=self.alpha,\n method=v[1])[:2]\n assert_almost_equal(pvalscorr, res_multtest[:,v[0]], 15)\n assert_equal(reject, pvalscorr <= self.alpha)\n\n pvalscorr = np.sort(fdrcorrection(pval0, method='n')[1])\n assert_almost_equal(pvalscorr, res_multtest[:,7], 15)\n pvalscorr = np.sort(fdrcorrection(pval0, method='i')[1])\n assert_almost_equal(pvalscorr, res_multtest[:,6], 15)\n\nclass TestMultiTests1(CheckMultiTestsMixin):\n def __init__(self):\n self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n']\n self.alpha = 0.1\n self.res2 = res_multtest1\n\nclass TestMultiTests2(CheckMultiTestsMixin):\n # case: all hypothesis rejected (except 'b' and 's'\n def __init__(self):\n self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n']\n self.alpha = 0.05\n self.res2 = res_multtest2\n\nclass TestMultiTests3(CheckMultiTestsMixin):\n def __init__(self):\n self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n',\n 'fdr_tsbh']\n self.alpha = 0.05\n self.res2 = res0_large\n\nclass TestMultiTests4(CheckMultiTestsMixin):\n # in simulations, all two stage fdr, fdr_tsbky, fdr_tsbh, fdr_gbs, have in\n # some 
cases (cases with large Alternative) an FDR that looks too large\n # this is the first case #rejected = 12, DGP : has 10 false\n def __init__(self):\n self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n',\n 'fdr_tsbh']\n self.alpha = 0.05\n self.res2 = res_multtest3\n\ndef test_pvalcorrection_reject():\n # consistency test for reject boolean and pvalscorr\n\n for alpha in [0.01, 0.05, 0.1]:\n for method in ['b', 's', 'sh', 'hs', 'h', 'hommel', 'fdr_i', 'fdr_n',\n 'fdr_tsbky', 'fdr_tsbh', 'fdr_gbs']:\n for ii in range(11):\n pval1 = np.hstack((np.linspace(0.0001, 0.0100, ii),\n np.linspace(0.05001, 0.11, 10 - ii)))\n # using .05001 instead of 0.05 to avoid edge case issue #768\n reject, pvalscorr = multipletests(pval1, alpha=alpha,\n method=method)[:2]\n #print 'reject.sum', v[1], reject.sum()\n msg = 'case %s %3.2f rejected:%d\\npval_raw=%r\\npvalscorr=%r' % (\n method, alpha, reject.sum(), pval1, pvalscorr)\n #assert_equal(reject, pvalscorr <= alpha, err_msg=msg)\n yield assert_equal, reject, pvalscorr <= alpha, msg\n\n\ndef test_hommel():\n #tested agains R stats p_adjust(pval0, method='hommel')\n pval0 = np.array(\n [ 0.00116, 0.00924, 0.01075, 0.01437, 0.01784, 0.01918,\n 0.02751, 0.02871, 0.03054, 0.03246, 0.04259, 0.06879,\n 0.0691 , 0.08081, 0.08593, 0.08993, 0.09386, 0.09412,\n 0.09718, 0.09758, 0.09781, 0.09788, 0.13282, 0.20191,\n 0.21757, 0.24031, 0.26061, 0.26762, 0.29474, 0.32901,\n 0.41386, 0.51479, 0.52461, 0.53389, 0.56276, 0.62967,\n 0.72178, 0.73403, 0.87182, 0.95384])\n\n result_ho = np.array(\n [ 0.0464 , 0.25872 , 0.29025 ,\n 0.3495714285714286, 0.41032 , 0.44114 ,\n 0.57771 , 0.60291 , 0.618954 ,\n 0.6492 , 0.7402725000000001, 0.86749 ,\n 0.86749 , 0.8889100000000001, 0.8971477777777778,\n 0.8993 , 0.9175374999999999, 0.9175374999999999,\n 0.9175374999999999, 0.9175374999999999, 0.9175374999999999,\n 0.9175374999999999, 0.95384 , 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001])\n\n rej, pvalscorr, _, _ = multipletests(pval0, alpha=0.1, method='ho')\n assert_almost_equal(pvalscorr, result_ho, 15)\n assert_equal(rej, result_ho < 0.1) #booleans\n\ndef test_fdr_bky():\n # test for fdrcorrection_twostage\n # example from BKY\n pvals = [0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, 0.0459,\n 0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000 ]\n\n #no test for corrected p-values, but they are inherited\n #same number of rejection as in BKY paper:\n #single step-up:4, two-stage:8, iterated two-step:9\n #also alpha_star is the same as theirs for TST\n #print fdrcorrection0(pvals, alpha=0.05, method='indep')\n #print fdrcorrection_twostage(pvals, alpha=0.05, iter=False)\n res_tst = fdrcorrection_twostage(pvals, alpha=0.05, iter=False)\n assert_almost_equal([0.047619, 0.0649], res_tst[-1][:2],3) #alpha_star for stage 2\n assert_equal(8, res_tst[0].sum())\n #print fdrcorrection_twostage(pvals, alpha=0.05, iter=True)\n\ndef test_tukeyhsd():\n #example multicomp in R p 83\n\n res = '''\\\n pair diff lwr upr p adj\n P-M 8.150000 -10.037586 26.3375861 0.670063958\n S-M -3.258333 -21.445919 14.9292527 0.982419709\n T-M 23.808333 5.620747 41.9959194 0.006783701\n V-M 4.791667 -13.395919 22.9792527 0.931020848\n S-P -11.408333 -29.595919 6.7792527 0.360680099\n T-P 
15.658333 -2.529253 33.8459194 0.113221634\n V-P -3.358333 -21.545919 14.8292527 0.980350080\n T-S 27.066667 8.879081 45.2542527 0.002027122\n V-S 8.050000 -10.137586 26.2375861 0.679824487\n V-T -19.016667 -37.204253 -0.8290806 0.037710044\n '''\n\n res = np.array([[ 8.150000, -10.037586, 26.3375861, 0.670063958],\n [-3.258333, -21.445919, 14.9292527, 0.982419709],\n [23.808333, 5.620747, 41.9959194, 0.006783701],\n [ 4.791667, -13.395919, 22.9792527, 0.931020848],\n [-11.408333, -29.595919, 6.7792527, 0.360680099],\n [15.658333, -2.529253, 33.8459194, 0.113221634],\n [-3.358333, -21.545919, 14.8292527, 0.980350080],\n [27.066667, 8.879081, 45.2542527, 0.002027122],\n [ 8.050000, -10.137586, 26.2375861, 0.679824487],\n [-19.016667, -37.204253, -0.8290806, 0.037710044]])\n\n m_r = [94.39167, 102.54167, 91.13333, 118.20000, 99.18333]\n myres = tukeyhsd(m_r, 6, 110.8, alpha=0.05, df=4)\n from numpy.testing import assert_almost_equal, assert_equal\n pairs, reject, meandiffs, std_pairs, confint, q_crit = myres[:6]\n assert_almost_equal(meandiffs, res[:, 0], decimal=5)\n assert_almost_equal(confint, res[:, 1:3], decimal=2)\n assert_equal(reject, res[:, 3]<0.05)\n\n" ]
[ [ "numpy.array", "numpy.testing.assert_equal", "numpy.linspace", "numpy.testing.assert_almost_equal" ] ]
olantwin/zfit
[ "dae89fd95fc2158c0e7530664d8ca999db4802c5" ]
[ "zfit/core/loss.py" ]
[ "import abc\nfrom collections import OrderedDict\n\nimport tensorflow as tf\nfrom typing import Optional, Union, List\n\nfrom zfit import ztf\nfrom zfit.util import ztyping\nfrom zfit.util.cache import Cachable\nfrom zfit.util.graph import get_dependents_auto\nfrom .baseobject import BaseObject, BaseDependentsMixin\nfrom .interfaces import ZfitLoss, ZfitSpace, ZfitModel, ZfitData, ZfitPDF\nfrom ..models.functions import SimpleFunc\nfrom ..util.container import convert_to_container, is_container\nfrom ..util.exception import IntentionNotUnambiguousError, NotExtendedPDFError\nfrom zfit.settings import ztypes\n\n\ndef _unbinned_nll_tf(model: ztyping.PDFInputType, data: ztyping.DataInputType, fit_range: ZfitSpace):\n \"\"\"Return unbinned negative log likelihood graph for a PDF\n\n Args:\n model (ZfitModel): PDFs with a `.pdf` method. Has to be as many models as data\n data (ZfitData):\n fit_range ():\n\n Returns:\n graph: the unbinned nll\n\n Raises:\n ValueError: if both `probs` and `log_probs` are specified.\n \"\"\"\n\n if is_container(model):\n nlls = [_unbinned_nll_tf(model=p, data=d, fit_range=r)\n for p, d, r in zip(model, data, fit_range)]\n nll_finished = tf.reduce_sum(nlls)\n else:\n with data.set_data_range(fit_range):\n probs = model.pdf(data, norm_range=fit_range)\n log_probs = tf.log(probs)\n if data.weights is not None:\n log_probs *= data.weights # because it's prob ** weights\n nll = -tf.reduce_sum(log_probs)\n nll_finished = nll\n return nll_finished\n\n\ndef _nll_constraints_tf(constraints):\n if not constraints:\n return ztf.constant(0.) # adding 0 to nll\n probs = []\n for param, dist in constraints.items():\n probs.append(dist.pdf(param))\n # probs = [dist.pdf(param) for param, dist in constraints.items()]\n constraints_neg_log_prob = -tf.reduce_sum(tf.log(probs))\n return constraints_neg_log_prob\n\n\nclass BaseLoss(BaseDependentsMixin, ZfitLoss, Cachable, BaseObject):\n\n def __init__(self, model, data, fit_range: ztyping.LimitsTypeInput = None, constraints: List[tf.Tensor] = None):\n # first doc line left blank on purpose, subclass adds class docstring (Sphinx autodoc adds the two)\n \"\"\"\n\n A \"simultaneous fit\" can be performed by giving one or more `model`, `data`, `fit_range`\n to the loss. The length of each has to match the length of the others.\n\n Args:\n model (Iterable[ZfitModel]): The model or models to evaluate the data on\n data (Iterable[ZfitData]): Data to use\n fit_range (Iterable[:py:class:`~zfit.Space`]): The fitting range. It's the norm_range for the models (if\n they\n have a norm_range) and the data_range for the data.\n constraints (Iterable[tf.Tensor): A Tensor representing a loss constraint. 
Using\n `zfit.constraint.*` allows for easy use of predefined constraints.\n \"\"\"\n super().__init__(name=type(self).__name__)\n model, data, fit_range = self._input_check(pdf=model, data=data, fit_range=fit_range)\n self._model = model\n self._data = data\n self._fit_range = fit_range\n if constraints is None:\n constraints = []\n self._constraints = convert_to_container(constraints, list)\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls._name = \"UnnamedSubBaseLoss\"\n\n def _input_check(self, pdf, data, fit_range):\n if is_container(pdf) ^ is_container(data):\n raise ValueError(\"`pdf` and `data` either both have to be a list or not.\")\n if not is_container(pdf):\n if isinstance(fit_range, list):\n raise TypeError(\"`pdf` and `data` are not a `list`, `fit_range` can't be a `list` then.\")\n if isinstance(pdf, tuple):\n raise TypeError(\"`pdf` has to be a pdf or a list of pdfs, not a tuple.\")\n\n if isinstance(data, tuple):\n raise TypeError(\"`data` has to be a data or a list of data, not a tuple.\")\n\n pdf, data = (convert_to_container(obj, non_containers=[tuple]) for obj in (pdf, data))\n # TODO: data, range consistency?\n if fit_range is None:\n fit_range = []\n for p, d in zip(pdf, data):\n if not p.norm_range == d.data_range:\n raise IntentionNotUnambiguousError(\"No `fit_range` is specified and `pdf` {} as \"\n \"well as `data` {} have different ranges they\"\n \"are defined in. Either make them (all) consistent\"\n \"or specify the `fit_range`\")\n fit_range.append(p.norm_range)\n else:\n fit_range = convert_to_container(fit_range, non_containers=[tuple])\n\n # simultaneous fit\n # if is_container(pdf):\n # if not is_container(fit_range) or not isinstance(fit_range[0], Space):\n # raise ValueError(\n # \"If several pdfs are specified, the `fit_range` has to be given as a list of `Space` \"\n # \"objects and not as pure tuples.\")\n\n # else:\n # fit_range = pdf.convert_sort_space(limits=fit_range) # fit_range may be a tuple\n if not len(pdf) == len(data) == len(fit_range):\n raise ValueError(\"pdf, data and fit_range don't have the same number of components:\"\n \"\\npdf: {}\"\n \"\\ndata: {}\"\n \"\\nfit_range: {}\".format(pdf, data, fit_range))\n\n # sanitize fit_range\n fit_range = [p.convert_sort_space(limits=range_) for p, range_ in zip(pdf, fit_range)]\n # TODO: sanitize pdf, data?\n self.add_cache_dependents(cache_dependents=pdf)\n self.add_cache_dependents(cache_dependents=data)\n self.add_cache_dependents(cache_dependents=fit_range)\n return pdf, data, fit_range\n\n def gradients(self, params: ztyping.ParamTypeInput = None) -> List[tf.Tensor]:\n if params is None:\n params = list(self.get_dependents())\n else:\n params = convert_to_container(params)\n return self._gradients(params=params)\n\n def add_constraints(self, constraints):\n return self._add_constraints(constraints)\n\n def _add_constraints(self, constraints):\n constraints = convert_to_container(constraints, container=list)\n self._constraints.extend(constraints)\n return constraints\n\n @property\n def name(self):\n return self._name\n\n @property\n def model(self):\n return self._model\n\n @property\n def data(self):\n return self._data\n\n @property\n def fit_range(self):\n fit_range = self._fit_range\n return fit_range\n\n @property\n def constraints(self):\n return self._constraints\n\n def _get_dependents(self):\n pdf_dependents = self._extract_dependents(self.model)\n return pdf_dependents\n\n @abc.abstractmethod\n def _loss_func(self, model, data, fit_range, 
constraints):\n raise NotImplementedError\n\n def value(self):\n return self._value()\n\n def _value(self):\n try:\n return self._loss_func(model=self.model, data=self.data, fit_range=self.fit_range,\n constraints=self.constraints)\n except NotImplementedError:\n raise NotImplementedError(\"_loss_func not properly defined!\")\n\n def __add__(self, other):\n if not isinstance(other, BaseLoss):\n raise TypeError(\"Has to be a subclass of `BaseLoss` or overwrite `__add__`.\")\n if not type(other) == type(self):\n raise ValueError(\"cannot safely add two different kind of loss.\")\n model = self.model + other.model\n data = self.data + other.data\n fit_range = self.fit_range + other.fit_range\n constraints = self.constraints + other.constraints\n loss = type(self)(model=model, data=data, fit_range=fit_range, constraints=constraints)\n return loss\n\n def _gradients(self, params):\n return tf.gradients(self.value(), params)\n\n\nclass CachedLoss(BaseLoss):\n\n def __init__(self, model, data, fit_range=None, constraints=None):\n super().__init__(model=model, data=data, fit_range=fit_range, constraints=constraints)\n\n @abc.abstractmethod\n def _cache_add_constraints(self, constraints):\n raise NotImplementedError\n\n def _value(self):\n if self._cache.get('loss') is None:\n loss = super()._value()\n self._cache['loss'] = loss\n else:\n loss = self._cache['loss']\n return loss\n\n def _add_constraints(self, constraints):\n super()._add_constraints(constraints=constraints)\n self._cache_add_constraints(constraints=constraints)\n\n def _gradients(self, params):\n params_cache = self._cache.get('gradients', {})\n params_todo = []\n for param in params:\n if param not in params_cache:\n params_todo.append(param)\n if params_todo:\n gradients = {(p, grad) for p, grad in zip(params_todo, super()._gradients(params_todo))}\n params_cache.update(gradients)\n\n self._cache['gradients'] = params_cache\n\n param_gradients = [params_cache[param] for param in params]\n return param_gradients\n\n\nclass UnbinnedNLL(CachedLoss):\n \"\"\"The Unbinned Negative Log Likelihood.\"\"\"\n\n _name = \"UnbinnedNLL\"\n\n def _loss_func(self, model, data, fit_range, constraints):\n nll = _unbinned_nll_tf(model=model, data=data, fit_range=fit_range)\n if constraints:\n constraints = ztf.reduce_sum(constraints)\n nll += constraints\n return nll\n\n def _cache_add_constraints(self, constraints):\n if self._cache.get('loss') is not None:\n self._cache['loss'] += ztf.reduce_sum(constraints)\n\n @property\n def errordef(self) -> Union[float, int]:\n return 0.5\n\n\nclass ExtendedUnbinnedNLL(UnbinnedNLL):\n \"\"\"An Unbinned Negative Log Likelihood with an additional poisson term for the\"\"\"\n\n def _loss_func(self, model, data, fit_range, constraints):\n nll = super()._loss_func(model=model, data=data, fit_range=fit_range, constraints=constraints)\n poisson_terms = []\n for mod, dat in zip(model, data):\n if not mod.is_extended:\n raise NotExtendedPDFError(\"The pdf {} is not extended but has to be (for an extended fit)\".format(mod))\n nevents = dat.nevents if dat.weights is None else ztf.reduce_sum(dat.weights)\n poisson_terms.append(-mod.get_yield() + ztf.to_real(nevents) * tf.log(mod.get_yield()))\n nll -= tf.reduce_sum(poisson_terms)\n return nll\n\n\nclass SimpleLoss(CachedLoss):\n _name = \"SimpleLoss\"\n\n def __init__(self, func, dependents=None, errordef=None):\n self._simple_func = func\n self._simple_errordef = errordef\n self._simple_func_dependents = convert_to_container(dependents, container=set)\n\n 
super().__init__(model=[], data=[], fit_range=[])\n\n def _get_dependents(self):\n dependents = self._simple_func_dependents\n if dependents is None:\n independent_params = tf.get_collection(\"zfit_independent\")\n dependents = get_dependents_auto(tensor=self.value(), candidates=independent_params)\n self._simple_func_dependents = dependents\n return dependents\n\n @property\n def errordef(self):\n errordef = self._simple_errordef\n if errordef is None:\n errordef = -999\n # raise RuntimeError(\"For this SimpleLoss, no error calculation is possible.\")\n else:\n return errordef\n\n def _loss_func(self, model, data, fit_range, constraints=None):\n loss = self._simple_func\n return loss()\n\n def __add__(self, other):\n raise IntentionNotUnambiguousError(\"Cannot add a SimpleLoss, 'addition' of losses can mean anything.\"\n \"Add them manually\")\n" ]
[ [ "tensorflow.log", "tensorflow.get_collection", "tensorflow.reduce_sum" ] ]
HuangShiqing/Paddle-Lite
[ "061f94e4624c7cc657ff914c0e589bf1192d73c5" ]
[ "lite/tests/unittest_py/op/test_scale_op.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('../')\n\nfrom auto_scan_test import AutoScanTest, IgnoreReasons\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport unittest\n\nimport hypothesis\nfrom hypothesis import given, settings, seed, example, assume\nimport hypothesis.strategies as st\nimport numpy as np\nfrom functools import partial\nimport argparse\n\n\nclass TestScaleOp(AutoScanTest):\n def __init__(self, *args, **kwargs):\n AutoScanTest.__init__(self, *args, **kwargs)\n self.enable_testing_on_place(\n TargetType.ARM,\n PrecisionType.FP32,\n DataLayoutType.NCHW,\n thread=[1, 4])\n self.enable_testing_on_place(\n TargetType.X86,\n PrecisionType.FP32,\n DataLayoutType.NCHW,\n thread=[1, 4])\n opencl_places = [\n Place(TargetType.OpenCL, PrecisionType.FP16,\n DataLayoutType.ImageDefault), Place(\n TargetType.OpenCL, PrecisionType.FP16,\n DataLayoutType.ImageFolder),\n Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),\n Place(TargetType.OpenCL, PrecisionType.Any,\n DataLayoutType.ImageDefault), Place(\n TargetType.OpenCL, PrecisionType.Any,\n DataLayoutType.ImageFolder),\n Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),\n Place(TargetType.Host, PrecisionType.FP32)\n ]\n self.enable_testing_on_place(places=opencl_places)\n metal_places = [\n Place(TargetType.Metal, PrecisionType.FP32,\n DataLayoutType.MetalTexture2DArray),\n Place(TargetType.Metal, PrecisionType.FP16,\n DataLayoutType.MetalTexture2DArray),\n Place(TargetType.ARM, PrecisionType.FP32),\n Place(TargetType.Host, PrecisionType.FP32)\n ]\n self.enable_testing_on_place(places=metal_places)\n self.enable_testing_on_place(\n TargetType.ARM,\n PrecisionType.FP16,\n DataLayoutType.NCHW,\n thread=[1, 4])\n self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)\n self.enable_devices_on_nnadapter(device_names=[\n \"kunlunxin_xtcl\", \"cambricon_mlu\", \"nvidia_tensorrt\"\n ])\n\n def is_program_valid(self,\n program_config: ProgramConfig,\n predictor_config: CxxConfig) -> bool:\n x_dtype = program_config.inputs[\"input_data\"].dtype\n target_type = predictor_config.target()\n if target_type in [TargetType.ARM]:\n if predictor_config.precision(\n ) == PrecisionType.FP16 and x_dtype != np.float32:\n return False\n if target_type == TargetType.NNAdapter:\n if program_config.inputs[\"input_data\"].dtype != np.float32:\n return False\n return True\n\n def sample_program_configs(self, draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=1, max_value=8), min_size=1, max_size=4))\n bias = draw(st.floats(min_value=-5, max_value=5))\n bias_after_scale = draw(st.booleans())\n scale = draw(st.floats(min_value=-5, max_value=5))\n input_type = draw(st.sampled_from([\"int32\", \"int64\", \"float32\"]))\n has_scale_tensor = False # draw(st.booleans())\n\n def generate_data(*args, **kwargs):\n low, high = 
-10, 10\n dtype = \"float32\"\n shape = kwargs[\"shape\"]\n if \"low\" in kwargs:\n low = kwargs[\"low\"]\n if \"high\" in kwargs:\n high = kwargs[\"high\"]\n if \"dtype\" in kwargs:\n dtype = kwargs[\"dtype\"]\n\n if dtype == \"int32\":\n if low == high:\n return low * np.ones(shape).astype(np.int32)\n else:\n return np.random.randint(low, high, shape).astype(np.int32)\n elif dtype == \"int64\":\n if low == high:\n return low * np.ones(shape).astype(np.int64)\n else:\n return np.random.randint(low, high, shape).astype(np.int64)\n elif dtype == \"float32\":\n return (high - low\n ) * np.random.random(shape).astype(np.float32) + low\n\n input_dict = {\"X\": [\"input_data\"]}\n input_data_dict = {\n \"input_data\": TensorConfig(data_gen=partial(\n generate_data, dtype=input_type, shape=in_shape))\n }\n if has_scale_tensor:\n input_dict[\"ScaleTensor\"] = \"scale_tensor_data\"\n input_data_dict[\"scale_tensor_data\"] = TensorConfig(shape=[1, ])\n\n scale_op = OpConfig(\n type=\"scale\",\n inputs=input_dict,\n outputs={\"Out\": [\"output_data\"]},\n attrs={\n \"bias\": bias,\n \"bias_after_scale\": bias_after_scale,\n \"scale\": scale\n })\n\n program_config = ProgramConfig(\n ops=[scale_op],\n weights={},\n inputs=input_data_dict,\n outputs=[\"output_data\"])\n\n return program_config\n\n def sample_predictor_configs(self):\n atol, rtol = 1e-5, 1e-5\n target_str = self.get_target()\n if target_str == \"Metal\":\n atol, rtol = 1e-2, 1e-2\n return self.get_predictor_configs(), [\"scale\"], (atol, rtol)\n\n def add_ignore_pass_case(self):\n def _teller1(program_config, predictor_config):\n target_type = predictor_config.target()\n in_shape = list(program_config.inputs[\"input_data\"].shape)\n in_data_type = program_config.inputs[\"input_data\"].dtype\n if target_type == TargetType.Metal:\n if len(in_shape) != 4 or in_data_type != \"float32\":\n return True\n\n def _teller2(program_config, predictor_config):\n target_type = predictor_config.target()\n if target_type == TargetType.Metal:\n return True\n\n def _teller3(program_config, predictor_config):\n target_type = predictor_config.target()\n x_dtype = program_config.inputs[\"input_data\"].dtype\n if target_type == TargetType.OpenCL:\n if x_dtype == np.int32 or x_dtype == np.int64:\n return True\n\n self.add_ignore_check_case(\n _teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,\n \"Lite does not support this op in a specific case. We need to fix it as soon as possible.\"\n )\n self.add_ignore_check_case(\n _teller2, IgnoreReasons.ACCURACY_ERROR,\n \"The op output has diff in a specific case on metal. We need to fix it as soon as possible.\"\n )\n self.add_ignore_check_case(\n _teller3, IgnoreReasons.PADDLELITE_NOT_SUPPORT,\n \"Lite does not support this op when dtype is int32 or int64 on Opencl. \"\n )\n\n def test(self, *args, **kwargs):\n target_str = self.get_target()\n max_examples = 100\n if target_str in [\"OpenCL\", \"Metal\"]:\n # Make sure to generate enough valid cases for specific targets\n max_examples = 2000\n elif target_str in [\"NNAdapter\"]:\n # Make sure to generate enough valid cases for specific targets\n max_examples = 300\n self.run_and_statis(\n quant=False, min_success_num=25, max_examples=max_examples)\n\n\nif __name__ == \"__main__\":\n unittest.main(argv=[''])\n" ]
[ [ "numpy.random.random", "numpy.ones", "numpy.random.randint" ] ]
nuttamas/PycQED_py3
[ "1ee35c7428d36ed42ba4afb5d4bda98140b2283e" ]
[ "pycqed/measurement/CBox_sweep_functions.py" ]
[ "import numpy as np\nimport logging\nfrom pycqed.measurement import sweep_functions as swf\nfrom pycqed.measurement.sweep_functions import Soft_Sweep\nfrom pycqed.measurement.waveform_control_CC import waveform as wf\n\n# FIXME: Commented out as there is no module named Experiments.CLEAR.prepare_for_CLEAR.prepare_for_CLEAR\n# from Experiments.CLEAR.prepare_for_CLEAR import prepare_for_CLEAR\n\nimport time\nimport imp\ngauss_width = 10\nimp.reload(wf)\n\n\nclass CBox_Sweep(swf.Hard_Sweep):\n\n def __init__(self, Duplexer=False, **kw):\n self.sweep_control = 'hard'\n if not hasattr(self, 'cal_points'):\n self.cal_points = kw.pop('cal_points', 10)\n\n def prepare(self, **kw):\n pass\n\n def finish(self, **kw):\n pass\n\n######################\n# Time domain sweeps #\n######################\n\n\nclass T1(CBox_Sweep):\n '''\n Performs a T1 measurement using a tektronix and the CBox.\n The tektronix is used for timing the pulses, a Pi-pulse is loaded onto the\n CBox.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, stepsize=2000,\n amp180=4000, f_modulation=-0.02, **kw):\n self.name = 'T1'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! this is copy from AWG_swf\n self.available_stepsizes = [50, 100, 200, 500, 1000, 1500, 2000, 3000]\n\n self.filename = 'FPGA_T1_%i_5014' % (stepsize)\n\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.NoSegments = NoSegments\n self.amp180 = amp180\n self.gauss_width = gauss_width\n self.f_modulation = f_modulation\n\n if stepsize not in self.available_stepsizes:\n logging.error('Stepsize not available')\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_awg_mode(0, 1)\n self.CBox.set_awg_mode(1, 1)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n Wave_I, Wave_Q = wf.mod_gauss(self.amp180, self.gauss_width,\n self.f_modulation)\n self.CBox.set_awg_lookuptable(0, 0, 1, np.round(Wave_I))\n self.CBox.set_awg_lookuptable(0, 0, 0, np.round(Wave_Q))\n # additionally loading to AWG1 for scope\n self.CBox.set_awg_lookuptable(1, 0, 1, np.round(Wave_I))\n self.CBox.set_awg_lookuptable(1, 0, 0, np.round(Wave_Q))\n\n # These two lines should be combined to CBox.set_No_Samples but is\n # untested\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass Lutman_par_with_reload(Soft_Sweep):\n\n def __init__(self, LutMan, parameter):\n '''\n Generic sweep function that combines setting a LutMan parameter\n with reloading lookuptables.\n '''\n super().__init__()\n self.LutMan = LutMan\n self.parameter = parameter\n self.name = parameter.name\n self.parameter_name = parameter.label\n self.unit = parameter.unit\n\n def set_parameter(self, val):\n self.parameter.set(val)\n self.LutMan.load_pulses_onto_AWG_lookuptable()\n\n\nclass Lutman_par_with_reload_single_pulse(Soft_Sweep):\n\n def __init__(self, LutMan, parameter, pulse_names=['X180']):\n '''\n Generic sweep function that combines setting a LutMan parameter\n with reloading lookuptables.\n '''\n super().__init__()\n self.LutMan = LutMan\n self.parameter = parameter\n self.name = parameter.name\n self.parameter_name = parameter.label\n self.unit = parameter.unit\n self.pulse_names = pulse_names\n self.label = parameter.label\n\n def set_parameter(self, val):\n self.parameter.set(val)\n for pulse_name in self.pulse_names:\n self.LutMan.load_pulse_onto_AWG_lookuptable(pulse_name)\n\n\nclass 
LutMan_amp180_90(Soft_Sweep):\n '''\n Sweeps both the amp180 parameter and the amp90 of the CBox_lut_man\n Automatically sets amp90 to half of amp180.\n The amp180 is the sweep parameter that is set and tracked.\n '''\n\n def __init__(self, LutMan, reload_pulses=True, awg_nr=0):\n super(self.__class__, self).__init__()\n self.awg_nr = awg_nr\n self.reload_pulses = reload_pulses\n self.name = 'lookuptable amp180'\n self.parameter_name = 'amp180'\n self.unit = 'mV'\n self.LutMan = LutMan\n\n def set_parameter(self, val):\n self.LutMan.set('Q_amp180', val)\n self.LutMan.set('Q_amp90', val/2.0)\n if self.reload_pulses:\n self.LutMan.load_pulses_onto_AWG_lookuptable()\n\n\nclass DAC_offset(CBox_Sweep):\n '''\n Varies DAC offsets in CBox AWG's. Additionally identity pulses are loaded\n in the lookuptable 0, of I and Q channels\n '''\n\n def __init__(self, AWG_nr, dac_ch, CBox):\n super(self.__class__, self).__init__()\n self.sweep_control = 'soft' # Overwrites 'hard sweep part'\n self.name = 'CBox DAC offset'\n self.parameter_name = 'Voltage'\n self.unit = 'mV'\n self.filename = 'FPGA_DAC_offset_sweep_5014'\n self.dac_channel = dac_ch\n self.AWG_nr = AWG_nr\n self.CBox = CBox\n # any arbitrary sequence that is not time dependent on the pulse\n # trigger will do\n\n def set_parameter(self, val):\n self.CBox.set_dac_offset(self.AWG_nr, self.dac_channel, val)\n\n\nclass Ramsey(CBox_Sweep):\n '''\n Performs a T2 Ramsey measurement using a tektronix and the CBox.\n The tektronix is used for timing the pulses.\n Codewords are used to determine what pulse will be used.\n\n WARNING:\n The artificial detuning is applied by delaying the pi/2 pulses as the\n sideband modulation is the same for every pulse.\n This creates an error in the x-values of the sweep points<50ns.\n This should be taken care of when interpreting data for shorter timescales.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, stepsize=50, **kw):\n print('WARNING, this function is deprecated. 
use Ramsey_tape()')\n # self.name = 'Ramsey'\n # self.parameter_name = 'time'\n # self.unit = 'ns'\n # self.available_stepsizes = [50, 100, 200, 500, 1000, 1500]\n # # NOTE: stepsizes below 50ns are not available because of SBmod freq\n # # self.available_stepsizes = [5, 10, 30, 100, 200, 500, 1000, 1500]\n # self.CBox_lut_man = qt.instruments['CBox_lut_man']\n # self.filename = 'FPGA_Codeword_Ramsey_%i_5014' % (stepsize)\n\n # base_pulse_delay = 200\n # self.sweep_points = np.linspace(stepsize+base_pulse_delay,\n # NoSegments*stepsize + base_pulse_delay,\n # NoSegments)\n # self.NoSegments = NoSegments\n\n # if stepsize not in self.available_stepsizes:\n # raise Exception('Stepsize not available')\n # super(self.__class__, self).__init__(**kw)\n\n # def prepare(self):\n # self.CBox.set_acquisition_mode(0)\n # self.CBox.set_awg_mode(0, 0)\n # self.CBox.set_awg_mode(1, 0)\n # self.AWG.stop()\n # self.AWG.set_setup_filename(self.filename,\n # force_load=False)\n\n # self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n # self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n # self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass Ramsey_tape(CBox_Sweep):\n '''\n Performs a T2 Ramsey measurement using a tektronix and the CBox.\n The tektronix is used for timing the pulses.\n Codewords are used to determine what pulse will be used.\n\n Artificial detuning is applied by delaying the triggers 5 ns\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, stepsize=50, **kw):\n self.name = 'Ramsey'\n print('Using tape mode Ramsey')\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.available_stepsizes = [50, 100, 200, 500, 1000, 1500]\n # NOTE: stepsizes below 50ns are not available because of SBmod freq\n # self.available_stepsizes = [5, 10, 30, 100, 200, 500, 1000, 1500]\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.TD_Meas = qt.instruments['TD_Meas']\n self.filename = 'FPGA_Ramsey_%i_5014' % (stepsize)\n\n base_pulse_delay = 200\n self.sweep_points = np.arange(stepsize+base_pulse_delay,\n NoSegments*(stepsize+5) +\n base_pulse_delay,\n stepsize + 5, dtype=float)\n\n self.NoSegments = NoSegments\n self.NoCalpoints = 10\n\n if stepsize not in self.available_stepsizes:\n raise Exception('Stepsize not available')\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(1, 2)\n self.AWG.stop()\n self.TD_Meas.set_CBox_tape_mode(True)\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n ramsey_tape = [3, 3] * int((self.NoSegments - self.NoCalpoints))\n cal_zero_tape = [0] * int(self.NoCalpoints/2)\n cal_one_tape = [1] * int(self.NoCalpoints/2)\n tape = np.array(ramsey_tape+cal_zero_tape+cal_one_tape)\n\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(1, len(tape), tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass Echo(CBox_Sweep):\n '''\n Performs a T2 Echo measurement using a tektronix and the CBox.\n The tektronix is used for timing the pulses, a Pi-pulse is loaded onto the\n CBox.\n '''\n\n def __init__(self, stepsize,\n amp180, amp90, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, f_modulation=-0.02, **kw):\n print(\"amp180\", amp180)\n self.name = 'Echo'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! 
this is copy from AWG_swf\n self.available_stepsizes = [50, 100, 200, 500, 1000, 1500, 2000, 3000]\n\n self.filename = 'FPGA_Echo_%i_5014' % (stepsize)\n\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.NoSegments = NoSegments\n self.amp180 = amp180\n self.amp90 = amp90\n self.gauss_width = gauss_width\n self.f_modulation = f_modulation\n\n if stepsize not in self.available_stepsizes:\n logging.error('Stepsize not available')\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 0)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n Wave_I_180, Wave_Q_180 = wf.mod_gauss(self.amp180, self.gauss_width,\n self.f_modulation)\n Wave_I_90, Wave_Q_90 = wf.mod_gauss(self.amp90, self.gauss_width,\n self.f_modulation)\n self.CBox.set_awg_lookuptable(0, 7, 1, np.round(Wave_I_180))\n self.CBox.set_awg_lookuptable(0, 7, 0, np.round(Wave_Q_180))\n self.CBox.set_awg_lookuptable(0, 0, 1, np.round(Wave_I_90))\n self.CBox.set_awg_lookuptable(0, 0, 0, np.round(Wave_Q_90))\n\n # These two lines should be combined to CBox.set_No_Samples but is\n # untested\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass T1_tape(CBox_Sweep):\n '''\n Performs a T1 measurement using a tektronix for metronome and the CBox to\n produce pulses in tape mode. The tektronix is used for timing the pulses, a\n Pi-pulse is loaded onto the CBox.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, stepsize=4000,\n amp180=4000, amp90=2000, f_modulation=-0.02, cal_points=10, **kw):\n self.name = 'T1_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! 
this is copy from AWG_swf\n self.available_stepsizes = [50, 100, 200,\n 500, 1000, 1500, 2000, 3000, 4000]\n\n self.filename = 'FPGA_Tape_T1_%i_5014' % (stepsize)\n\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.NoSegments = NoSegments\n self.amp180 = amp180\n self.amp90 = amp90\n self.gauss_width = gauss_width\n self.f_modulation = f_modulation\n\n if stepsize not in self.available_stepsizes:\n logging.error('Stepsize not available')\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n print(\"CBox set to mode 0\")\n self.CBox.set_awg_mode(0, 0)\n self.CBox.set_awg_mode(1, 0)\n self.AWG.stop()\n print(\"AWG is stopped\")\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n Wave_I_180, Wave_Q_180 = wf.mod_gauss(self.amp180, self.gauss_width,\n self.f_modulation)\n Wave_I_0 = Wave_I_180*0\n Wave_Q_0 = Wave_I_0\n\n self.CBox.set_awg_lookuptable(0, 0, 1, np.round(Wave_I_180))\n self.CBox.set_awg_lookuptable(0, 0, 0, np.round(Wave_Q_180))\n print(\"1\")\n self.CBox.set_awg_lookuptable(0, 7, 1, np.round(Wave_I_0))\n self.CBox.set_awg_lookuptable(0, 7, 0, np.round(Wave_Q_0))\n # copying the tables to AWG2 for scope\n self.CBox.set_awg_lookuptable(1, 0, 1, np.round(Wave_I_180))\n self.CBox.set_awg_lookuptable(1, 0, 0, np.round(Wave_Q_180))\n print(\"2\")\n self.CBox.set_awg_lookuptable(1, 7, 1, np.round(Wave_I_0))\n self.CBox.set_awg_lookuptable(1, 7, 0, np.round(Wave_Q_0))\n sequence_points = self.NoSegments-self.cal_points\n tape_length = (sequence_points)*self.NoSegments\n tape = 7*np.ones(tape_length)\n print(\"tape_length\", tape_length)\n for i in range(sequence_points):\n tape[(i+1)*(sequence_points)-i-1] = 0\n print(tape[i*(sequence_points):i *\n (sequence_points)+sequence_points])\n print(\"done first part\")\n # adding calibration points\n for i in range(self.cal_points):\n first_cal_segment = (sequence_points)**2\n segment = first_cal_segment+(i+1)*(sequence_points)-1\n # print segment\n if i > (self.cal_points/2-1):\n tape[segment] = 0\n # print segment-(sequence_points)+1\n print(tape[segment-sequence_points+1:segment+1])\n print(i)\n print(\"3\")\n self.CBox.set_awg_tape(0, len(tape), tape)\n print(\"tape length\", len(tape))\n # copying the tables to AWG2 for scope\n self.CBox.set_awg_tape(1, len(tape), tape)\n # These two lines should be combined to CBox.set_No_Samples but is\n # untested\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n print(\"tape is loaded\")\n # for i in range(len(tape)):\n # if np.mod(i,20) == 0:\n # print (\"#\\n\")\n\n\nclass OnOff_touch_n_go(CBox_Sweep):\n '''\n Performs OnOff measurement using the CBox to produce pulses in codeword\n tape mode.\n '''\n\n def __init__(self,\n NoSegments=2, stepsize=2000, pulses='OffOn',\n NoShots=8000, **kw):\n self.name = 'FPGA_touch_n_go_calibration'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.NoSegments = NoSegments\n self.NoShots = NoShots\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, stepsize*NoSegments,\n NoSegments)\n self.pulses = pulses\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_log_length(self.NoShots)\n self.CBox.set_awg_mode(0, 2)\n self.AWG.stop()\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n if self.pulses == 'OffOn':\n tape = np.array([0, 1])\n if self.pulses == 'OffOff':\n tape = np.array([0, 0])\n if 
self.pulses == 'OnOn':\n tape = np.array([1, 1])\n self.CBox.set_awg_tape(0, len(tape), tape)\n print(\"tape\", tape)\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass custom_tape_touch_n_go(CBox_Sweep):\n\n def __init__(self,\n NoSegments=2, stepsize=2000,\n custom_tape=None, NoShots=8000, **kw):\n self.name = 'custom_tape_touch_n_go'\n self.parameter_name = 'msmt index'\n self.unit = ''\n self.NoSegments = NoSegments\n self.NoShots = NoShots\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.arange(NoSegments)\n self.custom_tape = custom_tape\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_run_mode(0)\n print('setting nr of shots to', self.NoShots)\n self.CBox.set_log_length(self.NoShots)\n self.CBox.set_awg_mode(0, 2)\n self.AWG.stop()\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n if self.custom_tape is None:\n tape = np.array([0, 0])\n else:\n tape = self.custom_tape\n print(\"using the custom tape \", tape)\n self.CBox.set_awg_tape(0, len(tape), tape)\n\n\nclass random_telegraph_tape_touch_n_go(CBox_Sweep):\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=2, stepsize=2000,\n p_switch_us=0, NoShots=8000, pulse_a=0, pulse_b=1, **kw):\n self.name = 'random_telegraph_tape_touch_n_go'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.NoSegments = NoSegments\n self.NoShots = NoShots\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, stepsize*NoSegments,\n NoSegments)\n self.p_switch_us = p_switch_us\n self.p_switch = 1-(1-self.p_switch_us)**(stepsize/1000)\n self.pulse_a = pulse_a\n self.pulse_b = pulse_b\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_log_length(self.NoShots)\n self.CBox.set_awg_mode(0, 2)\n self.AWG.stop()\n\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n if self.NoShots > 4000:\n tape_elements = 4000\n else:\n tape_elements = self.NoShots\n tape = np.zeros(tape_elements)\n tape[0] = self.pulse_a\n for i in range(tape_elements-1):\n if np.random.rand(1) < self.p_switch: # flipping with chance p_switch\n if tape[i] == self.pulse_a:\n tape[i+1] = self.pulse_b\n else:\n tape[i+1] = self.pulse_a\n else: # no flipping event\n tape[i+1] = tape[i]\n self.CBox.set_awg_tape(0, len(tape), tape)\n\n\nclass AllXY(CBox_Sweep):\n '''\n Performs AllXY measurement using the CBox to produce pulses in codeword\n trigger mode. The tektronix is used for the coded trigges.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=42, stepsize=1, **kw):\n print('Deprecated, recommend using AllXY_tape() instead')\n\n self.name = 'AllXY'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! 
this is copy from AWG_swf\n self.filename = 'FPGA_AllXY_5014'\n self.cal_points = [list(range(10)), list(range(-8, 0))]\n self.NoSegments = NoSegments\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 0)\n self.CBox.set_awg_mode(1, 0)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass AllXY_tape(CBox_Sweep):\n '''\n Performs AllXY measurement using the CBox toproduce pulses in tape mode.\n The tektronix is used to time the pulses.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=42, stepsize=1, **kw):\n self.name = 'AllXY_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.filename = 'FPGA_Tape_AllXY_5014'\n self.cal_points = [list(range(10)), list(range(-8, 0))]\n self.NoSegments = NoSegments\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.TD_Meas = qt.instruments['TD_Meas']\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(2, 2)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(2)\n self.TD_Meas.set_CBox_tape_mode(True)\n # print \"AWG 1 luts loaded\"\n tape = np.array([0, 0, 0, 0, 1, 1, 1, 1, # 1, 3\n 2, 2, 2, 2, 1, 2, 1, 2, # 5, 7\n 2, 1, 2, 1, 3, 0, 3, 0, # 9, 11\n 4, 0, 4, 0, 3, 4, 3, 4, # 13, 15\n 4, 3, 4, 3, 3, 2, 3, 2, # 17, 19\n 4, 1, 4, 1, 1, 4, 1, 4, # 21,23\n 2, 3, 2, 3, 3, 1, 3, 1, # 25, 27\n 1, 3, 1, 3, 4, 2, 4, 2, # 29, 31\n 2, 4, 2, 4, 1, 0, 1, 0, # 33, 35\n 2, 0, 2, 0, 3, 3, 3, 3, # 37, 39\n 4, 4, 4, 4]) # 41\n\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(2, len(tape), tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass OnOff_tape(CBox_Sweep):\n '''\n Performs OnOff measurement using the CBox toproduce pulses in tape mode.\n The tektronix is used to time the pulses.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=2, stepsize=1, pulses='OffOn', **kw):\n self.name = 'OnOff_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.filename = 'FPGA_Tape_OnOff_5014'\n self.NoSegments = NoSegments\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.pulses = pulses\n print('New version tape')\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(1, 2)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n # print \"AWG 1 luts loaded\"\n if self.pulses == 'OffOn':\n tape = np.array([0, 1])\n if self.pulses == 'OffOff':\n tape = np.array([0, 0])\n if self.pulses == 'OnOn':\n tape = np.array([1, 1])\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(1, len(tape), 
tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass OnOff_transients(CBox_Sweep):\n '''\n Performs OnOff measurement using the CBox toproduce pulses in tape mode.\n The tektronix is used to time the pulses.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=2, stepsize=1, pulses='OffOn', **kw):\n self.name = 'OnOff_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.filename = 'FPGA_Tape_OnOff_5014'\n self.NoSegments = NoSegments\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.pulses = pulses\n print('New version tape')\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(1, 2)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n # print \"AWG 1 luts loaded\"\n if self.pulses == 'OffOn':\n tape = np.array([0, 1])\n if self.pulses == 'OffOff':\n tape = np.array([0, 0])\n if self.pulses == 'OnOn':\n tape = np.array([1, 1])\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(1, len(tape), tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass single_element_tape_test(CBox_Sweep):\n '''\n Performs a measurement similar to AllXY in the syndrome it produces\n but only uses a single pulse per segment.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=42, stepsize=1, **kw):\n self.name = 'Single_element_test_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.filename = 'FPGA_tape_single_test_5014'\n self.cal_points = [list(range(10)), list(range(-8, 0))]\n self.NoSegments = NoSegments\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n print('New version tape')\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(1, 2)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n tape = np.array([0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, # 10 times identity\n\n 3, 4, 3, 4, 3,\n 4, 3, 4, 3, 4,\n 3, 4, 3, 4, 3,\n 4, 3, 4, 3, 4,\n 3, 4, 3, 4, # 24 times pi/2 pulses\n 1, 2, 1, 2, 1,\n 2, 1, 2 # 8 times pi pulse\n ])\n\n tape = np.array([0, 0, 1, 1, 2,\n 2, 1, 2, 2, 1, # 10 times identity\n\n 3, 0, 4, 0, 3,\n 4, 4, 3, 3, 2,\n 4, 1, 1, 4, 2,\n 3, 3, 1, 1, 3,\n 4, 2, 2, 4, # 24 times pi/2 pulses\n\n # 1, 2, 1, 2, 1,\n # 2, 1, 2\n 1, 0, 2, 0, 3,\n 3, 4, 4 # 8 times pi pulse\n ])\n\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(1, len(tape), tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass drag_detuning(CBox_Sweep):\n '''\n Performs drag_detuning measurement using the CBox to produce pulses in codeword\n trigger mode. The tektronix is used for the coded trigges.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=2, stepsize=1, **kw):\n self.name = 'drag_detuning'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! 
this is copy from AWG_swf\n self.filename = 'FPGA_DragDetuning_5014'\n self.NoSegments = NoSegments\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 0)\n self.CBox.set_awg_mode(1, 0)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass flipping_sequence(CBox_Sweep):\n '''\n Loads a codeword trigger sequence that consists of applying a X90 pulse\n follwed by N X180 pulses. With 1<N<50 followed by 10 calibration points.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", **kw):\n self.name = 'Flipping sequence'\n self.parameter_name = 'number of X180 pulses '\n self.unit = 'N'\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.filename = 'FPGA_X90_N_X180_5014'\n self.NoSegments = 60\n self.sweep_points = np.linspace(\n 1, 2 * self.NoSegments, self.NoSegments)\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 0)\n self.CBox.set_awg_mode(1, 0)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\n######################\n# CLEAR sweeps #\n######################\n\n# Rampdown sweepfunctions\nclass CBox_CLEAR_amplitude_1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude 1'\n self.parameter_name = 'CLEAR pulse amplitude 1'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude 2'\n self.parameter_name = 'CLEAR pulse amplitude 2'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_a1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_a1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude a1'\n self.parameter_name = 'CLEAR pulse amplitude a1'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_a1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_a2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, 
**kw):\n super(CBox_CLEAR_amplitude_a2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude a2'\n self.parameter_name = 'CLEAR pulse amplitude a2'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_a2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_b1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_b1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude b1'\n self.parameter_name = 'CLEAR pulse amplitude b1'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_b1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_b2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_b2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude b2'\n self.parameter_name = 'CLEAR pulse amplitude b2'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_b2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase 1'\n self.parameter_name = 'CLEAR pulse phase 1'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase 2'\n self.parameter_name = 'CLEAR pulse phase 2'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_a1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_a1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase a1'\n self.parameter_name = 'CLEAR pulse phase a1'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_a1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_a2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_a2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase a2'\n self.parameter_name = 'CLEAR pulse phase a2'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n 
self.CBox_lut_man_2.set_M_phase_CLEAR_a2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_b1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_b1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase b1'\n self.parameter_name = 'CLEAR pulse phase b1'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_b1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_b2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_b2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase b2'\n self.parameter_name = 'CLEAR pulse phase b2'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_b2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_length_unc(Soft_Sweep):\n '''\n Setting the length of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_length_unc, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse length unconditional'\n self.parameter_name = 'CLEAR pulse length unconditional'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_length_CLEAR_unc(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_length_c(Soft_Sweep):\n '''\n Setting the length of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_length_unc, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse length conditional'\n self.parameter_name = 'CLEAR pulse length conditional'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_length_CLEAR_c(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_tng_RO_Pulse_length(Soft_Sweep):\n '''\n Setting the length of the tng Readout Pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_tng_RO_Pulse_length, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.name = 'CBox_tng_RO_Pulse_length'\n self.parameter_name = 'Readout pulse length'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n self.CBox.set_tng_readout_pulse_length(val)\n\n\nclass CBox_integration_length(Soft_Sweep):\n '''\n Setting the length of the tng Readout Pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_integration_length, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.name = 'CBox_integration_length'\n self.parameter_name = 'Readout integration length'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n self.CBox.set_integration_length(int(val/5))\n\n\nclass CBox_tng_heartbeat_interval(Soft_Sweep):\n '''\n Setting the length of the tng heartbeat interval\n '''\n\n def __init__(self, **kw):\n super(CBox_tng_heartbeat_interval, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.name = 'CBox_tng_heartbeat_interval'\n self.parameter_name = 'heartbeat_interval'\n self.unit = 
'ns'\n\n def set_parameter(self, val):\n self.CBox.set_tng_heartbeat_interval(val)\n\n\nclass CBox_tng_burst_heartbeat_and_heartbeat_interval(Soft_Sweep):\n '''\n Setting the length burst heartbeat interval\n Setting the heartbeat to: burst heartbeat interval * iterations\n +200000 for relaxation to steady state\n '''\n\n def __init__(self, **kw):\n super(CBox_tng_burst_heartbeat_and_heartbeat_interval, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.name = 'CBox_tng_burst_heartbeat_interval'\n self.parameter_name = 'burst_heartbeat_interval'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n iterations = self.CBox.get_tng_burst_heartbeat_n()\n self.CBox.set_tng_heartbeat_interval(val*iterations+200000)\n self.CBox.set_tng_burst_heartbeat_interval(val)\n\n\nclass CBox_tng_Ramsey_idle_and_heartbeat(Soft_Sweep):\n '''\n Setting the length of the tng Readout Pulse\n '''\n\n def __init__(self, heartbeat_start, **kw):\n super(CBox_tng_Ramsey_idle_and_heartbeat, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.name = 'CBox_tng_Ramsey_idle_and_heartbeat'\n self.parameter_name = 'Ramsey_idle'\n self.unit = 'ns'\n self.heartbeat_start = heartbeat_start\n\n def set_parameter(self, val):\n self.CBox_lut_man.set_Ramsey_idling(val)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox.set_tng_readout_delay(100+val)\n self.CBox.set_tng_heartbeat_interval(self.heartbeat_start+val)\n\n\nclass CBox_tng_Ramsey_idle_and_heartbeat_v2(Soft_Sweep):\n '''\n Setting the length of the tng Readout Pulse\n Differs from old version that it uses 2! pulses with a delay between them\n '''\n\n def __init__(self, burst_heartbeat_start, **kw):\n super(CBox_tng_Ramsey_idle_and_heartbeat_v2, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.name = 'CBox_tng_Ramsey_idle_and_heartbeat'\n self.parameter_name = 'Ramsey_idle'\n self.unit = 'ns'\n self.burst_heartbeat_start = burst_heartbeat_start\n\n def set_parameter(self, val):\n self.CBox.set_tng_readout_delay(100)\n self.CBox.set_tng_second_pre_rotation_delay(100+val)\n self.CBox.set_tng_burst_heartbeat_interval(self.burst_heartbeat_start\n + val)\n\n\nclass None_Sweep_tape_restart(Soft_Sweep):\n\n def __init__(self, sweep_control='soft', **kw):\n super(None_Sweep_tape_restart, self).__init__()\n self.sweep_control = sweep_control\n self.name = 'None_Sweep_tape_restart'\n self.parameter_name = 'pts'\n self.unit = 'arb. unit'\n self.CBox = qt.instruments['CBox']\n\n def set_parameter(self, val):\n '''\n Set the parameter(s) to be sweeped. 
Differs per sweep function\n '''\n self.CBox.restart_awg_tape(0)\n self.CBox.restart_awg_tape(1)\n self.CBox.restart_awg_tape(2)\n\n\nclass prepare_for_conditional_depletion(Soft_Sweep):\n\n def __init__(self, AllXY_trigger=200, sweep_control='soft', double_pulse_Ramsey_idling=100, RTF_qubit_pulses=False, **kw):\n super(prepare_for_conditional_depletion, self).__init__()\n import Experiments.CLEAR.prepare_for_CLEAR as pfC # FIXME: import error\n self.pfC = pfC\n self.sweep_control = sweep_control\n self.name = 'prepare_for_conditional_depletion'\n self.parameter_name = 'depletion_pulse_length'\n self.unit = 'ns'\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.AllXY_trigger = AllXY_trigger\n self.double_pulse_Ramsey_idling = double_pulse_Ramsey_idling\n self.CBox = qt.instruments['CBox']\n self.RTF_qubit_pulses = RTF_qubit_pulses\n\n def set_parameter(self, val):\n '''\n Set the parameter(s) to be sweeped. Differs per sweep function\n '''\n\n self.pfC.prepare_for_CLEAR(length=300, depletion=True,\n integration=400,\n conditional=True, CLEAR_length=val,\n CLEAR_double_segment=False,\n CLEAR_double_frequency=True,\n cost_function='AllXY',\n AllXY_trigger=self.AllXY_trigger)\n if self.RTF_qubit_pulses:\n self.CBox_lut_man.set_lut_mapping(['I', 'X180', 'X90_X180_mX90',\n 'X90_X90', 'X90_X180_X90'])\n # This sets the idling in the X90_X90 element\n self.CBox_lut_man.set_Ramsey_idling(\n self.double_pulse_Ramsey_idling)\n self.CBox.set_tng_readout_delay(\n 100 + self.double_pulse_Ramsey_idling)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n\n\nclass prepare_for_unconditional_depletion(Soft_Sweep):\n\n def __init__(self, AllXY_trigger=200, sweep_control='soft', RTF_qubit_pulses=False, double_pulse_Ramsey_idling=100, **kw):\n super(prepare_for_unconditional_depletion, self).__init__()\n import Experiments.CLEAR.prepare_for_CLEAR as pfC # FIXME: import error\n self.pfC = pfC\n self.sweep_control = sweep_control\n self.name = 'prepare_for_unconditional_depletion'\n self.parameter_name = 'depletion_pulse_length'\n self.unit = 'ns'\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.CBox = qt.instruments['CBox']\n self.AllXY_trigger = AllXY_trigger\n self.RTF_qubit_pulses = RTF_qubit_pulses\n self.double_pulse_Ramsey_idling = double_pulse_Ramsey_idling\n\n def set_parameter(self, val):\n '''\n Set the parameter(s) to be sweeped. Differs per sweep function\n '''\n self.pfC.prepare_for_CLEAR(length=300, depletion=True,\n integration=460,\n conditional=False,\n CLEAR_length=val,\n CLEAR_double_segment=True,\n CLEAR_double_frequency=True,\n cost_function='AllXY',\n AllXY_trigger=self.AllXY_trigger)\n if self.RTF_qubit_pulses:\n self.CBox_lut_man.set_lut_mapping(['I', 'X180', 'X90_X180_mX90',\n 'X90_X90', 'X90_X180_X90'])\n # This sets the idling in the X90_X90 element\n self.CBox_lut_man.set_Ramsey_idling(\n self.double_pulse_Ramsey_idling)\n self.CBox.set_tng_readout_delay(\n 100 + self.double_pulse_Ramsey_idling)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n" ]
[ [ "numpy.ones", "numpy.array", "numpy.zeros", "numpy.arange", "numpy.random.rand", "numpy.round", "numpy.linspace" ] ]
darya-chyzhyk/nilearn
[ "c2a597586314ee1fe9da260d7d7ee00ef8b4eef5" ]
[ "nilearn/decoding/tests/test_space_net.py" ]
[ "import itertools\nfrom functools import partial\nfrom nose import SkipTest\nfrom nose.tools import (assert_equal, assert_true, assert_false,\n assert_raises)\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.utils import extmath\nfrom sklearn.linear_model import Lasso\nfrom sklearn.utils import check_random_state\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom nilearn._utils.testing import assert_raises_regex, assert_warns\nfrom nilearn.decoding.space_net import (\n _EarlyStoppingCallback, _space_net_alpha_grid, path_scores, BaseSpaceNet,\n _crop_mask, _univariate_feature_screening, SpaceNetClassifier,\n SpaceNetRegressor)\nfrom nilearn._utils.param_validation import _adjust_screening_percentile\nfrom nilearn.decoding.space_net_solvers import (_graph_net_logistic,\n _graph_net_squared_loss)\n\nmni152_brain_mask = (\n \"/usr/share/fsl/data/standard/MNI152_T1_1mm_brain_mask.nii.gz\")\nlogistic_path_scores = partial(path_scores, is_classif=True)\nsquared_loss_path_scores = partial(path_scores, is_classif=False)\n\n# Data used in almost all tests\nfrom .test_same_api import to_niimgs\nsize = 4\nfrom .simulate_graph_net_data import create_graph_net_simulation_data\nX_, y, w, mask = create_graph_net_simulation_data(\n snr=1., n_samples=10, size=size, n_points=5, random_state=42)\nX, mask = to_niimgs(X_, [size] * 3)\n\n\ndef test_space_net_alpha_grid(n_samples=4, n_features=3):\n rng = check_random_state(42)\n X = rng.randn(n_samples, n_features)\n y = np.arange(n_samples)\n\n for l1_ratio, is_classif in itertools.product([.5, 1.], [True, False]):\n alpha_max = np.max(np.abs(np.dot(X.T, y))) / l1_ratio\n np.testing.assert_almost_equal(_space_net_alpha_grid(\n X, y, n_alphas=1, l1_ratio=l1_ratio,\n logistic=is_classif), alpha_max)\n\n for l1_ratio, is_classif in itertools.product([.5, 1.], [True, False]):\n alpha_max = np.max(np.abs(np.dot(X.T, y))) / l1_ratio\n for n_alphas in range(1, 10):\n alphas = _space_net_alpha_grid(\n X, y, n_alphas=n_alphas, l1_ratio=l1_ratio,\n logistic=is_classif)\n np.testing.assert_almost_equal(alphas.max(), alpha_max)\n np.testing.assert_almost_equal(n_alphas, len(alphas))\n\n\ndef test_space_net_alpha_grid_same_as_sk():\n try:\n from sklearn.linear_model.coordinate_descent import _alpha_grid\n iris = load_iris()\n X = iris.data\n y = iris.target\n np.testing.assert_almost_equal(_space_net_alpha_grid(\n X, y, n_alphas=5), X.shape[0] * _alpha_grid(X, y, n_alphas=5,\n fit_intercept=False))\n except ImportError:\n raise SkipTest\n\n\ndef test_early_stopping_callback_object(n_samples=10, n_features=30):\n # This test evolves w so that every line of th _EarlyStoppingCallback\n # code is executed a some point. 
This a kind of code fuzzing.\n rng = check_random_state(42)\n X_test = rng.randn(n_samples, n_features)\n y_test = np.dot(X_test, np.ones(n_features))\n w = np.zeros(n_features)\n escb = _EarlyStoppingCallback(X_test, y_test, False)\n for counter in range(50):\n k = min(counter, n_features - 1)\n w[k] = 1\n\n # jitter\n if k > 0 and rng.rand() > .9:\n w[k - 1] = 1 - w[k - 1]\n\n escb(dict(w=w, counter=counter))\n assert_equal(len(escb.test_scores), counter + 1)\n\n # restart\n if counter > 20:\n w *= 0.\n\n\ndef test_params_correctly_propagated_in_constructors():\n for (penalty, is_classif, n_alphas, l1_ratio, n_jobs,\n cv, perc) in itertools.product([\"graph-net\", \"tv-l1\"],\n [True, False], [.1, .01],\n [.5, 1.], [1, -1], [2, 3],\n [5, 10]):\n cvobj = BaseSpaceNet(\n mask=\"dummy\", n_alphas=n_alphas, n_jobs=n_jobs, l1_ratios=l1_ratio,\n cv=cv, screening_percentile=perc, penalty=penalty,\n is_classif=is_classif)\n assert_equal(cvobj.n_alphas, n_alphas)\n assert_equal(cvobj.l1_ratios, l1_ratio)\n assert_equal(cvobj.n_jobs, n_jobs)\n assert_equal(cvobj.cv, cv)\n assert_equal(cvobj.screening_percentile, perc)\n\n\ndef test_screening_space_net():\n for verbose in [0, 2]:\n screening_percentile = assert_warns(UserWarning,\n _adjust_screening_percentile, 10,\n mask, verbose)\n screening_percentile = assert_warns(UserWarning,\n _adjust_screening_percentile, 10, mask)\n # We gave here a very small mask, judging by standards of brain size\n # thus the screening_percentile_ corrected for brain size should\n # be 100%\n assert_equal(screening_percentile, 100)\n\n\ndef test_logistic_path_scores():\n iris = load_iris()\n X, y = iris.data, iris.target\n _, mask = to_niimgs(X, [2, 2, 2])\n mask = mask.get_data().astype(np.bool)\n alphas = [1., .1, .01]\n test_scores, best_w = logistic_path_scores(\n _graph_net_logistic, X, y, mask, alphas, .5,\n np.arange(len(X)), np.arange(len(X)), {})[:2]\n test_scores = test_scores[0]\n assert_equal(len(test_scores), len(alphas))\n assert_equal(X.shape[1] + 1, len(best_w))\n\n\ndef test_squared_loss_path_scores():\n iris = load_iris()\n X, y = iris.data, iris.target\n _, mask = to_niimgs(X, [2, 2, 2])\n mask = mask.get_data().astype(np.bool)\n alphas = [1., .1, .01]\n test_scores, best_w = squared_loss_path_scores(\n _graph_net_squared_loss, X, y, mask, alphas, .5,\n np.arange(len(X)), np.arange(len(X)), {})[:2]\n test_scores = test_scores[0]\n assert_equal(len(test_scores), len(alphas))\n assert_equal(X.shape[1] + 1, len(best_w))\n\n\ndef test_tv_regression_simple():\n rng = check_random_state(42)\n dim = (4, 4, 4)\n W_init = np.zeros(dim)\n W_init[2:3, 1:2, -2:] = 1\n n = 10\n p = np.prod(dim)\n X = np.ones((n, 1)) + W_init.ravel().T\n X += rng.randn(n, p)\n y = np.dot(X, W_init.ravel())\n X, mask = to_niimgs(X, dim)\n print(\"%s %s\" % (X.shape, mask.get_data().sum()))\n alphas = [.1, 1.]\n\n for l1_ratio in [1.]:\n for debias in [True]:\n BaseSpaceNet(mask=mask, alphas=alphas, l1_ratios=l1_ratio,\n penalty=\"tv-l1\", is_classif=False, max_iter=10,\n debias=debias).fit(X, y)\n\n\ndef test_tv_regression_3D_image_doesnt_crash():\n rng = check_random_state(42)\n dim = (3, 4, 5)\n W_init = np.zeros(dim)\n W_init[2:3, 3:, 1:3] = 1\n\n n = 10\n p = dim[0] * dim[1] * dim[2]\n X = np.ones((n, 1)) + W_init.ravel().T\n X += rng.randn(n, p)\n y = np.dot(X, W_init.ravel())\n alpha = 1.\n X, mask = to_niimgs(X, dim)\n\n for l1_ratio in [0., .5, 1.]:\n BaseSpaceNet(mask=mask, alphas=alpha, l1_ratios=l1_ratio,\n penalty=\"tv-l1\", is_classif=False, max_iter=10).fit(X, 
y)\n\n\ndef test_graph_net_classifier_score():\n iris = load_iris()\n X, y = iris.data, iris.target\n y = 2 * (y > 0) - 1\n X_, mask = to_niimgs(X, (2, 2, 2))\n gnc = SpaceNetClassifier(mask=mask, alphas=1. / .01 / X.shape[0],\n l1_ratios=1., tol=1e-10,\n standardize=False, verbose=0,\n screening_percentile=100.).fit(X_, y)\n accuracy = gnc.score(X_, y)\n assert_equal(accuracy, accuracy_score(y, gnc.predict(X_)))\n\n\ndef test_log_reg_vs_graph_net_two_classes_iris(C=.01, tol=1e-10,\n zero_thr=1e-4):\n # Test for one of the extreme cases of Graph-Net: That is, with\n # l1_ratio = 1 (pure Lasso), we compare Graph-Net's coefficients'\n # performance with the coefficients obtained from Scikit-Learn's\n # LogisticRegression, with L1 penalty, in a 2 classes classification task\n iris = load_iris()\n X, y = iris.data, iris.target\n y = 2 * (y > 0) - 1\n X_, mask = to_niimgs(X, (2, 2, 2))\n tvl1 = SpaceNetClassifier(\n mask=mask, alphas=1. / C / X.shape[0], l1_ratios=1., tol=tol,\n verbose=0, max_iter=1000, penalty=\"tv-l1\", standardize=False,\n screening_percentile=100.).fit(X_, y)\n sklogreg = LogisticRegression(penalty=\"l1\",\n fit_intercept=True,\n solver='liblinear',\n tol=tol,\n C=C,\n ).fit(X, y)\n\n # compare supports\n np.testing.assert_array_equal((np.abs(tvl1.coef_) < zero_thr),\n (np.abs(sklogreg.coef_) < zero_thr))\n\n # compare predictions\n np.testing.assert_array_equal(tvl1.predict(X_), sklogreg.predict(X))\n\n\ndef test_lasso_vs_graph_net():\n # Test for one of the extreme cases of Graph-Net: That is, with\n # l1_ratio = 1 (pure Lasso), we compare Graph-Net's performance with\n # Scikit-Learn lasso\n lasso = Lasso(max_iter=100, tol=1e-8, normalize=False)\n graph_net = BaseSpaceNet(mask=mask, alphas=1. * X_.shape[0],\n l1_ratios=1, is_classif=False,\n penalty=\"graph-net\", max_iter=100)\n lasso.fit(X_, y)\n graph_net.fit(X, y)\n lasso_perf = 0.5 / y.size * extmath.norm(np.dot(\n X_, lasso.coef_) - y) ** 2 + np.sum(np.abs(lasso.coef_))\n graph_net_perf = 0.5 * ((graph_net.predict(X) - y) ** 2).mean()\n np.testing.assert_almost_equal(graph_net_perf, lasso_perf, decimal=3)\n\n\ndef test_params_correctly_propagated_in_constructors_biz():\n for penalty, is_classif, alpha, l1_ratio in itertools.product(\n [\"graph-net\", \"tv-l1\"], [True, False], [.4, .01], [.5, 1.]):\n cvobj = BaseSpaceNet(\n mask=\"dummy\", penalty=penalty, is_classif=is_classif, alphas=alpha,\n l1_ratios=l1_ratio)\n assert_equal(cvobj.alphas, alpha)\n assert_equal(cvobj.l1_ratios, l1_ratio)\n\n\ndef test_crop_mask():\n rng = np.random.RandomState(42)\n mask = np.zeros((3, 4, 5), dtype=np.bool)\n box = mask[:2, :3, :4]\n box[rng.rand(*box.shape) < 3.] = 1 # mask covers 30% of brain\n idx = np.where(mask)\n assert_true(idx[1].max() < 3)\n tight_mask = _crop_mask(mask)\n assert_equal(mask.sum(), tight_mask.sum())\n assert_true(np.prod(tight_mask.shape) <= np.prod(box.shape))\n\n\ndef test_univariate_feature_screening(dim=(11, 12, 13), n_samples=10):\n rng = np.random.RandomState(42)\n mask = rng.rand(*dim) > 100. 
/ np.prod(dim)\n assert_true(mask.sum() >= 100.)\n mask[dim[0] // 2, dim[1] // 3:, -dim[2] // 2:] = 1 # put spatial structure\n n_features = mask.sum()\n X = rng.randn(n_samples, n_features)\n w = rng.randn(n_features)\n w[rng.rand(n_features) > .8] = 0.\n y = X.dot(w)\n for is_classif in [True, False]:\n X_, mask_, support_ = _univariate_feature_screening(\n X, y, mask, is_classif, 20.)\n n_features_ = support_.sum()\n assert_equal(X_.shape[1], n_features_)\n assert_equal(mask_.sum(), n_features_)\n assert_true(n_features_ <= n_features)\n\n\ndef test_space_net_classifier_subclass():\n for penalty, alpha, l1_ratio, verbose in itertools.product(\n [\"graph-net\", \"tv-l1\"], [.4, .01], [.5, 1.], [True, False]):\n cvobj = SpaceNetClassifier(\n mask=\"dummy\", penalty=penalty, alphas=alpha, l1_ratios=l1_ratio,\n verbose=verbose)\n assert_equal(cvobj.alphas, alpha)\n assert_equal(cvobj.l1_ratios, l1_ratio)\n\n\ndef test_space_net_regressor_subclass():\n for penalty, alpha, l1_ratio, verbose in itertools.product(\n [\"graph-net\", \"tv-l1\"], [.4, .01], [.5, 1.], [True, False]):\n cvobj = SpaceNetRegressor(\n mask=\"dummy\", penalty=penalty, alphas=alpha, l1_ratios=l1_ratio,\n verbose=verbose)\n assert_equal(cvobj.alphas, alpha)\n assert_equal(cvobj.l1_ratios, l1_ratio)\n\n\ndef test_space_net_alpha_grid_pure_spatial():\n rng = check_random_state(42)\n X = rng.randn(10, 100)\n y = np.arange(X.shape[0])\n for is_classif in [True, False]:\n assert_false(np.any(np.isnan(_space_net_alpha_grid(\n X, y, l1_ratio=0., logistic=is_classif))))\n\n\ndef test_string_params_case():\n # penalty\n assert_raises(ValueError, BaseSpaceNet, penalty='TV-L1')\n assert_raises(ValueError, BaseSpaceNet, penalty='Graph-Net')\n\n\ndef test_crop_mask_empty_mask():\n assert_raises_regex(ValueError, \"Empty mask:.\", _crop_mask, np.array([]))\n assert_raises_regex(ValueError, \"Empty mask:\", _crop_mask,\n np.zeros((2, 2, 2)))\n\n\ndef test_space_net_no_crash_not_fitted():\n \"\"\"Regression test.\"\"\"\n iris = load_iris()\n X, y = iris.data, iris.target\n X, mask = to_niimgs(X, [2, 2, 2])\n for model in [SpaceNetRegressor, SpaceNetClassifier]:\n assert_raises_regex(RuntimeError,\n \"This %s instance is not fitted yet\" % (\n model.__name__), model().predict, X)\n model(mask=mask, alphas=1.).fit(X, y).predict(X)\n\n\ndef test_space_net_one_alpha_no_crash():\n \"\"\"Regression test.\"\"\"\n iris = load_iris()\n X, y = iris.data, iris.target\n X, mask = to_niimgs(X, [2, 2, 2])\n for model in [SpaceNetRegressor, SpaceNetClassifier]:\n model(n_alphas=1, mask=mask).fit(X, y)\n model(alphas=None, n_alphas=2, mask=mask).fit(X, y)\n\n\ndef test_checking_inputs_length():\n iris = load_iris()\n X, y = iris.data, iris.target\n y = 2 * (y > 0) - 1\n X_, mask = to_niimgs(X, (2, 2, 2))\n\n # Remove ten samples from y\n y = y[:-10]\n\n for model in [SpaceNetRegressor, SpaceNetClassifier]:\n\n assert_raises(ValueError, model(mask=mask,\n alphas=1. / .01 / X.shape[0],\n l1_ratios=1., tol=1e-10,\n screening_percentile=100.).fit, X_, y)\n\n\ndef test_targets_in_y_space_net_regressor():\n # This tests whether raises an error when unique targets given in y\n # are single.\n iris = load_iris()\n X, _ = iris.data, iris.target\n y = np.ones((iris.target.shape))\n\n imgs, mask = to_niimgs(X, (2, 2, 2))\n regressor = SpaceNetRegressor(mask=mask)\n assert_raises_regex(ValueError,\n \"The given input y must have atleast 2 targets\",\n regressor.fit, imgs, y)\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.ones", "sklearn.utils.check_random_state", "numpy.zeros", "numpy.dot", "numpy.abs", "numpy.arange", "numpy.random.RandomState", "sklearn.linear_model.LogisticRegression", "numpy.prod", "numpy.array", "numpy.where", "sklearn.linear_model.Lasso", "sklearn.linear_model.coordinate_descent._alpha_grid", "sklearn.datasets.load_iris" ] ]
fmamashli/mne-python
[ "52f064415e7c9fa8fe243d22108dcdf3d86505b9", "52f064415e7c9fa8fe243d22108dcdf3d86505b9" ]
[ "examples/time_frequency/plot_source_power_spectrum.py", "mne/_digitization/base.py" ]
[ "\"\"\"\n======================================================\nCompute source power spectral density (PSD) in a label\n======================================================\n\nReturns an STC file containing the PSD (in dB) of each of the sources\nwithin a label.\n\"\"\"\n# Authors: Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne import io\nfrom mne.datasets import sample\nfrom mne.minimum_norm import read_inverse_operator, compute_source_psd\n\nprint(__doc__)\n\n###############################################################################\n# Set parameters\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\nfname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'\nfname_label = data_path + '/MEG/sample/labels/Aud-lh.label'\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname, verbose=False)\nevents = mne.find_events(raw, stim_channel='STI 014')\ninverse_operator = read_inverse_operator(fname_inv)\nraw.info['bads'] = ['MEG 2443', 'EEG 053']\n\n# picks MEG gradiometers\npicks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,\n stim=False, exclude='bads')\n\ntmin, tmax = 0, 120 # use the first 120s of data\nfmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz\nn_fft = 2048 # the FFT size (n_fft). Ideally a power of 2\nlabel = mne.read_label(fname_label)\n\nstc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method=\"dSPM\",\n tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,\n pick_ori=\"normal\", n_fft=n_fft, label=label,\n dB=True)\n\nstc.save('psd_dSPM')\n\n###############################################################################\n# View PSD of sources in label\nplt.plot(1e3 * stc.times, stc.data.T)\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('PSD (dB)')\nplt.title('Source Power Spectrum (PSD)')\nplt.show()\n", "# -*- coding: utf-8 -*-\n# Authors: Alexandre Gramfort <[email protected]>\n# Eric Larson <[email protected]>\n# Joan Massich <[email protected]>\n#\n# License: BSD (3-clause)\nimport numpy as np\nfrom copy import deepcopy\nfrom collections import Counter\n\nfrom ..transforms import _coord_frame_name\nfrom ..io.constants import FIFF\n\n_dig_kind_dict = {\n 'cardinal': FIFF.FIFFV_POINT_CARDINAL,\n 'hpi': FIFF.FIFFV_POINT_HPI,\n 'eeg': FIFF.FIFFV_POINT_EEG,\n 'extra': FIFF.FIFFV_POINT_EXTRA,\n}\n_dig_kind_ints = tuple(sorted(_dig_kind_dict.values()))\n_dig_kind_proper = {'cardinal': 'Cardinal',\n 'hpi': 'HPI',\n 'eeg': 'EEG',\n 'extra': 'Extra',\n 'unknown': 'Unknown'}\n_dig_kind_rev = {val: key for key, val in _dig_kind_dict.items()}\n_cardinal_kind_rev = {1: 'LPA', 2: 'Nasion', 3: 'RPA', 4: 'Inion'}\n\n\ndef _format_dig_points(dig):\n \"\"\"Format the dig points nicely.\"\"\"\n dig_points = [DigPoint(d) for d in dig] if dig is not None else dig\n return Digitization(dig_points)\n\n\ndef _get_dig_eeg(dig):\n return [d for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG]\n\n\ndef _count_points_by_type(dig):\n \"\"\"Get the number of points of each type.\"\"\"\n occurrences = Counter([d['kind'] for d in dig])\n return dict(\n fid=occurrences[FIFF.FIFFV_POINT_CARDINAL],\n hpi=occurrences[FIFF.FIFFV_POINT_HPI],\n eeg=occurrences[FIFF.FIFFV_POINT_EEG],\n extra=occurrences[FIFF.FIFFV_POINT_EXTRA],\n )\n\n\nclass DigPoint(dict):\n \"\"\"Container for a digitization point.\n\n This is a simple subclass of the standard dict type designed to provide\n a readable string representation.\n\n Parameters\n 
----------\n kind : int\n The kind of channel,\n e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``.\n r : array, shape (3,)\n 3D position in m. and coord_frame.\n ident : int\n Number specifying the identity of the point.\n e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``,\n or 42 if kind is ``FIFFV_POINT_EEG``.\n coord_frame : int\n The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``.\n \"\"\"\n\n def __repr__(self): # noqa: D105\n if self['kind'] == FIFF.FIFFV_POINT_CARDINAL:\n id_ = _cardinal_kind_rev.get(\n self.get('ident', -1), 'Unknown cardinal')\n else:\n id_ = _dig_kind_proper[\n _dig_kind_rev.get(self.get('kind', -1), 'unknown')]\n id_ = ('%s #%s' % (id_, self.get('ident', -1)))\n id_ = id_.rjust(10)\n cf = _coord_frame_name(self['coord_frame'])\n pos = ('(%0.1f, %0.1f, %0.1f) mm' % tuple(1000 * self['r'])).ljust(25)\n return ('<DigPoint | %s : %s : %s frame>' % (id_, pos, cf))\n\n def __eq__(self, other): # noqa: D105\n \"\"\"Compare two DigPoints.\n\n Two digpoints are equal if they are the same kind, share the same\n coordinate frame and position.\n \"\"\"\n my_keys = ['kind', 'ident', 'coord_frame']\n if sorted(self.keys()) != sorted(other.keys()):\n return False\n elif any([self[_] != other[_] for _ in my_keys]):\n return False\n else:\n return np.allclose(self['r'], other['r'])\n\n\nclass Digitization(list):\n \"\"\"Represent a list of DigPoint objects.\n\n Parameters\n ----------\n elements : list | None\n A list of DigPoint objects.\n \"\"\"\n\n def __init__(self, elements=None):\n\n elements = list() if elements is None else elements\n\n if not all([isinstance(_, DigPoint) for _ in elements]):\n _msg = 'Digitization expected a iterable of DigPoint objects.'\n raise ValueError(_msg)\n else:\n super(Digitization, self).__init__(deepcopy(elements))\n\n def __eq__(self, other): # noqa: D105\n if not isinstance(other, (Digitization, list)) or \\\n len(self) != len(other):\n return False\n else:\n return all([ss == oo for ss, oo in zip(self, other)])\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ], [ "numpy.allclose" ] ]
dopplerchase/GewitterGefahr
[ "d819874d616f98a25187bfd3091073a2e6d5279e" ]
[ "gewittergefahr/scripts/convert_examples_myrorss_to_gridrad.py" ]
[ "\"\"\"Converts examples from MYRORSS to GridRad format.\"\"\"\n\nimport os.path\nimport argparse\nimport numpy\nfrom gewittergefahr.gg_utils import radar_utils\nfrom gewittergefahr.gg_utils import time_conversion\nfrom gewittergefahr.deep_learning import input_examples\nfrom gewittergefahr.deep_learning import training_validation_io as trainval_io\n\nAZ_SHEAR_TO_VORTICITY = 0.5\nMAX_LL_SHEAR_HEIGHT_M_AGL = 2000\nREFL_HEIGHTS_M_AGL = numpy.array(\n [1000, 2000, 3000, 4000, 5000, 6000], dtype=int\n)\nNEW_RADAR_HEIGHTS_M_AGL = numpy.array(\n [0, 1000, 2000, 3000, 4000, 5000, 6000], dtype=int\n)\n\nINPUT_DIR_ARG_NAME = 'input_example_dir_name'\nFIRST_DATE_ARG_NAME = 'first_spc_date_string'\nLAST_DATE_ARG_NAME = 'last_spc_date_string'\nNUM_EX_PER_BATCH_ARG_NAME = 'num_examples_per_batch'\nOUTPUT_DIR_ARG_NAME = 'output_example_dir_name'\n\nINPUT_DIR_HELP_STRING = (\n 'Name of top-level directory with original examples (in MYRORSS format). '\n 'Files therein will be found by `input_examples.find_example_file` and read'\n ' by `input_examples.read_example_file`.')\n\nSPC_DATE_HELP_STRING = (\n 'SPC date (format \"yyyymmdd\"). Examples will be converted for all SPC '\n 'dates in period `{0:s}`...`{1:s}`.'\n).format(FIRST_DATE_ARG_NAME, LAST_DATE_ARG_NAME)\n\nNUM_EX_PER_BATCH_HELP_STRING = (\n 'Number of examples per batch. Examples will read and written in batches '\n 'of this size.')\n\nOUTPUT_DIR_HELP_STRING = (\n 'Name of top-level directory for new examples (in GridRad format). Files '\n 'will be written by `input_examples.write_example_file` to locations '\n 'therein determined by `input_examples.find_example_file`.')\n\nINPUT_ARG_PARSER = argparse.ArgumentParser()\nINPUT_ARG_PARSER.add_argument(\n '--' + INPUT_DIR_ARG_NAME, type=str, required=True,\n help=INPUT_DIR_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + FIRST_DATE_ARG_NAME, type=str, required=True,\n help=SPC_DATE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + LAST_DATE_ARG_NAME, type=str, required=True,\n help=SPC_DATE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + NUM_EX_PER_BATCH_ARG_NAME, type=int, required=False, default=1000,\n help=NUM_EX_PER_BATCH_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,\n help=OUTPUT_DIR_HELP_STRING)\n\n\ndef _convert_one_file_selected_examples(\n input_file_name, output_file_name, full_storm_id_strings,\n storm_times_unix_sec, append_to_file):\n \"\"\"Converts selected examples in one file from MYRORSS to GridRad format.\n\n E = number of examples\n\n :param input_file_name: See doc for `_convert_one_file`.\n :param output_file_name: Same.\n :param full_storm_id_strings: length-E list of storm IDs.\n :param storm_times_unix_sec: length-E numpy array of storm times.\n :param append_to_file: Boolean flag. If True, will append new examples to\n output file. 
If False, will overwrite output file.\n \"\"\"\n\n print('Reading MYRORSS examples from: \"{0:s}\"...'.format(input_file_name))\n example_dict = input_examples.read_specific_examples(\n netcdf_file_name=input_file_name, read_all_target_vars=True,\n full_storm_id_strings=full_storm_id_strings,\n storm_times_unix_sec=storm_times_unix_sec,\n radar_heights_to_keep_m_agl=REFL_HEIGHTS_M_AGL)\n\n # Add surface reflectivity, then double horizontal resolution.\n reflectivity_matrix_dbz = example_dict[\n input_examples.REFL_IMAGE_MATRIX_KEY][..., 0]\n\n reflectivity_matrix_dbz = numpy.concatenate(\n (reflectivity_matrix_dbz, reflectivity_matrix_dbz[..., [0]]), axis=-1\n )\n\n reflectivity_matrix_dbz = trainval_io.upsample_reflectivity(\n reflectivity_matrix_dbz=reflectivity_matrix_dbz, upsampling_factor=2)\n\n # Create vorticity matrix.\n shear_field_names = example_dict[input_examples.RADAR_FIELDS_KEY]\n ll_shear_index = shear_field_names.index(radar_utils.LOW_LEVEL_SHEAR_NAME)\n ml_shear_index = shear_field_names.index(radar_utils.MID_LEVEL_SHEAR_NAME)\n\n ll_shear_matrix_s01 = example_dict[\n input_examples.AZ_SHEAR_IMAGE_MATRIX_KEY\n ][..., ll_shear_index]\n\n ml_shear_matrix_s01 = example_dict[\n input_examples.AZ_SHEAR_IMAGE_MATRIX_KEY\n ][..., ml_shear_index]\n\n num_radar_heights = len(NEW_RADAR_HEIGHTS_M_AGL)\n these_dimensions = numpy.array(\n ll_shear_matrix_s01.shape + (num_radar_heights,), dtype=int\n )\n vorticity_matrix_s01 = numpy.full(these_dimensions, numpy.nan)\n\n for k in range(num_radar_heights):\n if NEW_RADAR_HEIGHTS_M_AGL[k] > MAX_LL_SHEAR_HEIGHT_M_AGL:\n vorticity_matrix_s01[..., k] = ml_shear_matrix_s01\n else:\n vorticity_matrix_s01[..., k] = ll_shear_matrix_s01\n\n vorticity_matrix_s01 *= AZ_SHEAR_TO_VORTICITY\n radar_matrix = numpy.stack(\n (reflectivity_matrix_dbz, vorticity_matrix_s01), axis=-1\n )\n\n example_dict[input_examples.RADAR_IMAGE_MATRIX_KEY] = radar_matrix\n example_dict[input_examples.RADAR_HEIGHTS_KEY] = NEW_RADAR_HEIGHTS_M_AGL\n example_dict[input_examples.RADAR_FIELDS_KEY] = [\n radar_utils.REFL_NAME, radar_utils.VORTICITY_NAME\n ]\n example_dict[input_examples.ROTATED_GRID_SPACING_KEY] *= 0.5\n\n example_dict.pop(input_examples.REFL_IMAGE_MATRIX_KEY, None)\n example_dict.pop(input_examples.AZ_SHEAR_IMAGE_MATRIX_KEY, None)\n\n print('Writing examples in GridRad format to: \"{0:s}\"...'.format(\n output_file_name\n ))\n\n input_examples.write_example_file(\n netcdf_file_name=output_file_name, example_dict=example_dict,\n append_to_file=append_to_file)\n\n\ndef _convert_one_file(input_file_name, output_file_name,\n num_examples_per_batch):\n \"\"\"Converts examples in one file from MYRORSS to GridRad format.\n\n :param input_file_name: Path to input file (with MYRORSS examples). Will be\n read by `input_examples.read_example_file`.\n :param output_file_name: Path to output file (with the same examples but in\n GridRad format). 
Will be written by\n `input_examples.write_example_file`.\n :param num_examples_per_batch: See documentation at top of file.\n \"\"\"\n\n print('Reading metadata from: \"{0:s}\"...'.format(input_file_name))\n example_dict = input_examples.read_example_file(\n netcdf_file_name=input_file_name, read_all_target_vars=True,\n metadata_only=True)\n\n full_storm_id_strings = example_dict[input_examples.FULL_IDS_KEY]\n storm_times_unix_sec = example_dict[input_examples.STORM_TIMES_KEY]\n num_examples = len(full_storm_id_strings)\n\n for i in range(0, num_examples, num_examples_per_batch):\n this_first_index = i\n this_last_index = min(\n [i + num_examples_per_batch - 1, num_examples - 1]\n )\n\n _convert_one_file_selected_examples(\n input_file_name=input_file_name,\n output_file_name=output_file_name,\n full_storm_id_strings=\n full_storm_id_strings[this_first_index:(this_last_index + 1)],\n storm_times_unix_sec=\n storm_times_unix_sec[this_first_index:(this_last_index + 1)],\n append_to_file=i > 0\n )\n\n\ndef _run(top_input_dir_name, first_spc_date_string, last_spc_date_string,\n num_examples_per_batch, top_output_dir_name):\n \"\"\"Converts examples from MYRORSS to GridRad format.\n\n This is effectively the main method.\n\n :param top_input_dir_name: See documentation at top of file.\n :param first_spc_date_string: Same.\n :param last_spc_date_string: Same.\n :param num_examples_per_batch: Same.\n :param top_output_dir_name: Same.\n \"\"\"\n\n spc_date_strings = time_conversion.get_spc_dates_in_range(\n first_spc_date_string=first_spc_date_string,\n last_spc_date_string=last_spc_date_string)\n\n input_file_names = [\n input_examples.find_example_file(\n top_directory_name=top_input_dir_name, shuffled=False,\n spc_date_string=d, raise_error_if_missing=False\n )\n for d in spc_date_strings\n ]\n\n output_file_names = [\n input_examples.find_example_file(\n top_directory_name=top_output_dir_name, shuffled=False,\n spc_date_string=d, raise_error_if_missing=False\n )\n for d in spc_date_strings\n ]\n\n num_spc_dates = len(spc_date_strings)\n\n for i in range(num_spc_dates):\n if not os.path.isfile(input_file_names[i]):\n continue\n\n _convert_one_file(\n input_file_name=input_file_names[i],\n output_file_name=output_file_names[i],\n num_examples_per_batch=num_examples_per_batch\n )\n\n print('\\n')\n\n\nif __name__ == '__main__':\n INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()\n\n _run(\n top_input_dir_name=getattr(INPUT_ARG_OBJECT, INPUT_DIR_ARG_NAME),\n first_spc_date_string=getattr(INPUT_ARG_OBJECT, FIRST_DATE_ARG_NAME),\n last_spc_date_string=getattr(INPUT_ARG_OBJECT, LAST_DATE_ARG_NAME),\n num_examples_per_batch=getattr(\n INPUT_ARG_OBJECT, NUM_EX_PER_BATCH_ARG_NAME),\n top_output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)\n )\n" ]
[ [ "numpy.array", "numpy.stack", "numpy.concatenate", "numpy.full" ] ]
takuseno/configurable-control-gym
[ "9837e6c03c5a7fe6711b32dc70fe5e432e7d96c3" ]
[ "configurable_control_gym/envs/pendulum.py" ]
[ "import gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\n\nclass PendulumEnv(gym.Env):\n metadata = {\n 'render.modes' : ['human', 'rgb_array'],\n 'video.frames_per_second' : 30\n }\n\n def __init__(self, force=10.0, length=1.0, mass=1.0):\n if isinstance(force, list):\n self.g_set = force\n else:\n self.g_set = None\n self.g = force\n\n if isinstance(length, list):\n self.l_set = length\n else:\n self.l_set = None\n self.l = length\n\n if isinstance(mass, list):\n self.m_set = mass\n else:\n self.m_set = None\n self.m = mass\n\n self.max_speed=8\n self.max_torque=2.\n self.dt=.05\n self.viewer = None\n\n high = np.array([1., 1., self.max_speed])\n self.action_space = spaces.Box(low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32)\n self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32)\n\n self.steps_in_top = 0\n\n self.seed()\n\n def _sample_parameter(self):\n if self.g_set is not None:\n set_index = self.np_random.randint(len(self.g_set))\n self.g = self.np_random.uniform(self.g_set[set_index][0],\n self.g_set[set_index][1])\n if self.l_set is not None:\n set_index = self.np_random.randint(len(self.l_set))\n self.l = self.np_random.uniform(self.l_set[set_index][0],\n self.l_set[set_index][1])\n if self.m_set is not None:\n set_index = self.np_random.randint(len(self.m_set))\n self.m = self.np_random.uniform(self.m_set[set_index][0],\n self.m_set[set_index][1])\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self,u):\n th, thdot = self.state # th := theta\n\n g = self.g\n m = self.m\n l = self.l\n dt = self.dt\n\n u = np.clip(u, -self.max_torque, self.max_torque)[0]\n self.last_u = u # for rendering\n costs = angle_normalize(th)**2 + .1*thdot**2 + .001*(u**2)\n\n newthdot = thdot + (-3*g/(2*l) * np.sin(th + np.pi) + 3./(m*l**2)*u) * dt\n newth = th + newthdot*dt\n newthdot = np.clip(newthdot, -self.max_speed, self.max_speed) #pylint: disable=E1111\n\n self.state = np.array([newth, newthdot])\n\n target = np.pi / 3.0\n _newth = newth\n if np.abs(_newth) > 2.0 * np.pi:\n _newth = np.sign(_newth) * (np.abs(_newth) - 2.0 * np.pi * (_newth // (2.0 * np.pi)))\n if np.abs(_newth) < target or (2.0 * np.pi - np.abs(_newth)) < target:\n self.steps_in_top += 1\n else:\n self.steps_in_top = 0\n\n info = {}\n info['success'] = self.steps_in_top >= 100\n\n return self._get_obs(), -costs, False, info\n\n def reset(self):\n self._sample_parameter()\n high = np.array([np.pi, 1])\n self.state = self.np_random.uniform(low=-high, high=high)\n self.last_u = None\n self.steps_in_top = 0\n return self._get_obs()\n\n def _get_obs(self):\n theta, thetadot = self.state\n return np.array([np.cos(theta), np.sin(theta), thetadot])\n\n def render(self, mode='human'):\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(500,500)\n self.viewer.set_bounds(-2.2,2.2,-2.2,2.2)\n rod = rendering.make_capsule(self.l, .2)\n rod.set_color(.8, .3, .3)\n self.pole_transform = rendering.Transform()\n rod.add_attr(self.pole_transform)\n self.viewer.add_geom(rod)\n axle = rendering.make_circle(.05)\n axle.set_color(0,0,0)\n self.viewer.add_geom(axle)\n fname = path.join(path.dirname(gym.envs.classic_control.pendulum.__file__), \"assets/clockwise.png\")\n self.img = rendering.Image(fname, 1., 1.)\n self.imgtrans = rendering.Transform()\n self.img.add_attr(self.imgtrans)\n\n self.viewer.add_onetime(self.img)\n 
self.pole_transform.set_rotation(self.state[0] + np.pi/2)\n if self.last_u:\n self.imgtrans.scale = (-self.last_u/2, np.abs(self.last_u)/2)\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n\ndef angle_normalize(x):\n return (((x+np.pi) % (2*np.pi)) - np.pi)\n" ]
[ [ "numpy.sign", "numpy.abs", "numpy.cos", "numpy.clip", "numpy.array", "numpy.sin" ] ]
tpeng/magnitude
[ "aec98628b5547773ca8c4114ec6d1ad51e21b230" ]
[ "pymagnitude/third_party/allennlp/semparse/worlds/atis_world.py" ]
[ "\nfrom __future__ import absolute_import\nfrom copy import deepcopy\n#typing\nimport numpy\n\nfrom parsimonious.grammar import Grammar\n\nfrom allennlp.semparse.contexts.atis_tables import * # pylint: disable=wildcard-import,unused-wildcard-import\nfrom allennlp.semparse.contexts.sql_table_context import\\\n SqlTableContext, SqlVisitor, generate_one_of_string, format_action\n\nfrom allennlp.data.tokenizers import Token, WordTokenizer\ntry:\n from itertools import izip\nexcept:\n izip = zip\n\n\ndef get_strings_from_utterance(tokenized_utterance ) :\n u\"\"\"\n Based on the current utterance, return a dictionary where the keys are the strings in the utterance\n that map to lists of the token indices that they are linked to.\n \"\"\"\n string_linking_scores = defaultdict(list)\n for index, (first_token, second_token) in enumerate(izip(tokenized_utterance, tokenized_utterance[1:])):\n for string in ATIS_TRIGGER_DICT.get(first_token.text.lower(), []):\n string_linking_scores[string].append(index)\n\n bigram = \"{first_token.text} {second_token.text}\".lower()\n for string in ATIS_TRIGGER_DICT.get(bigram, []):\n string_linking_scores[string].extend([index, index + 1])\n\n if tokenized_utterance[-1].text.lower() in ATIS_TRIGGER_DICT:\n for string in ATIS_TRIGGER_DICT[tokenized_utterance[-1].text.lower()]:\n string_linking_scores[string].append(len(tokenized_utterance)-1)\n\n date = get_date_from_utterance(tokenized_utterance)\n if date:\n for day in DAY_OF_WEEK_INDEX[date.weekday()]:\n string_linking_scores[day] = []\n\n return string_linking_scores\n\nclass AtisWorld(object):\n u\"\"\"\n World representation for the Atis SQL domain. This class has a ``SqlTableContext`` which holds the base\n grammars, it then augments this grammar with the entities that are detected from utterances.\n\n Parameters\n ----------\n utterances: ``List[str]``\n A list of utterances in the interaction, the last element in this list is the\n current utterance that we are interested in.\n \"\"\"\n sql_table_context = SqlTableContext(TABLES)\n\n def __init__(self, utterances , tokenizer=None) :\n self.utterances = utterances\n self.tokenizer = tokenizer if tokenizer else WordTokenizer()\n self.tokenized_utterances = [self.tokenizer.tokenize(utterance) for utterance in self.utterances]\n valid_actions, linking_scores = self.init_all_valid_actions()\n self.valid_actions = valid_actions\n\n # This has shape (num_entities, num_utterance_tokens).\n self.linking_scores: numpy.ndarray = linking_scores\n self.grammar_str: unicode = self.get_grammar_str()\n self.grammar_with_context: Grammar = Grammar(self.grammar_str)\n\n def get_valid_actions(self) :\n return self.valid_actions\n\n def init_all_valid_actions(self) :\n u\"\"\"\n We initialize the valid actions with the global actions. 
We then iterate through the\n utterances up to and including the current utterance and add the valid strings.\n \"\"\"\n\n valid_actions = deepcopy(self.sql_table_context.valid_actions)\n linking_scores = []\n current_tokenized_utterance = [] if not self.tokenized_utterances\\\n else self.tokenized_utterances[-1]\n\n strings = set()\n for tokenized_utterance in self.tokenized_utterances:\n string_linking_dict = get_strings_from_utterance(tokenized_utterance)\n strings.update(list(string_linking_dict.keys()))\n\n # We want to sort things in reverse here to be consistent with the grammar.\n # The parser is greedy which means that if we have a rule that has\n # multiple options for the right hand side, the first one that succeeds is\n # the one that is used. For example, if ``1400`` appears in the query, and\n # both ``1400`` and ``1`` are valid numbers, then we want to try to match\n # ``1400`` first. Otherwise, ``1`` will succeed but nothing will match ``400``.\n # The same applies for strings here.\n strings_list = sorted(strings, reverse=True)\n\n # We construct the linking scores for strings from the ``string_linking_dict`` here.\n string_linking_scores = []\n for string in strings_list:\n entity_linking = [0 for token in current_tokenized_utterance]\n # string_linking_dict has the strings and linking scores from the last utterance.\n # If the string is not in the last utterance, then the linking scores will be all 0.\n for token_index in string_linking_dict.get(string, []):\n entity_linking[token_index] = 1\n string_linking_scores.append(entity_linking)\n linking_scores.extend(string_linking_scores)\n\n for string in strings_list:\n action = format_action(u'string', string)\n if action not in valid_actions[u'string']:\n valid_actions[u'string'].append(action)\n\n numbers = set([u'0', u'1'])\n number_linking_dict = {}\n\n for utterance, tokenized_utterance in izip(self.utterances, self.tokenized_utterances):\n number_linking_dict = get_numbers_from_utterance(utterance, tokenized_utterance)\n numbers.update(list(number_linking_dict.keys()))\n numbers_list = sorted(numbers, reverse=True)\n\n # We construct the linking scores for numbers from the ``number_linking_dict`` here.\n number_linking_scores = []\n for number in numbers_list:\n entity_linking = [0 for token in current_tokenized_utterance]\n # number_linking_scores has the numbers and linking scores from the last utterance.\n # If the number is not in the last utterance, then the linking scores will be all 0.\n for token_index in number_linking_dict.get(number, []):\n entity_linking[token_index] = 1\n number_linking_scores.append(entity_linking)\n linking_scores.extend(number_linking_scores)\n\n for number in list(numbers_list):\n action = format_action(u'number', number)\n valid_actions[u'number'].append(action)\n return valid_actions, numpy.array(linking_scores)\n\n def get_grammar_str(self) :\n u\"\"\"\n Generate a string that can be used to instantiate a ``Grammar`` object. 
The string is a sequence of\n rules that define the grammar.\n \"\"\"\n grammar_str_with_context = self.sql_table_context.grammar_str\n numbers = [number.split(u\" -> \")[1].lstrip(u'[\"').rstrip(u'\"]') for\\\n number in sorted(self.valid_actions[u'number'], reverse=True)]\n strings = [string .split(u\" -> \")[1].lstrip(u'[\"').rstrip(u'\"]') for\\\n string in sorted(self.valid_actions[u'string'], reverse=True)]\n\n grammar_str_with_context += generate_one_of_string(u\"number\", numbers)\n grammar_str_with_context += generate_one_of_string(u\"string\", strings)\n return grammar_str_with_context\n\n\n def get_action_sequence(self, query ) :\n sql_visitor = SqlVisitor(self.grammar_with_context)\n if query:\n action_sequence = sql_visitor.parse(query)\n return action_sequence\n return []\n\n def all_possible_actions(self) :\n u\"\"\"\n Return a sorted list of strings representing all possible actions\n of the form: nonterminal -> [right_hand_side]\n \"\"\"\n all_actions = set()\n for _, action_list in list(self.valid_actions.items()):\n for action in action_list:\n all_actions.add(action)\n return sorted(all_actions)\n\n def __eq__(self, other):\n if isinstance(self, other.__class__):\n return all([self.valid_actions == other.valid_actions,\n numpy.array_equal(self.linking_scores, other.linking_scores),\n self.utterances == other.utterances,\n self.grammar_str == other.grammar_str])\n return False\n" ]
[ [ "numpy.array", "numpy.array_equal" ] ]
mdengler/pandas
[ "ca010142309076bf24a06ca83fb822915e49fa80" ]
[ "pandas/tests/arithmetic/test_datetime64.py" ]
[ "# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\n# Specifically for datetime64 and datetime64tz dtypes\nfrom datetime import (\n datetime,\n time,\n timedelta,\n)\nfrom itertools import (\n product,\n starmap,\n)\nimport operator\nimport warnings\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs.conversion import localize_pydatetime\nfrom pandas._libs.tslibs.offsets import shift_months\nfrom pandas.compat import np_datetime64_compat\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import (\n DateOffset,\n DatetimeIndex,\n NaT,\n Period,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n DatetimeArray,\n TimedeltaArray,\n)\nfrom pandas.core.ops import roperator\nfrom pandas.tests.arithmetic.common import (\n assert_invalid_addsub_type,\n assert_invalid_comparison,\n get_upcast_box,\n)\n\n# ------------------------------------------------------------------\n# Comparisons\n\n\nclass TestDatetime64ArrayLikeComparisons:\n # Comparison tests for datetime64 vectors fully parametrized over\n # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison\n # tests will eventually end up here.\n\n def test_compare_zerodim(self, tz_naive_fixture, box_with_array):\n # Test comparison with zero-dimensional array is unboxed\n tz = tz_naive_fixture\n box = box_with_array\n xbox = (\n box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray\n )\n dti = date_range(\"20130101\", periods=3, tz=tz)\n\n other = np.array(dti.to_numpy()[0])\n\n dtarr = tm.box_expected(dti, box)\n result = dtarr <= other\n expected = np.array([True, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n \"foo\",\n -1,\n 99,\n 4.0,\n object(),\n timedelta(days=2),\n # GH#19800, GH#19301 datetime.date comparison raises to\n # match DatetimeIndex/Timestamp. 
This also matches the behavior\n # of stdlib datetime.datetime\n datetime(2001, 1, 1).date(),\n # GH#19301 None and NaN are *not* cast to NaT for comparisons\n None,\n np.nan,\n ],\n )\n def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):\n # GH#22074, GH#15966\n tz = tz_naive_fixture\n\n rng = date_range(\"1/1/2000\", periods=10, tz=tz)\n dtarr = tm.box_expected(rng, box_with_array)\n assert_invalid_comparison(dtarr, other, box_with_array)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n list(range(10)),\n np.arange(10),\n np.arange(10).astype(np.float32),\n np.arange(10).astype(object),\n pd.timedelta_range(\"1ns\", periods=10).array,\n np.array(pd.timedelta_range(\"1ns\", periods=10)),\n list(pd.timedelta_range(\"1ns\", periods=10)),\n pd.timedelta_range(\"1 Day\", periods=10).astype(object),\n pd.period_range(\"1971-01-01\", freq=\"D\", periods=10).array,\n pd.period_range(\"1971-01-01\", freq=\"D\", periods=10).astype(object),\n ],\n )\n def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):\n # We don't parametrize this over box_with_array because listlike\n # other plays poorly with assert_invalid_comparison reversed checks\n tz = tz_naive_fixture\n\n dta = date_range(\"1970-01-01\", freq=\"ns\", periods=10, tz=tz)._data\n assert_invalid_comparison(dta, other, tm.to_array)\n\n def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):\n tz = tz_naive_fixture\n\n dta = date_range(\"1970-01-01\", freq=\"h\", periods=5, tz=tz)._data\n\n other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])\n result = dta == other\n expected = np.array([False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dta != other\n tm.assert_numpy_array_equal(result, ~expected)\n\n msg = \"Invalid comparison between|Cannot compare type|not supported between\"\n with pytest.raises(TypeError, match=msg):\n dta < other\n with pytest.raises(TypeError, match=msg):\n dta > other\n with pytest.raises(TypeError, match=msg):\n dta <= other\n with pytest.raises(TypeError, match=msg):\n dta >= other\n\n def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):\n # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly\n tz = tz_naive_fixture\n box = box_with_array\n xbox = box if box not in [pd.Index, pd.array] else np.ndarray\n\n ts = Timestamp.now(tz)\n ser = Series([ts, NaT])\n\n obj = tm.box_expected(ser, box)\n\n expected = Series([True, False], dtype=np.bool_)\n expected = tm.box_expected(expected, xbox)\n\n result = obj == ts\n tm.assert_equal(result, expected)\n\n\nclass TestDatetime64SeriesComparison:\n # TODO: moved from tests.series.test_operators; needs cleanup\n\n @pytest.mark.parametrize(\n \"pair\",\n [\n (\n [Timestamp(\"2011-01-01\"), NaT, Timestamp(\"2011-01-03\")],\n [NaT, NaT, Timestamp(\"2011-01-03\")],\n ),\n (\n [Timedelta(\"1 days\"), NaT, Timedelta(\"3 days\")],\n [NaT, NaT, Timedelta(\"3 days\")],\n ),\n (\n [Period(\"2011-01\", freq=\"M\"), NaT, Period(\"2011-03\", freq=\"M\")],\n [NaT, NaT, Period(\"2011-03\", freq=\"M\")],\n ),\n ],\n )\n @pytest.mark.parametrize(\"reverse\", [True, False])\n @pytest.mark.parametrize(\"dtype\", [None, object])\n @pytest.mark.parametrize(\n \"op, expected\",\n [\n (operator.eq, Series([False, False, True])),\n (operator.ne, Series([True, True, False])),\n (operator.lt, Series([False, False, False])),\n (operator.gt, Series([False, False, False])),\n (operator.ge, Series([False, False, True])),\n (operator.le, Series([False, False, True])),\n ],\n )\n 
def test_nat_comparisons(\n self,\n dtype,\n index_or_series,\n reverse,\n pair,\n op,\n expected,\n ):\n box = index_or_series\n l, r = pair\n if reverse:\n # add lhs / rhs switched data\n l, r = r, l\n\n left = Series(l, dtype=dtype)\n right = box(r, dtype=dtype)\n\n result = op(left, right)\n\n tm.assert_series_equal(result, expected)\n\n def test_comparison_invalid(self, tz_naive_fixture, box_with_array):\n # GH#4968\n # invalid date/int comparisons\n tz = tz_naive_fixture\n ser = Series(range(5))\n ser2 = Series(date_range(\"20010101\", periods=5, tz=tz))\n\n ser = tm.box_expected(ser, box_with_array)\n ser2 = tm.box_expected(ser2, box_with_array)\n\n assert_invalid_comparison(ser, ser2, box_with_array)\n\n @pytest.mark.parametrize(\n \"data\",\n [\n [Timestamp(\"2011-01-01\"), NaT, Timestamp(\"2011-01-03\")],\n [Timedelta(\"1 days\"), NaT, Timedelta(\"3 days\")],\n [Period(\"2011-01\", freq=\"M\"), NaT, Period(\"2011-03\", freq=\"M\")],\n ],\n )\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_nat_comparisons_scalar(self, dtype, data, box_with_array):\n box = box_with_array\n if box_with_array is tm.to_array and dtype is object:\n # dont bother testing ndarray comparison methods as this fails\n # on older numpys (since they check object identity)\n return\n\n xbox = box if box not in [pd.Index, pd.array] else np.ndarray\n\n left = Series(data, dtype=dtype)\n left = tm.box_expected(left, box)\n\n expected = [False, False, False]\n expected = tm.box_expected(expected, xbox)\n if box is pd.array and dtype is object:\n expected = pd.array(expected, dtype=\"bool\")\n\n tm.assert_equal(left == NaT, expected)\n tm.assert_equal(NaT == left, expected)\n\n expected = [True, True, True]\n expected = tm.box_expected(expected, xbox)\n if box is pd.array and dtype is object:\n expected = pd.array(expected, dtype=\"bool\")\n tm.assert_equal(left != NaT, expected)\n tm.assert_equal(NaT != left, expected)\n\n expected = [False, False, False]\n expected = tm.box_expected(expected, xbox)\n if box is pd.array and dtype is object:\n expected = pd.array(expected, dtype=\"bool\")\n tm.assert_equal(left < NaT, expected)\n tm.assert_equal(NaT > left, expected)\n tm.assert_equal(left <= NaT, expected)\n tm.assert_equal(NaT >= left, expected)\n\n tm.assert_equal(left > NaT, expected)\n tm.assert_equal(NaT < left, expected)\n tm.assert_equal(left >= NaT, expected)\n tm.assert_equal(NaT <= left, expected)\n\n @pytest.mark.parametrize(\"val\", [datetime(2000, 1, 4), datetime(2000, 1, 5)])\n def test_series_comparison_scalars(self, val):\n series = Series(date_range(\"1/1/2000\", periods=10))\n\n result = series > val\n expected = Series([x > val for x in series])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"left,right\", [(\"lt\", \"gt\"), (\"le\", \"ge\"), (\"eq\", \"eq\"), (\"ne\", \"ne\")]\n )\n def test_timestamp_compare_series(self, left, right):\n # see gh-4982\n # Make sure we can compare Timestamps on the right AND left hand side.\n ser = Series(date_range(\"20010101\", periods=10), name=\"dates\")\n s_nat = ser.copy(deep=True)\n\n ser[0] = Timestamp(\"nat\")\n ser[3] = Timestamp(\"nat\")\n\n left_f = getattr(operator, left)\n right_f = getattr(operator, right)\n\n # No NaT\n expected = left_f(ser, Timestamp(\"20010109\"))\n result = right_f(Timestamp(\"20010109\"), ser)\n tm.assert_series_equal(result, expected)\n\n # NaT\n expected = left_f(ser, Timestamp(\"nat\"))\n result = right_f(Timestamp(\"nat\"), ser)\n tm.assert_series_equal(result, expected)\n\n # 
Compare to Timestamp with series containing NaT\n expected = left_f(s_nat, Timestamp(\"20010109\"))\n result = right_f(Timestamp(\"20010109\"), s_nat)\n tm.assert_series_equal(result, expected)\n\n # Compare to NaT with series containing NaT\n expected = left_f(s_nat, Timestamp(\"nat\"))\n result = right_f(Timestamp(\"nat\"), s_nat)\n tm.assert_series_equal(result, expected)\n\n def test_dt64arr_timestamp_equality(self, box_with_array):\n # GH#11034\n xbox = (\n box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray\n )\n\n ser = Series([Timestamp(\"2000-01-29 01:59:00\"), Timestamp(\"2000-01-30\"), \"NaT\"])\n ser = tm.box_expected(ser, box_with_array)\n\n result = ser != ser\n expected = tm.box_expected([False, False, True], xbox)\n tm.assert_equal(result, expected)\n\n warn = FutureWarning if box_with_array is pd.DataFrame else None\n with tm.assert_produces_warning(warn):\n # alignment for frame vs series comparisons deprecated\n result = ser != ser[0]\n expected = tm.box_expected([False, True, True], xbox)\n tm.assert_equal(result, expected)\n\n with tm.assert_produces_warning(warn):\n # alignment for frame vs series comparisons deprecated\n result = ser != ser[2]\n expected = tm.box_expected([True, True, True], xbox)\n tm.assert_equal(result, expected)\n\n result = ser == ser\n expected = tm.box_expected([True, True, False], xbox)\n tm.assert_equal(result, expected)\n\n with tm.assert_produces_warning(warn):\n # alignment for frame vs series comparisons deprecated\n result = ser == ser[0]\n expected = tm.box_expected([True, False, False], xbox)\n tm.assert_equal(result, expected)\n\n with tm.assert_produces_warning(warn):\n # alignment for frame vs series comparisons deprecated\n result = ser == ser[2]\n expected = tm.box_expected([False, False, False], xbox)\n tm.assert_equal(result, expected)\n\n\nclass TestDatetimeIndexComparisons:\n\n # TODO: moved from tests.indexes.test_base; parametrize and de-duplicate\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],\n )\n def test_comparators(self, op):\n index = tm.makeDateIndex(100)\n element = index[len(index) // 2]\n element = Timestamp(element).to_datetime64()\n\n arr = np.array(index)\n arr_result = op(arr, element)\n index_result = op(index, element)\n\n assert isinstance(index_result, np.ndarray)\n tm.assert_numpy_array_equal(arr_result, index_result)\n\n @pytest.mark.parametrize(\n \"other\",\n [datetime(2016, 1, 1), Timestamp(\"2016-01-01\"), np.datetime64(\"2016-01-01\")],\n )\n def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):\n tz = tz_naive_fixture\n dti = date_range(\"2016-01-01\", periods=2, tz=tz)\n if tz is not None:\n if isinstance(other, np.datetime64):\n # no tzaware version available\n return\n other = localize_pydatetime(other, dti.tzinfo)\n\n result = dti == other\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti > other\n expected = np.array([False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti >= other\n expected = np.array([True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti < other\n expected = np.array([False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti <= other\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_dti_cmp_nat(self, dtype, box_with_array):\n if box_with_array is 
tm.to_array and dtype is object:\n # dont bother testing ndarray comparison methods as this fails\n # on older numpys (since they check object identity)\n return\n\n xbox = (\n box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray\n )\n\n left = DatetimeIndex([Timestamp(\"2011-01-01\"), NaT, Timestamp(\"2011-01-03\")])\n right = DatetimeIndex([NaT, NaT, Timestamp(\"2011-01-03\")])\n\n left = tm.box_expected(left, box_with_array)\n right = tm.box_expected(right, box_with_array)\n\n lhs, rhs = left, right\n if dtype is object:\n lhs, rhs = left.astype(object), right.astype(object)\n\n result = rhs == lhs\n expected = np.array([False, False, True])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n result = lhs != rhs\n expected = np.array([True, True, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n expected = np.array([False, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs == NaT, expected)\n tm.assert_equal(NaT == rhs, expected)\n\n expected = np.array([True, True, True])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs != NaT, expected)\n tm.assert_equal(NaT != lhs, expected)\n\n expected = np.array([False, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs < NaT, expected)\n tm.assert_equal(NaT > lhs, expected)\n\n def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):\n fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])\n fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])\n\n didx1 = DatetimeIndex(\n [\"2014-01-01\", NaT, \"2014-03-01\", NaT, \"2014-05-01\", \"2014-07-01\"]\n )\n didx2 = DatetimeIndex(\n [\"2014-02-01\", \"2014-03-01\", NaT, NaT, \"2014-06-01\", \"2014-07-01\"]\n )\n darr = np.array(\n [\n np_datetime64_compat(\"2014-02-01 00:00Z\"),\n np_datetime64_compat(\"2014-03-01 00:00Z\"),\n np_datetime64_compat(\"nat\"),\n np.datetime64(\"nat\"),\n np_datetime64_compat(\"2014-06-01 00:00Z\"),\n np_datetime64_compat(\"2014-07-01 00:00Z\"),\n ]\n )\n\n cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]\n\n # Check pd.NaT is handles as the same as np.nan\n with tm.assert_produces_warning(None):\n for idx1, idx2 in cases:\n\n result = idx1 < idx2\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 > idx1\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= idx2\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 >= idx1\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == idx2\n expected = np.array([False, False, False, False, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != idx2\n expected = np.array([True, True, True, True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n with tm.assert_produces_warning(None):\n for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:\n result = idx1 < val\n expected = np.array([False, False, False, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 > val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= val\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 >= val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == 
val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != val\n expected = np.array([True, True, True, True, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n # Check pd.NaT is handles as the same as np.nan\n with tm.assert_produces_warning(None):\n for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:\n result = idx1 < val\n expected = np.array([True, False, False, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 > val\n expected = np.array([False, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= val\n expected = np.array([True, False, True, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 >= val\n expected = np.array([False, False, True, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == val\n expected = np.array([False, False, True, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != val\n expected = np.array([True, True, False, True, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n def test_comparison_tzawareness_compat(self, op, box_with_array):\n # GH#18162\n box = box_with_array\n\n dr = date_range(\"2016-01-01\", periods=6)\n dz = dr.tz_localize(\"US/Pacific\")\n\n dr = tm.box_expected(dr, box)\n dz = tm.box_expected(dz, box)\n\n if box is pd.DataFrame:\n tolist = lambda x: x.astype(object).values.tolist()[0]\n else:\n tolist = list\n\n if op not in [operator.eq, operator.ne]:\n msg = (\n r\"Invalid comparison between dtype=datetime64\\[ns.*\\] \"\n \"and (Timestamp|DatetimeArray|list|ndarray)\"\n )\n with pytest.raises(TypeError, match=msg):\n op(dr, dz)\n\n with pytest.raises(TypeError, match=msg):\n op(dr, tolist(dz))\n with pytest.raises(TypeError, match=msg):\n op(dr, np.array(tolist(dz), dtype=object))\n with pytest.raises(TypeError, match=msg):\n op(dz, dr)\n\n with pytest.raises(TypeError, match=msg):\n op(dz, tolist(dr))\n with pytest.raises(TypeError, match=msg):\n op(dz, np.array(tolist(dr), dtype=object))\n\n # The aware==aware and naive==naive comparisons should *not* raise\n assert np.all(dr == dr)\n assert np.all(dr == tolist(dr))\n assert np.all(tolist(dr) == dr)\n assert np.all(np.array(tolist(dr), dtype=object) == dr)\n assert np.all(dr == np.array(tolist(dr), dtype=object))\n\n assert np.all(dz == dz)\n assert np.all(dz == tolist(dz))\n assert np.all(tolist(dz) == dz)\n assert np.all(np.array(tolist(dz), dtype=object) == dz)\n assert np.all(dz == np.array(tolist(dz), dtype=object))\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):\n # GH#18162\n dr = date_range(\"2016-01-01\", periods=6)\n dz = dr.tz_localize(\"US/Pacific\")\n\n dr = tm.box_expected(dr, box_with_array)\n dz = tm.box_expected(dz, box_with_array)\n\n # Check comparisons against scalar Timestamps\n ts = Timestamp(\"2000-03-14 01:59\")\n ts_tz = Timestamp(\"2000-03-14 01:59\", tz=\"Europe/Amsterdam\")\n\n assert np.all(dr > ts)\n msg = r\"Invalid comparison between dtype=datetime64\\[ns.*\\] and Timestamp\"\n if op not in [operator.eq, operator.ne]:\n with pytest.raises(TypeError, match=msg):\n op(dr, ts_tz)\n\n assert np.all(dz > ts_tz)\n if op not in [operator.eq, 
operator.ne]:\n with pytest.raises(TypeError, match=msg):\n op(dz, ts)\n\n if op not in [operator.eq, operator.ne]:\n # GH#12601: Check comparison against Timestamps and DatetimeIndex\n with pytest.raises(TypeError, match=msg):\n op(ts, dz)\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n @pytest.mark.parametrize(\n \"other\",\n [datetime(2016, 1, 1), Timestamp(\"2016-01-01\"), np.datetime64(\"2016-01-01\")],\n )\n # Bug in NumPy? https://github.com/numpy/numpy/issues/13841\n # Raising in __eq__ will fallback to NumPy, which warns, fails,\n # then re-raises the original exception. So we just need to ignore.\n @pytest.mark.filterwarnings(\"ignore:elementwise comp:DeprecationWarning\")\n @pytest.mark.filterwarnings(\"ignore:Converting timezone-aware:FutureWarning\")\n def test_scalar_comparison_tzawareness(\n self, op, other, tz_aware_fixture, box_with_array\n ):\n box = box_with_array\n tz = tz_aware_fixture\n dti = date_range(\"2016-01-01\", periods=2, tz=tz)\n xbox = box if box not in [pd.Index, pd.array] else np.ndarray\n\n dtarr = tm.box_expected(dti, box_with_array)\n if op in [operator.eq, operator.ne]:\n exbool = op is operator.ne\n expected = np.array([exbool, exbool], dtype=bool)\n expected = tm.box_expected(expected, xbox)\n\n result = op(dtarr, other)\n tm.assert_equal(result, expected)\n\n result = op(other, dtarr)\n tm.assert_equal(result, expected)\n else:\n msg = (\n r\"Invalid comparison between dtype=datetime64\\[ns, .*\\] \"\n f\"and {type(other).__name__}\"\n )\n with pytest.raises(TypeError, match=msg):\n op(dtarr, other)\n with pytest.raises(TypeError, match=msg):\n op(other, dtarr)\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n def test_nat_comparison_tzawareness(self, op):\n # GH#19276\n # tzaware DatetimeIndex should not raise when compared to NaT\n dti = DatetimeIndex(\n [\"2014-01-01\", NaT, \"2014-03-01\", NaT, \"2014-05-01\", \"2014-07-01\"]\n )\n expected = np.array([op == operator.ne] * len(dti))\n result = op(dti, NaT)\n tm.assert_numpy_array_equal(result, expected)\n\n result = op(dti.tz_localize(\"US/Pacific\"), NaT)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_dti_cmp_str(self, tz_naive_fixture):\n # GH#22074\n # regardless of tz, we expect these comparisons are valid\n tz = tz_naive_fixture\n rng = date_range(\"1/1/2000\", periods=10, tz=tz)\n other = \"1/1/2000\"\n\n result = rng == other\n expected = np.array([True] + [False] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng != other\n expected = np.array([False] + [True] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng < other\n expected = np.array([False] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng <= other\n expected = np.array([True] + [False] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng > other\n expected = np.array([False] + [True] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng >= other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_dti_cmp_list(self):\n rng = date_range(\"1/1/2000\", periods=10)\n\n result = rng == list(rng)\n expected = rng == rng\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n pd.timedelta_range(\"1D\", periods=10),\n pd.timedelta_range(\"1D\", periods=10).to_series(),\n 
pd.timedelta_range(\"1D\", periods=10).asi8.view(\"m8[ns]\"),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_dti_cmp_tdi_tzawareness(self, other):\n # GH#22074\n # reversion test that we _don't_ call _assert_tzawareness_compat\n # when comparing against TimedeltaIndex\n dti = date_range(\"2000-01-01\", periods=10, tz=\"Asia/Tokyo\")\n\n result = dti == other\n expected = np.array([False] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti != other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n msg = \"Invalid comparison between\"\n with pytest.raises(TypeError, match=msg):\n dti < other\n with pytest.raises(TypeError, match=msg):\n dti <= other\n with pytest.raises(TypeError, match=msg):\n dti > other\n with pytest.raises(TypeError, match=msg):\n dti >= other\n\n def test_dti_cmp_object_dtype(self):\n # GH#22074\n dti = date_range(\"2000-01-01\", periods=10, tz=\"Asia/Tokyo\")\n\n other = dti.astype(\"O\")\n\n result = dti == other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n other = dti.tz_localize(None)\n result = dti != other\n tm.assert_numpy_array_equal(result, expected)\n\n other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)\n result = dti == other\n expected = np.array([True] * 5 + [False] * 5)\n tm.assert_numpy_array_equal(result, expected)\n msg = \">=' not supported between instances of 'Timestamp' and 'Timedelta'\"\n with pytest.raises(TypeError, match=msg):\n dti >= other\n\n\n# ------------------------------------------------------------------\n# Arithmetic\n\n\nclass TestDatetime64Arithmetic:\n # This class is intended for \"finished\" tests that are fully parametrized\n # over DataFrame/Series/Index/DatetimeArray\n\n # -------------------------------------------------------------\n # Addition/Subtraction of timedelta-like\n\n @pytest.mark.arm_slow\n def test_dt64arr_add_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n # GH#22005, GH#22163 check DataFrame doesn't raise TypeError\n tz = tz_naive_fixture\n\n rng = date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = date_range(\"2000-01-01 02:00\", \"2000-02-01 02:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng + two_hours\n tm.assert_equal(result, expected)\n\n def test_dt64arr_iadd_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = date_range(\"2000-01-01 02:00\", \"2000-02-01 02:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n rng += two_hours\n tm.assert_equal(rng, expected)\n\n def test_dt64arr_sub_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = date_range(\"1999-12-31 22:00\", \"2000-01-31 22:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng - two_hours\n tm.assert_equal(result, expected)\n\n def test_dt64arr_isub_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = date_range(\"1999-12-31 22:00\", \"2000-01-31 22:00\", tz=tz)\n\n rng = tm.box_expected(rng, 
box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n rng -= two_hours\n tm.assert_equal(rng, expected)\n\n # TODO: redundant with test_dt64arr_add_timedeltalike_scalar\n def test_dt64arr_add_td64_scalar(self, box_with_array):\n # scalar timedeltas/np.timedelta64 objects\n # operate with np.timedelta64 correctly\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n\n expected = Series(\n [Timestamp(\"20130101 9:01:01\"), Timestamp(\"20130101 9:02:01\")]\n )\n\n dtarr = tm.box_expected(ser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + np.timedelta64(1, \"s\")\n tm.assert_equal(result, expected)\n result = np.timedelta64(1, \"s\") + dtarr\n tm.assert_equal(result, expected)\n\n expected = Series(\n [Timestamp(\"20130101 9:01:00.005\"), Timestamp(\"20130101 9:02:00.005\")]\n )\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + np.timedelta64(5, \"ms\")\n tm.assert_equal(result, expected)\n result = np.timedelta64(5, \"ms\") + dtarr\n tm.assert_equal(result, expected)\n\n def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):\n # GH#23320 special handling for timedelta64(\"NaT\")\n tz = tz_naive_fixture\n\n dti = date_range(\"1994-04-01\", periods=9, tz=tz, freq=\"QS\")\n other = np.timedelta64(\"NaT\")\n expected = DatetimeIndex([\"NaT\"] * 9, tz=tz)\n\n obj = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n msg = \"cannot subtract\"\n with pytest.raises(TypeError, match=msg):\n other - obj\n\n def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):\n\n tz = tz_naive_fixture\n dti = date_range(\"2016-01-01\", periods=3, tz=tz)\n tdi = TimedeltaIndex([\"-1 Day\", \"-1 Day\", \"-1 Day\"])\n tdarr = tdi.values\n\n expected = date_range(\"2015-12-31\", \"2016-01-02\", periods=3, tz=tz)\n\n dtarr = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + tdarr\n tm.assert_equal(result, expected)\n result = tdarr + dtarr\n tm.assert_equal(result, expected)\n\n expected = date_range(\"2016-01-02\", \"2016-01-04\", periods=3, tz=tz)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr - tdarr\n tm.assert_equal(result, expected)\n msg = \"cannot subtract|(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n tdarr - dtarr\n\n # -----------------------------------------------------------------\n # Subtraction of datetime-like scalars\n\n @pytest.mark.parametrize(\n \"ts\",\n [\n Timestamp(\"2013-01-01\"),\n Timestamp(\"2013-01-01\").to_pydatetime(),\n Timestamp(\"2013-01-01\").to_datetime64(),\n ],\n )\n def test_dt64arr_sub_dtscalar(self, box_with_array, ts):\n # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype\n idx = date_range(\"2013-01-01\", periods=3)._with_freq(None)\n idx = tm.box_expected(idx, box_with_array)\n\n expected = TimedeltaIndex([\"0 Days\", \"1 Day\", \"2 Days\"])\n expected = tm.box_expected(expected, box_with_array)\n\n result = idx - ts\n tm.assert_equal(result, expected)\n\n def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):\n # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano\n # for DataFrame operation\n dt64 = np.datetime64(\"2013-01-01\")\n assert 
dt64.dtype == \"datetime64[D]\"\n\n dti = date_range(\"20130101\", periods=3)._with_freq(None)\n dtarr = tm.box_expected(dti, box_with_array)\n\n expected = TimedeltaIndex([\"0 Days\", \"1 Day\", \"2 Days\"])\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr - dt64\n tm.assert_equal(result, expected)\n\n result = dt64 - dtarr\n tm.assert_equal(result, -expected)\n\n def test_dt64arr_sub_timestamp(self, box_with_array):\n ser = date_range(\"2014-03-17\", periods=2, freq=\"D\", tz=\"US/Eastern\")\n ser = ser._with_freq(None)\n ts = ser[0]\n\n ser = tm.box_expected(ser, box_with_array)\n\n delta_series = Series([np.timedelta64(0, \"D\"), np.timedelta64(1, \"D\")])\n expected = tm.box_expected(delta_series, box_with_array)\n\n tm.assert_equal(ser - ts, expected)\n tm.assert_equal(ts - ser, -expected)\n\n def test_dt64arr_sub_NaT(self, box_with_array):\n # GH#18808\n dti = DatetimeIndex([NaT, Timestamp(\"19900315\")])\n ser = tm.box_expected(dti, box_with_array)\n\n result = ser - NaT\n expected = Series([NaT, NaT], dtype=\"timedelta64[ns]\")\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n dti_tz = dti.tz_localize(\"Asia/Tokyo\")\n ser_tz = tm.box_expected(dti_tz, box_with_array)\n\n result = ser_tz - NaT\n expected = Series([NaT, NaT], dtype=\"timedelta64[ns]\")\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n # -------------------------------------------------------------\n # Subtraction of datetime-like array-like\n\n def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):\n dti = date_range(\"2016-01-01\", periods=3, tz=tz_naive_fixture)\n expected = dti - dti\n\n obj = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = obj - obj.astype(object)\n tm.assert_equal(result, expected)\n\n def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):\n dti = date_range(\"2016-01-01\", periods=3, tz=None)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n\n expected = dtarr - dtarr\n result = dtarr - dt64vals\n tm.assert_equal(result, expected)\n result = dt64vals - dtarr\n tm.assert_equal(result, expected)\n\n def test_dt64arr_aware_sub_dt64ndarray_raises(\n self, tz_aware_fixture, box_with_array\n ):\n\n tz = tz_aware_fixture\n dti = date_range(\"2016-01-01\", periods=3, tz=tz)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"subtraction must have the same timezones or\"\n with pytest.raises(TypeError, match=msg):\n dtarr - dt64vals\n with pytest.raises(TypeError, match=msg):\n dt64vals - dtarr\n\n # -------------------------------------------------------------\n # Addition of datetime-like others (invalid)\n\n def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):\n\n tz = tz_naive_fixture\n dti = date_range(\"2016-01-01\", periods=3, tz=tz)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"cannot add\"\n with pytest.raises(TypeError, match=msg):\n dtarr + dt64vals\n with pytest.raises(TypeError, match=msg):\n dt64vals + dtarr\n\n def test_dt64arr_add_timestamp_raises(self, box_with_array):\n # GH#22163 ensure DataFrame doesn't cast Timestamp to i8\n idx = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"])\n idx = tm.box_expected(idx, box_with_array)\n msg = \"cannot add\"\n with pytest.raises(TypeError, match=msg):\n idx + 
Timestamp(\"2011-01-01\")\n with pytest.raises(TypeError, match=msg):\n Timestamp(\"2011-01-01\") + idx\n\n # -------------------------------------------------------------\n # Other Invalid Addition/Subtraction\n\n @pytest.mark.parametrize(\n \"other\",\n [\n 3.14,\n np.array([2.0, 3.0]),\n # GH#13078 datetime +/- Period is invalid\n Period(\"2011-01-01\", freq=\"D\"),\n # https://github.com/pandas-dev/pandas/issues/10329\n time(1, 2, 3),\n ],\n )\n @pytest.mark.parametrize(\"dti_freq\", [None, \"D\"])\n def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):\n dti = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], freq=dti_freq)\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"|\".join(\n [\n \"unsupported operand type\",\n \"cannot (add|subtract)\",\n \"cannot use operands with types\",\n \"ufunc '?(add|subtract)'? cannot use operands with types\",\n \"Concatenation operation is not implemented for NumPy arrays\",\n ]\n )\n assert_invalid_addsub_type(dtarr, other, msg)\n\n @pytest.mark.parametrize(\"pi_freq\", [\"D\", \"W\", \"Q\", \"H\"])\n @pytest.mark.parametrize(\"dti_freq\", [None, \"D\"])\n def test_dt64arr_add_sub_parr(\n self, dti_freq, pi_freq, box_with_array, box_with_array2\n ):\n # GH#20049 subtracting PeriodIndex should raise TypeError\n dti = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], freq=dti_freq)\n pi = dti.to_period(pi_freq)\n\n dtarr = tm.box_expected(dti, box_with_array)\n parr = tm.box_expected(pi, box_with_array2)\n msg = \"|\".join(\n [\n \"cannot (add|subtract)\",\n \"unsupported operand\",\n \"descriptor.*requires\",\n \"ufunc.*cannot use operands\",\n ]\n )\n assert_invalid_addsub_type(dtarr, parr, msg)\n\n def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):\n # https://github.com/pandas-dev/pandas/issues/10329\n\n tz = tz_naive_fixture\n\n obj1 = date_range(\"2012-01-01\", periods=3, tz=tz)\n obj2 = [time(i, i, i) for i in range(3)]\n\n obj1 = tm.box_expected(obj1, box_with_array)\n obj2 = tm.box_expected(obj2, box_with_array)\n\n with warnings.catch_warnings(record=True):\n # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being\n # applied to Series or DatetimeIndex\n # we aren't testing that here, so ignore.\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n\n # If `x + y` raises, then `y + x` should raise here as well\n\n msg = (\n r\"unsupported operand type\\(s\\) for -: \"\n \"'(Timestamp|DatetimeArray)' and 'datetime.time'\"\n )\n with pytest.raises(TypeError, match=msg):\n obj1 - obj2\n\n msg = \"|\".join(\n [\n \"cannot subtract DatetimeArray from ndarray\",\n \"ufunc (subtract|'subtract') cannot use operands with types \"\n r\"dtype\\('O'\\) and dtype\\('<M8\\[ns\\]'\\)\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n obj2 - obj1\n\n msg = (\n r\"unsupported operand type\\(s\\) for \\+: \"\n \"'(Timestamp|DatetimeArray)' and 'datetime.time'\"\n )\n with pytest.raises(TypeError, match=msg):\n obj1 + obj2\n\n msg = \"|\".join(\n [\n r\"unsupported operand type\\(s\\) for \\+: \"\n \"'(Timestamp|DatetimeArray)' and 'datetime.time'\",\n \"ufunc (add|'add') cannot use operands with types \"\n r\"dtype\\('O'\\) and dtype\\('<M8\\[ns\\]'\\)\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n obj2 + obj1\n\n\nclass TestDatetime64DateOffsetArithmetic:\n\n # -------------------------------------------------------------\n # Tick DateOffsets\n\n # TODO: parametrize over timezone?\n def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):\n # GH#4532\n # 
operate with pd.offsets\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n expected = Series(\n [Timestamp(\"20130101 9:01:05\"), Timestamp(\"20130101 9:02:05\")]\n )\n\n ser = tm.box_expected(ser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = ser + pd.offsets.Second(5)\n tm.assert_equal(result, expected)\n\n result2 = pd.offsets.Second(5) + ser\n tm.assert_equal(result2, expected)\n\n def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):\n # GH#4532\n # operate with pd.offsets\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n expected = Series(\n [Timestamp(\"20130101 9:00:55\"), Timestamp(\"20130101 9:01:55\")]\n )\n\n ser = tm.box_expected(ser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = ser - pd.offsets.Second(5)\n tm.assert_equal(result, expected)\n\n result2 = -pd.offsets.Second(5) + ser\n tm.assert_equal(result2, expected)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n pd.offsets.Second(5) - ser\n\n @pytest.mark.parametrize(\n \"cls_name\", [\"Day\", \"Hour\", \"Minute\", \"Second\", \"Milli\", \"Micro\", \"Nano\"]\n )\n def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):\n # GH#4532\n # smoke tests for valid DateOffsets\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n ser = tm.box_expected(ser, box_with_array)\n\n offset_cls = getattr(pd.offsets, cls_name)\n ser + offset_cls(5)\n offset_cls(5) + ser\n ser - offset_cls(5)\n\n def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):\n # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype\n tz = tz_aware_fixture\n if tz == \"US/Pacific\":\n dates = date_range(\"2012-11-01\", periods=3, tz=tz)\n offset = dates + pd.offsets.Hour(5)\n assert dates[0] + pd.offsets.Hour(5) == offset[0]\n\n dates = date_range(\"2010-11-01 00:00\", periods=3, tz=tz, freq=\"H\")\n expected = DatetimeIndex(\n [\"2010-11-01 05:00\", \"2010-11-01 06:00\", \"2010-11-01 07:00\"],\n freq=\"H\",\n tz=tz,\n )\n\n dates = tm.box_expected(dates, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n # TODO: parametrize over the scalar being added? radd? 
sub?\n offset = dates + pd.offsets.Hour(5)\n tm.assert_equal(offset, expected)\n offset = dates + np.timedelta64(5, \"h\")\n tm.assert_equal(offset, expected)\n offset = dates + timedelta(hours=5)\n tm.assert_equal(offset, expected)\n\n # -------------------------------------------------------------\n # RelativeDelta DateOffsets\n\n def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):\n # GH#10699\n vec = DatetimeIndex(\n [\n Timestamp(\"2000-01-05 00:15:00\"),\n Timestamp(\"2000-01-31 00:23:00\"),\n Timestamp(\"2000-01-01\"),\n Timestamp(\"2000-03-31\"),\n Timestamp(\"2000-02-29\"),\n Timestamp(\"2000-12-31\"),\n Timestamp(\"2000-05-15\"),\n Timestamp(\"2001-06-15\"),\n ]\n )\n vec = tm.box_expected(vec, box_with_array)\n vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec\n\n # DateOffset relativedelta fastpath\n relative_kwargs = [\n (\"years\", 2),\n (\"months\", 5),\n (\"days\", 3),\n (\"hours\", 5),\n (\"minutes\", 10),\n (\"seconds\", 2),\n (\"microseconds\", 5),\n ]\n for i, (unit, value) in enumerate(relative_kwargs):\n off = DateOffset(**{unit: value})\n\n expected = DatetimeIndex([x + off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + off)\n\n expected = DatetimeIndex([x - off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - off)\n\n off = DateOffset(**dict(relative_kwargs[: i + 1]))\n\n expected = DatetimeIndex([x + off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + off)\n\n expected = DatetimeIndex([x - off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - off)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n off - vec\n\n # -------------------------------------------------------------\n # Non-Tick, Non-RelativeDelta DateOffsets\n\n # TODO: redundant with test_dt64arr_add_sub_DateOffset? 
that includes\n # tz-aware cases which this does not\n @pytest.mark.parametrize(\n \"cls_and_kwargs\",\n [\n \"YearBegin\",\n (\"YearBegin\", {\"month\": 5}),\n \"YearEnd\",\n (\"YearEnd\", {\"month\": 5}),\n \"MonthBegin\",\n \"MonthEnd\",\n \"SemiMonthEnd\",\n \"SemiMonthBegin\",\n \"Week\",\n (\"Week\", {\"weekday\": 3}),\n \"Week\",\n (\"Week\", {\"weekday\": 6}),\n \"BusinessDay\",\n \"BDay\",\n \"QuarterEnd\",\n \"QuarterBegin\",\n \"CustomBusinessDay\",\n \"CDay\",\n \"CBMonthEnd\",\n \"CBMonthBegin\",\n \"BMonthBegin\",\n \"BMonthEnd\",\n \"BusinessHour\",\n \"BYearBegin\",\n \"BYearEnd\",\n \"BQuarterBegin\",\n (\"LastWeekOfMonth\", {\"weekday\": 2}),\n (\n \"FY5253Quarter\",\n {\n \"qtr_with_extra_week\": 1,\n \"startingMonth\": 1,\n \"weekday\": 2,\n \"variation\": \"nearest\",\n },\n ),\n (\"FY5253\", {\"weekday\": 0, \"startingMonth\": 2, \"variation\": \"nearest\"}),\n (\"WeekOfMonth\", {\"weekday\": 2, \"week\": 2}),\n \"Easter\",\n (\"DateOffset\", {\"day\": 4}),\n (\"DateOffset\", {\"month\": 5}),\n ],\n )\n @pytest.mark.parametrize(\"normalize\", [True, False])\n @pytest.mark.parametrize(\"n\", [0, 5])\n def test_dt64arr_add_sub_DateOffsets(\n self, box_with_array, n, normalize, cls_and_kwargs\n ):\n # GH#10699\n # assert vectorized operation matches pointwise operations\n\n if isinstance(cls_and_kwargs, tuple):\n # If cls_name param is a tuple, then 2nd entry is kwargs for\n # the offset constructor\n cls_name, kwargs = cls_and_kwargs\n else:\n cls_name = cls_and_kwargs\n kwargs = {}\n\n if n == 0 and cls_name in [\n \"WeekOfMonth\",\n \"LastWeekOfMonth\",\n \"FY5253Quarter\",\n \"FY5253\",\n ]:\n # passing n = 0 is invalid for these offset classes\n return\n\n vec = DatetimeIndex(\n [\n Timestamp(\"2000-01-05 00:15:00\"),\n Timestamp(\"2000-01-31 00:23:00\"),\n Timestamp(\"2000-01-01\"),\n Timestamp(\"2000-03-31\"),\n Timestamp(\"2000-02-29\"),\n Timestamp(\"2000-12-31\"),\n Timestamp(\"2000-05-15\"),\n Timestamp(\"2001-06-15\"),\n ]\n )\n vec = tm.box_expected(vec, box_with_array)\n vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec\n\n offset_cls = getattr(pd.offsets, cls_name)\n\n with warnings.catch_warnings(record=True):\n # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being\n # applied to Series or DatetimeIndex\n # we aren't testing that here, so ignore.\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n\n offset = offset_cls(n, normalize=normalize, **kwargs)\n\n expected = DatetimeIndex([x + offset for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + offset)\n\n expected = DatetimeIndex([x - offset for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - offset)\n\n expected = DatetimeIndex([offset + x for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, offset + vec)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n offset - vec\n\n def test_dt64arr_add_sub_DateOffset(self, box_with_array):\n # GH#10699\n s = date_range(\"2000-01-01\", \"2000-01-31\", name=\"a\")\n s = tm.box_expected(s, box_with_array)\n result = s + DateOffset(years=1)\n result2 = DateOffset(years=1) + s\n exp = date_range(\"2001-01-01\", \"2001-01-31\", name=\"a\")._with_freq(None)\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n result = s - DateOffset(years=1)\n exp = 
date_range(\"1999-01-01\", \"1999-01-31\", name=\"a\")._with_freq(None)\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n\n s = DatetimeIndex(\n [\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n s = tm.box_expected(s, box_with_array)\n result = s + pd.offsets.Day()\n result2 = pd.offsets.Day() + s\n exp = DatetimeIndex(\n [\n Timestamp(\"2000-01-16 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-16\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n s = DatetimeIndex(\n [\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n s = tm.box_expected(s, box_with_array)\n result = s + pd.offsets.MonthEnd()\n result2 = pd.offsets.MonthEnd() + s\n exp = DatetimeIndex(\n [\n Timestamp(\"2000-01-31 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-29\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),\n np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),\n np.array( # matching offsets\n [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]\n ),\n ],\n )\n @pytest.mark.parametrize(\"op\", [operator.add, roperator.radd, operator.sub])\n @pytest.mark.parametrize(\"box_other\", [True, False])\n def test_dt64arr_add_sub_offset_array(\n self, tz_naive_fixture, box_with_array, box_other, op, other\n ):\n # GH#18849\n # GH#10699 array of offsets\n\n tz = tz_naive_fixture\n dti = date_range(\"2017-01-01\", periods=2, tz=tz)\n dtarr = tm.box_expected(dti, box_with_array)\n\n other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])\n expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])\n expected = tm.box_expected(expected, box_with_array)\n\n if box_other:\n other = tm.box_expected(other, box_with_array)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = op(dtarr, other)\n\n tm.assert_equal(res, expected)\n\n @pytest.mark.parametrize(\n \"op, offset, exp, exp_freq\",\n [\n (\n \"__add__\",\n DateOffset(months=3, days=10),\n [\n Timestamp(\"2014-04-11\"),\n Timestamp(\"2015-04-11\"),\n Timestamp(\"2016-04-11\"),\n Timestamp(\"2017-04-11\"),\n ],\n None,\n ),\n (\n \"__add__\",\n DateOffset(months=3),\n [\n Timestamp(\"2014-04-01\"),\n Timestamp(\"2015-04-01\"),\n Timestamp(\"2016-04-01\"),\n Timestamp(\"2017-04-01\"),\n ],\n \"AS-APR\",\n ),\n (\n \"__sub__\",\n DateOffset(months=3, days=10),\n [\n Timestamp(\"2013-09-21\"),\n Timestamp(\"2014-09-21\"),\n Timestamp(\"2015-09-21\"),\n Timestamp(\"2016-09-21\"),\n ],\n None,\n ),\n (\n \"__sub__\",\n DateOffset(months=3),\n [\n Timestamp(\"2013-10-01\"),\n Timestamp(\"2014-10-01\"),\n Timestamp(\"2015-10-01\"),\n Timestamp(\"2016-10-01\"),\n ],\n \"AS-OCT\",\n ),\n ],\n )\n def test_dti_add_sub_nonzero_mth_offset(\n self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array\n ):\n # GH 26258\n tz = tz_aware_fixture\n date = date_range(start=\"01 Jan 2014\", end=\"01 Jan 2017\", freq=\"AS\", tz=tz)\n date = tm.box_expected(date, box_with_array, False)\n mth = getattr(date, op)\n result = mth(offset)\n\n expected = DatetimeIndex(exp, tz=tz)\n expected = tm.box_expected(expected, 
box_with_array, False)\n tm.assert_equal(result, expected)\n\n\nclass TestDatetime64OverflowHandling:\n # TODO: box + de-duplicate\n\n def test_dt64_overflow_masking(self, box_with_array):\n # GH#25317\n left = Series([Timestamp(\"1969-12-31\")])\n right = Series([NaT])\n\n left = tm.box_expected(left, box_with_array)\n right = tm.box_expected(right, box_with_array)\n\n expected = TimedeltaIndex([NaT])\n expected = tm.box_expected(expected, box_with_array)\n\n result = left - right\n tm.assert_equal(result, expected)\n\n def test_dt64_series_arith_overflow(self):\n # GH#12534, fixed by GH#19024\n dt = Timestamp(\"1700-01-31\")\n td = Timedelta(\"20000 Days\")\n dti = date_range(\"1949-09-30\", freq=\"100Y\", periods=4)\n ser = Series(dti)\n msg = \"Overflow in int64 addition\"\n with pytest.raises(OverflowError, match=msg):\n ser - dt\n with pytest.raises(OverflowError, match=msg):\n dt - ser\n with pytest.raises(OverflowError, match=msg):\n ser + td\n with pytest.raises(OverflowError, match=msg):\n td + ser\n\n ser.iloc[-1] = NaT\n expected = Series(\n [\"2004-10-03\", \"2104-10-04\", \"2204-10-04\", \"NaT\"], dtype=\"datetime64[ns]\"\n )\n res = ser + td\n tm.assert_series_equal(res, expected)\n res = td + ser\n tm.assert_series_equal(res, expected)\n\n ser.iloc[1:] = NaT\n expected = Series([\"91279 Days\", \"NaT\", \"NaT\", \"NaT\"], dtype=\"timedelta64[ns]\")\n res = ser - dt\n tm.assert_series_equal(res, expected)\n res = dt - ser\n tm.assert_series_equal(res, -expected)\n\n def test_datetimeindex_sub_timestamp_overflow(self):\n dtimax = pd.to_datetime([\"now\", Timestamp.max])\n dtimin = pd.to_datetime([\"now\", Timestamp.min])\n\n tsneg = Timestamp(\"1950-01-01\")\n ts_neg_variants = [\n tsneg,\n tsneg.to_pydatetime(),\n tsneg.to_datetime64().astype(\"datetime64[ns]\"),\n tsneg.to_datetime64().astype(\"datetime64[D]\"),\n ]\n\n tspos = Timestamp(\"1980-01-01\")\n ts_pos_variants = [\n tspos,\n tspos.to_pydatetime(),\n tspos.to_datetime64().astype(\"datetime64[ns]\"),\n tspos.to_datetime64().astype(\"datetime64[D]\"),\n ]\n msg = \"Overflow in int64 addition\"\n for variant in ts_neg_variants:\n with pytest.raises(OverflowError, match=msg):\n dtimax - variant\n\n expected = Timestamp.max.value - tspos.value\n for variant in ts_pos_variants:\n res = dtimax - variant\n assert res[1].value == expected\n\n expected = Timestamp.min.value - tsneg.value\n for variant in ts_neg_variants:\n res = dtimin - variant\n assert res[1].value == expected\n\n for variant in ts_pos_variants:\n with pytest.raises(OverflowError, match=msg):\n dtimin - variant\n\n def test_datetimeindex_sub_datetimeindex_overflow(self):\n # GH#22492, GH#22508\n dtimax = pd.to_datetime([\"now\", Timestamp.max])\n dtimin = pd.to_datetime([\"now\", Timestamp.min])\n\n ts_neg = pd.to_datetime([\"1950-01-01\", \"1950-01-01\"])\n ts_pos = pd.to_datetime([\"1980-01-01\", \"1980-01-01\"])\n\n # General tests\n expected = Timestamp.max.value - ts_pos[1].value\n result = dtimax - ts_pos\n assert result[1].value == expected\n\n expected = Timestamp.min.value - ts_neg[1].value\n result = dtimin - ts_neg\n assert result[1].value == expected\n msg = \"Overflow in int64 addition\"\n with pytest.raises(OverflowError, match=msg):\n dtimax - ts_neg\n\n with pytest.raises(OverflowError, match=msg):\n dtimin - ts_pos\n\n # Edge cases\n tmin = pd.to_datetime([Timestamp.min])\n t1 = tmin + Timedelta.max + Timedelta(\"1us\")\n with pytest.raises(OverflowError, match=msg):\n t1 - tmin\n\n tmax = pd.to_datetime([Timestamp.max])\n t2 = tmax + 
Timedelta.min - Timedelta(\"1us\")\n with pytest.raises(OverflowError, match=msg):\n tmax - t2\n\n\nclass TestTimestampSeriesArithmetic:\n def test_empty_series_add_sub(self):\n # GH#13844\n a = Series(dtype=\"M8[ns]\")\n b = Series(dtype=\"m8[ns]\")\n tm.assert_series_equal(a, a + b)\n tm.assert_series_equal(a, a - b)\n tm.assert_series_equal(a, b + a)\n msg = \"cannot subtract\"\n with pytest.raises(TypeError, match=msg):\n b - a\n\n def test_operators_datetimelike(self):\n\n # ## timedelta64 ###\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n # ## datetime64 ###\n dt1 = Series(\n [\n Timestamp(\"20111230\"),\n Timestamp(\"20120101\"),\n Timestamp(\"20120103\"),\n ]\n )\n dt1.iloc[2] = np.nan\n dt2 = Series(\n [\n Timestamp(\"20111231\"),\n Timestamp(\"20120102\"),\n Timestamp(\"20120104\"),\n ]\n )\n dt1 - dt2\n dt2 - dt1\n\n # datetime64 with timetimedelta\n dt1 + td1\n td1 + dt1\n dt1 - td1\n\n # timetimedelta with datetime64\n td1 + dt1\n dt1 + td1\n\n def test_dt64ser_sub_datetime_dtype(self):\n ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))\n dt = datetime(1993, 6, 22, 13, 30)\n ser = Series([ts])\n result = pd.to_timedelta(np.abs(ser - dt))\n assert result.dtype == \"timedelta64[ns]\"\n\n # -------------------------------------------------------------\n # TODO: This next block of tests came from tests.series.test_operators,\n # needs to be de-duplicated and parametrized over `box` classes\n\n def test_operators_datetimelike_invalid(self, all_arithmetic_operators):\n # these are all TypeEror ops\n op_str = all_arithmetic_operators\n\n def check(get_ser, test_ser):\n\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not\n # defined\n op = getattr(get_ser, op_str, None)\n # Previously, _validate_for_numeric_binop in core/indexes/base.py\n # did this for us.\n with pytest.raises(\n TypeError, match=\"operate|[cC]annot|unsupported operand\"\n ):\n op(test_ser)\n\n # ## timedelta64 ###\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n # ## datetime64 ###\n dt1 = Series(\n [Timestamp(\"20111230\"), Timestamp(\"20120101\"), Timestamp(\"20120103\")]\n )\n dt1.iloc[2] = np.nan\n dt2 = Series(\n [Timestamp(\"20111231\"), Timestamp(\"20120102\"), Timestamp(\"20120104\")]\n )\n if op_str not in [\"__sub__\", \"__rsub__\"]:\n check(dt1, dt2)\n\n # ## datetime64 with timetimedelta ###\n # TODO(jreback) __rsub__ should raise?\n if op_str not in [\"__add__\", \"__radd__\", \"__sub__\"]:\n check(dt1, td1)\n\n # 8260, 10763\n # datetime64 with tz\n tz = \"US/Eastern\"\n dt1 = Series(date_range(\"2000-01-01 09:00:00\", periods=5, tz=tz), name=\"foo\")\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n td1 = Series(pd.timedelta_range(\"1 days 1 min\", periods=5, freq=\"H\"))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n\n if op_str not in [\"__add__\", \"__radd__\", \"__sub__\", \"__rsub__\"]:\n check(dt2, td2)\n\n def test_sub_single_tz(self):\n # GH#12290\n s1 = Series([Timestamp(\"2016-02-10\", tz=\"America/Sao_Paulo\")])\n s2 = Series([Timestamp(\"2016-02-08\", tz=\"America/Sao_Paulo\")])\n result = s1 - s2\n expected = Series([Timedelta(\"2days\")])\n tm.assert_series_equal(result, expected)\n result = s2 - s1\n expected = Series([Timedelta(\"-2days\")])\n tm.assert_series_equal(result, expected)\n\n def test_dt64tz_series_sub_dtitz(self):\n # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series\n # (with same tz) raises, fixed by #19024\n dti = date_range(\"1999-09-30\", 
periods=10, tz=\"US/Pacific\")\n ser = Series(dti)\n expected = Series(TimedeltaIndex([\"0days\"] * 10))\n\n res = dti - ser\n tm.assert_series_equal(res, expected)\n res = ser - dti\n tm.assert_series_equal(res, expected)\n\n def test_sub_datetime_compat(self):\n # see GH#14088\n s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])\n dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)\n exp = Series([Timedelta(\"1 days\"), NaT])\n tm.assert_series_equal(s - dt, exp)\n tm.assert_series_equal(s - Timestamp(dt), exp)\n\n def test_dt64_series_add_mixed_tick_DateOffset(self):\n # GH#4532\n # operate with pd.offsets\n s = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n\n result = s + pd.offsets.Milli(5)\n result2 = pd.offsets.Milli(5) + s\n expected = Series(\n [Timestamp(\"20130101 9:01:00.005\"), Timestamp(\"20130101 9:02:00.005\")]\n )\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)\n expected = Series(\n [Timestamp(\"20130101 9:06:00.005\"), Timestamp(\"20130101 9:07:00.005\")]\n )\n tm.assert_series_equal(result, expected)\n\n def test_datetime64_ops_nat(self):\n # GH#11349\n datetime_series = Series([NaT, Timestamp(\"19900315\")])\n nat_series_dtype_timestamp = Series([NaT, NaT], dtype=\"datetime64[ns]\")\n single_nat_dtype_datetime = Series([NaT], dtype=\"datetime64[ns]\")\n\n # subtraction\n tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)\n msg = \"Unary negative expects\"\n with pytest.raises(TypeError, match=msg):\n -single_nat_dtype_datetime + datetime_series\n\n tm.assert_series_equal(\n -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n with pytest.raises(TypeError, match=msg):\n -single_nat_dtype_datetime + nat_series_dtype_timestamp\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n\n # -------------------------------------------------------------\n # Invalid Operations\n # TODO: this block also needs to be de-duplicated and parametrized\n\n @pytest.mark.parametrize(\n \"dt64_series\",\n [\n Series([Timestamp(\"19900315\"), Timestamp(\"19900315\")]),\n Series([NaT, Timestamp(\"19900315\")]),\n Series([NaT, NaT], dtype=\"datetime64[ns]\"),\n ],\n )\n @pytest.mark.parametrize(\"one\", [1, 1.0, np.array(1)])\n def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):\n # multiplication\n msg = \"cannot perform .* with this index type\"\n with pytest.raises(TypeError, match=msg):\n dt64_series * one\n with pytest.raises(TypeError, match=msg):\n one * dt64_series\n\n # division\n with pytest.raises(TypeError, match=msg):\n dt64_series / one\n with pytest.raises(TypeError, match=msg):\n one / dt64_series\n\n # TODO: parametrize over box\n @pytest.mark.parametrize(\"op\", [\"__add__\", \"__radd__\", \"__sub__\", \"__rsub__\"])\n def test_dt64_series_add_intlike(self, tz_naive_fixture, op):\n # GH#19123\n tz = tz_naive_fixture\n dti = DatetimeIndex([\"2016-01-02\", \"2016-02-03\", \"NaT\"], tz=tz)\n ser = Series(dti)\n\n other = Series([20, 30, 40], dtype=\"uint8\")\n\n method = getattr(ser, op)\n msg = \"|\".join(\n [\n \"Addition/subtraction of integers and integer-arrays\",\n 
\"cannot subtract .* from ndarray\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n method(1)\n with pytest.raises(TypeError, match=msg):\n method(other)\n with pytest.raises(TypeError, match=msg):\n method(np.array(other))\n with pytest.raises(TypeError, match=msg):\n method(pd.Index(other))\n\n # -------------------------------------------------------------\n # Timezone-Centric Tests\n\n def test_operators_datetimelike_with_timezones(self):\n tz = \"US/Eastern\"\n dt1 = Series(date_range(\"2000-01-01 09:00:00\", periods=5, tz=tz), name=\"foo\")\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n\n td1 = Series(pd.timedelta_range(\"1 days 1 min\", periods=5, freq=\"H\"))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n\n result = dt1 + td1[0]\n exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 + td2[0]\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n # odd numpy behavior with scalar timedeltas\n result = td1[0] + dt1\n exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = td2[0] + dt2\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt1 - td1[0]\n exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n td1[0] - dt1\n\n result = dt2 - td2[0]\n exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n with pytest.raises(TypeError, match=msg):\n td2[0] - dt2\n\n result = dt1 + td1\n exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 + td2\n exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt1 - td1\n exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 - td2\n exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n msg = \"cannot (add|subtract)\"\n with pytest.raises(TypeError, match=msg):\n td1 - dt1\n with pytest.raises(TypeError, match=msg):\n td2 - dt2\n\n\nclass TestDatetimeIndexArithmetic:\n\n # -------------------------------------------------------------\n # Binary operations DatetimeIndex and int\n\n def test_dti_addsub_int(self, tz_naive_fixture, one):\n # Variants of `one` for #19012\n tz = tz_naive_fixture\n rng = date_range(\"2000-01-01 09:00\", freq=\"H\", periods=10, tz=tz)\n msg = \"Addition/subtraction of integers\"\n\n with pytest.raises(TypeError, match=msg):\n rng + one\n with pytest.raises(TypeError, match=msg):\n rng += one\n with pytest.raises(TypeError, match=msg):\n rng - one\n with pytest.raises(TypeError, match=msg):\n rng -= one\n\n # -------------------------------------------------------------\n # __add__/__sub__ with integer arrays\n\n @pytest.mark.parametrize(\"freq\", [\"H\", \"D\"])\n @pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_dti_add_intarray_tick(self, int_holder, freq):\n # GH#19959\n dti = date_range(\"2016-01-01\", periods=2, freq=freq)\n other = int_holder([4, -1])\n\n msg = \"Addition/subtraction of integers|cannot subtract DatetimeArray from\"\n assert_invalid_addsub_type(dti, other, msg)\n\n @pytest.mark.parametrize(\"freq\", [\"W\", \"M\", \"MS\", \"Q\"])\n 
@pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_dti_add_intarray_non_tick(self, int_holder, freq):\n # GH#19959\n dti = date_range(\"2016-01-01\", periods=2, freq=freq)\n other = int_holder([4, -1])\n\n msg = \"Addition/subtraction of integers|cannot subtract DatetimeArray from\"\n assert_invalid_addsub_type(dti, other, msg)\n\n @pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_dti_add_intarray_no_freq(self, int_holder):\n # GH#19959\n dti = DatetimeIndex([\"2016-01-01\", \"NaT\", \"2017-04-05 06:07:08\"])\n other = int_holder([9, 4, -1])\n msg = \"|\".join(\n [\"cannot subtract DatetimeArray from\", \"Addition/subtraction of integers\"]\n )\n assert_invalid_addsub_type(dti, other, msg)\n\n # -------------------------------------------------------------\n # Binary operations DatetimeIndex and TimedeltaIndex/array\n\n def test_dti_add_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = date_range(\"2017-01-01\", periods=10, tz=tz)\n expected = expected._with_freq(None)\n\n # add with TimdeltaIndex\n result = dti + tdi\n tm.assert_index_equal(result, expected)\n\n result = tdi + dti\n tm.assert_index_equal(result, expected)\n\n # add with timedelta64 array\n result = dti + tdi.values\n tm.assert_index_equal(result, expected)\n\n result = tdi.values + dti\n tm.assert_index_equal(result, expected)\n\n def test_dti_iadd_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = date_range(\"2017-01-01\", periods=10, tz=tz)\n expected = expected._with_freq(None)\n\n # iadd with TimdeltaIndex\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result += tdi\n tm.assert_index_equal(result, expected)\n\n result = pd.timedelta_range(\"0 days\", periods=10)\n result += dti\n tm.assert_index_equal(result, expected)\n\n # iadd with timedelta64 array\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result += tdi.values\n tm.assert_index_equal(result, expected)\n\n result = pd.timedelta_range(\"0 days\", periods=10)\n result += dti\n tm.assert_index_equal(result, expected)\n\n def test_dti_sub_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = date_range(\"2017-01-01\", periods=10, tz=tz, freq=\"-1D\")\n expected = expected._with_freq(None)\n\n # sub with TimedeltaIndex\n result = dti - tdi\n tm.assert_index_equal(result, expected)\n\n msg = \"cannot subtract .*TimedeltaArray\"\n with pytest.raises(TypeError, match=msg):\n tdi - dti\n\n # sub with timedelta64 array\n result = dti - tdi.values\n tm.assert_index_equal(result, expected)\n\n msg = \"cannot subtract DatetimeArray from\"\n with pytest.raises(TypeError, match=msg):\n tdi.values - dti\n\n def test_dti_isub_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = date_range(\"2017-01-01\", periods=10, tz=tz, freq=\"-1D\")\n expected = expected._with_freq(None)\n\n # isub with TimedeltaIndex\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result -= tdi\n tm.assert_index_equal(result, expected)\n\n msg 
= \"cannot subtract .* from a TimedeltaArray\"\n with pytest.raises(TypeError, match=msg):\n tdi -= dti\n\n # isub with timedelta64 array\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result -= tdi.values\n tm.assert_index_equal(result, expected)\n\n msg = \"|\".join(\n [\n \"cannot perform __neg__ with this index type:\",\n \"ufunc subtract cannot use operands with types\",\n \"cannot subtract DatetimeArray from\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n tdi.values -= dti\n\n # -------------------------------------------------------------\n # Binary Operations DatetimeIndex and datetime-like\n # TODO: A couple other tests belong in this section. Move them in\n # A PR where there isn't already a giant diff.\n\n @pytest.mark.parametrize(\n \"addend\",\n [\n datetime(2011, 1, 1),\n DatetimeIndex([\"2011-01-01\", \"2011-01-02\"]),\n DatetimeIndex([\"2011-01-01\", \"2011-01-02\"]).tz_localize(\"US/Eastern\"),\n np.datetime64(\"2011-01-01\"),\n Timestamp(\"2011-01-01\"),\n ],\n ids=lambda x: type(x).__name__,\n )\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):\n # GH#9631\n dti = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"]).tz_localize(tz)\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"cannot add DatetimeArray and\"\n\n with pytest.raises(TypeError, match=msg):\n dtarr + addend\n with pytest.raises(TypeError, match=msg):\n addend + dtarr\n\n # -------------------------------------------------------------\n\n def test_dta_add_sub_index(self, tz_naive_fixture):\n # Check that DatetimeArray defers to Index classes\n dti = date_range(\"20130101\", periods=3, tz=tz_naive_fixture)\n dta = dti.array\n result = dta - dti\n expected = dti - dti\n tm.assert_index_equal(result, expected)\n\n tdi = result\n result = dta + tdi\n expected = dti + tdi\n tm.assert_index_equal(result, expected)\n\n result = dta - tdi\n expected = dti - tdi\n tm.assert_index_equal(result, expected)\n\n def test_sub_dti_dti(self):\n # previously performed setop (deprecated in 0.16.0), now changed to\n # return subtraction -> TimeDeltaIndex (GH ...)\n\n dti = date_range(\"20130101\", periods=3)\n dti_tz = date_range(\"20130101\", periods=3).tz_localize(\"US/Eastern\")\n dti_tz2 = date_range(\"20130101\", periods=3).tz_localize(\"UTC\")\n expected = TimedeltaIndex([0, 0, 0])\n\n result = dti - dti\n tm.assert_index_equal(result, expected)\n\n result = dti_tz - dti_tz\n tm.assert_index_equal(result, expected)\n msg = \"DatetimeArray subtraction must have the same timezones or\"\n with pytest.raises(TypeError, match=msg):\n dti_tz - dti\n\n with pytest.raises(TypeError, match=msg):\n dti - dti_tz\n\n with pytest.raises(TypeError, match=msg):\n dti_tz - dti_tz2\n\n # isub\n dti -= dti\n tm.assert_index_equal(dti, expected)\n\n # different length raises ValueError\n dti1 = date_range(\"20130101\", periods=3)\n dti2 = date_range(\"20130101\", periods=4)\n msg = \"cannot add indices of unequal length\"\n with pytest.raises(ValueError, match=msg):\n dti1 - dti2\n\n # NaN propagation\n dti1 = DatetimeIndex([\"2012-01-01\", np.nan, \"2012-01-03\"])\n dti2 = DatetimeIndex([\"2012-01-02\", \"2012-01-03\", np.nan])\n expected = TimedeltaIndex([\"1 days\", np.nan, np.nan])\n result = dti2 - dti1\n tm.assert_index_equal(result, expected)\n\n # -------------------------------------------------------------------\n # TODO: Most of this block is moved from series or frame tests, needs\n # cleanup, box-parametrization, 
and de-duplication\n\n @pytest.mark.parametrize(\"op\", [operator.add, operator.sub])\n def test_timedelta64_equal_timedelta_supported_ops(self, op):\n ser = Series(\n [\n Timestamp(\"20130301\"),\n Timestamp(\"20130228 23:00:00\"),\n Timestamp(\"20130228 22:00:00\"),\n Timestamp(\"20130228 21:00:00\"),\n ]\n )\n\n intervals = [\"D\", \"h\", \"m\", \"s\", \"us\"]\n\n def timedelta64(*args):\n # see casting notes in NumPy gh-12927\n return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))\n\n for d, h, m, s, us in product(*([range(2)] * 5)):\n nptd = timedelta64(d, h, m, s, us)\n pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)\n lhs = op(ser, nptd)\n rhs = op(ser, pytd)\n\n tm.assert_series_equal(lhs, rhs)\n\n def test_ops_nat_mixed_datetime64_timedelta64(self):\n # GH#11349\n timedelta_series = Series([NaT, Timedelta(\"1s\")])\n datetime_series = Series([NaT, Timestamp(\"19900315\")])\n nat_series_dtype_timedelta = Series([NaT, NaT], dtype=\"timedelta64[ns]\")\n nat_series_dtype_timestamp = Series([NaT, NaT], dtype=\"datetime64[ns]\")\n single_nat_dtype_datetime = Series([NaT], dtype=\"datetime64[ns]\")\n single_nat_dtype_timedelta = Series([NaT], dtype=\"timedelta64[ns]\")\n\n # subtraction\n tm.assert_series_equal(\n datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp\n )\n\n # without a Series wrapping the NaT, it is ambiguous\n # whether it is a datetime64 or timedelta64\n # defaults to interpreting it as timedelta64\n tm.assert_series_equal(\n nat_series_dtype_timestamp - single_nat_dtype_datetime,\n nat_series_dtype_timedelta,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp - single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n msg = \"cannot subtract a datelike\"\n with pytest.raises(TypeError, match=msg):\n timedelta_series - single_nat_dtype_datetime\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + single_nat_dtype_datetime,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_datetime + nat_series_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n\n def test_ufunc_coercions(self):\n idx = date_range(\"2011-01-01\", periods=3, freq=\"2D\", name=\"x\")\n\n delta = np.timedelta64(1, \"D\")\n exp = date_range(\"2011-01-02\", periods=3, freq=\"2D\", name=\"x\")\n for result in [idx + delta, np.add(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == \"2D\"\n\n exp = date_range(\"2010-12-31\", periods=3, freq=\"2D\", name=\"x\")\n\n for result in [idx - delta, np.subtract(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == 
\"2D\"\n\n # When adding/subtracting an ndarray (which has no .freq), the result\n # does not infer freq\n idx = idx._with_freq(None)\n delta = np.array(\n [np.timedelta64(1, \"D\"), np.timedelta64(2, \"D\"), np.timedelta64(3, \"D\")]\n )\n exp = DatetimeIndex([\"2011-01-02\", \"2011-01-05\", \"2011-01-08\"], name=\"x\")\n\n for result in [idx + delta, np.add(idx, delta)]:\n tm.assert_index_equal(result, exp)\n assert result.freq == exp.freq\n\n exp = DatetimeIndex([\"2010-12-31\", \"2011-01-01\", \"2011-01-02\"], name=\"x\")\n for result in [idx - delta, np.subtract(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == exp.freq\n\n def test_dti_add_series(self, tz_naive_fixture, names):\n # GH#13905\n tz = tz_naive_fixture\n index = DatetimeIndex(\n [\"2016-06-28 05:30\", \"2016-06-28 05:31\"], tz=tz, name=names[0]\n )\n ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])\n expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])\n\n # passing name arg isn't enough when names[2] is None\n expected.name = names[2]\n assert expected.dtype == index.dtype\n result = ser + index\n tm.assert_series_equal(result, expected)\n result2 = index + ser\n tm.assert_series_equal(result2, expected)\n\n expected = index + Timedelta(seconds=5)\n result3 = ser.values + index\n tm.assert_index_equal(result3, expected)\n result4 = index + ser.values\n tm.assert_index_equal(result4, expected)\n\n @pytest.mark.parametrize(\"op\", [operator.add, roperator.radd, operator.sub])\n def test_dti_addsub_offset_arraylike(\n self, tz_naive_fixture, names, op, index_or_series\n ):\n # GH#18849, GH#19744\n box = pd.Index\n other_box = index_or_series\n\n tz = tz_naive_fixture\n dti = date_range(\"2017-01-01\", periods=2, tz=tz, name=names[0])\n other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])\n\n xbox = get_upcast_box(box, other)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = op(dti, other)\n\n expected = DatetimeIndex(\n [op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq=\"infer\"\n )\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(res, expected)\n\n @pytest.mark.parametrize(\"other_box\", [pd.Index, np.array])\n def test_dti_addsub_object_arraylike(\n self, tz_naive_fixture, box_with_array, other_box\n ):\n tz = tz_naive_fixture\n\n dti = date_range(\"2017-01-01\", periods=2, tz=tz)\n dtarr = tm.box_expected(dti, box_with_array)\n other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)])\n xbox = get_upcast_box(box_with_array, other)\n\n expected = DatetimeIndex([\"2017-01-31\", \"2017-01-06\"], tz=tz_naive_fixture)\n expected = tm.box_expected(expected, xbox)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dtarr + other\n tm.assert_equal(result, expected)\n\n expected = DatetimeIndex([\"2016-12-31\", \"2016-12-29\"], tz=tz_naive_fixture)\n expected = tm.box_expected(expected, xbox)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dtarr - other\n tm.assert_equal(result, expected)\n\n\[email protected](\"years\", [-1, 0, 1])\[email protected](\"months\", [-2, 0, 2])\ndef test_shift_months(years, months):\n dti = DatetimeIndex(\n [\n Timestamp(\"2000-01-05 00:15:00\"),\n Timestamp(\"2000-01-31 00:23:00\"),\n Timestamp(\"2000-01-01\"),\n Timestamp(\"2000-02-29\"),\n Timestamp(\"2000-12-31\"),\n ]\n )\n actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))\n\n raw = [x + 
pd.offsets.DateOffset(years=years, months=months) for x in dti]\n expected = DatetimeIndex(raw)\n tm.assert_index_equal(actual, expected)\n\n\ndef test_dt64arr_addsub_object_dtype_2d():\n # block-wise DataFrame operations will require operating on 2D\n # DatetimeArray/TimedeltaArray, so check that specifically.\n dti = date_range(\"1994-02-13\", freq=\"2W\", periods=4)\n dta = dti._data.reshape((4, 1))\n\n other = np.array([[pd.offsets.Day(n)] for n in range(4)])\n assert other.shape == dta.shape\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dta + other\n with tm.assert_produces_warning(PerformanceWarning):\n expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)\n\n assert isinstance(result, DatetimeArray)\n assert result.freq is None\n tm.assert_numpy_array_equal(result._data, expected._data)\n\n with tm.assert_produces_warning(PerformanceWarning):\n # Case where we expect to get a TimedeltaArray back\n result2 = dta - dta.astype(object)\n\n assert isinstance(result2, TimedeltaArray)\n assert result2.shape == (4, 1)\n assert result2.freq is None\n assert (result2.asi8 == 0).all()\n" ]
[ [ "pandas._testing.assert_numpy_array_equal", "pandas.timedelta_range", "pandas.tests.arithmetic.common.assert_invalid_comparison", "pandas.Series", "pandas._testing.box_expected", "pandas.array", "pandas.Period", "numpy.subtract", "pandas._testing.assert_series_equal", "numpy.add", "numpy.datetime64", "pandas.DateOffset", "pandas.offsets.Minute", "pandas._testing.makeDateIndex", "pandas.period_range", "numpy.timedelta64", "pandas._libs.tslibs.offsets.shift_months", "pandas._testing.assert_produces_warning", "pandas.Timestamp.now", "numpy.abs", "pandas._testing.assert_equal", "pandas._libs.tslibs.conversion.localize_pydatetime", "pandas.to_datetime", "pandas.offsets.Day", "pandas.Timestamp", "pandas.offsets.MonthEnd", "pandas.date_range", "pandas.compat.np_datetime64_compat", "pandas.tests.arithmetic.common.get_upcast_box", "pandas.TimedeltaIndex", "pandas.Timedelta", "numpy.arange", "numpy.all", "pandas.tests.arithmetic.common.assert_invalid_addsub_type", "pandas.Index", "pandas.DatetimeIndex", "pandas.offsets.Hour", "pandas.offsets.DateOffset", "pandas.offsets.Milli", "pandas._testing.assert_index_equal", "pandas.offsets.Second", "numpy.array" ] ]
jhuapl-boss/intern
[ "cd6513e9f3ef3af02d3a82e3dda5d905a4003d2c" ]
[ "intern/convenience/array.py" ]
[ "\"\"\"\nCopyright 2018-2021 The Johns Hopkins University Applied Physics Laboratory.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n# Standard imports\nfrom typing import Optional, Union, Tuple\nimport abc\nimport json\nfrom collections import namedtuple\nfrom urllib.parse import unquote\n\nfrom intern.service.boss.httperrorlist import HTTPErrorList\n\nfrom .uri import parse_fquri\n\n\n# Pip-installable imports\nimport numpy as np\n\nfrom intern.resource.boss.resource import (\n CollectionResource,\n ChannelResource,\n CoordinateFrameResource,\n ExperimentResource,\n)\nfrom intern.service.boss.metadata import MetadataService\nfrom intern.remote.boss import BossRemote\n\n# A named tuple that represents a bossDB URI.\nbossdbURI = namedtuple(\n \"bossdbURI\", [\"collection\", \"experiment\", \"channel\", \"resolution\"]\n)\n\n_DEFAULT_BOSS_OPTIONS = {\n \"protocol\": \"https\",\n \"host\": \"api.bossdb.io\",\n \"token\": \"public\",\n}\n\n\nclass VolumeProvider(abc.ABC):\n \"\"\"\n A provider for the common get/put cutout operations on a Remote.\n\n TODO: This should ultimately be subsumed back into the Remote API.\n\n \"\"\"\n\n def get_channel(self, channel: str, collection: str, experiment: str):\n ...\n\n def get_project(self, resource):\n ...\n\n def create_project(self, resource):\n ...\n\n def get_cutout(\n self,\n channel: ChannelResource,\n resolution: int,\n xs: Tuple[int, int],\n ys: Tuple[int, int],\n zs: Tuple[int, int],\n ):\n ...\n\n def create_cutout(\n self,\n channel: ChannelResource,\n resolution: int,\n xs: Tuple[int, int],\n ys: Tuple[int, int],\n zs: Tuple[int, int],\n data,\n ):\n ...\n\n\nclass _InternVolumeProvider(VolumeProvider):\n \"\"\"\n A VolumeProvider that backends the intern.BossRemote API.\n\n This is used instead of directly accessing the BossRemote so that the\n convenience `array` can be easily stripped out. 
(The array module was\n originally a visitor from another Python package called `emboss`, so moving\n VolumeProvider endpoints back into the Remote API is an outstanding TODO.)\n \"\"\"\n\n def __init__(self, boss: BossRemote = None):\n if boss is None:\n try:\n boss = BossRemote()\n except:\n boss = BossRemote(_DEFAULT_BOSS_OPTIONS)\n self.boss = boss\n\n def get_channel(self, channel: str, collection: str, experiment: str):\n return self.boss.get_channel(channel, collection, experiment)\n\n def get_project(self, resource):\n return self.boss.get_project(resource)\n\n def create_project(self, resource):\n return self.boss.create_project(resource)\n\n def get_cutout(\n self,\n channel: ChannelResource,\n resolution: int,\n xs: Tuple[int, int],\n ys: Tuple[int, int],\n zs: Tuple[int, int],\n ):\n return self.boss.get_cutout(channel, resolution, xs, ys, zs)\n\n def create_cutout(\n self,\n channel: ChannelResource,\n resolution: int,\n xs: Tuple[int, int],\n ys: Tuple[int, int],\n zs: Tuple[int, int],\n data,\n ):\n return self.boss.create_cutout(channel, resolution, xs, ys, zs, data)\n\n\ndef _construct_boss_url(boss, col, exp, chan, res, xs, ys, zs) -> str:\n # TODO: use boss host\n return f\"https://api.theboss.io/v1/cutout/{col}/{exp}/{chan}/{res}/{xs[0]}:{xs[1]}/{ys[0]}:{ys[1]}/{zs[0]}:{zs[1]}\"\n\n\ndef parse_bossdb_uri(uri: str) -> bossdbURI:\n \"\"\"\n Parse a bossDB URI and handle malform errors.\n\n Arguments:\n uri (str): URI of the form bossdb://<collection>/<experiment>/<channel>\n\n Returns:\n bossdbURI\n\n \"\"\"\n t = uri.split(\"://\")[1].split(\"/\")\n if len(t) == 3:\n return bossdbURI(t[0], t[1], t[2], None)\n if len(t) == 4:\n return bossdbURI(t[0], t[1], t[2], int(t[3]))\n raise ValueError(f\"Cannot parse URI {uri}.\")\n\n\nclass AxisOrder:\n XYZ = \"XYZ\"\n ZYX = \"ZYX\"\n\n\nclass _MetadataProvider:\n \"\"\"\n Serves as a dictionary-like API for resource metadata.\n\n \"\"\"\n\n def __init__(self, dataset) -> None:\n \"\"\"\n Create a new metadata provider.\n\n Arguments:\n dataset (array)\n\n \"\"\"\n self._array = dataset\n self._resource = dataset._channel\n self._remote = dataset.volume_provider.boss\n\n def keys(self):\n return self._remote.list_metadata(self._resource)\n\n def items(self):\n for key in self.keys():\n yield (key, self[key])\n\n def __delitem__(self, key):\n return self._remote.delete_metadata(self._resource, [key])\n\n def __contains__(self, key):\n try:\n self[key]\n return True\n except KeyError:\n return False\n\n def __getitem__(self, key):\n try:\n return self._remote.get_metadata(self._resource, [key])[key]\n except HTTPErrorList as err:\n raise KeyError(\n f\"The key {key!s} was not found in the metadata database.\"\n ) from err\n\n def __setitem__(self, key, value):\n return self._remote.create_metadata(self._resource, {key: value})\n\n def update_item(self, key, value):\n return self._remote.update_metadata(self._resource, {key: value})\n\n def bulk_update(self, items: dict):\n return self._remote.create_metadata(self._resource, items)\n\n def bulk_delete(self, keys: list):\n return self._remote.delete_metadata(self._resource, keys)\n\nclass array:\n \"\"\"\n An intern/bossDB-backed numpy array.\n\n Like a numpy.memmap array, an `intern.array` is backed by data that lives\n outside of conventional memory. The data can live in, for example, a bossDB\n that lives in AWS, or it can live in a local or remote bossphorus instance.\n\n Data are downloaded when a request is made. 
This means that even \"simple\"\n commands like `array#[:]sum()` are very network-heavy (don't do this!).\n\n Examples:\n\n >>> import intern.array\n >>> data = array(\"bossdb://collection/experiment/channel\")\n >>> downloaded_sample = data[100, 100:200, 100:200]\n\n \"\"\"\n\n def __init__(\n self,\n channel: Union[ChannelResource, Tuple, str],\n resolution: int = 0,\n volume_provider: VolumeProvider = None,\n axis_order: str = AxisOrder.ZYX,\n create_new: bool = False,\n description: Optional[str] = None,\n dtype: Optional[str] = None,\n extents: Optional[Tuple[int, int, int]] = None,\n voxel_size: Optional[Tuple[int, int, int]] = None,\n voxel_unit: Optional[str] = None,\n downsample_levels: int = 6,\n downsample_method: Optional[str] = \"anisotropic\",\n coordinate_frame_name: Optional[str] = None,\n coordinate_frame_desc: Optional[str] = None,\n collection_desc: Optional[str] = None,\n experiment_desc: Optional[str] = None,\n source_channel: Optional[str] = None,\n boss_config: Optional[dict] = None,\n ) -> None:\n \"\"\"\n Construct a new intern-backed array.\n\n Arguments:\n channel (intern.resource.boss.ChannelResource): The channel from\n which data will be downloaded.\n resolution (int: 0): The native resolution or MIP to use\n volume_provider (VolumeProvider): The remote-like to use\n axis_order (str = AxisOrder.ZYX): The axis-ordering to use for data\n cutouts. Defaults to ZYX. DOES NOT affect the `voxel_size` or\n `extents` arguments to this constructor.\n create_new (bool: False): Whether to create new Resources if they\n do not exist. Does not work with public token.\n dtype (str): Only required if `create_new = True`. Specifies the\n numpy-style datatype for this new dataset (e.g. \"uint8\").\n description (str): Only required if `create_new = True`. Sets the\n description for the newly-created collection, experiment,\n channel, and coordframe resources.\n extents: Optional[Tuple[int, int, int]]: Only required if\n `create_new = True`. Specifies the total dataset extents of\n this new dataset, in ZYX order.\n voxel_size: Optional[Tuple[int, int, int]]: Only required if\n `create_new = True`. Specifies the voxel dimensions of this new\n dataset, in ZYX order.\n voxel_unit: Optional[str]: Only required if `create_new = True`.\n Specifies the voxel-dimension unit. For example, \"nanometers\".\n downsample_levels (int: 6): The number of downsample levels.\n downsample_method (Optional[str]): The type of downsample to use.\n If unset, defaults to 'anisotropic'.\n coordinate_frame_name (Optional[str]): If set, the name to use for\n the newly created coordinate frame. If not set, the name of the\n coordinate frame will be chosen automatically.\n coordinate_frame_desc (Optional[str]): If set, the description text\n to use for the newly created coordinate frame. If not set, the\n description will be chosen automatically.\n collection_desc (Optional[str]): The description text to use for a\n newly created collection. If not set, the description will be\n chosen automatically.\n experiment_desc (Optional[str]): The description text to use for a\n newly created experiment. If not set, the description will be\n chosen automatically.\n source_channel (Optional[str]): The channel to use as the source\n for this new channel, if `create_new` is True and this is\n going to be an annotation channel (dtype!=uint8).\n boss_config (Optional[dict]): The BossRemote configuration dict to\n use in order to authenticate with a BossDB remote. 
This option\n is mutually exclusive with the VolumeProvider configuration. If\n the `volume_provider` arg is set, this will be ignored.\n\n \"\"\"\n self.axis_order = axis_order\n\n # Handle custom Remote:\n self.volume_provider = volume_provider\n if volume_provider is None:\n if boss_config:\n self.volume_provider = _InternVolumeProvider(BossRemote(boss_config))\n else:\n self.volume_provider = _InternVolumeProvider()\n\n if create_new:\n\n # We'll need at least `extents` and `voxel_size`.\n description = description or \"Created with intern\"\n dtype = dtype or \"uint8\"\n\n if extents is None:\n raise ValueError(\n \"If `create_new` is True, you must specify the extents of the new coordinate frame as a [x, y, z].\"\n )\n if voxel_size is None:\n raise ValueError(\n \"If `create_new` is True, you must specify the voxel_size of the new coordinate frame as a [x, y, z].\"\n )\n\n uri = parse_bossdb_uri(channel)\n\n # create collection if it doesn't exist:\n try:\n # Try to get an existing collection:\n collection = self.volume_provider.get_project(\n CollectionResource(uri.collection)\n )\n except:\n # Create the collection:\n collection = CollectionResource(\n uri.collection, description=collection_desc or description\n )\n self.volume_provider.create_project(collection)\n\n # create coordframe if it doesn't exist:\n try:\n # Try to get an existing coordframe:\n coordframe = self.volume_provider.get_project(\n CoordinateFrameResource(\n coordinate_frame_name or f\"CF_{uri.collection}_{uri.experiment}\"\n )\n )\n except:\n # Default to nanometers if a voxel unit isn't provided\n voxel_unit = voxel_unit or \"nanometers\"\n # Create the coordframe:\n coordframe = CoordinateFrameResource(\n coordinate_frame_name or f\"CF_{uri.collection}_{uri.experiment}\",\n description=coordinate_frame_desc or description,\n x_start=0,\n y_start=0,\n z_start=0,\n x_stop=extents[2],\n y_stop=extents[1],\n z_stop=extents[0],\n x_voxel_size=voxel_size[2],\n y_voxel_size=voxel_size[1],\n z_voxel_size=voxel_size[0],\n voxel_unit=voxel_unit,\n )\n self.volume_provider.create_project(coordframe)\n\n # create experiment if it doesn't exist:\n try:\n # Try to get an existing experiment:\n experiment = self.volume_provider.get_project(\n ExperimentResource(uri.experiment, uri.collection)\n )\n except:\n # Create the experiment:\n experiment = ExperimentResource(\n uri.experiment,\n uri.collection,\n description=experiment_desc or description,\n coord_frame=coordframe.name,\n num_hierarchy_levels=downsample_levels,\n hierarchy_method=downsample_method,\n )\n self.volume_provider.create_project(experiment)\n\n # create channel if it doesn't exist:\n try:\n # Try to get an existing channel:\n channel = self.volume_provider.get_project(\n ChannelResource(uri.channel, uri.collection, uri.experiment)\n )\n except:\n # Create the channel:\n channel = ChannelResource(\n uri.channel,\n uri.collection,\n uri.experiment,\n description=description,\n type=\"image\" if dtype in [\"uint8\", \"uint16\"] else \"annotation\",\n datatype=dtype,\n sources=[source_channel] if source_channel else [],\n )\n self.volume_provider.create_project(channel)\n\n self.resolution = resolution\n # If the channel is set as a Resource, then use that resource.\n if isinstance(channel, ChannelResource):\n self._channel = channel\n # If it is set as a string, then parse the channel and generate an\n # intern.Resource from a bossDB URI.\n elif isinstance(channel, str):\n uri = parse_bossdb_uri(channel)\n self.resolution = (\n uri.resolution if not 
(uri.resolution is None) else self.resolution\n )\n self._channel = self.volume_provider.get_channel(\n uri.channel, uri.collection, uri.experiment\n )\n else:\n raise NotImplementedError(\n \"You must specify a channel of the form \"\n \"'bossdb://collection/experiment/channel' or you must \"\n \"provide an intern.Remote.\"\n )\n\n # Set empty experiment (will be dict)\n self._exp = None\n # Set empty coordframe (will be dict)\n self._coord_frame = None\n\n # Set col/exp/chan based upon the channel or URI provided.\n self.collection_name = self._channel.coll_name\n self.experiment_name = self._channel.exp_name\n self.channel_name = self._channel.name\n\n # Create a pointer to the metadata for the channel.\n self._channel_metadata = _MetadataProvider(self)\n\n @property\n def metadata(self):\n \"\"\"\n Returns a pointer to the metadata provider.\n \"\"\"\n return self._channel_metadata\n\n @property\n def dtype(self):\n \"\"\"\n Return the datatype of the array.\n\n Will default to the dtype of the channel.\n \"\"\"\n return self._channel.datatype\n\n @property\n def url(self):\n \"\"\"\n Get a pointer to this Channel on the BossDB page.\n \"\"\"\n return f\"{self.volume_provider.boss._project._base_protocol}://{self.volume_provider.boss._project._base_url}/v1/mgmt/resources/{self.collection_name}/{self.experiment_name}/{self.channel_name}\"\n\n @property\n def visualize(self):\n \"\"\"\n Get a pointer to this Channel on the BossDB page.\n \"\"\"\n return \"https://neuroglancer.bossdb.io/#!{'layers':{'image':{'source':'boss://__replace_me__'}}}\".replace(\n \"__replace_me__\",\n f\"{self.volume_provider.boss._project._base_protocol}://{self.volume_provider.boss._project._base_url}/{self.collection_name}/{self.experiment_name}/{self.channel_name}\",\n )\n\n @property\n def shape(self):\n \"\"\"\n Get the dimensions (numpy-flavored) of the array.\n\n Will return (1, 1, 1) if a coordinate frame does not exist (as in cases\n of pre-v2 bossphorus instances); this will not restrict indexing.\n \"\"\"\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n # From the coordinate frame, get the x, y, and z sizes. 
Note that this\n # is the SIZE, not the extents; in other words, a cframe that starts at\n # x=10 and extends to x=110 will have a size of 100 here.\n if self.axis_order == AxisOrder.XYZ:\n return (\n int(\n (self._coord_frame.y_stop - self._coord_frame.y_start)\n / (2 ** self.resolution)\n ),\n int(\n (self._coord_frame.x_stop - self._coord_frame.x_start)\n / (2 ** self.resolution)\n ),\n (self._coord_frame.z_stop - self._coord_frame.z_start),\n )\n elif self.axis_order == AxisOrder.ZYX:\n return (\n (self._coord_frame.z_stop - self._coord_frame.z_start),\n int(\n (self._coord_frame.y_stop - self._coord_frame.y_start)\n / (2 ** self.resolution)\n ),\n int(\n (self._coord_frame.x_stop - self._coord_frame.x_start)\n / (2 ** self.resolution)\n ),\n )\n\n @property\n def voxel_size(self):\n \"\"\"\n Get the dimensions (numpy-flavored) of the array.\n\n Will return (1, 1, 1) if a coordinate frame does not exist (as in cases\n of pre-v2 bossphorus instances); this will not restrict indexing.\n \"\"\"\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n if self.axis_order == AxisOrder.XYZ:\n vox_size = (\n self._coord_frame.x_voxel_size,\n self._coord_frame.y_voxel_size,\n self._coord_frame.z_voxel_size,\n )\n elif self.axis_order == AxisOrder.ZYX:\n vox_size = (\n self._coord_frame.z_voxel_size,\n self._coord_frame.y_voxel_size,\n self._coord_frame.x_voxel_size,\n )\n return vox_size\n\n @property\n def voxel_unit(self):\n if self._coord_frame is None:\n self._populate_coord_frame()\n return self._coord_frame.voxel_unit\n\n def _populate_exp(self):\n \"\"\"\n Populate the experiment component of this array.\n\n Cache the results for later.\n \"\"\"\n self._exp = self.volume_provider.get_project(\n ExperimentResource(self._channel.exp_name, self._channel.coll_name)\n )\n\n def _populate_coord_frame(self):\n \"\"\"\n Populate the array coordinate frame.\n\n Cache the results for later.\n \"\"\"\n if self._exp is None:\n self._populate_exp()\n self._coord_frame = self.volume_provider.get_project(\n CoordinateFrameResource(self._exp.coord_frame)\n )\n \n @property\n def downsample_status(self):\n \"\"\"\n Return the downsample status of the underlying channel.\n \"\"\"\n return self._channel.downsample_status\n \n @property\n def available_resolutions(self):\n \"\"\"\n Return a list of available resolutions for this channel.\n \n Arguments:\n None\n \n Returns:\n List[int]: A list of resolutions at which this dataset can be downloaded\n \n \"\"\"\n self._populate_exp()\n return list(range(dataset._exp.num_hierarchy_levels))\n\n def __getitem__(self, key: Tuple) -> np.array:\n \"\"\"\n Get a subarray or subvolume.\n\n Uses one of two indexing methods:\n 1. Start/Stop (`int:int`)\n 2. Single index (`int`)\n\n Each element of the key can be one of those two options. For example,\n\n myarray[1, 1:100, 2]\n\n \"\"\"\n # If the user has requested XYZ mode, the first thing to do is reverse\n # the array indices. Then you can continue this fn without any\n # additional changes.\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Next, we need to get the shape of the dataset. We do this currently\n # by getting the coordinate frame, which means that we need the\n # coordframe data and experiment data if we don't have it already. 
In\n # the future, we may also want to allow the user to specify general\n # shape information so that we can avoid calling the API.\n\n # Populate the experiment metadata if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Populate the coordinate frame metadata if not yet set:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n # Now we can begin. There is a wide variety of indexing options\n # available, including single-integer indexing, tuple-of-slices\n # indexing, tuple-of-int indexing...\n\n # First we'll address if the user presents a single integer.\n # ```\n # my_array[500]\n # ```\n # In this case, the user is asking for a single Z slice (or single X\n # slice if in XYZ order... But that's a far less common use case.)\n # We will get the full XY extents and download a single 2D array:\n if isinstance(key, int):\n # Get the full Z slice:\n xs = (0, self.shape[2])\n ys = (0, self.shape[1])\n zs = (key, key + 1)\n else:\n # We also support indexing with units. For example, you can ask for\n # ```\n # my_array[0:10, 0:10, 0:10, \"nanometers\"]\n # ```\n # which will download as many pixels as are required in order to\n # download 10nm in each dimension. We do this by storing a\n # \"normalized units\" measure which is a rescale factor for each\n # dimension (in the same order, e.g. ZYX, as the array).\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n # We will now do the following codeblock three times, for X,Y,Z:\n # First, we check to see if this index is a single integer. If so,\n # the user is requesting a 2D array with zero depth along this\n # dimension. 
For example, if the user asks for\n # ```\n # my_data[0:120, 0:120, 150]\n # ```\n # Then \"150\" suggests that the user just wants one single X slice.\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n # If the key is a Slice, then it has .start and .stop attrs.\n # (The user is requesting an array with more than one slice\n # in this dimension.)\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = int(start / _normalize_units[0])\n stop = int(stop / _normalize_units[0])\n\n # Cast the coords to integers (since Boss needs int coords)\n xs = (int(start), int(stop))\n\n # Do the same thing again for the next dimension: Either a single\n # integer, or a slice...\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n # Do the same thing again for the last dimension: Either a single\n # integer, or a slice...\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n # Finally, we can perform the cutout itself, using the x, y, and z\n # coordinates that we computed in the previous step.\n cutout = self.volume_provider.get_cutout(\n self._channel, self.resolution, xs, ys, zs\n )\n\n # Data are returned in ZYX order:\n if self.axis_order == AxisOrder.XYZ:\n data = np.rollaxis(np.rollaxis(cutout, 1), 2)\n elif self.axis_order == AxisOrder.ZYX:\n data = cutout\n\n # If any of the dimensions are of length 1, it's because the user\n # requested a single slice in their key; flatten the array in that\n # dimension. For example, if you request `[10, 0:10, 0:10]` then the\n # result should be 2D (no Z component).\n _shape = data.shape\n if _shape[0] == 1:\n data = data[0, :, :]\n if _shape[1] == 1:\n data = data[:, 0, :]\n if _shape[2] == 1:\n data = data[:, :, 0]\n return data\n\n def __setitem__(self, key: Tuple, value: np.array) -> np.array:\n \"\"\"\n Set a subarray or subvolume.\n\n Uses one of two indexing methods:\n 1. Start/Stop (`int:int`)\n 2. Single index (`int`)\n\n Each element of the key can be one of those two options. 
For example,\n\n myarray[1, 1:100, 2]\n\n Start-only (`10:`) or stop-only (`:10`) indexing is unsupported.\n \"\"\"\n\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = start / _normalize_units[0]\n stop = stop / _normalize_units[0]\n\n xs = (int(start), int(stop))\n\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n if len(value.shape) == 2:\n # TODO: Support other 2D shapes as well\n value = np.array([value])\n\n cutout = self.volume_provider.create_cutout(\n self._channel, self.resolution, xs, ys, zs, value\n )\n\n\ndef arrays_from_neuroglancer(url: str):\n \"\"\"\n Construct array(s) from a neuroglancer link.\n\n Arguments:\n url (str): The neuroglancer link to parse\n\n Returns:\n Dict[str, array]: A dictionary of arrays, where each is keyed by\n the name of the channel in neuroglancer.\n\n \"\"\"\n ngl_state = json.loads(unquote(url).split(\"#!\")[1])\n\n arrays = {}\n for source in ngl_state[\"layers\"]:\n source_url = \"\"\n if \"boss://\" in source[\"source\"]:\n source_url = source[\"source\"]\n elif (\n isinstance(source[\"source\"], dict) and \"boss://\" in source[\"source\"][\"url\"]\n ):\n source_url = source[\"source\"][\"url\"]\n else:\n continue\n remote, channel = parse_fquri(source_url)\n arrays[source[\"name\"]] = array(\n channel=channel, volume_provider=_InternVolumeProvider(remote)\n )\n return arrays\n\n\ndef volumes_from_neuroglancer(\n url: str, radius_zyx: Tuple[int, int, int] = (10, 1024, 1024)\n):\n \"\"\"\n Download numpy arrays from BossDB based upon a neuroglancer URL.\n\n Arguments:\n url (str): The neuroglancer link to parse\n radius_zyx (Tuple[int, int, int]): The amount of data along each axis\n to download, centered at the position from the URL.\n\n Returns:\n Dict[str, np.ndarray]: A dictionary of np.arrays, where each is keyed\n by the name of the channel in neuroglancer.\n\n\n \"\"\"\n ngl_state = json.loads(unquote(url).split(\"#!\")[1])\n\n x, y, z = ngl_state[\"position\"]\n zr, yr, xr = radius_zyx\n\n arrays = arrays_from_neuroglancer(url)\n return {\n key: dataset[z - zr : z + zr, y - yr : y + yr, x - xr : x + xr]\n for key, dataset in arrays.items()\n }\n" ]
[ [ "numpy.array", "numpy.rollaxis" ] ]
annapasca/ephypype
[ "6dbacdd6913234a28b690b401862ff062accecc7" ]
[ "examples/plot_inverse.py" ]
[ "\"\"\"\n.. _source_reconstruction:\n\n========================\nCompute inverse solution\n========================\nThe inverse solution pipeline performs source reconstruction starting either\nfrom raw/epoched data (*.fif* format) specified by the user or from the output\nof the Preprocessing pipeline (cleaned raw data).\n\"\"\"\n\n# Authors: Annalisa Pascarella <[email protected]>\n# License: BSD (3-clause)\n\n# sphinx_gallery_thumbnail_number = 2\n\nimport os.path as op\nimport numpy as np\nimport nipype.pipeline.engine as pe\nimport nipype.interfaces.io as nio\n\nimport ephypype\nfrom ephypype.nodes import create_iterator\nfrom ephypype.datasets import fetch_omega_dataset\n\n\n###############################################################################\n# Let us fetch the data first. It is around 675 MB download.\n\nbase_path = op.join(op.dirname(ephypype.__file__), '..', 'examples')\ndata_path = fetch_omega_dataset(base_path)\n\n###############################################################################\n# then read the parameters for experiment and inverse problem from a\n# :download:`json <https://github.com/neuropycon/ephypype/tree/master/examples/params.json>`\n# file and print it\n\nimport json # noqa\nimport pprint # noqa\nparams = json.load(open(\"params.json\"))\n\npprint.pprint({'experiment parameters': params[\"general\"]})\nsubject_ids = params[\"general\"][\"subject_ids\"] # sub-003\nsession_ids = params[\"general\"][\"session_ids\"] # ses-0001\nNJOBS = params[\"general\"][\"NJOBS\"]\n\npprint.pprint({'inverse parameters': params[\"inverse\"]})\nspacing = params[\"inverse\"]['spacing'] # ico-5 vs oct-6\nsnr = params[\"inverse\"]['snr'] # use smaller SNR for raw data\ninv_method = params[\"inverse\"]['img_method'] # sLORETA, MNE, dSPM, LCMV\nparc = params[\"inverse\"]['parcellation'] # parcellation to use: 'aparc' vs 'aparc.a2009s' # noqa\n# noise covariance matrix filename template\nnoise_cov_fname = params[\"inverse\"]['noise_cov_fname']\n\n# set sbj dir path, i.e. where the FS folfers are\nsubjects_dir = op.join(data_path, params[\"general\"][\"subjects_dir\"])\n\n###############################################################################\n# Then, we create our workflow and specify the `base_dir` which tells\n# nipype the directory in which to store the outputs.\n\n# workflow directory within the `base_dir`\nsrc_reconstruction_pipeline_name = 'source_reconstruction_' + \\\n inv_method + '_' + parc.replace('.', '')\n\nmain_workflow = pe.Workflow(name=src_reconstruction_pipeline_name)\nmain_workflow.base_dir = data_path\n\n###############################################################################\n# Then we create a node to pass input filenames to DataGrabber from nipype\n\ninfosource = create_iterator(['subject_id', 'session_id'],\n [subject_ids, session_ids])\n\n###############################################################################\n# and a node to grab data. 
The template_args in this node iterate upon\n# the values in the infosource node\n\ndatasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],\n outfields=['raw_file', 'trans_file']), # noqa\n name='datasource')\n\ndatasource.inputs.base_directory = data_path\ndatasource.inputs.template = '*%s/%s/meg/%s*rest*%s.fif'\n\ndatasource.inputs.template_args = dict(\n raw_file=[['subject_id', 'session_id', 'subject_id', '0_60*ica']],\n trans_file=[['subject_id', 'session_id', 'subject_id', \"-trans\"]])\n\ndatasource.inputs.sort_filelist = True\n\n###############################################################################\n# Ephypype creates for us a pipeline which can be connected to these\n# nodes we created. The inverse solution pipeline is implemented by the\n# function\n# :func:`ephypype.pipelines.preproc_meeg.create_pipeline_source_reconstruction`\n# thus to instantiate the inverse pipeline node, we import it and pass our\n# parameters to it.\n# The inverse pipeline contains three nodes that wrap the MNE Python functions\n# that perform the source reconstruction steps.\n#\n# In particular, the three nodes are:\n#\n# * :class:`ephypype.interfaces.mne.LF_computation.LFComputation` compute the\n# Lead Field matrix\n# * :class:`ephypype.interfaces.mne.Inverse_solution.NoiseCovariance` computes\n# the noise covariance matrix\n# * :class:`ephypype.interfaces.mne.Inverse_solution.InverseSolution` estimates\n# the time series of the neural sources on a set of dipoles grid\n\nfrom ephypype.pipelines import create_pipeline_source_reconstruction # noqa\ninv_sol_workflow = create_pipeline_source_reconstruction(\n data_path, subjects_dir, spacing=spacing, inv_method=inv_method, parc=parc,\n noise_cov_fname=noise_cov_fname)\n\n###############################################################################\n# We then connect the nodes two at a time. First, we connect the two outputs\n# (subject_id and session_id) of the infosource node to the datasource node.\n# So, these two nodes taken together can grab data.\n\nmain_workflow.connect(infosource, 'subject_id', datasource, 'subject_id')\nmain_workflow.connect(infosource, 'session_id', datasource, 'session_id')\n\n###############################################################################\n# Similarly, for the inputnode of the preproc_workflow. Things will become\n# clearer in a moment when we plot the graph of the workflow.\n\nmain_workflow.connect(infosource, 'subject_id',\n inv_sol_workflow, 'inputnode.sbj_id')\nmain_workflow.connect(datasource, 'raw_file',\n inv_sol_workflow, 'inputnode.raw')\nmain_workflow.connect(datasource, 'trans_file',\n inv_sol_workflow, 'inputnode.trans_file')\n\n###############################################################################\n# To do so, we first write the workflow graph (optional)\n\nmain_workflow.write_graph(graph2use='colored') # colored\n\n###############################################################################\n# and visualize it. 
Take a moment to pause and notice how the connections\n# here correspond to how we connected the nodes.\n\nimport matplotlib.pyplot as plt # noqa\nimg = plt.imread(op.join(data_path, src_reconstruction_pipeline_name, 'graph.png')) # noqa\nplt.figure(figsize=(8, 8))\nplt.imshow(img)\nplt.axis('off')\n\n###############################################################################\n# Finally, we are now ready to execute our workflow.\n\nmain_workflow.config['execution'] = {'remove_unnecessary_outputs': 'false'}\n\n# Run workflow locally on 1 CPU\nmain_workflow.run(plugin='LegacyMultiProc', plugin_args={'n_procs': NJOBS})\n\n###############################################################################\n# The output is the source reconstruction matrix stored in the workflow\n# directory defined by `base_dir`. This matrix can be used as input of\n# the Connectivity pipeline.\n#\n# .. warning:: To use this pipeline, we need a cortical segmentation of MRI\n# data, that could be provided by Freesurfer\n\n##############################################################################\n\nimport pickle # noqa\nfrom ephypype.gather import get_results # noqa\nfrom visbrain.objects import BrainObj, ColorbarObj, SceneObj # noqa\n\ntime_series_files, label_files = get_results(main_workflow.base_dir,\n main_workflow.name,\n pipeline='inverse')\n\ntime_pts = 30\n\nsc = SceneObj(size=(800, 500), bgcolor=(0, 0, 0))\nlh_file = op.join(subjects_dir, 'fsaverage', 'label/lh.aparc.annot')\nrh_file = op.join(subjects_dir, 'fsaverage', 'label/rh.aparc.annot')\ncmap = 'bwr'\ntxtcolor = 'white'\nfor inverse_file, label_file in zip(time_series_files, label_files):\n # Load files :\n with open(label_file, 'rb') as f:\n ar = pickle.load(f)\n names, xyz, colors = ar['ROI_names'], ar['ROI_coords'], ar['ROI_colors'] # noqa\n ts = np.squeeze(np.load(inverse_file))\n cen = np.array([k.mean(0) for k in xyz])\n\n # Get the data of the left / right hemisphere :\n lh_data, rh_data = ts[::2, time_pts], ts[1::2, time_pts]\n clim = (ts[:, time_pts].min(), ts[:, time_pts].max())\n roi_names = [k[0:-3] for k in np.array(names)[::2]]\n\n # Left hemisphere outside :\n b_obj_li = BrainObj('white', translucent=False, hemisphere='left')\n b_obj_li.parcellize(lh_file, select=roi_names, data=lh_data, cmap=cmap)\n sc.add_to_subplot(b_obj_li, rotate='left')\n\n # Left hemisphere inside :\n b_obj_lo = BrainObj('white', translucent=False, hemisphere='left')\n b_obj_lo.parcellize(lh_file, select=roi_names, data=lh_data, cmap=cmap)\n sc.add_to_subplot(b_obj_lo, col=1, rotate='right')\n\n # Right hemisphere outside :\n b_obj_ro = BrainObj('white', translucent=False, hemisphere='right')\n b_obj_ro.parcellize(rh_file, select=roi_names, data=rh_data, cmap=cmap)\n sc.add_to_subplot(b_obj_ro, row=1, rotate='right')\n\n # Right hemisphere inside :\n b_obj_ri = BrainObj('white', translucent=False, hemisphere='right')\n b_obj_ri.parcellize(rh_file, select=roi_names, data=rh_data, cmap=cmap)\n sc.add_to_subplot(b_obj_ri, row=1, col=1, rotate='left')\n\n # Add the colorbar :\n cbar = ColorbarObj(b_obj_li, txtsz=15, cbtxtsz=20, txtcolor=txtcolor,\n cblabel='Intensity')\n sc.add_to_subplot(cbar, col=2, row_span=2)\n\nsc.preview()\n" ]
[ [ "numpy.load", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "matplotlib.pyplot.imshow", "numpy.array" ] ]
hanke/nilearn
[ "96a3f0f72b4f25af771116251324cbec4c0d2055" ]
[ "nilearn/_utils/cache_mixin.py" ]
[ "\"\"\"\nMixin for cache with joblib\n\"\"\"\n# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais\n# License: simplified BSD\n\nimport json\nimport warnings\nimport os\nimport shutil\nfrom distutils.version import LooseVersion\n\nimport nibabel\nfrom sklearn.externals.joblib import Memory\n\nMEMORY_CLASSES = (Memory, )\n\ntry:\n from joblib import Memory as JoblibMemory\n MEMORY_CLASSES = (Memory, JoblibMemory)\nexcept ImportError:\n pass\n\nimport nilearn\n\nfrom .compat import _basestring\n\n__CACHE_CHECKED = dict()\n\n\ndef _safe_cache(memory, func, **kwargs):\n \"\"\" A wrapper for mem.cache that flushes the cache if the version\n number of nibabel has changed.\n \"\"\"\n cachedir = memory.cachedir\n\n if cachedir is None or cachedir in __CACHE_CHECKED:\n return memory.cache(func, **kwargs)\n\n version_file = os.path.join(cachedir, 'module_versions.json')\n\n versions = dict()\n if os.path.exists(version_file):\n with open(version_file, 'r') as _version_file:\n versions = json.load(_version_file)\n\n modules = (nibabel, )\n # Keep only the major + minor version numbers\n my_versions = dict((m.__name__, LooseVersion(m.__version__).version[:2])\n for m in modules)\n commons = set(versions.keys()).intersection(set(my_versions.keys()))\n collisions = [m for m in commons if versions[m] != my_versions[m]]\n\n # Flush cache if version collision\n if len(collisions) > 0:\n if nilearn.CHECK_CACHE_VERSION:\n warnings.warn(\"Incompatible cache in %s: \"\n \"different version of nibabel. Deleting \"\n \"the cache. Put nilearn.CHECK_CACHE_VERSION \"\n \"to false to avoid this behavior.\"\n % cachedir)\n try:\n tmp_dir = (os.path.split(cachedir)[:-1]\n + ('old_%i' % os.getpid(), ))\n tmp_dir = os.path.join(*tmp_dir)\n # We use rename + unlink to be more robust to race\n # conditions\n os.rename(cachedir, tmp_dir)\n shutil.rmtree(tmp_dir)\n except OSError:\n # Another process could have removed this dir\n pass\n\n try:\n os.makedirs(cachedir)\n except OSError:\n # File exists?\n pass\n else:\n warnings.warn(\"Incompatible cache in %s: \"\n \"old version of nibabel.\" % cachedir)\n\n # Write json files if configuration is different\n if versions != my_versions:\n with open(version_file, 'w') as _version_file:\n json.dump(my_versions, _version_file)\n\n __CACHE_CHECKED[cachedir] = True\n\n return memory.cache(func, **kwargs)\n\n\ndef cache(func, memory, func_memory_level=None, memory_level=None,\n **kwargs):\n \"\"\" Return a joblib.Memory object.\n\n The memory_level determines the level above which the wrapped\n function output is cached. By specifying a numeric value for\n this level, the user can to control the amount of cache memory\n used. This function will cache the function call or not\n depending on the cache level.\n\n Parameters\n ----------\n func: function\n The function which output is to be cached.\n\n memory: instance of joblib.Memory or string\n Used to cache the function call.\n\n func_memory_level: int, optional\n The memory_level from which caching must be enabled for the wrapped\n function.\n\n memory_level: int, optional\n The memory_level used to determine if function call must\n be cached or not (if user_memory_level is equal of greater than\n func_memory_level the function is cached)\n\n kwargs: keyword arguments\n The keyword arguments passed to memory.cache\n\n Returns\n -------\n mem: joblib.MemorizedFunc\n object that wraps the function func. This object may be\n a no-op, if the requested level is lower than the value given\n to _cache()). 
For consistency, a joblib.Memory object is always\n returned.\n \"\"\"\n verbose = kwargs.get('verbose', 0)\n\n # memory_level and func_memory_level must be both None or both integers.\n memory_levels = [memory_level, func_memory_level]\n both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels)\n both_params_none = all(lvl is None for lvl in memory_levels)\n\n if not (both_params_integers or both_params_none):\n raise ValueError('Reference and user memory levels must be both None '\n 'or both integers.')\n\n if memory is not None and (func_memory_level is None or\n memory_level >= func_memory_level):\n if isinstance(memory, _basestring):\n memory = Memory(cachedir=memory, verbose=verbose)\n if not isinstance(memory, MEMORY_CLASSES):\n raise TypeError(\"'memory' argument must be a string or a \"\n \"joblib.Memory object. \"\n \"%s %s was given.\" % (memory, type(memory)))\n if (memory.cachedir is None and memory_level is not None\n and memory_level > 1):\n warnings.warn(\"Caching has been enabled (memory_level = %d) \"\n \"but no Memory object or path has been provided\"\n \" (parameter memory). Caching deactivated for \"\n \"function %s.\" %\n (memory_level, func.__name__),\n stacklevel=2)\n else:\n memory = Memory(cachedir=None, verbose=verbose)\n return _safe_cache(memory, func, **kwargs)\n\n\nclass CacheMixin(object):\n \"\"\"Mixin to add caching to a class.\n\n This class is a thin layer on top of joblib.Memory, that mainly adds a\n \"caching level\", similar to a \"log level\".\n\n Usage: to cache the results of a method, wrap it in self._cache()\n defined by this class. Caching is performed only if the user-specified\n cache level (self._memory_level) is greater than the value given as a\n parameter to self._cache(). See _cache() documentation for details.\n \"\"\"\n def _cache(self, func, func_memory_level=1, **kwargs):\n \"\"\"Return a joblib.Memory object.\n\n The memory_level determines the level above which the wrapped\n function output is cached. By specifying a numeric value for\n this level, the user can to control the amount of cache memory\n used. This function will cache the function call or not\n depending on the cache level.\n\n Parameters\n ----------\n func: function\n The function the output of which is to be cached.\n\n memory_level: int\n The memory_level from which caching must be enabled for the wrapped\n function.\n\n Returns\n -------\n mem: joblib.Memory\n object that wraps the function func. This object may be\n a no-op, if the requested level is lower than the value given\n to _cache()). For consistency, a joblib.Memory object is always\n returned.\n\n \"\"\"\n\n verbose = getattr(self, 'verbose', 0)\n\n # Creates attributes if they don't exist\n # This is to make creating them in __init__() optional.\n if not hasattr(self, \"memory_level\"):\n self.memory_level = 0\n if not hasattr(self, \"memory\"):\n self.memory = Memory(cachedir=None, verbose=verbose)\n if isinstance(self.memory, _basestring):\n self.memory = Memory(cachedir=self.memory, verbose=verbose)\n\n # If cache level is 0 but a memory object has been provided, set\n # memory_level to 1 with a warning.\n if self.memory_level == 0:\n if (isinstance(self.memory, _basestring)\n or self.memory.cachedir is not None):\n warnings.warn(\"memory_level is currently set to 0 but \"\n \"a Memory object has been provided. 
\"\n \"Setting memory_level to 1.\")\n self.memory_level = 1\n\n return cache(func, self.memory, func_memory_level=func_memory_level,\n memory_level=self.memory_level, **kwargs)\n" ]
[ [ "sklearn.externals.joblib.Memory" ] ]
m-novikov/hytra
[ "0dc28deaa2571fa8bea63ca178f0e53cc1cd7508" ]
[ "hytra/core/divisionfeatures.py" ]
[ "import numpy as np\nimport math\n\n\ndef dotproduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))\n\n\ndef length(v):\n return math.sqrt(dotproduct(v, v))\n\n\ndef angle(v1, v2):\n try:\n if length(v1) * length(v2) == 0:\n radians = 0\n else:\n radians = math.acos(dotproduct(v1, v2) / (length(v1) * length(v2)))\n except Exception as e:\n # print str(e), ': math.acos(', dotproduct(v1, v2) / (length(v1) * length(v2)), '), v1 =', v1, ', v2 =', v2\n radians = 0\n return (float(radians) * 180.0) / math.pi\n\n\n##### Feature base class #######\n\n\nclass Feature(object):\n name = \"Feature\"\n plugin = \"Tracking Features\"\n default_value = 0\n dimensionality = None\n\n def __init__(\n self,\n feats_name,\n default_value=None,\n delim=\"_\",\n scales=[1.0, 1.0, 1.0],\n ndim=2,\n feat_dim=1,\n ):\n self.name += str(delim) + str(feats_name)\n self.feats_name = feats_name\n if default_value != None:\n self.default_value = default_value\n self.scales = scales\n self.ndim = ndim\n self.feat_dim = feat_dim\n\n def compute(self, feats_cur, feats_next, **kwargs):\n raise NotImplementedError(\"Feature not fully implemented yet.\")\n\n def getName(self):\n return self.name\n\n def getPlugin(self):\n return self.plugin\n\n def dim(self):\n return self.dimensionality\n\n\nclass ParentChildrenRatio(Feature):\n name = \"ParentChildrenRatio\"\n dimensionality = 1\n\n def compute(self, feats_cur, feats_next, **kwargs):\n if len(feats_next) < 2:\n return np.array(len(feats_cur) * [self.default_value])\n result = np.array(feats_cur) / np.array(feats_next[0] + feats_next[1])\n for i in range(len(result)):\n if math.isnan(result[i]):\n result[i] = self.default_value\n return result\n\n def dim(self):\n return self.dimensionality * self.feat_dim\n\n\nclass ChildrenRatio(Feature):\n name = \"ChildrenRatio\"\n dimensionality = 1\n\n def compute(self, feats_cur, feats_next, **kwargs):\n if len(feats_next) < 2:\n return np.array(len(feats_cur) * [self.default_value])\n ratio = np.array(feats_next[0]) / np.array(feats_next[1])\n for i in range(len(ratio)):\n if math.isnan(ratio[i]):\n ratio[i] = self.default_value\n if ratio[i] > 1 and ratio[i] != 0:\n ratio[i] = 1.0 / ratio[i]\n return ratio\n\n def dim(self):\n return self.dimensionality * self.feat_dim\n\n\nclass SquaredDistances(Feature):\n name = \"SquaredDistances\"\n\n def compute(self, feats_cur, feats_next, **kwargs):\n return feats_cur\n\n def dim(self):\n return self.ndim\n\n\nclass ParentChildrenAngle(Feature):\n name = \"ParentChildrenAngle\"\n dimensionality = 1\n\n def compute(self, feats_cur, feats_next, **kwargs):\n angles = []\n for idx, com1 in enumerate(feats_next):\n v1 = (com1 - feats_cur) * self.scales[0 : com1.shape[0]]\n for com2 in feats_next[idx + 1 :]:\n v2 = (com2 - feats_cur) * self.scales[0 : com2.shape[0]]\n ang = angle(v1, v2)\n if ang > 180:\n assert ang <= 360.01, \"the angle must be smaller than 360 degrees\"\n ang = 360 - ang\n angles.append(ang)\n\n if len(angles) == 0:\n angles = [self.default_value]\n\n return max(angles)\n\n\nclass ParentIdentity(Feature):\n name = \"\"\n\n def compute(self, feats_cur, feats_next, **kwargs):\n return feats_cur\n\n\nclass FeatureManager(object):\n\n feature_mappings = {\n \"ParentIdentity\": ParentIdentity,\n \"SquaredDistances\": SquaredDistances,\n \"ChildrenRatio\": ChildrenRatio,\n \"ParentChildrenRatio\": ParentChildrenRatio,\n \"ParentChildrenAngle\": ParentChildrenAngle,\n }\n\n def __init__(\n self,\n scales=[1.0, 1.0, 1.0],\n n_best=3,\n com_name_cur=\"RegionCenter\",\n 
com_name_next=\"RegionCenter\",\n size_name=\"Count\",\n delim=\"_\",\n template_size=50,\n ndim=2,\n size_filter=4,\n squared_distance_default=9999,\n ):\n self.scales = scales[0:ndim]\n self.n_best = n_best\n self.com_name_cur = com_name_cur\n self.com_name_next = com_name_next\n self.size_name = size_name\n self.delim = delim\n self.template_size = template_size\n self.ndim = ndim\n self.size_filter = size_filter\n self.squared_distance_default = squared_distance_default\n\n def _getBestSquaredDistances(\n self, com_cur, coms_next, size_filter=None, sizes_next=[], default_value=9999\n ):\n \"\"\" returns the squared distances to the objects in the neighborhood of com_curr, optionally with size filter \"\"\"\n squaredDistances = []\n\n for label_next in coms_next.keys():\n assert label_next in sizes_next.keys()\n if size_filter != None and sizes_next[label_next] >= size_filter:\n dist = np.linalg.norm(coms_next[label_next] - com_cur * self.scales)\n squaredDistances.append([label_next, dist])\n\n squaredDistances = np.array(squaredDistances)\n # sort the array in the second column in ascending order\n squaredDistances = np.array(\n sorted(squaredDistances, key=lambda a_entry: a_entry[1])\n )\n\n # initialize with label -1 and default value\n result = np.array(\n [[-1, default_value] for x in range(self.n_best)], dtype=np.float32\n )\n if squaredDistances.shape[0] != 0:\n result[\n 0 : min(squaredDistances.shape[0], result.shape[0]), :\n ] = squaredDistances[0 : min(squaredDistances.shape[0], result.shape[0]), :]\n\n return result\n\n def computeFeatures_at(\n self, feats_cur, feats_next, img_next, feat_names, label_image_filename=None\n ):\n \"\"\"\n **Parameters:**\n \n * if `label_image_filename` is given, it is used to filter the objects from the feature dictionaries \n that belong to that label image only (in the JST setting) \n \"\"\"\n\n # n_labels = list(feats_cur.values())[0].shape[0]\n result = {}\n\n # find available features\n vigra_feat_names = set([self.com_name_cur, self.com_name_next, self.size_name])\n feat_classes = {}\n\n for name in feat_names:\n name_split = name.split(self.delim)\n if \"SquaredDistances\" in name_split:\n continue\n\n if len(name_split) != 2:\n raise ValueError(\n \"tracking features consist of an operator and a feature name only, given name={}\".format(\n name_split\n )\n )\n if len(feats_cur[name_split[1]].shape) > 1:\n feat_dim = feats_cur[name_split[1]].shape[1]\n else:\n feat_dim = 1\n feat_classes[name] = self.feature_mappings[name_split[0]](\n name_split[1], delim=self.delim, ndim=self.ndim, feat_dim=feat_dim\n )\n\n shape = (list(feats_cur.values())[0].shape[0], feat_classes[name].dim())\n result[name] = np.ones(shape) * feat_classes[name].default_value\n\n vigra_feat_names.add(name_split[1])\n\n # initialize squared distances\n for idx in range(self.n_best):\n name = \"SquaredDistances_\" + str(idx)\n result[name] = (\n np.ones((list(feats_cur.values())[0].shape[0], 1))\n * self.squared_distance_default\n )\n\n # construct mapping which we only need if label_image_filename was given and the features 'filename' and 'id' exist\n if (\n label_image_filename is not None\n and \"filename\" in feats_next\n and \"id\" in feats_next\n ):\n global_indices_current_label_image_only = [\n l\n for l, f in enumerate(feats_next[\"filename\"])\n if f == label_image_filename\n ]\n local_to_global_index_map = dict(\n [\n (feats_next[\"id\"][l], l)\n for l in global_indices_current_label_image_only\n ]\n )\n\n # for every object in this frame, check which 
objects are in the vicinity in the next frame\n valid_indices = [0]\n for label_cur, com_cur in enumerate(feats_cur[self.com_name_cur]):\n if (\n label_image_filename is not None\n and \"filename\" in feats_cur\n and feats_cur[\"filename\"][label_cur] != label_image_filename\n ):\n # in the JST context, only look at objects from a given segmentation hypotheses set\n continue\n if label_cur == 0:\n continue\n\n valid_indices.append(label_cur)\n feats_next_subset = {}\n for k in vigra_feat_names:\n feats_next_subset[k] = {}\n\n if feats_next is not None and img_next is not None:\n # find roi around the center of the current object\n idx_cur = [round(x) for x in com_cur]\n\n roi = []\n for idx, coord in enumerate(idx_cur):\n start = max(coord - self.template_size / 2, 0)\n stop = min(coord + self.template_size / 2, img_next.shape[idx])\n roi.append(slice(int(start), int(stop)))\n\n # find all coms in the neighborhood of com_cur by checking the next frame's labelimage in the roi\n subimg_next = img_next[roi]\n labels_next = np.unique(subimg_next).tolist()\n\n # if 'id' in features, map the labels first -- because labels_next refers image object ids,\n # whereas the features are the union of objects from several segmentations\n if \"id\" in feats_next:\n labels_next = [\n local_to_global_index_map[l] for l in labels_next if l != 0\n ]\n\n for l in labels_next:\n if l != 0:\n for n in vigra_feat_names:\n feats_next_subset[n][l] = np.array(\n [feats_next[n][l]]\n ).flatten()\n\n sq_dist_label = self._getBestSquaredDistances(\n com_cur,\n feats_next_subset[self.com_name_next],\n self.size_filter,\n feats_next_subset[self.size_name],\n default_value=self.squared_distance_default,\n )\n\n feats_next_subset_best = {}\n for n in vigra_feat_names:\n feats_next_subset_best[n] = []\n for idx, row in enumerate(sq_dist_label):\n l = row[0]\n if l != -1:\n feats_next_subset_best[n].append(feats_next_subset[n][l])\n\n # first add squared distances\n for idx in range(self.n_best):\n name = \"SquaredDistances_\" + str(idx)\n result[name][label_cur] = sq_dist_label[idx][1]\n\n # add all other features\n for name, feat_class in feat_classes.items():\n if feat_class.feats_name == \"SquaredDistances\":\n f_next = sq_dist_label[0:2, 1]\n f_cur = None\n else:\n f_cur = np.array(\n [feats_cur[feat_class.feats_name][label_cur]]\n ).flatten()\n f_next = np.array(\n [feats_next_subset_best[feat_class.feats_name]]\n ).reshape((-1, f_cur.shape[0]))\n result[name][label_cur] = feat_class.compute(f_cur, f_next)\n\n # return only valid labels\n for feature_name in result:\n result[feature_name] = result[feature_name][valid_indices]\n\n return result\n\n\nif __name__ == \"__main__\":\n import vigra\n import numpy as np\n\n img_cur = vigra.readImage(\"/home/mschiegg/tmp/segmentImage.tif\")\n img_next = img_cur\n\n labels_cur = vigra.analysis.labelImage(img_cur)\n feats_cur = vigra.analysis.extractRegionFeatures(\n labels_cur.astype(np.float32),\n labels_cur.astype(np.uint32),\n features=\"all\",\n ignoreLabel=0,\n )\n\n feat_names = [\n \"ParentChildrenRatio_Count\",\n \"ParentChildrenRatio_Mean\",\n \"ChildrenRatio_Count\",\n \"ChildrenRatio_Mean\",\n \"ParentChildrenAngle_RegionCenter\",\n \"ChildrenRatio_SquaredDistances\",\n ]\n fm = FeatureManager()\n res = fm.computeFeatures_at(feats_cur, feats_cur, img_cur, feat_names)\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.linalg.norm", "numpy.unique" ] ]
mrsempress/transfer_learning
[ "217622ca3052b6c79a07792e627394c08440ec84" ]
[ "codes/JDA/JDA.py" ]
[ "# encoding=utf-8\n\"\"\"\n Created on 9:38 2019/07/16\n @author: Chenxi Huang\n It implements \"Transfer Feature Learning with Joint Distribution Adaptation\"\n Refer to Long Mingsheng's(the writer) code in Matlab\n\"\"\"\nimport numpy as np\nimport os\nimport scipy.io\nimport scipy.linalg\nimport sklearn.metrics\nfrom sklearn import preprocessing\nfrom sklearn.neighbors import KNeighborsClassifier\nimport Network\nimport Log\n\n\ndef work(source, target, gpu, _k=100, _lambd=1.0, _ker='primal', _gamma=1.0):\n # set log information\n log = Log.Log()\n log.set_dir('JDA', source, target)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu\n # domains = ['caltech.mat', 'amazon.mat', 'webcam.mat', 'dslr.mat'] # databases: Office-31\n srcStr = ['Caltech10', 'Caltech10', 'Caltech10', 'amazon', 'amazon', 'amazon', 'webcam', 'webcam', 'webcam', 'dslr',\n 'dslr', 'dslr']\n tgtStr = ['amazon', 'webcam', 'dslr', 'Caltech10', 'webcam', 'dslr', 'Caltech10', 'amazon', 'dslr', 'Caltech10',\n 'amazon', 'webcam']\n # result = []\n # for i in range(12):\n # src, tar = '../data/JDA/' + srcStr[i] + '_SURF_L10.mat', '../data/JDA/' + tgtStr[i] + '_SURF_L10.mat'\n src, tar = 'data/JDA/' + source + '_SURF_L10.mat', 'data/JDA/' + target + '_SURF_L10.mat'\n print(\"src is \" + src + \", tar is \" + tar)\n # load algorithm options\n src_domain, tar_domain = scipy.io.loadmat(src), scipy.io.loadmat(tar)\n # print(src_domain['fts'])\n # print(np.size(src_domain['fts'], 0)) # 1123\n # print(np.size(src_domain['fts'], 1)) # 800\n # print(src_domain['fts'].sum(0))\n # print(np.size(src_domain['fts'].sum(0), 0)) # 800\n # print(len(src_domain['fts'])) # 1123\n Xs = src_domain['fts'] / np.tile(src_domain['fts'].sum(0), 1)\n scale1 = preprocessing.minmax_scale(Xs, feature_range=(0, 1), axis=0, copy=True)\n # print(src_domain['labels'])\n Ys = src_domain['labels']\n\n Xt = tar_domain['fts'] / np.tile(tar_domain['fts'].sum(0), 1)\n scale2 = preprocessing.minmax_scale(Xs, feature_range=(0, 1), axis=0, copy=True)\n Yt = tar_domain['labels']\n\n # 1NN evaluation\n clf = KNeighborsClassifier(n_neighbors=1)\n clf.fit(Xs, Ys.ravel())\n Y_pred = clf.predict(Xt)\n acc = sklearn.metrics.accuracy_score(Yt, Y_pred)\n print('NN = ', acc)\n\n # JDA evaluation\n # because in office-31 all are objects, so lambda = 1\n k, lambd, ker, gamma = _k, _lambd, _ker, _gamma # 'primal' | 'linear' | 'rbf'\n T = 10\n Cls = []\n Acc = []\n for t in range(T):\n print('==============================Iteration [' + str(t) + ']==============================')\n jda = Network.JDA_LMS(kernel_type=ker, dim=30, lamb=lambd, gamma=gamma)\n Z, A = jda.fit_predict(Xs, Ys, Xt, Yt)\n Z /= np.linalg.norm(Z, axis=0)\n Xs_new, Xt_new = Z[:, :len(Xs)].T, Z[:, len(Xs):].T\n\n clf = KNeighborsClassifier(n_neighbors=1)\n clf.fit(Xs_new, Ys.ravel())\n Y_pred = clf.predict(Xt_new)\n acc = sklearn.metrics.accuracy_score(Yt, Y_pred)\n Acc.append(acc)\n print('JDA iteration [{}/{}]: Acc: {:.4f}'.format(t + 1, T, acc))\n # add log\n log.add_log(t, '*', '*', acc)\n # result.append(Acc[-1])\n\n # save log\n log.save_log()\n\n\nif __name__ == '__main__':\n work('amazon', 'webcam', '3')\n" ]
[ [ "sklearn.neighbors.KNeighborsClassifier", "sklearn.preprocessing.minmax_scale", "numpy.linalg.norm" ] ]
zhengjian2322/soln-ml
[ "ceff6c432921df9809f9552aaa1b0bd5a1b7e8d2" ]
[ "solnml/components/models/object_detection/nn_utils/retinanet.py" ]
[ "\"\"\"\nRetinaNet code borrowed from\nhttps://github.com/yhenon/pytorch-retinanet/blob/master/retinanet/model.py\n\"\"\"\n\nimport torch.nn as nn\nimport torch\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom torchvision.ops import nms\nfrom .retinanet_utils import BasicBlock, Bottleneck, BBoxTransform, ClipBoxes, Anchors, FocalLoss\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\nclass PyramidFeatures(nn.Module):\n def __init__(self, C3_size, C4_size, C5_size, feature_size=256):\n super(PyramidFeatures, self).__init__()\n\n # upsample C5 to get P5 from the FPN paper\n self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')\n self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n # add P5 elementwise to C4\n self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')\n self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n # add P4 elementwise to C3\n self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n # \"P6 is obtained via a 3x3 stride-2 conv on C5\"\n self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)\n\n # \"P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6\"\n self.P7_1 = nn.ReLU()\n self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)\n\n def forward(self, inputs):\n C3, C4, C5 = inputs\n\n P5_x = self.P5_1(C5)\n P5_upsampled_x = self.P5_upsampled(P5_x)\n P5_x = self.P5_2(P5_x)\n\n P4_x = self.P4_1(C4)\n P4_x = P5_upsampled_x + P4_x\n P4_upsampled_x = self.P4_upsampled(P4_x)\n P4_x = self.P4_2(P4_x)\n\n P3_x = self.P3_1(C3)\n P3_x = P3_x + P4_upsampled_x\n P3_x = self.P3_2(P3_x)\n\n P6_x = self.P6(C5)\n\n P7_x = self.P7_1(P6_x)\n P7_x = self.P7_2(P7_x)\n\n return [P3_x, P4_x, P5_x, P6_x, P7_x]\n\n\nclass RegressionModel(nn.Module):\n def __init__(self, num_features_in, num_anchors=9, feature_size=256):\n super(RegressionModel, self).__init__()\n\n self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)\n self.act1 = nn.ReLU()\n\n self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act2 = nn.ReLU()\n\n self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act3 = nn.ReLU()\n\n self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act4 = nn.ReLU()\n\n self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=3, padding=1)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.act1(out)\n\n out = self.conv2(out)\n out = self.act2(out)\n\n out = self.conv3(out)\n out = self.act3(out)\n\n out = self.conv4(out)\n out = self.act4(out)\n\n out = self.output(out)\n\n # out is B x C x W x H, with C = 4*num_anchors\n out = out.permute(0, 2, 3, 1)\n\n return out.contiguous().view(out.shape[0], -1, 4)\n\n\nclass ClassificationModel(nn.Module):\n def 
__init__(self, num_features_in, num_anchors=9, num_classes=80, prior=0.01, feature_size=256):\n super(ClassificationModel, self).__init__()\n\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n\n self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)\n self.act1 = nn.ReLU()\n\n self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act2 = nn.ReLU()\n\n self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act3 = nn.ReLU()\n\n self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act4 = nn.ReLU()\n\n self.output = nn.Conv2d(feature_size, num_anchors * num_classes, kernel_size=3, padding=1)\n self.output_act = nn.Sigmoid()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.act1(out)\n\n out = self.conv2(out)\n out = self.act2(out)\n\n out = self.conv3(out)\n out = self.act3(out)\n\n out = self.conv4(out)\n out = self.act4(out)\n\n out = self.output(out)\n out = self.output_act(out)\n\n # out is B x C x W x H, with C = n_classes + n_anchors\n out1 = out.permute(0, 2, 3, 1)\n\n batch_size, width, height, channels = out1.shape\n\n out2 = out1.view(batch_size, width, height, self.num_anchors, self.num_classes)\n\n return out2.contiguous().view(x.shape[0], -1, self.num_classes)\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, num_classes, block, layers):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n if block == BasicBlock:\n fpn_sizes = [self.layer2[layers[1] - 1].conv2.out_channels, self.layer3[layers[2] - 1].conv2.out_channels,\n self.layer4[layers[3] - 1].conv2.out_channels]\n elif block == Bottleneck:\n fpn_sizes = [self.layer2[layers[1] - 1].conv3.out_channels, self.layer3[layers[2] - 1].conv3.out_channels,\n self.layer4[layers[3] - 1].conv3.out_channels]\n else:\n raise ValueError(f\"Block type {block} not understood\")\n\n self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2])\n\n self.regressionModel = RegressionModel(256)\n self.classificationModel = ClassificationModel(256, num_classes=num_classes)\n\n self.anchors = Anchors()\n\n self.regressBoxes = BBoxTransform()\n\n self.clipBoxes = ClipBoxes()\n\n self.focalLoss = FocalLoss()\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n prior = 0.01\n\n self.classificationModel.output.weight.data.fill_(0)\n self.classificationModel.output.bias.data.fill_(-math.log((1.0 - prior) / prior))\n\n self.regressionModel.output.weight.data.fill_(0)\n self.regressionModel.output.bias.data.fill_(0)\n\n self.freeze_bn()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = [block(self.inplanes, planes, stride, downsample)]\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def freeze_bn(self):\n '''Freeze BatchNorm layers.'''\n for layer in self.modules():\n if isinstance(layer, nn.BatchNorm2d):\n layer.eval()\n\n def forward(self, inputs):\n\n if self.training:\n img_batch, annotations = inputs\n else:\n img_batch = inputs\n\n x = self.conv1(img_batch)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x1 = self.layer1(x)\n x2 = self.layer2(x1)\n x3 = self.layer3(x2)\n x4 = self.layer4(x3)\n\n features = self.fpn([x2, x3, x4])\n\n regression = torch.cat([self.regressionModel(feature) for feature in features], dim=1)\n\n classification = torch.cat([self.classificationModel(feature) for feature in features], dim=1)\n\n anchors = self.anchors(img_batch)\n\n if self.training:\n return self.focalLoss(classification, regression, anchors, annotations)\n else:\n transformed_anchors = self.regressBoxes(anchors, regression)\n transformed_anchors = self.clipBoxes(transformed_anchors, img_batch)\n\n scores = torch.max(classification, dim=2, keepdim=True)[0]\n\n scores_over_thresh = (scores > 0.05)[0, :, 0]\n\n if scores_over_thresh.sum() == 0:\n # no boxes to NMS, just return\n return [torch.zeros(0), torch.zeros(0), torch.zeros(0, 4)]\n\n classification = classification[:, scores_over_thresh, :]\n transformed_anchors = transformed_anchors[:, scores_over_thresh, :]\n scores = scores[:, scores_over_thresh, :]\n\n anchors_nms_idx = nms(transformed_anchors[0, :, :], scores[0, :, 0], 0.5)\n\n nms_scores, nms_class = classification[0, anchors_nms_idx, :].max(dim=1)\n\n return [nms_scores, nms_class, transformed_anchors[0, anchors_nms_idx, :]]\n\n\ndef resnet18(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model\n\n\ndef resnet34(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model\n\n\ndef resnet50(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n 
model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)\n return model\n\n\ndef resnet101(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False)\n return model\n\n\ndef resnet152(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False)\n return model\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.max", "torch.zeros", "torch.utils.model_zoo.load_url", "torch.nn.Sigmoid", "torch.nn.ReLU" ] ]
cassinius/right-to-forget-data
[ "5aa3a480d93e66065118866f294f06e6cfd5d3a1" ]
[ "src/multi_class/gradient_boosting.py" ]
[ "from sklearn import ensemble\nfrom src.multi_class import input_preproc\nfrom src.multi_class import calculate_metrics\n\n\ndef runClassifier(X_train, X_test, y_train, y_test):\n # GRADIENT BOOSTING\n cls = ensemble.GradientBoostingClassifier(\n n_estimators=100,\n learning_rate=0.1,\n max_depth=5,\n verbose=0\n )\n\n predictions = cls.fit(X_train, y_train).predict(X_test)\n\n # Metrics...\n precision, recall, f1, accuracy = calculate_metrics.calculateMetrics(predictions, y_test)\n print( \"intermediary results (precision | recall | F1 Score | Accuracy):\" )\n print( \"%.6f %.6f %.6f %.6f\" % (precision, recall, f1, accuracy) )\n return precision, recall, f1, accuracy\n\n\nif __name__ == \"__main__\":\n X_train, X_test, y_train, y_test = input_preproc.readIris()\n precision, recall, f1, accuracy = runClassifier(X_train, X_test, y_train, y_test)\n print( \"\\n================================\" )\n print( \"Precision | Recall | F1 Score | Accuracy: \" )\n print( \"%.6f %.6f %.6f %.6f\" % (precision, recall, f1, accuracy) )\n" ]
[ [ "sklearn.ensemble.GradientBoostingClassifier" ] ]
zhezherun/pandas
[ "1f02bf240c3d0d3da338af868d056bfc169b28c2", "36c1104b7ad9761e020f7e8198eb60da4045d169" ]
[ "pandas/tests/indexes/datetimes/test_indexing.py", "pandas/io/parsers.py" ]
[ "from datetime import datetime, time, timedelta\n\nimport numpy as np\nimport pytest\nimport pytz\n\nimport pandas.compat as compat\n\nimport pandas as pd\nfrom pandas import DatetimeIndex, Index, Timestamp, date_range, notna\nimport pandas.util.testing as tm\n\nfrom pandas.tseries.offsets import BDay, CDay\n\nSTART, END = datetime(2009, 1, 1), datetime(2010, 1, 1)\n\n\nclass TestGetItem(object):\n def test_getitem(self):\n idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')\n idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',\n tz='Asia/Tokyo', name='idx')\n\n for idx in [idx1, idx2]:\n result = idx[0]\n assert result == Timestamp('2011-01-01', tz=idx.tz)\n\n result = idx[0:5]\n expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[0:10:2]\n expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[-20:-5:3]\n expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[4::-1]\n expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',\n '2011-01-02', '2011-01-01'],\n freq='-1D', tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n def test_dti_business_getitem(self):\n rng = pd.bdate_range(START, END)\n smaller = rng[:5]\n exp = DatetimeIndex(rng.view(np.ndarray)[:5])\n tm.assert_index_equal(smaller, exp)\n\n assert smaller.freq == rng.freq\n\n sliced = rng[::5]\n assert sliced.freq == BDay() * 5\n\n fancy_indexed = rng[[4, 3, 2, 1, 0]]\n assert len(fancy_indexed) == 5\n assert isinstance(fancy_indexed, DatetimeIndex)\n assert fancy_indexed.freq is None\n\n # 32-bit vs. 64-bit platforms\n assert rng[4] == rng[np.int_(4)]\n\n def test_dti_business_getitem_matplotlib_hackaround(self):\n rng = pd.bdate_range(START, END)\n values = rng[:, None]\n expected = rng.values[:, None]\n tm.assert_numpy_array_equal(values, expected)\n\n def test_dti_custom_getitem(self):\n rng = pd.bdate_range(START, END, freq='C')\n smaller = rng[:5]\n exp = DatetimeIndex(rng.view(np.ndarray)[:5])\n tm.assert_index_equal(smaller, exp)\n assert smaller.freq == rng.freq\n\n sliced = rng[::5]\n assert sliced.freq == CDay() * 5\n\n fancy_indexed = rng[[4, 3, 2, 1, 0]]\n assert len(fancy_indexed) == 5\n assert isinstance(fancy_indexed, DatetimeIndex)\n assert fancy_indexed.freq is None\n\n # 32-bit vs. 
64-bit platforms\n assert rng[4] == rng[np.int_(4)]\n\n def test_dti_custom_getitem_matplotlib_hackaround(self):\n rng = pd.bdate_range(START, END, freq='C')\n values = rng[:, None]\n expected = rng.values[:, None]\n tm.assert_numpy_array_equal(values, expected)\n\n\nclass TestWhere(object):\n def test_where_other(self):\n # other is ndarray or Index\n i = pd.date_range('20130101', periods=3, tz='US/Eastern')\n\n for arr in [np.nan, pd.NaT]:\n result = i.where(notna(i), other=np.nan)\n expected = i\n tm.assert_index_equal(result, expected)\n\n i2 = i.copy()\n i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())\n result = i.where(notna(i2), i2)\n tm.assert_index_equal(result, i2)\n\n i2 = i.copy()\n i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())\n result = i.where(notna(i2), i2.values)\n tm.assert_index_equal(result, i2)\n\n def test_where_tz(self):\n i = pd.date_range('20130101', periods=3, tz='US/Eastern')\n result = i.where(notna(i))\n expected = i\n tm.assert_index_equal(result, expected)\n\n i2 = i.copy()\n i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())\n result = i.where(notna(i2))\n expected = i2\n tm.assert_index_equal(result, expected)\n\n\nclass TestTake(object):\n def test_take(self):\n # GH#10295\n idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')\n idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',\n tz='Asia/Tokyo', name='idx')\n\n for idx in [idx1, idx2]:\n result = idx.take([0])\n assert result == Timestamp('2011-01-01', tz=idx.tz)\n\n result = idx.take([0, 1, 2])\n expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([0, 2, 4])\n expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([7, 4, 1])\n expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([3, 2, 5])\n expected = DatetimeIndex(['2011-01-04', '2011-01-03',\n '2011-01-06'],\n freq=None, tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq is None\n\n result = idx.take([-3, 2, 5])\n expected = DatetimeIndex(['2011-01-29', '2011-01-03',\n '2011-01-06'],\n freq=None, tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq is None\n\n def test_take_invalid_kwargs(self):\n idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')\n indices = [1, 6, 5, 9, 10, 13, 15, 3]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n with pytest.raises(TypeError, match=msg):\n idx.take(indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, mode='clip')\n\n # TODO: This method came from test_datetime; de-dup with version above\n @pytest.mark.parametrize('tz', [None, 'US/Eastern', 'Asia/Tokyo'])\n def test_take2(self, tz):\n dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15),\n datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)]\n\n idx = DatetimeIndex(start='2010-01-01 09:00',\n end='2010-02-01 09:00', freq='H', tz=tz,\n name='idx')\n expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz)\n\n taken1 = 
idx.take([5, 6, 8, 12])\n taken2 = idx[[5, 6, 8, 12]]\n\n for taken in [taken1, taken2]:\n tm.assert_index_equal(taken, expected)\n assert isinstance(taken, DatetimeIndex)\n assert taken.freq is None\n assert taken.tz == expected.tz\n assert taken.name == expected.name\n\n def test_take_fill_value(self):\n # GH#12631\n idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],\n name='xxx')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n def test_take_fill_value_with_timezone(self):\n idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],\n name='xxx', tz='US/Eastern')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx', tz='US/Eastern')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],\n name='xxx', tz='US/Eastern')\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx', tz='US/Eastern')\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n\nclass TestDatetimeIndex(object):\n @pytest.mark.parametrize('null', [None, np.nan, pd.NaT])\n @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern'])\n def test_insert_nat(self, tz, null):\n # GH#16537, GH#18295 (test missing)\n idx = pd.DatetimeIndex(['2017-01-01'], tz=tz)\n expected = pd.DatetimeIndex(['NaT', '2017-01-01'], tz=tz)\n res = idx.insert(0, null)\n tm.assert_index_equal(res, expected)\n\n def test_insert(self):\n idx = DatetimeIndex(\n ['2000-01-04', '2000-01-01', '2000-01-02'], name='idx')\n\n result = idx.insert(2, datetime(2000, 1, 5))\n exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',\n '2000-01-02'], name='idx')\n tm.assert_index_equal(result, exp)\n\n # insertion of non-datetime should coerce to object index\n result = idx.insert(1, 'inserted')\n expected = Index([datetime(2000, 1, 4), 'inserted',\n datetime(2000, 1, 1),\n datetime(2000, 1, 2)], name='idx')\n assert not isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n\n 
idx = date_range('1/1/2000', periods=3, freq='M', name='idx')\n\n # preserve freq\n expected_0 = DatetimeIndex(['1999-12-31', '2000-01-31', '2000-02-29',\n '2000-03-31'], name='idx', freq='M')\n expected_3 = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31',\n '2000-04-30'], name='idx', freq='M')\n\n # reset freq to None\n expected_1_nofreq = DatetimeIndex(['2000-01-31', '2000-01-31',\n '2000-02-29',\n '2000-03-31'], name='idx',\n freq=None)\n expected_3_nofreq = DatetimeIndex(['2000-01-31', '2000-02-29',\n '2000-03-31',\n '2000-01-02'], name='idx',\n freq=None)\n\n cases = [(0, datetime(1999, 12, 31), expected_0),\n (-3, datetime(1999, 12, 31), expected_0),\n (3, datetime(2000, 4, 30), expected_3),\n (1, datetime(2000, 1, 31), expected_1_nofreq),\n (3, datetime(2000, 1, 2), expected_3_nofreq)]\n\n for n, d, expected in cases:\n result = idx.insert(n, d)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n # reset freq to None\n result = idx.insert(3, datetime(2000, 1, 2))\n expected = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31',\n '2000-01-02'], name='idx', freq=None)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq is None\n\n # see gh-7299\n idx = date_range('1/1/2000', periods=3, freq='D', tz='Asia/Tokyo',\n name='idx')\n with pytest.raises(ValueError):\n idx.insert(3, pd.Timestamp('2000-01-04'))\n with pytest.raises(ValueError):\n idx.insert(3, datetime(2000, 1, 4))\n with pytest.raises(ValueError):\n idx.insert(3, pd.Timestamp('2000-01-04', tz='US/Eastern'))\n with pytest.raises(ValueError):\n idx.insert(3, datetime(2000, 1, 4,\n tzinfo=pytz.timezone('US/Eastern')))\n\n for tz in ['US/Pacific', 'Asia/Singapore']:\n idx = date_range('1/1/2000 09:00', periods=6, freq='H', tz=tz,\n name='idx')\n # preserve freq\n expected = date_range('1/1/2000 09:00', periods=7, freq='H', tz=tz,\n name='idx')\n for d in [pd.Timestamp('2000-01-01 15:00', tz=tz),\n pytz.timezone(tz).localize(datetime(2000, 1, 1, 15))]:\n\n result = idx.insert(6, d)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n assert result.tz == expected.tz\n\n expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 10:00',\n '2000-01-01 11:00',\n '2000-01-01 12:00', '2000-01-01 13:00',\n '2000-01-01 14:00',\n '2000-01-01 10:00'], name='idx',\n tz=tz, freq=None)\n # reset freq to None\n for d in [pd.Timestamp('2000-01-01 10:00', tz=tz),\n pytz.timezone(tz).localize(datetime(2000, 1, 1, 10))]:\n result = idx.insert(6, d)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.tz == expected.tz\n assert result.freq is None\n\n def test_delete(self):\n idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx')\n\n # prserve freq\n expected_0 = date_range(start='2000-02-01', periods=4, freq='M',\n name='idx')\n expected_4 = date_range(start='2000-01-01', periods=4, freq='M',\n name='idx')\n\n # reset freq to None\n expected_1 = DatetimeIndex(['2000-01-31', '2000-03-31', '2000-04-30',\n '2000-05-31'], freq=None, name='idx')\n\n cases = {0: expected_0,\n -5: expected_0,\n -1: expected_4,\n 4: expected_4,\n 1: expected_1}\n for n, expected in compat.iteritems(cases):\n result = idx.delete(n)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n with pytest.raises((IndexError, ValueError)):\n # either depending on 
numpy version\n idx.delete(5)\n\n for tz in [None, 'Asia/Tokyo', 'US/Pacific']:\n idx = date_range(start='2000-01-01 09:00', periods=10, freq='H',\n name='idx', tz=tz)\n\n expected = date_range(start='2000-01-01 10:00', periods=9,\n freq='H', name='idx', tz=tz)\n result = idx.delete(0)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freqstr == 'H'\n assert result.tz == expected.tz\n\n expected = date_range(start='2000-01-01 09:00', periods=9,\n freq='H', name='idx', tz=tz)\n result = idx.delete(-1)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freqstr == 'H'\n assert result.tz == expected.tz\n\n def test_delete_slice(self):\n idx = date_range(start='2000-01-01', periods=10, freq='D', name='idx')\n\n # prserve freq\n expected_0_2 = date_range(start='2000-01-04', periods=7, freq='D',\n name='idx')\n expected_7_9 = date_range(start='2000-01-01', periods=7, freq='D',\n name='idx')\n\n # reset freq to None\n expected_3_5 = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03',\n '2000-01-07', '2000-01-08', '2000-01-09',\n '2000-01-10'], freq=None, name='idx')\n\n cases = {(0, 1, 2): expected_0_2,\n (7, 8, 9): expected_7_9,\n (3, 4, 5): expected_3_5}\n for n, expected in compat.iteritems(cases):\n result = idx.delete(n)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n result = idx.delete(slice(n[0], n[-1] + 1))\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n for tz in [None, 'Asia/Tokyo', 'US/Pacific']:\n ts = pd.Series(1, index=pd.date_range(\n '2000-01-01 09:00', periods=10, freq='H', name='idx', tz=tz))\n # preserve freq\n result = ts.drop(ts.index[:5]).index\n expected = pd.date_range('2000-01-01 14:00', periods=5, freq='H',\n name='idx', tz=tz)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n assert result.tz == expected.tz\n\n # reset freq to None\n result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index\n expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 11:00',\n '2000-01-01 13:00',\n '2000-01-01 15:00', '2000-01-01 17:00'],\n freq=None, name='idx', tz=tz)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n assert result.tz == expected.tz\n\n def test_get_loc(self):\n idx = pd.date_range('2000-01-01', periods=3)\n\n for method in [None, 'pad', 'backfill', 'nearest']:\n assert idx.get_loc(idx[1], method) == 1\n assert idx.get_loc(idx[1].to_pydatetime(), method) == 1\n assert idx.get_loc(str(idx[1]), method) == 1\n\n if method is not None:\n assert idx.get_loc(idx[1], method,\n tolerance=pd.Timedelta('0 days')) == 1\n\n assert idx.get_loc('2000-01-01', method='nearest') == 0\n assert idx.get_loc('2000-01-01T12', method='nearest') == 1\n\n assert idx.get_loc('2000-01-01T12', method='nearest',\n tolerance='1 day') == 1\n assert idx.get_loc('2000-01-01T12', method='nearest',\n tolerance=pd.Timedelta('1D')) == 1\n assert idx.get_loc('2000-01-01T12', method='nearest',\n tolerance=np.timedelta64(1, 'D')) == 1\n assert idx.get_loc('2000-01-01T12', method='nearest',\n tolerance=timedelta(1)) == 1\n with pytest.raises(ValueError, match='unit abbreviation w/o a number'):\n idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')\n with pytest.raises(KeyError):\n idx.get_loc('2000-01-01T03', method='nearest', 
tolerance='2 hours')\n with pytest.raises(\n ValueError,\n match='tolerance size must match target index size'):\n idx.get_loc('2000-01-01', method='nearest',\n tolerance=[pd.Timedelta('1day').to_timedelta64(),\n pd.Timedelta('1day').to_timedelta64()])\n\n assert idx.get_loc('2000', method='nearest') == slice(0, 3)\n assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)\n\n assert idx.get_loc('1999', method='nearest') == 0\n assert idx.get_loc('2001', method='nearest') == 2\n\n with pytest.raises(KeyError):\n idx.get_loc('1999', method='pad')\n with pytest.raises(KeyError):\n idx.get_loc('2001', method='backfill')\n\n with pytest.raises(KeyError):\n idx.get_loc('foobar')\n with pytest.raises(TypeError):\n idx.get_loc(slice(2))\n\n idx = pd.to_datetime(['2000-01-01', '2000-01-04'])\n assert idx.get_loc('2000-01-02', method='nearest') == 0\n assert idx.get_loc('2000-01-03', method='nearest') == 1\n assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)\n\n # time indexing\n idx = pd.date_range('2000-01-01', periods=24, freq='H')\n tm.assert_numpy_array_equal(idx.get_loc(time(12)),\n np.array([12]), check_dtype=False)\n tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),\n np.array([]), check_dtype=False)\n with pytest.raises(NotImplementedError):\n idx.get_loc(time(12, 30), method='pad')\n\n def test_get_indexer(self):\n idx = pd.date_range('2000-01-01', periods=3)\n exp = np.array([0, 1, 2], dtype=np.intp)\n tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)\n\n target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',\n '1 day 1 hour'])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),\n np.array([-1, 0, 1], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),\n np.array([0, 1, 2], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),\n np.array([0, 1, 1], dtype=np.intp))\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, 'nearest',\n tolerance=pd.Timedelta('1 hour')),\n np.array([0, -1, 1], dtype=np.intp))\n tol_raw = [pd.Timedelta('1 hour'),\n pd.Timedelta('1 hour'),\n pd.Timedelta('1 hour').to_timedelta64(), ]\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, 'nearest',\n tolerance=[np.timedelta64(x) for x in tol_raw]),\n np.array([0, -1, 1], dtype=np.intp))\n tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),\n pd.Timedelta('1 hour').to_timedelta64(),\n 'foo', ]\n with pytest.raises(\n ValueError, match='abbreviation w/o a number'):\n idx.get_indexer(target, 'nearest', tolerance=tol_bad)\n with pytest.raises(ValueError):\n idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')\n\n def test_reasonable_key_error(self):\n # GH#1062\n index = DatetimeIndex(['1/3/2000'])\n with pytest.raises(KeyError, match='2000'):\n index.get_loc('1/1/2000')\n\n @pytest.mark.parametrize('key', [pd.Timedelta(0),\n pd.Timedelta(1),\n timedelta(0)])\n def test_timedelta_invalid_key(self, key):\n # GH#20464\n dti = pd.date_range('1970-01-01', periods=10)\n with pytest.raises(TypeError):\n dti.get_loc(key)\n\n def test_get_loc_nat(self):\n # GH#20464\n index = DatetimeIndex(['1/3/2000', 'NaT'])\n assert index.get_loc(pd.NaT) == 1\n", "\"\"\"\nModule contains tools for processing files into DataFrames or other objects\n\"\"\"\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport csv\nimport datetime\nimport re\nimport sys\nfrom textwrap import fill\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.lib as lib\nimport pandas._libs.ops as libops\nimport 
pandas._libs.parsers as parsers\nfrom pandas._libs.tslibs import parsing\nimport pandas.compat as compat\nfrom pandas.compat import (\n PY3, StringIO, lrange, lzip, map, range, string_types, u, zip)\nfrom pandas.errors import (\n AbstractMethodError, EmptyDataError, ParserError, ParserWarning)\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.cast import astype_nansafe\nfrom pandas.core.dtypes.common import (\n ensure_object, is_categorical_dtype, is_dtype_equal, is_float, is_integer,\n is_integer_dtype, is_list_like, is_object_dtype, is_scalar,\n is_string_dtype)\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import algorithms\nfrom pandas.core.arrays import Categorical\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.index import (\n Index, MultiIndex, RangeIndex, ensure_index_from_sequences)\nfrom pandas.core.series import Series\nfrom pandas.core.tools import datetimes as tools\n\nfrom pandas.io.common import (\n _NA_VALUES, BaseIterator, UnicodeReader, UTF8Recoder, _get_handle,\n _infer_compression, _validate_header_arg, get_filepath_or_buffer,\n is_file_like)\nfrom pandas.io.date_converters import generic_parser\n\n# BOM character (byte order mark)\n# This exists at the beginning of a file to indicate endianness\n# of a file (stream). Unfortunately, this marker screws up parsing,\n# so we need to remove it if we see it.\n_BOM = u('\\ufeff')\n\n_parser_params = r\"\"\"Also supports optionally iterating or breaking of the file\ninto chunks.\n\nAdditional help can be found in the `online docs for IO Tools\n<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.\n\nParameters\n----------\nfilepath_or_buffer : str, path object, or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be: file://localhost/path/to/table.csv.\n\n If you want to pass in a path object, pandas accepts either\n ``pathlib.Path`` or ``py._path.local.LocalPath``.\n\n By file-like object, we refer to objects with a ``read()`` method, such as\n a file handler (e.g. via builtin ``open`` function) or ``StringIO``.\n%s\ndelim_whitespace : boolean, default False\n Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be\n used as the sep. Equivalent to setting ``sep='\\s+'``. If this option\n is set to True, nothing should be passed in for the ``delimiter``\n parameter.\n\n .. versionadded:: 0.18.1 support for the Python parser.\n\nheader : int or list of ints, default 'infer'\n Row number(s) to use as the column names, and the start of the\n data. Default behavior is to infer the column names: if no names\n are passed the behavior is identical to ``header=0`` and column\n names are inferred from the first line of the file, if column\n names are passed explicitly then the behavior is identical to\n ``header=None``. Explicitly pass ``header=0`` to be able to\n replace existing names. The header can be a list of integers that\n specify row locations for a multi-index on the columns\n e.g. [0,1,3]. Intervening rows that are not specified will be\n skipped (e.g. 2 in this example is skipped). Note that this\n parameter ignores commented lines and empty lines if\n ``skip_blank_lines=True``, so header=0 denotes the first line of\n data rather than the first line of the file.\nnames : array-like, default None\n List of column names to use. 
If file contains no header row, then you\n should explicitly pass header=None. Duplicates in this list will cause\n a ``UserWarning`` to be issued.\nindex_col : int or sequence or False, default None\n Column to use as the row labels of the DataFrame. If a sequence is given, a\n MultiIndex is used. If you have a malformed file with delimiters at the end\n of each line, you might consider index_col=False to force pandas to _not_\n use the first column as the index (row names)\nusecols : list-like or callable, default None\n Return a subset of the columns. If list-like, all elements must either\n be positional (i.e. integer indices into the document columns) or strings\n that correspond to column names provided either by the user in `names` or\n inferred from the document header row(s). For example, a valid list-like\n `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Element\n order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.\n To instantiate a DataFrame from ``data`` with element order preserved use\n ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns\n in ``['foo', 'bar']`` order or\n ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``\n for ``['bar', 'foo']`` order.\n\n If callable, the callable function will be evaluated against the column\n names, returning names where the callable function evaluates to True. An\n example of a valid callable argument would be ``lambda x: x.upper() in\n ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster\n parsing time and lower memory usage.\nsqueeze : boolean, default False\n If the parsed data only contains one column then return a Series\nprefix : str, default None\n Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...\nmangle_dupe_cols : boolean, default True\n Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than\n 'X'...'X'. Passing in False will cause data to be overwritten if there\n are duplicate names in the columns.\ndtype : Type name or dict of column -> type, default None\n Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}\n Use `str` or `object` together with suitable `na_values` settings\n to preserve and not interpret dtype.\n If converters are specified, they will be applied INSTEAD\n of dtype conversion.\n%s\nconverters : dict, default None\n Dict of functions for converting values in certain columns. Keys can either\n be integers or column labels\ntrue_values : list, default None\n Values to consider as True\nfalse_values : list, default None\n Values to consider as False\nskipinitialspace : boolean, default False\n Skip spaces after delimiter.\nskiprows : list-like or integer or callable, default None\n Line numbers to skip (0-indexed) or number of lines to skip (int)\n at the start of the file.\n\n If callable, the callable function will be evaluated against the row\n indices, returning True if the row should be skipped and False otherwise.\n An example of a valid callable argument would be ``lambda x: x in [0, 2]``.\nskipfooter : int, default 0\n Number of lines at bottom of file to skip (Unsupported with engine='c')\nnrows : int, default None\n Number of rows of file to read. Useful for reading pieces of large files\nna_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN. If dict passed, specific\n per-column NA values. 
By default the following values are interpreted as\n NaN: '\"\"\" + fill(\"', '\".join(sorted(_NA_VALUES)),\n 70, subsequent_indent=\" \") + \"\"\"'.\nkeep_default_na : bool, default True\n Whether or not to include the default NaN values when parsing the data.\n Depending on whether `na_values` is passed in, the behavior is as follows:\n\n * If `keep_default_na` is True, and `na_values` are specified, `na_values`\n is appended to the default NaN values used for parsing.\n * If `keep_default_na` is True, and `na_values` are not specified, only\n the default NaN values are used for parsing.\n * If `keep_default_na` is False, and `na_values` are specified, only\n the NaN values specified `na_values` are used for parsing.\n * If `keep_default_na` is False, and `na_values` are not specified, no\n strings will be parsed as NaN.\n\n Note that if `na_filter` is passed in as False, the `keep_default_na` and\n `na_values` parameters will be ignored.\nna_filter : boolean, default True\n Detect missing value markers (empty strings and the value of na_values). In\n data without any NAs, passing na_filter=False can improve the performance\n of reading a large file\nverbose : boolean, default False\n Indicate number of NA values placed in non-numeric columns\nskip_blank_lines : boolean, default True\n If True, skip over blank lines rather than interpreting as NaN values\nparse_dates : boolean or list of ints or names or list of lists or dict, \\\ndefault False\n\n * boolean. If True -> try parsing the index.\n * list of ints or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n each as a separate date column.\n * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as\n a single date column.\n * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result\n 'foo'\n\n If a column or index contains an unparseable date, the entire column or\n index will be returned unaltered as an object data type. For non-standard\n datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``\n\n Note: A fast-path exists for iso8601-formatted dates.\ninfer_datetime_format : boolean, default False\n If True and `parse_dates` is enabled, pandas will attempt to infer the\n format of the datetime strings in the columns, and if it can be inferred,\n switch to a faster method of parsing them. In some cases this can increase\n the parsing speed by 5-10x.\nkeep_date_col : boolean, default False\n If True and `parse_dates` specifies combining multiple columns then\n keep the original columns.\ndate_parser : function, default None\n Function to use for converting a sequence of string columns to an array of\n datetime instances. The default uses ``dateutil.parser.parser`` to do the\n conversion. 
Pandas will try to call `date_parser` in three different ways,\n advancing to the next if an exception occurs: 1) Pass one or more arrays\n (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the\n string values from the columns defined by `parse_dates` into a single array\n and pass that; and 3) call `date_parser` once for each row using one or\n more strings (corresponding to the columns defined by `parse_dates`) as\n arguments.\ndayfirst : boolean, default False\n DD/MM format dates, international and European format\niterator : boolean, default False\n Return TextFileReader object for iteration or getting chunks with\n ``get_chunk()``.\nchunksize : int, default None\n Return TextFileReader object for iteration.\n See the `IO Tools docs\n <http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_\n for more information on ``iterator`` and ``chunksize``.\ncompression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'\n For on-the-fly decompression of on-disk data. If 'infer' and\n `filepath_or_buffer` is path-like, then detect compression from the\n following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n decompression). If using 'zip', the ZIP file must contain only one data\n file to be read in. Set to None for no decompression.\n\n .. versionadded:: 0.18.1 support for 'zip' and 'xz' compression.\n\nthousands : str, default None\n Thousands separator\ndecimal : str, default '.'\n Character to recognize as decimal point (e.g. use ',' for European data).\nfloat_precision : string, default None\n Specifies which converter the C engine should use for floating-point\n values. The options are `None` for the ordinary converter,\n `high` for the high-precision converter, and `round_trip` for the\n round-trip converter.\nlineterminator : str (length 1), default None\n Character to break file into lines. Only valid with C parser.\nquotechar : str (length 1), optional\n The character used to denote the start and end of a quoted item. Quoted\n items can include the delimiter and it will be ignored.\nquoting : int or csv.QUOTE_* instance, default 0\n Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of\n QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).\ndoublequote : boolean, default ``True``\n When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate\n whether or not to interpret two consecutive quotechar elements INSIDE a\n field as a single ``quotechar`` element.\nescapechar : str (length 1), default None\n One-character string used to escape delimiter when quoting is QUOTE_NONE.\ncomment : str, default None\n Indicates remainder of line should not be parsed. If found at the beginning\n of a line, the line will be ignored altogether. This parameter must be a\n single character. Like empty lines (as long as ``skip_blank_lines=True``),\n fully commented lines are ignored by the parameter `header` but not by\n `skiprows`. For example, if ``comment='#'``, parsing\n ``#empty\\\\na,b,c\\\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being\n treated as the header.\nencoding : str, default None\n Encoding to use for UTF when reading/writing (ex. 'utf-8'). 
`List of Python\n standard encodings\n <https://docs.python.org/3/library/codecs.html#standard-encodings>`_\ndialect : str or csv.Dialect instance, default None\n If provided, this parameter will override values (default or not) for the\n following parameters: `delimiter`, `doublequote`, `escapechar`,\n `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to\n override values, a ParserWarning will be issued. See csv.Dialect\n documentation for more details.\ntupleize_cols : boolean, default False\n .. deprecated:: 0.21.0\n This argument will be removed and will always convert to MultiIndex\n\n Leave a list of tuples on columns as is (default is to convert to\n a MultiIndex on the columns)\nerror_bad_lines : boolean, default True\n Lines with too many fields (e.g. a csv line with too many commas) will by\n default cause an exception to be raised, and no DataFrame will be returned.\n If False, then these \"bad lines\" will dropped from the DataFrame that is\n returned.\nwarn_bad_lines : boolean, default True\n If error_bad_lines is False, and warn_bad_lines is True, a warning for each\n \"bad line\" will be output.\nlow_memory : boolean, default True\n Internally process the file in chunks, resulting in lower memory use\n while parsing, but possibly mixed type inference. To ensure no mixed\n types either set False, or specify the type with the `dtype` parameter.\n Note that the entire file is read into a single DataFrame regardless,\n use the `chunksize` or `iterator` parameter to return the data in chunks.\n (Only valid with C parser)\nmemory_map : boolean, default False\n If a filepath is provided for `filepath_or_buffer`, map the file object\n directly onto memory and access the data directly from there. Using this\n option can improve performance because there is no longer any I/O overhead.\n\nReturns\n-------\nresult : DataFrame or TextParser\n\"\"\"\n\n# engine is not used in read_fwf() so is factored out of the shared docstring\n_engine_doc = \"\"\"engine : {'c', 'python'}, optional\n Parser engine to use. The C engine is faster while the python engine is\n currently more feature-complete.\"\"\"\n\n_sep_doc = r\"\"\"sep : str, default {default}\n Delimiter to use. If sep is None, the C engine cannot automatically detect\n the separator, but the Python parsing engine can, meaning the latter will\n be used and automatically detect the separator by Python's builtin sniffer\n tool, ``csv.Sniffer``. In addition, separators longer than 1 character and\n different from ``'\\s+'`` will be interpreted as regular expressions and\n will also force the use of the Python parsing engine. Note that regex\n delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``\ndelimiter : str, default ``None``\n Alternative argument name for sep.\"\"\"\n\n_read_csv_doc = \"\"\"\nRead CSV (comma-separated) file into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_sep_doc.format(default=\"','\"), _engine_doc))\n\n_read_table_doc = \"\"\"\n\n.. deprecated:: 0.24.0\n Use :func:`pandas.read_csv` instead, passing ``sep='\\t'`` if necessary.\n\nRead general delimited file into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_sep_doc.format(default=\"\\\\t (tab-stop)\"),\n _engine_doc))\n\n_fwf_widths = \"\"\"\\\ncolspecs : list of pairs (int, int) or 'infer'. 
optional\n A list of pairs (tuples) giving the extents of the fixed-width\n fields of each line as half-open intervals (i.e., [from, to[ ).\n String value 'infer' can be used to instruct the parser to try\n detecting the column specifications from the first 100 rows of\n the data which are not being skipped via skiprows (default='infer').\nwidths : list of ints. optional\n A list of field widths which can be used instead of 'colspecs' if\n the intervals are contiguous.\ndelimiter : str, default ``'\\t' + ' '``\n Characters to consider as filler characters in the fixed-width file.\n Can be used to specify the filler character of the fields\n if it is not spaces (e.g., '~').\n\"\"\"\n\n_read_fwf_doc = \"\"\"\nRead a table of fixed-width formatted lines into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_fwf_widths, ''))\n\n\ndef _validate_integer(name, val, min_val=0):\n \"\"\"\n Checks whether the 'name' parameter for parsing is either\n an integer OR float that can SAFELY be cast to an integer\n without losing accuracy. Raises a ValueError if that is\n not the case.\n\n Parameters\n ----------\n name : string\n Parameter name (used for error reporting)\n val : int or float\n The value to check\n min_val : int\n Minimum allowed value (val < min_val will result in a ValueError)\n \"\"\"\n msg = \"'{name:s}' must be an integer >={min_val:d}\".format(name=name,\n min_val=min_val)\n\n if val is not None:\n if is_float(val):\n if int(val) != val:\n raise ValueError(msg)\n val = int(val)\n elif not (is_integer(val) and val >= min_val):\n raise ValueError(msg)\n\n return val\n\n\ndef _validate_names(names):\n \"\"\"\n Check if the `names` parameter contains duplicates.\n\n If duplicates are found, we issue a warning before returning.\n\n Parameters\n ----------\n names : array-like or None\n An array containing a list of the names used for the output DataFrame.\n\n Returns\n -------\n names : array-like or None\n The original `names` parameter.\n \"\"\"\n\n if names is not None:\n if len(names) != len(set(names)):\n msg = (\"Duplicate names specified. 
This \"\n \"will raise an error in the future.\")\n warnings.warn(msg, UserWarning, stacklevel=3)\n\n return names\n\n\ndef _read(filepath_or_buffer, kwds):\n \"\"\"Generic reader of line files.\"\"\"\n encoding = kwds.get('encoding', None)\n if encoding is not None:\n encoding = re.sub('_', '-', encoding).lower()\n kwds['encoding'] = encoding\n\n compression = kwds.get('compression')\n compression = _infer_compression(filepath_or_buffer, compression)\n filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(\n filepath_or_buffer, encoding, compression)\n kwds['compression'] = compression\n\n if kwds.get('date_parser', None) is not None:\n if isinstance(kwds['parse_dates'], bool):\n kwds['parse_dates'] = True\n\n # Extract some of the arguments (pass chunksize on).\n iterator = kwds.get('iterator', False)\n chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)\n nrows = kwds.get('nrows', None)\n\n # Check for duplicates in names.\n _validate_names(kwds.get(\"names\", None))\n\n # Create the parser.\n parser = TextFileReader(filepath_or_buffer, **kwds)\n\n if chunksize or iterator:\n return parser\n\n try:\n data = parser.read(nrows)\n finally:\n parser.close()\n\n if should_close:\n try:\n filepath_or_buffer.close()\n except ValueError:\n pass\n\n return data\n\n\n_parser_defaults = {\n 'delimiter': None,\n\n 'doublequote': True,\n 'escapechar': None,\n 'quotechar': '\"',\n 'quoting': csv.QUOTE_MINIMAL,\n 'skipinitialspace': False,\n 'lineterminator': None,\n\n 'header': 'infer',\n 'index_col': None,\n 'names': None,\n 'prefix': None,\n 'skiprows': None,\n 'na_values': None,\n 'true_values': None,\n 'false_values': None,\n 'converters': None,\n 'dtype': None,\n 'skipfooter': 0,\n\n 'keep_default_na': True,\n 'thousands': None,\n 'comment': None,\n 'decimal': b'.',\n\n # 'engine': 'c',\n 'parse_dates': False,\n 'keep_date_col': False,\n 'dayfirst': False,\n 'date_parser': None,\n\n 'usecols': None,\n\n 'nrows': None,\n # 'iterator': False,\n 'chunksize': None,\n 'verbose': False,\n 'encoding': None,\n 'squeeze': False,\n 'compression': None,\n 'mangle_dupe_cols': True,\n 'tupleize_cols': False,\n 'infer_datetime_format': False,\n 'skip_blank_lines': True\n}\n\n\n_c_parser_defaults = {\n 'delim_whitespace': False,\n 'na_filter': True,\n 'low_memory': True,\n 'memory_map': False,\n 'error_bad_lines': True,\n 'warn_bad_lines': True,\n 'tupleize_cols': False,\n 'float_precision': None\n}\n\n_fwf_defaults = {\n 'colspecs': 'infer',\n 'widths': None,\n}\n\n_c_unsupported = {'skipfooter'}\n_python_unsupported = {\n 'low_memory',\n 'float_precision',\n}\n\n_deprecated_defaults = {\n 'tupleize_cols': None\n}\n_deprecated_args = {\n 'tupleize_cols',\n}\n\n\ndef _make_parser_function(name, default_sep=','):\n\n # prepare read_table deprecation\n if name == \"read_table\":\n sep = False\n else:\n sep = default_sep\n\n def parser_f(filepath_or_buffer,\n sep=sep,\n delimiter=None,\n\n # Column and Index Locations and Names\n header='infer',\n names=None,\n index_col=None,\n usecols=None,\n squeeze=False,\n prefix=None,\n mangle_dupe_cols=True,\n\n # General Parsing Configuration\n dtype=None,\n engine=None,\n converters=None,\n true_values=None,\n false_values=None,\n skipinitialspace=False,\n skiprows=None,\n nrows=None,\n\n # NA and Missing Data Handling\n na_values=None,\n keep_default_na=True,\n na_filter=True,\n verbose=False,\n skip_blank_lines=True,\n\n # Datetime Handling\n parse_dates=False,\n infer_datetime_format=False,\n keep_date_col=False,\n 
date_parser=None,\n dayfirst=False,\n\n # Iteration\n iterator=False,\n chunksize=None,\n\n # Quoting, Compression, and File Format\n compression='infer',\n thousands=None,\n decimal=b'.',\n lineterminator=None,\n quotechar='\"',\n quoting=csv.QUOTE_MINIMAL,\n escapechar=None,\n comment=None,\n encoding=None,\n dialect=None,\n tupleize_cols=None,\n\n # Error Handling\n error_bad_lines=True,\n warn_bad_lines=True,\n\n skipfooter=0,\n\n # Internal\n doublequote=True,\n delim_whitespace=False,\n low_memory=_c_parser_defaults['low_memory'],\n memory_map=False,\n float_precision=None):\n\n # deprecate read_table GH21948\n if name == \"read_table\":\n if sep is False and delimiter is None:\n warnings.warn(\"read_table is deprecated, use read_csv \"\n \"instead, passing sep='\\\\t'.\",\n FutureWarning, stacklevel=2)\n else:\n warnings.warn(\"read_table is deprecated, use read_csv \"\n \"instead.\",\n FutureWarning, stacklevel=2)\n if sep is False:\n sep = default_sep\n\n # Alias sep -> delimiter.\n if delimiter is None:\n delimiter = sep\n\n if delim_whitespace and delimiter != default_sep:\n raise ValueError(\"Specified a delimiter with both sep and\"\n \" delim_whitespace=True; you can only\"\n \" specify one.\")\n\n if engine is not None:\n engine_specified = True\n else:\n engine = 'c'\n engine_specified = False\n\n kwds = dict(delimiter=delimiter,\n engine=engine,\n dialect=dialect,\n compression=compression,\n engine_specified=engine_specified,\n\n doublequote=doublequote,\n escapechar=escapechar,\n quotechar=quotechar,\n quoting=quoting,\n skipinitialspace=skipinitialspace,\n lineterminator=lineterminator,\n\n header=header,\n index_col=index_col,\n names=names,\n prefix=prefix,\n skiprows=skiprows,\n na_values=na_values,\n true_values=true_values,\n false_values=false_values,\n keep_default_na=keep_default_na,\n thousands=thousands,\n comment=comment,\n decimal=decimal,\n\n parse_dates=parse_dates,\n keep_date_col=keep_date_col,\n dayfirst=dayfirst,\n date_parser=date_parser,\n\n nrows=nrows,\n iterator=iterator,\n chunksize=chunksize,\n skipfooter=skipfooter,\n converters=converters,\n dtype=dtype,\n usecols=usecols,\n verbose=verbose,\n encoding=encoding,\n squeeze=squeeze,\n memory_map=memory_map,\n float_precision=float_precision,\n\n na_filter=na_filter,\n delim_whitespace=delim_whitespace,\n warn_bad_lines=warn_bad_lines,\n error_bad_lines=error_bad_lines,\n low_memory=low_memory,\n mangle_dupe_cols=mangle_dupe_cols,\n tupleize_cols=tupleize_cols,\n infer_datetime_format=infer_datetime_format,\n skip_blank_lines=skip_blank_lines)\n\n return _read(filepath_or_buffer, kwds)\n\n parser_f.__name__ = name\n\n return parser_f\n\n\nread_csv = _make_parser_function('read_csv', default_sep=',')\nread_csv = Appender(_read_csv_doc)(read_csv)\n\nread_table = _make_parser_function('read_table', default_sep='\\t')\nread_table = Appender(_read_table_doc)(read_table)\n\n\n@Appender(_read_fwf_doc)\ndef read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds):\n # Check input arguments.\n if colspecs is None and widths is None:\n raise ValueError(\"Must specify either colspecs or widths\")\n elif colspecs not in (None, 'infer') and widths is not None:\n raise ValueError(\"You must specify only one of 'widths' and \"\n \"'colspecs'\")\n\n # Compute 'colspecs' from 'widths', if specified.\n if widths is not None:\n colspecs, col = [], 0\n for w in widths:\n colspecs.append((col, col + w))\n col += w\n\n kwds['colspecs'] = colspecs\n kwds['engine'] = 'python-fwf'\n return 
_read(filepath_or_buffer, kwds)\n\n\nclass TextFileReader(BaseIterator):\n \"\"\"\n\n Passed dialect overrides any of the related parser options\n\n \"\"\"\n\n def __init__(self, f, engine=None, **kwds):\n\n self.f = f\n\n if engine is not None:\n engine_specified = True\n else:\n engine = 'python'\n engine_specified = False\n\n self._engine_specified = kwds.get('engine_specified', engine_specified)\n\n if kwds.get('dialect') is not None:\n dialect = kwds['dialect']\n if dialect in csv.list_dialects():\n dialect = csv.get_dialect(dialect)\n\n # Any valid dialect should have these attributes.\n # If any are missing, we will raise automatically.\n for param in ('delimiter', 'doublequote', 'escapechar',\n 'skipinitialspace', 'quotechar', 'quoting'):\n try:\n dialect_val = getattr(dialect, param)\n except AttributeError:\n raise ValueError(\"Invalid dialect '{dialect}' provided\"\n .format(dialect=kwds['dialect']))\n provided = kwds.get(param, _parser_defaults[param])\n\n # Messages for conflicting values between the dialect instance\n # and the actual parameters provided.\n conflict_msgs = []\n\n if dialect_val != provided:\n conflict_msgs.append((\n \"Conflicting values for '{param}': '{val}' was \"\n \"provided, but the dialect specifies '{diaval}'. \"\n \"Using the dialect-specified value.\".format(\n param=param, val=provided, diaval=dialect_val)))\n\n if conflict_msgs:\n warnings.warn('\\n\\n'.join(conflict_msgs), ParserWarning,\n stacklevel=2)\n kwds[param] = dialect_val\n\n if kwds.get(\"skipfooter\"):\n if kwds.get(\"iterator\") or kwds.get(\"chunksize\"):\n raise ValueError(\"'skipfooter' not supported for 'iteration'\")\n if kwds.get(\"nrows\"):\n raise ValueError(\"'skipfooter' not supported with 'nrows'\")\n\n if kwds.get('header', 'infer') == 'infer':\n kwds['header'] = 0 if kwds.get('names') is None else None\n\n self.orig_options = kwds\n\n # miscellanea\n self.engine = engine\n self._engine = None\n self._currow = 0\n\n options = self._get_options_with_defaults(engine)\n\n self.chunksize = options.pop('chunksize', None)\n self.nrows = options.pop('nrows', None)\n self.squeeze = options.pop('squeeze', False)\n\n # might mutate self.engine\n self.engine = self._check_file_or_buffer(f, engine)\n self.options, self.engine = self._clean_options(options, engine)\n\n if 'has_index_names' in kwds:\n self.options['has_index_names'] = kwds['has_index_names']\n\n self._make_engine(self.engine)\n\n def close(self):\n self._engine.close()\n\n def _get_options_with_defaults(self, engine):\n kwds = self.orig_options\n\n options = {}\n\n for argname, default in compat.iteritems(_parser_defaults):\n value = kwds.get(argname, default)\n\n # see gh-12935\n if argname == 'mangle_dupe_cols' and not value:\n raise ValueError('Setting mangle_dupe_cols=False is '\n 'not supported yet')\n else:\n options[argname] = value\n\n for argname, default in compat.iteritems(_c_parser_defaults):\n if argname in kwds:\n value = kwds[argname]\n\n if engine != 'c' and value != default:\n if ('python' in engine and\n argname not in _python_unsupported):\n pass\n elif value == _deprecated_defaults.get(argname, default):\n pass\n else:\n raise ValueError(\n 'The %r option is not supported with the'\n ' %r engine' % (argname, engine))\n else:\n value = _deprecated_defaults.get(argname, default)\n options[argname] = value\n\n if engine == 'python-fwf':\n for argname, default in compat.iteritems(_fwf_defaults):\n options[argname] = kwds.get(argname, default)\n\n return options\n\n def _check_file_or_buffer(self, f, 
engine):\n # see gh-16530\n if is_file_like(f):\n next_attr = \"__next__\" if PY3 else \"next\"\n\n # The C engine doesn't need the file-like to have the \"next\" or\n # \"__next__\" attribute. However, the Python engine explicitly calls\n # \"next(...)\" when iterating through such an object, meaning it\n # needs to have that attribute (\"next\" for Python 2.x, \"__next__\"\n # for Python 3.x)\n if engine != \"c\" and not hasattr(f, next_attr):\n msg = (\"The 'python' engine cannot iterate \"\n \"through this file buffer.\")\n raise ValueError(msg)\n\n return engine\n\n def _clean_options(self, options, engine):\n result = options.copy()\n\n engine_specified = self._engine_specified\n fallback_reason = None\n\n sep = options['delimiter']\n delim_whitespace = options['delim_whitespace']\n\n # C engine not supported yet\n if engine == 'c':\n if options['skipfooter'] > 0:\n fallback_reason = (\"the 'c' engine does not support\"\n \" skipfooter\")\n engine = 'python'\n\n encoding = sys.getfilesystemencoding() or 'utf-8'\n if sep is None and not delim_whitespace:\n if engine == 'c':\n fallback_reason = (\"the 'c' engine does not support\"\n \" sep=None with delim_whitespace=False\")\n engine = 'python'\n elif sep is not None and len(sep) > 1:\n if engine == 'c' and sep == r'\\s+':\n result['delim_whitespace'] = True\n del result['delimiter']\n elif engine not in ('python', 'python-fwf'):\n # wait until regex engine integrated\n fallback_reason = (\"the 'c' engine does not support\"\n \" regex separators (separators > 1 char and\"\n r\" different from '\\s+' are\"\n \" interpreted as regex)\")\n engine = 'python'\n elif delim_whitespace:\n if 'python' in engine:\n result['delimiter'] = r'\\s+'\n elif sep is not None:\n encodeable = True\n try:\n if len(sep.encode(encoding)) > 1:\n encodeable = False\n except UnicodeDecodeError:\n encodeable = False\n if not encodeable and engine not in ('python', 'python-fwf'):\n fallback_reason = (\"the separator encoded in {encoding}\"\n \" is > 1 char long, and the 'c' engine\"\n \" does not support such separators\"\n .format(encoding=encoding))\n engine = 'python'\n\n quotechar = options['quotechar']\n if (quotechar is not None and\n isinstance(quotechar, (str, compat.text_type, bytes))):\n if (len(quotechar) == 1 and ord(quotechar) > 127 and\n engine not in ('python', 'python-fwf')):\n fallback_reason = (\"ord(quotechar) > 127, meaning the \"\n \"quotechar is larger than one byte, \"\n \"and the 'c' engine does not support \"\n \"such quotechars\")\n engine = 'python'\n\n if fallback_reason and engine_specified:\n raise ValueError(fallback_reason)\n\n if engine == 'c':\n for arg in _c_unsupported:\n del result[arg]\n\n if 'python' in engine:\n for arg in _python_unsupported:\n if fallback_reason and result[arg] != _c_parser_defaults[arg]:\n msg = (\"Falling back to the 'python' engine because\"\n \" {reason}, but this causes {option!r} to be\"\n \" ignored as it is not supported by the 'python'\"\n \" engine.\").format(reason=fallback_reason,\n option=arg)\n raise ValueError(msg)\n del result[arg]\n\n if fallback_reason:\n warnings.warn((\"Falling back to the 'python' engine because\"\n \" {0}; you can avoid this warning by specifying\"\n \" engine='python'.\").format(fallback_reason),\n ParserWarning, stacklevel=5)\n\n index_col = options['index_col']\n names = options['names']\n converters = options['converters']\n na_values = options['na_values']\n skiprows = options['skiprows']\n\n _validate_header_arg(options['header'])\n\n depr_warning = ''\n\n for 
arg in _deprecated_args:\n parser_default = _c_parser_defaults[arg]\n depr_default = _deprecated_defaults[arg]\n\n msg = (\"The '{arg}' argument has been deprecated \"\n \"and will be removed in a future version.\"\n .format(arg=arg))\n\n if arg == 'tupleize_cols':\n msg += (' Column tuples will then '\n 'always be converted to MultiIndex.')\n\n if result.get(arg, depr_default) != depr_default:\n # raise Exception(result.get(arg, depr_default), depr_default)\n depr_warning += msg + '\\n\\n'\n else:\n result[arg] = parser_default\n\n if depr_warning != '':\n warnings.warn(depr_warning, FutureWarning, stacklevel=2)\n\n if index_col is True:\n raise ValueError(\"The value of index_col couldn't be 'True'\")\n if _is_index_col(index_col):\n if not isinstance(index_col, (list, tuple, np.ndarray)):\n index_col = [index_col]\n result['index_col'] = index_col\n\n names = list(names) if names is not None else names\n\n # type conversion-related\n if converters is not None:\n if not isinstance(converters, dict):\n raise TypeError('Type converters must be a dict or'\n ' subclass, input was '\n 'a {0!r}'.format(type(converters).__name__))\n else:\n converters = {}\n\n # Converting values to NA\n keep_default_na = options['keep_default_na']\n na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)\n\n # handle skiprows; this is internally handled by the\n # c-engine, so only need for python parsers\n if engine != 'c':\n if is_integer(skiprows):\n skiprows = lrange(skiprows)\n if skiprows is None:\n skiprows = set()\n elif not callable(skiprows):\n skiprows = set(skiprows)\n\n # put stuff back\n result['names'] = names\n result['converters'] = converters\n result['na_values'] = na_values\n result['na_fvalues'] = na_fvalues\n result['skiprows'] = skiprows\n\n return result, engine\n\n def __next__(self):\n try:\n return self.get_chunk()\n except StopIteration:\n self.close()\n raise\n\n def _make_engine(self, engine='c'):\n if engine == 'c':\n self._engine = CParserWrapper(self.f, **self.options)\n else:\n if engine == 'python':\n klass = PythonParser\n elif engine == 'python-fwf':\n klass = FixedWidthFieldParser\n else:\n raise ValueError('Unknown engine: {engine} (valid options are'\n ' \"c\", \"python\", or' ' \"python-fwf\")'.format(\n engine=engine))\n self._engine = klass(self.f, **self.options)\n\n def _failover_to_python(self):\n raise AbstractMethodError(self)\n\n def read(self, nrows=None):\n nrows = _validate_integer('nrows', nrows)\n ret = self._engine.read(nrows)\n\n # May alter columns / col_dict\n index, columns, col_dict = self._create_index(ret)\n\n if index is None:\n if col_dict:\n # Any column is actually fine:\n new_rows = len(compat.next(compat.itervalues(col_dict)))\n index = RangeIndex(self._currow, self._currow + new_rows)\n else:\n new_rows = 0\n else:\n new_rows = len(index)\n\n df = DataFrame(col_dict, columns=columns, index=index)\n\n self._currow += new_rows\n\n if self.squeeze and len(df.columns) == 1:\n return df[df.columns[0]].copy()\n return df\n\n def _create_index(self, ret):\n index, columns, col_dict = ret\n return index, columns, col_dict\n\n def get_chunk(self, size=None):\n if size is None:\n size = self.chunksize\n if self.nrows is not None:\n if self._currow >= self.nrows:\n raise StopIteration\n size = min(size, self.nrows - self._currow)\n return self.read(nrows=size)\n\n\ndef _is_index_col(col):\n return col is not None and col is not False\n\n\ndef _is_potential_multi_index(columns):\n \"\"\"\n Check whether or not the `columns` parameter\n could 
be converted into a MultiIndex.\n\n Parameters\n ----------\n columns : array-like\n Object which may or may not be convertible into a MultiIndex\n\n Returns\n -------\n boolean : Whether or not columns could become a MultiIndex\n \"\"\"\n return (len(columns) and not isinstance(columns, MultiIndex) and\n all(isinstance(c, tuple) for c in columns))\n\n\ndef _evaluate_usecols(usecols, names):\n \"\"\"\n Check whether or not the 'usecols' parameter\n is a callable. If so, enumerates the 'names'\n parameter and returns a set of indices for\n each entry in 'names' that evaluates to True.\n If not a callable, returns 'usecols'.\n \"\"\"\n if callable(usecols):\n return {i for i, name in enumerate(names) if usecols(name)}\n return usecols\n\n\ndef _validate_usecols_names(usecols, names):\n \"\"\"\n Validates that all usecols are present in a given\n list of names. If not, raise a ValueError that\n shows what usecols are missing.\n\n Parameters\n ----------\n usecols : iterable of usecols\n The columns to validate are present in names.\n names : iterable of names\n The column names to check against.\n\n Returns\n -------\n usecols : iterable of usecols\n The `usecols` parameter if the validation succeeds.\n\n Raises\n ------\n ValueError : Columns were missing. Error message will list them.\n \"\"\"\n missing = [c for c in usecols if c not in names]\n if len(missing) > 0:\n raise ValueError(\n \"Usecols do not match columns, \"\n \"columns expected but not found: {missing}\".format(missing=missing)\n )\n\n return usecols\n\n\ndef _validate_skipfooter_arg(skipfooter):\n \"\"\"\n Validate the 'skipfooter' parameter.\n\n Checks whether 'skipfooter' is a non-negative integer.\n Raises a ValueError if that is not the case.\n\n Parameters\n ----------\n skipfooter : non-negative integer\n The number of rows to skip at the end of the file.\n\n Returns\n -------\n validated_skipfooter : non-negative integer\n The original input if the validation succeeds.\n\n Raises\n ------\n ValueError : 'skipfooter' was not a non-negative integer.\n \"\"\"\n\n if not is_integer(skipfooter):\n raise ValueError(\"skipfooter must be an integer\")\n\n if skipfooter < 0:\n raise ValueError(\"skipfooter cannot be negative\")\n\n return skipfooter\n\n\ndef _validate_usecols_arg(usecols):\n \"\"\"\n Validate the 'usecols' parameter.\n\n Checks whether or not the 'usecols' parameter contains all integers\n (column selection by index), strings (column by name) or is a callable.\n Raises a ValueError if that is not the case.\n\n Parameters\n ----------\n usecols : list-like, callable, or None\n List of columns to use when parsing or a callable that can be used\n to filter a list of table columns.\n\n Returns\n -------\n usecols_tuple : tuple\n A tuple of (verified_usecols, usecols_dtype).\n\n 'verified_usecols' is either a set if an array-like is passed in or\n 'usecols' if a callable or None is passed in.\n\n 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like\n is passed in or None if a callable or None is passed in.\n \"\"\"\n msg = (\"'usecols' must either be list-like of all strings, all unicode, \"\n \"all integers or a callable.\")\n if usecols is not None:\n if callable(usecols):\n return usecols, None\n # GH20529, ensure is iterable container but not string.\n elif not is_list_like(usecols):\n raise ValueError(msg)\n else:\n usecols_dtype = lib.infer_dtype(usecols)\n if usecols_dtype not in ('empty', 'integer',\n 'string', 'unicode'):\n raise ValueError(msg)\n return set(usecols), usecols_dtype\n return 
usecols, None\n\n\ndef _validate_parse_dates_arg(parse_dates):\n \"\"\"\n Check whether or not the 'parse_dates' parameter\n is a non-boolean scalar. Raises a ValueError if\n that is the case.\n \"\"\"\n msg = (\"Only booleans, lists, and \"\n \"dictionaries are accepted \"\n \"for the 'parse_dates' parameter\")\n\n if parse_dates is not None:\n if is_scalar(parse_dates):\n if not lib.is_bool(parse_dates):\n raise TypeError(msg)\n\n elif not isinstance(parse_dates, (list, dict)):\n raise TypeError(msg)\n\n return parse_dates\n\n\nclass ParserBase(object):\n\n def __init__(self, kwds):\n self.names = kwds.get('names')\n self.orig_names = None\n self.prefix = kwds.pop('prefix', None)\n\n self.index_col = kwds.get('index_col', None)\n self.unnamed_cols = set()\n self.index_names = None\n self.col_names = None\n\n self.parse_dates = _validate_parse_dates_arg(\n kwds.pop('parse_dates', False))\n self.date_parser = kwds.pop('date_parser', None)\n self.dayfirst = kwds.pop('dayfirst', False)\n self.keep_date_col = kwds.pop('keep_date_col', False)\n\n self.na_values = kwds.get('na_values')\n self.na_fvalues = kwds.get('na_fvalues')\n self.na_filter = kwds.get('na_filter', False)\n self.keep_default_na = kwds.get('keep_default_na', True)\n\n self.true_values = kwds.get('true_values')\n self.false_values = kwds.get('false_values')\n self.tupleize_cols = kwds.get('tupleize_cols', False)\n self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True)\n self.infer_datetime_format = kwds.pop('infer_datetime_format', False)\n\n self._date_conv = _make_date_converter(\n date_parser=self.date_parser,\n dayfirst=self.dayfirst,\n infer_datetime_format=self.infer_datetime_format\n )\n\n # validate header options for mi\n self.header = kwds.get('header')\n if isinstance(self.header, (list, tuple, np.ndarray)):\n if not all(map(is_integer, self.header)):\n raise ValueError(\"header must be integer or list of integers\")\n if kwds.get('usecols'):\n raise ValueError(\"cannot specify usecols when \"\n \"specifying a multi-index header\")\n if kwds.get('names'):\n raise ValueError(\"cannot specify names when \"\n \"specifying a multi-index header\")\n\n # validate index_col that only contains integers\n if self.index_col is not None:\n is_sequence = isinstance(self.index_col, (list, tuple,\n np.ndarray))\n if not (is_sequence and\n all(map(is_integer, self.index_col)) or\n is_integer(self.index_col)):\n raise ValueError(\"index_col must only contain row numbers \"\n \"when specifying a multi-index header\")\n\n # GH 16338\n elif self.header is not None and not is_integer(self.header):\n raise ValueError(\"header must be integer or list of integers\")\n\n self._name_processed = False\n\n self._first_chunk = True\n\n # GH 13932\n # keep references to file handles opened by the parser itself\n self.handles = []\n\n def close(self):\n for f in self.handles:\n f.close()\n\n @property\n def _has_complex_date_col(self):\n return (isinstance(self.parse_dates, dict) or\n (isinstance(self.parse_dates, list) and\n len(self.parse_dates) > 0 and\n isinstance(self.parse_dates[0], list)))\n\n def _should_parse_dates(self, i):\n if isinstance(self.parse_dates, bool):\n return self.parse_dates\n else:\n if self.index_names is not None:\n name = self.index_names[i]\n else:\n name = None\n j = self.index_col[i]\n\n if is_scalar(self.parse_dates):\n return ((j == self.parse_dates) or\n (name is not None and name == self.parse_dates))\n else:\n return ((j in self.parse_dates) or\n (name is not None and name in self.parse_dates))\n\n def 
_extract_multi_indexer_columns(self, header, index_names, col_names,\n passed_names=False):\n \"\"\" extract and return the names, index_names, col_names\n header is a list-of-lists returned from the parsers \"\"\"\n if len(header) < 2:\n return header[0], index_names, col_names, passed_names\n\n # the names are the tuples of the header that are not the index cols\n # 0 is the name of the index, assuming index_col is a list of column\n # numbers\n ic = self.index_col\n if ic is None:\n ic = []\n\n if not isinstance(ic, (list, tuple, np.ndarray)):\n ic = [ic]\n sic = set(ic)\n\n # clean the index_names\n index_names = header.pop(-1)\n index_names, names, index_col = _clean_index_names(index_names,\n self.index_col,\n self.unnamed_cols)\n\n # extract the columns\n field_count = len(header[0])\n\n def extract(r):\n return tuple(r[i] for i in range(field_count) if i not in sic)\n\n columns = lzip(*[extract(r) for r in header])\n names = ic + columns\n\n # If we find unnamed columns all in a single\n # level, then our header was too long.\n for n in range(len(columns[0])):\n if all(compat.to_str(c[n]) in self.unnamed_cols for c in columns):\n raise ParserError(\n \"Passed header=[%s] are too many rows for this \"\n \"multi_index of columns\"\n % ','.join(str(x) for x in self.header)\n )\n\n # Clean the column names (if we have an index_col).\n if len(ic):\n col_names = [r[0] if (len(r[0]) and\n r[0] not in self.unnamed_cols) else None\n for r in header]\n else:\n col_names = [None] * len(header)\n\n passed_names = True\n\n return names, index_names, col_names, passed_names\n\n def _maybe_dedup_names(self, names):\n # see gh-7160 and gh-9424: this helps to provide\n # immediate alleviation of the duplicate names\n # issue and appears to be satisfactory to users,\n # but ultimately, not needing to butcher the names\n # would be nice!\n if self.mangle_dupe_cols:\n names = list(names) # so we can index\n counts = defaultdict(int)\n is_potential_mi = _is_potential_multi_index(names)\n\n for i, col in enumerate(names):\n cur_count = counts[col]\n\n while cur_count > 0:\n counts[col] = cur_count + 1\n\n if is_potential_mi:\n col = col[:-1] + ('%s.%d' % (col[-1], cur_count),)\n else:\n col = '%s.%d' % (col, cur_count)\n cur_count = counts[col]\n\n names[i] = col\n counts[col] = cur_count + 1\n\n return names\n\n def _maybe_make_multi_index_columns(self, columns, col_names=None):\n # possibly create a column mi here\n if _is_potential_multi_index(columns):\n columns = MultiIndex.from_tuples(columns, names=col_names)\n return columns\n\n def _make_index(self, data, alldata, columns, indexnamerow=False):\n if not _is_index_col(self.index_col) or not self.index_col:\n index = None\n\n elif not self._has_complex_date_col:\n index = self._get_simple_index(alldata, columns)\n index = self._agg_index(index)\n elif self._has_complex_date_col:\n if not self._name_processed:\n (self.index_names, _,\n self.index_col) = _clean_index_names(list(columns),\n self.index_col,\n self.unnamed_cols)\n self._name_processed = True\n index = self._get_complex_date_index(data, columns)\n index = self._agg_index(index, try_parse_dates=False)\n\n # add names for the index\n if indexnamerow:\n coffset = len(indexnamerow) - len(columns)\n index = index.set_names(indexnamerow[:coffset])\n\n # maybe create a mi on the columns\n columns = self._maybe_make_multi_index_columns(columns, self.col_names)\n\n return index, columns\n\n _implicit_index = False\n\n def _get_simple_index(self, data, columns):\n def ix(col):\n if not 
isinstance(col, compat.string_types):\n return col\n raise ValueError('Index %s invalid' % col)\n\n to_remove = []\n index = []\n for idx in self.index_col:\n i = ix(idx)\n to_remove.append(i)\n index.append(data[i])\n\n # remove index items from content and columns, don't pop in\n # loop\n for i in reversed(sorted(to_remove)):\n data.pop(i)\n if not self._implicit_index:\n columns.pop(i)\n\n return index\n\n def _get_complex_date_index(self, data, col_names):\n def _get_name(icol):\n if isinstance(icol, compat.string_types):\n return icol\n\n if col_names is None:\n raise ValueError(('Must supply column order to use %s as '\n 'index') % str(icol))\n\n for i, c in enumerate(col_names):\n if i == icol:\n return c\n\n to_remove = []\n index = []\n for idx in self.index_col:\n name = _get_name(idx)\n to_remove.append(name)\n index.append(data[name])\n\n # remove index items from content and columns, don't pop in\n # loop\n for c in reversed(sorted(to_remove)):\n data.pop(c)\n col_names.remove(c)\n\n return index\n\n def _agg_index(self, index, try_parse_dates=True):\n arrays = []\n\n for i, arr in enumerate(index):\n\n if try_parse_dates and self._should_parse_dates(i):\n arr = self._date_conv(arr)\n\n if self.na_filter:\n col_na_values = self.na_values\n col_na_fvalues = self.na_fvalues\n else:\n col_na_values = set()\n col_na_fvalues = set()\n\n if isinstance(self.na_values, dict):\n col_name = self.index_names[i]\n if col_name is not None:\n col_na_values, col_na_fvalues = _get_na_values(\n col_name, self.na_values, self.na_fvalues,\n self.keep_default_na)\n\n arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)\n arrays.append(arr)\n\n names = self.index_names\n index = ensure_index_from_sequences(arrays, names)\n\n return index\n\n def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,\n converters=None, dtypes=None):\n result = {}\n for c, values in compat.iteritems(dct):\n conv_f = None if converters is None else converters.get(c, None)\n if isinstance(dtypes, dict):\n cast_type = dtypes.get(c, None)\n else:\n # single dtype or None\n cast_type = dtypes\n\n if self.na_filter:\n col_na_values, col_na_fvalues = _get_na_values(\n c, na_values, na_fvalues, self.keep_default_na)\n else:\n col_na_values, col_na_fvalues = set(), set()\n\n if conv_f is not None:\n # conv_f applied to data before inference\n if cast_type is not None:\n warnings.warn((\"Both a converter and dtype were specified \"\n \"for column {0} - only the converter will \"\n \"be used\").format(c), ParserWarning,\n stacklevel=7)\n\n try:\n values = lib.map_infer(values, conv_f)\n except ValueError:\n mask = algorithms.isin(\n values, list(na_values)).view(np.uint8)\n values = lib.map_infer_mask(values, conv_f, mask)\n\n cvals, na_count = self._infer_types(\n values, set(col_na_values) | col_na_fvalues,\n try_num_bool=False)\n else:\n # skip inference if specified dtype is object\n try_num_bool = not (cast_type and is_string_dtype(cast_type))\n\n # general type inference and conversion\n cvals, na_count = self._infer_types(\n values, set(col_na_values) | col_na_fvalues,\n try_num_bool)\n\n # type specified in dtype param\n if cast_type and not is_dtype_equal(cvals, cast_type):\n cvals = self._cast_types(cvals, cast_type, c)\n\n result[c] = cvals\n if verbose and na_count:\n print('Filled %d NA values in column %s' % (na_count, str(c)))\n return result\n\n def _infer_types(self, values, na_values, try_num_bool=True):\n \"\"\"\n Infer types of values, possibly casting\n\n Parameters\n ----------\n 
values : ndarray\n na_values : set\n try_num_bool : bool, default try\n try to cast values to numeric (first preference) or boolean\n\n Returns:\n --------\n converted : ndarray\n na_count : int\n \"\"\"\n na_count = 0\n if issubclass(values.dtype.type, (np.number, np.bool_)):\n mask = algorithms.isin(values, list(na_values))\n na_count = mask.sum()\n if na_count > 0:\n if is_integer_dtype(values):\n values = values.astype(np.float64)\n np.putmask(values, mask, np.nan)\n return values, na_count\n\n if try_num_bool:\n try:\n result = lib.maybe_convert_numeric(values, na_values, False)\n na_count = isna(result).sum()\n except Exception:\n result = values\n if values.dtype == np.object_:\n na_count = parsers.sanitize_objects(result,\n na_values, False)\n else:\n result = values\n if values.dtype == np.object_:\n na_count = parsers.sanitize_objects(values, na_values, False)\n\n if result.dtype == np.object_ and try_num_bool:\n result = libops.maybe_convert_bool(np.asarray(values),\n true_values=self.true_values,\n false_values=self.false_values)\n\n return result, na_count\n\n def _cast_types(self, values, cast_type, column):\n \"\"\"\n Cast values to specified type\n\n Parameters\n ----------\n values : ndarray\n cast_type : string or np.dtype\n dtype to cast values to\n column : string\n column name - used only for error reporting\n\n Returns\n -------\n converted : ndarray\n \"\"\"\n\n if is_categorical_dtype(cast_type):\n known_cats = (isinstance(cast_type, CategoricalDtype) and\n cast_type.categories is not None)\n\n if not is_object_dtype(values) and not known_cats:\n # XXX this is for consistency with\n # c-parser which parses all categories\n # as strings\n values = astype_nansafe(values, str)\n\n cats = Index(values).unique().dropna()\n values = Categorical._from_inferred_categories(\n cats, cats.get_indexer(values), cast_type\n )\n\n else:\n try:\n values = astype_nansafe(values, cast_type,\n copy=True, skipna=True)\n except ValueError:\n raise ValueError(\"Unable to convert column %s to \"\n \"type %s\" % (column, cast_type))\n return values\n\n def _do_date_conversions(self, names, data):\n # returns data, columns\n\n if self.parse_dates is not None:\n data, names = _process_date_conversion(\n data, self._date_conv, self.parse_dates, self.index_col,\n self.index_names, names, keep_date_col=self.keep_date_col)\n\n return names, data\n\n\nclass CParserWrapper(ParserBase):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, src, **kwds):\n self.kwds = kwds\n kwds = kwds.copy()\n\n ParserBase.__init__(self, kwds)\n\n if (kwds.get('compression') is None\n and 'utf-16' in (kwds.get('encoding') or '')):\n # if source is utf-16 plain text, convert source to utf-8\n if isinstance(src, compat.string_types):\n src = open(src, 'rb')\n self.handles.append(src)\n src = UTF8Recoder(src, kwds['encoding'])\n kwds['encoding'] = 'utf-8'\n\n # #2442\n kwds['allow_leading_cols'] = self.index_col is not False\n\n # GH20529, validate usecol arg before TextReader\n self.usecols, self.usecols_dtype = _validate_usecols_arg(\n kwds['usecols'])\n kwds['usecols'] = self.usecols\n\n self._reader = parsers.TextReader(src, **kwds)\n self.unnamed_cols = self._reader.unnamed_cols\n\n passed_names = self.names is None\n\n if self._reader.header is None:\n self.names = None\n else:\n if len(self._reader.header) > 1:\n # we have a multi index in the columns\n self.names, self.index_names, self.col_names, passed_names = (\n self._extract_multi_indexer_columns(\n self._reader.header, self.index_names, self.col_names,\n 
passed_names\n )\n )\n else:\n self.names = list(self._reader.header[0])\n\n if self.names is None:\n if self.prefix:\n self.names = ['%s%d' % (self.prefix, i)\n for i in range(self._reader.table_width)]\n else:\n self.names = lrange(self._reader.table_width)\n\n # gh-9755\n #\n # need to set orig_names here first\n # so that proper indexing can be done\n # with _set_noconvert_columns\n #\n # once names has been filtered, we will\n # then set orig_names again to names\n self.orig_names = self.names[:]\n\n if self.usecols:\n usecols = _evaluate_usecols(self.usecols, self.orig_names)\n\n # GH 14671\n if (self.usecols_dtype == 'string' and\n not set(usecols).issubset(self.orig_names)):\n _validate_usecols_names(usecols, self.orig_names)\n\n if len(self.names) > len(usecols):\n self.names = [n for i, n in enumerate(self.names)\n if (i in usecols or n in usecols)]\n\n if len(self.names) < len(usecols):\n _validate_usecols_names(usecols, self.names)\n\n self._set_noconvert_columns()\n\n self.orig_names = self.names\n\n if not self._has_complex_date_col:\n if (self._reader.leading_cols == 0 and\n _is_index_col(self.index_col)):\n\n self._name_processed = True\n (index_names, self.names,\n self.index_col) = _clean_index_names(self.names,\n self.index_col,\n self.unnamed_cols)\n\n if self.index_names is None:\n self.index_names = index_names\n\n if self._reader.header is None and not passed_names:\n self.index_names = [None] * len(self.index_names)\n\n self._implicit_index = self._reader.leading_cols > 0\n\n def close(self):\n for f in self.handles:\n f.close()\n\n # close additional handles opened by C parser (for compression)\n try:\n self._reader.close()\n except ValueError:\n pass\n\n def _set_noconvert_columns(self):\n \"\"\"\n Set the columns that should not undergo dtype conversions.\n\n Currently, any column that is involved with date parsing will not\n undergo such conversions.\n \"\"\"\n names = self.orig_names\n if self.usecols_dtype == 'integer':\n # A set of integers will be converted to a list in\n # the correct order every single time.\n usecols = list(self.usecols)\n usecols.sort()\n elif (callable(self.usecols) or\n self.usecols_dtype not in ('empty', None)):\n # The names attribute should have the correct columns\n # in the proper order for indexing with parse_dates.\n usecols = self.names[:]\n else:\n # Usecols is empty.\n usecols = None\n\n def _set(x):\n if usecols is not None and is_integer(x):\n x = usecols[x]\n\n if not is_integer(x):\n x = names.index(x)\n\n self._reader.set_noconvert(x)\n\n if isinstance(self.parse_dates, list):\n for val in self.parse_dates:\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif isinstance(self.parse_dates, dict):\n for val in self.parse_dates.values():\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif self.parse_dates:\n if isinstance(self.index_col, list):\n for k in self.index_col:\n _set(k)\n elif self.index_col is not None:\n _set(self.index_col)\n\n def set_error_bad_lines(self, status):\n self._reader.set_error_bad_lines(int(status))\n\n def read(self, nrows=None):\n try:\n data = self._reader.read(nrows)\n except StopIteration:\n if self._first_chunk:\n self._first_chunk = False\n names = self._maybe_dedup_names(self.orig_names)\n index, columns, col_dict = _get_empty_meta(\n names, self.index_col, self.index_names,\n dtype=self.kwds.get('dtype'))\n columns = self._maybe_make_multi_index_columns(\n columns, self.col_names)\n\n if self.usecols is not None:\n columns = 
self._filter_usecols(columns)\n\n col_dict = dict(filter(lambda item: item[0] in columns,\n col_dict.items()))\n\n return index, columns, col_dict\n\n else:\n raise\n\n # Done with first read, next time raise StopIteration\n self._first_chunk = False\n\n names = self.names\n\n if self._reader.leading_cols:\n if self._has_complex_date_col:\n raise NotImplementedError('file structure not yet supported')\n\n # implicit index, no index names\n arrays = []\n\n for i in range(self._reader.leading_cols):\n if self.index_col is None:\n values = data.pop(i)\n else:\n values = data.pop(self.index_col[i])\n\n values = self._maybe_parse_dates(values, i,\n try_parse_dates=True)\n arrays.append(values)\n\n index = ensure_index_from_sequences(arrays)\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n names = self._maybe_dedup_names(names)\n\n # rename dict keys\n data = sorted(data.items())\n data = {k: v for k, (i, v) in zip(names, data)}\n\n names, data = self._do_date_conversions(names, data)\n\n else:\n # rename dict keys\n data = sorted(data.items())\n\n # ugh, mutation\n names = list(self.orig_names)\n names = self._maybe_dedup_names(names)\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n # columns as list\n alldata = [x[1] for x in data]\n\n data = {k: v for k, (i, v) in zip(names, data)}\n\n names, data = self._do_date_conversions(names, data)\n index, names = self._make_index(data, alldata, names)\n\n # maybe create a mi on the columns\n names = self._maybe_make_multi_index_columns(names, self.col_names)\n\n return index, names, data\n\n def _filter_usecols(self, names):\n # hackish\n usecols = _evaluate_usecols(self.usecols, names)\n if usecols is not None and len(names) != len(usecols):\n names = [name for i, name in enumerate(names)\n if i in usecols or name in usecols]\n return names\n\n def _get_index_names(self):\n names = list(self._reader.header[0])\n idx_names = None\n\n if self._reader.leading_cols == 0 and self.index_col is not None:\n (idx_names, names,\n self.index_col) = _clean_index_names(names, self.index_col,\n self.unnamed_cols)\n\n return names, idx_names\n\n def _maybe_parse_dates(self, values, index, try_parse_dates=True):\n if try_parse_dates and self._should_parse_dates(index):\n values = self._date_conv(values)\n return values\n\n\ndef TextParser(*args, **kwds):\n \"\"\"\n Converts lists of lists/tuples into DataFrames with proper type inference\n and optional (e.g. string to datetime) conversion. Also enables iterating\n lazily over chunks of large files\n\n Parameters\n ----------\n data : file-like object or list\n delimiter : separator character to use\n dialect : str or csv.Dialect instance, default None\n Ignored if delimiter is longer than 1 character\n names : sequence, default\n header : int, default 0\n Row to use to parse column labels. Defaults to the first row. 
Prior\n rows will be discarded\n index_col : int or list, default None\n Column or columns to use as the (possibly hierarchical) index\n has_index_names: boolean, default False\n True if the cols defined in index_col have an index name and are\n not in the header\n na_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN.\n keep_default_na : bool, default True\n thousands : str, default None\n Thousands separator\n comment : str, default None\n Comment out remainder of line\n parse_dates : boolean, default False\n keep_date_col : boolean, default False\n date_parser : function, default None\n skiprows : list of integers\n Row numbers to skip\n skipfooter : int\n Number of line at bottom of file to skip\n converters : dict, default None\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n encoding : string, default None\n Encoding to use for UTF when reading/writing (ex. 'utf-8')\n squeeze : boolean, default False\n returns Series if only one column\n infer_datetime_format: boolean, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. If the format\n can be inferred, there often will be a large parsing speed-up.\n float_precision : string, default None\n Specifies which converter the C engine should use for floating-point\n values. The options are None for the ordinary converter,\n 'high' for the high-precision converter, and 'round_trip' for the\n round-trip converter.\n \"\"\"\n kwds['engine'] = 'python'\n return TextFileReader(*args, **kwds)\n\n\ndef count_empty_vals(vals):\n return sum(1 for v in vals if v == '' or v is None)\n\n\nclass PythonParser(ParserBase):\n\n def __init__(self, f, **kwds):\n \"\"\"\n Workhorse function for processing nested list into DataFrame\n\n Should be replaced by np.genfromtxt eventually?\n \"\"\"\n ParserBase.__init__(self, kwds)\n\n self.data = None\n self.buf = []\n self.pos = 0\n self.line_pos = 0\n\n self.encoding = kwds['encoding']\n self.compression = kwds['compression']\n self.memory_map = kwds['memory_map']\n self.skiprows = kwds['skiprows']\n\n if callable(self.skiprows):\n self.skipfunc = self.skiprows\n else:\n self.skipfunc = lambda x: x in self.skiprows\n\n self.skipfooter = _validate_skipfooter_arg(kwds['skipfooter'])\n self.delimiter = kwds['delimiter']\n\n self.quotechar = kwds['quotechar']\n if isinstance(self.quotechar, compat.text_type):\n self.quotechar = str(self.quotechar)\n\n self.escapechar = kwds['escapechar']\n self.doublequote = kwds['doublequote']\n self.skipinitialspace = kwds['skipinitialspace']\n self.lineterminator = kwds['lineterminator']\n self.quoting = kwds['quoting']\n self.usecols, _ = _validate_usecols_arg(kwds['usecols'])\n self.skip_blank_lines = kwds['skip_blank_lines']\n\n self.warn_bad_lines = kwds['warn_bad_lines']\n self.error_bad_lines = kwds['error_bad_lines']\n\n self.names_passed = kwds['names'] or None\n\n self.has_index_names = False\n if 'has_index_names' in kwds:\n self.has_index_names = kwds['has_index_names']\n\n self.verbose = kwds['verbose']\n self.converters = kwds['converters']\n self.dtype = kwds['dtype']\n\n self.thousands = kwds['thousands']\n self.decimal = kwds['decimal']\n\n self.comment = kwds['comment']\n self._comment_lines = []\n\n mode = 'r' if PY3 else 'rb'\n f, handles = 
_get_handle(f, mode, encoding=self.encoding,\n compression=self.compression,\n memory_map=self.memory_map)\n self.handles.extend(handles)\n\n # Set self.data to something that can read lines.\n if hasattr(f, 'readline'):\n self._make_reader(f)\n else:\n self.data = f\n\n # Get columns in two steps: infer from data, then\n # infer column indices from self.usecols if it is specified.\n self._col_indices = None\n (self.columns, self.num_original_columns,\n self.unnamed_cols) = self._infer_columns()\n\n # Now self.columns has the set of columns that we will process.\n # The original set is stored in self.original_columns.\n if len(self.columns) > 1:\n # we are processing a multi index column\n self.columns, self.index_names, self.col_names, _ = (\n self._extract_multi_indexer_columns(\n self.columns, self.index_names, self.col_names\n )\n )\n # Update list of original names to include all indices.\n self.num_original_columns = len(self.columns)\n else:\n self.columns = self.columns[0]\n\n # get popped off for index\n self.orig_names = list(self.columns)\n\n # needs to be cleaned/refactored\n # multiple date column thing turning into a real spaghetti factory\n\n if not self._has_complex_date_col:\n (index_names, self.orig_names, self.columns) = (\n self._get_index_name(self.columns))\n self._name_processed = True\n if self.index_names is None:\n self.index_names = index_names\n\n if self.parse_dates:\n self._no_thousands_columns = self._set_no_thousands_columns()\n else:\n self._no_thousands_columns = None\n\n if len(self.decimal) != 1:\n raise ValueError('Only length-1 decimal markers supported')\n\n if self.thousands is None:\n self.nonnum = re.compile('[^-^0-9^%s]+' % self.decimal)\n else:\n self.nonnum = re.compile('[^-^0-9^%s^%s]+' % (self.thousands,\n self.decimal))\n\n def _set_no_thousands_columns(self):\n # Create a set of column ids that are not to be stripped of thousands\n # operators.\n noconvert_columns = set()\n\n def _set(x):\n if is_integer(x):\n noconvert_columns.add(x)\n else:\n noconvert_columns.add(self.columns.index(x))\n\n if isinstance(self.parse_dates, list):\n for val in self.parse_dates:\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif isinstance(self.parse_dates, dict):\n for val in self.parse_dates.values():\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif self.parse_dates:\n if isinstance(self.index_col, list):\n for k in self.index_col:\n _set(k)\n elif self.index_col is not None:\n _set(self.index_col)\n\n return noconvert_columns\n\n def _make_reader(self, f):\n sep = self.delimiter\n\n if sep is None or len(sep) == 1:\n if self.lineterminator:\n raise ValueError('Custom line terminators not supported in '\n 'python parser (yet)')\n\n class MyDialect(csv.Dialect):\n delimiter = self.delimiter\n quotechar = self.quotechar\n escapechar = self.escapechar\n doublequote = self.doublequote\n skipinitialspace = self.skipinitialspace\n quoting = self.quoting\n lineterminator = '\\n'\n\n dia = MyDialect\n\n sniff_sep = True\n\n if sep is not None:\n sniff_sep = False\n dia.delimiter = sep\n # attempt to sniff the delimiter\n if sniff_sep:\n line = f.readline()\n while self.skipfunc(self.pos):\n self.pos += 1\n line = f.readline()\n\n line = self._check_comments([line])[0]\n\n self.pos += 1\n self.line_pos += 1\n sniffed = csv.Sniffer().sniff(line)\n dia.delimiter = sniffed.delimiter\n if self.encoding is not None:\n self.buf.extend(list(\n UnicodeReader(StringIO(line),\n dialect=dia,\n 
encoding=self.encoding)))\n else:\n self.buf.extend(list(csv.reader(StringIO(line),\n dialect=dia)))\n\n if self.encoding is not None:\n reader = UnicodeReader(f, dialect=dia,\n encoding=self.encoding,\n strict=True)\n else:\n reader = csv.reader(f, dialect=dia,\n strict=True)\n\n else:\n def _read():\n line = f.readline()\n\n if compat.PY2 and self.encoding:\n line = line.decode(self.encoding)\n\n pat = re.compile(sep)\n yield pat.split(line.strip())\n for line in f:\n yield pat.split(line.strip())\n reader = _read()\n\n self.data = reader\n\n def read(self, rows=None):\n try:\n content = self._get_lines(rows)\n except StopIteration:\n if self._first_chunk:\n content = []\n else:\n raise\n\n # done with first read, next time raise StopIteration\n self._first_chunk = False\n\n columns = list(self.orig_names)\n if not len(content): # pragma: no cover\n # DataFrame with the right metadata, even though it's length 0\n names = self._maybe_dedup_names(self.orig_names)\n index, columns, col_dict = _get_empty_meta(\n names, self.index_col, self.index_names, self.dtype)\n columns = self._maybe_make_multi_index_columns(\n columns, self.col_names)\n return index, columns, col_dict\n\n # handle new style for names in index\n count_empty_content_vals = count_empty_vals(content[0])\n indexnamerow = None\n if self.has_index_names and count_empty_content_vals == len(columns):\n indexnamerow = content[0]\n content = content[1:]\n\n alldata = self._rows_to_cols(content)\n data = self._exclude_implicit_index(alldata)\n\n columns = self._maybe_dedup_names(self.columns)\n columns, data = self._do_date_conversions(columns, data)\n\n data = self._convert_data(data)\n index, columns = self._make_index(data, alldata, columns, indexnamerow)\n\n return index, columns, data\n\n def _exclude_implicit_index(self, alldata):\n names = self._maybe_dedup_names(self.orig_names)\n\n if self._implicit_index:\n excl_indices = self.index_col\n\n data = {}\n offset = 0\n for i, col in enumerate(names):\n while i + offset in excl_indices:\n offset += 1\n data[col] = alldata[i + offset]\n else:\n data = {k: v for k, v in zip(names, alldata)}\n\n return data\n\n # legacy\n def get_chunk(self, size=None):\n if size is None:\n size = self.chunksize\n return self.read(rows=size)\n\n def _convert_data(self, data):\n # apply converters\n def _clean_mapping(mapping):\n \"converts col numbers to names\"\n clean = {}\n for col, v in compat.iteritems(mapping):\n if isinstance(col, int) and col not in self.orig_names:\n col = self.orig_names[col]\n clean[col] = v\n return clean\n\n clean_conv = _clean_mapping(self.converters)\n if not isinstance(self.dtype, dict):\n # handles single dtype applied to all columns\n clean_dtypes = self.dtype\n else:\n clean_dtypes = _clean_mapping(self.dtype)\n\n # Apply NA values.\n clean_na_values = {}\n clean_na_fvalues = {}\n\n if isinstance(self.na_values, dict):\n for col in self.na_values:\n na_value = self.na_values[col]\n na_fvalue = self.na_fvalues[col]\n\n if isinstance(col, int) and col not in self.orig_names:\n col = self.orig_names[col]\n\n clean_na_values[col] = na_value\n clean_na_fvalues[col] = na_fvalue\n else:\n clean_na_values = self.na_values\n clean_na_fvalues = self.na_fvalues\n\n return self._convert_to_ndarrays(data, clean_na_values,\n clean_na_fvalues, self.verbose,\n clean_conv, clean_dtypes)\n\n def _infer_columns(self):\n names = self.names\n num_original_columns = 0\n clear_buffer = True\n unnamed_cols = set()\n\n if self.header is not None:\n header = self.header\n\n if 
isinstance(header, (list, tuple, np.ndarray)):\n have_mi_columns = len(header) > 1\n # we have a mi columns, so read an extra line\n if have_mi_columns:\n header = list(header) + [header[-1] + 1]\n else:\n have_mi_columns = False\n header = [header]\n\n columns = []\n for level, hr in enumerate(header):\n try:\n line = self._buffered_line()\n\n while self.line_pos <= hr:\n line = self._next_line()\n\n except StopIteration:\n if self.line_pos < hr:\n raise ValueError(\n 'Passed header=%s but only %d lines in file'\n % (hr, self.line_pos + 1))\n\n # We have an empty file, so check\n # if columns are provided. That will\n # serve as the 'line' for parsing\n if have_mi_columns and hr > 0:\n if clear_buffer:\n self._clear_buffer()\n columns.append([None] * len(columns[-1]))\n return columns, num_original_columns, unnamed_cols\n\n if not self.names:\n raise EmptyDataError(\n \"No columns to parse from file\")\n\n line = self.names[:]\n\n this_columns = []\n this_unnamed_cols = []\n\n for i, c in enumerate(line):\n if c == '':\n if have_mi_columns:\n col_name = (\"Unnamed: {i}_level_{level}\"\n .format(i=i, level=level))\n else:\n col_name = \"Unnamed: {i}\".format(i=i)\n\n this_unnamed_cols.append(i)\n this_columns.append(col_name)\n else:\n this_columns.append(c)\n\n if not have_mi_columns and self.mangle_dupe_cols:\n counts = defaultdict(int)\n\n for i, col in enumerate(this_columns):\n cur_count = counts[col]\n\n while cur_count > 0:\n counts[col] = cur_count + 1\n col = \"%s.%d\" % (col, cur_count)\n cur_count = counts[col]\n\n this_columns[i] = col\n counts[col] = cur_count + 1\n elif have_mi_columns:\n\n # if we have grabbed an extra line, but its not in our\n # format so save in the buffer, and create an blank extra\n # line for the rest of the parsing code\n if hr == header[-1]:\n lc = len(this_columns)\n ic = (len(self.index_col)\n if self.index_col is not None else 0)\n unnamed_count = len(this_unnamed_cols)\n\n if lc != unnamed_count and lc - ic > unnamed_count:\n clear_buffer = False\n this_columns = [None] * lc\n self.buf = [self.buf[-1]]\n\n columns.append(this_columns)\n unnamed_cols.update({this_columns[i]\n for i in this_unnamed_cols})\n\n if len(columns) == 1:\n num_original_columns = len(this_columns)\n\n if clear_buffer:\n self._clear_buffer()\n\n if names is not None:\n if ((self.usecols is not None and\n len(names) != len(self.usecols)) or\n (self.usecols is None and\n len(names) != len(columns[0]))):\n raise ValueError('Number of passed names did not match '\n 'number of header fields in the file')\n if len(columns) > 1:\n raise TypeError('Cannot pass names with multi-index '\n 'columns')\n\n if self.usecols is not None:\n # Set _use_cols. 
We don't store columns because they are\n # overwritten.\n self._handle_usecols(columns, names)\n else:\n self._col_indices = None\n num_original_columns = len(names)\n columns = [names]\n else:\n columns = self._handle_usecols(columns, columns[0])\n else:\n try:\n line = self._buffered_line()\n\n except StopIteration:\n if not names:\n raise EmptyDataError(\n \"No columns to parse from file\")\n\n line = names[:]\n\n ncols = len(line)\n num_original_columns = ncols\n\n if not names:\n if self.prefix:\n columns = [['%s%d' % (self.prefix, i)\n for i in range(ncols)]]\n else:\n columns = [lrange(ncols)]\n columns = self._handle_usecols(columns, columns[0])\n else:\n if self.usecols is None or len(names) >= num_original_columns:\n columns = self._handle_usecols([names], names)\n num_original_columns = len(names)\n else:\n if (not callable(self.usecols) and\n len(names) != len(self.usecols)):\n raise ValueError(\n 'Number of passed names did not match number of '\n 'header fields in the file'\n )\n # Ignore output but set used columns.\n self._handle_usecols([names], names)\n columns = [names]\n num_original_columns = ncols\n\n return columns, num_original_columns, unnamed_cols\n\n def _handle_usecols(self, columns, usecols_key):\n \"\"\"\n Sets self._col_indices\n\n usecols_key is used if there are string usecols.\n \"\"\"\n if self.usecols is not None:\n if callable(self.usecols):\n col_indices = _evaluate_usecols(self.usecols, usecols_key)\n elif any(isinstance(u, string_types) for u in self.usecols):\n if len(columns) > 1:\n raise ValueError(\"If using multiple headers, usecols must \"\n \"be integers.\")\n col_indices = []\n\n for col in self.usecols:\n if isinstance(col, string_types):\n try:\n col_indices.append(usecols_key.index(col))\n except ValueError:\n _validate_usecols_names(self.usecols, usecols_key)\n else:\n col_indices.append(col)\n else:\n col_indices = self.usecols\n\n columns = [[n for i, n in enumerate(column) if i in col_indices]\n for column in columns]\n self._col_indices = col_indices\n return columns\n\n def _buffered_line(self):\n \"\"\"\n Return a line from buffer, filling buffer if required.\n \"\"\"\n if len(self.buf) > 0:\n return self.buf[0]\n else:\n return self._next_line()\n\n def _check_for_bom(self, first_row):\n \"\"\"\n Checks whether the file begins with the BOM character.\n If it does, remove it. In addition, if there is quoting\n in the field subsequent to the BOM, remove it as well\n because it technically takes place at the beginning of\n the name, not the middle of it.\n \"\"\"\n # first_row will be a list, so we need to check\n # that that list is not empty before proceeding.\n if not first_row:\n return first_row\n\n # The first element of this row is the one that could have the\n # BOM that we want to remove. 
Check that the first element is a\n # string before proceeding.\n if not isinstance(first_row[0], compat.string_types):\n return first_row\n\n # Check that the string is not empty, as that would\n # obviously not have a BOM at the start of it.\n if not first_row[0]:\n return first_row\n\n # Since the string is non-empty, check that it does\n # in fact begin with a BOM.\n first_elt = first_row[0][0]\n\n # This is to avoid warnings we get in Python 2.x if\n # we find ourselves comparing with non-Unicode\n if compat.PY2 and not isinstance(first_elt, unicode): # noqa\n try:\n first_elt = u(first_elt)\n except UnicodeDecodeError:\n return first_row\n\n if first_elt != _BOM:\n return first_row\n\n first_row = first_row[0]\n\n if len(first_row) > 1 and first_row[1] == self.quotechar:\n start = 2\n quote = first_row[1]\n end = first_row[2:].index(quote) + 2\n\n # Extract the data between the quotation marks\n new_row = first_row[start:end]\n\n # Extract any remaining data after the second\n # quotation mark.\n if len(first_row) > end + 1:\n new_row += first_row[end + 1:]\n return [new_row]\n elif len(first_row) > 1:\n return [first_row[1:]]\n else:\n # First row is just the BOM, so we\n # return an empty string.\n return [\"\"]\n\n def _is_line_empty(self, line):\n \"\"\"\n Check if a line is empty or not.\n\n Parameters\n ----------\n line : str, array-like\n The line of data to check.\n\n Returns\n -------\n boolean : Whether or not the line is empty.\n \"\"\"\n return not line or all(not x for x in line)\n\n def _next_line(self):\n if isinstance(self.data, list):\n while self.skipfunc(self.pos):\n self.pos += 1\n\n while True:\n try:\n line = self._check_comments([self.data[self.pos]])[0]\n self.pos += 1\n # either uncommented or blank to begin with\n if (not self.skip_blank_lines and\n (self._is_line_empty(\n self.data[self.pos - 1]) or line)):\n break\n elif self.skip_blank_lines:\n ret = self._remove_empty_lines([line])\n if ret:\n line = ret[0]\n break\n except IndexError:\n raise StopIteration\n else:\n while self.skipfunc(self.pos):\n self.pos += 1\n next(self.data)\n\n while True:\n orig_line = self._next_iter_line(row_num=self.pos + 1)\n self.pos += 1\n\n if orig_line is not None:\n line = self._check_comments([orig_line])[0]\n\n if self.skip_blank_lines:\n ret = self._remove_empty_lines([line])\n\n if ret:\n line = ret[0]\n break\n elif self._is_line_empty(orig_line) or line:\n break\n\n # This was the first line of the file,\n # which could contain the BOM at the\n # beginning of it.\n if self.pos == 1:\n line = self._check_for_bom(line)\n\n self.line_pos += 1\n self.buf.append(line)\n return line\n\n def _alert_malformed(self, msg, row_num):\n \"\"\"\n Alert a user about a malformed row.\n\n If `self.error_bad_lines` is True, the alert will be `ParserError`.\n If `self.warn_bad_lines` is True, the alert will be printed out.\n\n Parameters\n ----------\n msg : The error message to display.\n row_num : The row number where the parsing error occurred.\n Because this row number is displayed, we 1-index,\n even though we 0-index internally.\n \"\"\"\n\n if self.error_bad_lines:\n raise ParserError(msg)\n elif self.warn_bad_lines:\n base = 'Skipping line {row_num}: '.format(row_num=row_num)\n sys.stderr.write(base + msg + '\\n')\n\n def _next_iter_line(self, row_num):\n \"\"\"\n Wrapper around iterating through `self.data` (CSV source).\n\n When a CSV error is raised, we check for specific\n error messages that allow us to customize the\n error message displayed to the user.\n\n 
Parameters\n ----------\n row_num : The row number of the line being parsed.\n \"\"\"\n\n try:\n return next(self.data)\n except csv.Error as e:\n if self.warn_bad_lines or self.error_bad_lines:\n msg = str(e)\n\n if 'NULL byte' in msg:\n msg = ('NULL byte detected. This byte '\n 'cannot be processed in Python\\'s '\n 'native csv library at the moment, '\n 'so please pass in engine=\\'c\\' instead')\n\n if self.skipfooter > 0:\n reason = ('Error could possibly be due to '\n 'parsing errors in the skipped footer rows '\n '(the skipfooter keyword is only applied '\n 'after Python\\'s csv library has parsed '\n 'all rows).')\n msg += '. ' + reason\n\n self._alert_malformed(msg, row_num)\n return None\n\n def _check_comments(self, lines):\n if self.comment is None:\n return lines\n ret = []\n for l in lines:\n rl = []\n for x in l:\n if (not isinstance(x, compat.string_types) or\n self.comment not in x):\n rl.append(x)\n else:\n x = x[:x.find(self.comment)]\n if len(x) > 0:\n rl.append(x)\n break\n ret.append(rl)\n return ret\n\n def _remove_empty_lines(self, lines):\n \"\"\"\n Iterate through the lines and remove any that are\n either empty or contain only one whitespace value\n\n Parameters\n ----------\n lines : array-like\n The array of lines that we are to filter.\n\n Returns\n -------\n filtered_lines : array-like\n The same array of lines with the \"empty\" ones removed.\n \"\"\"\n\n ret = []\n for l in lines:\n # Remove empty lines and lines with only one whitespace value\n if (len(l) > 1 or len(l) == 1 and\n (not isinstance(l[0], compat.string_types) or\n l[0].strip())):\n ret.append(l)\n return ret\n\n def _check_thousands(self, lines):\n if self.thousands is None:\n return lines\n\n return self._search_replace_num_columns(lines=lines,\n search=self.thousands,\n replace='')\n\n def _search_replace_num_columns(self, lines, search, replace):\n ret = []\n for l in lines:\n rl = []\n for i, x in enumerate(l):\n if (not isinstance(x, compat.string_types) or\n search not in x or\n (self._no_thousands_columns and\n i in self._no_thousands_columns) or\n self.nonnum.search(x.strip())):\n rl.append(x)\n else:\n rl.append(x.replace(search, replace))\n ret.append(rl)\n return ret\n\n def _check_decimal(self, lines):\n if self.decimal == _parser_defaults['decimal']:\n return lines\n\n return self._search_replace_num_columns(lines=lines,\n search=self.decimal,\n replace='.')\n\n def _clear_buffer(self):\n self.buf = []\n\n _implicit_index = False\n\n def _get_index_name(self, columns):\n \"\"\"\n Try several cases to get lines:\n\n 0) There are headers on row 0 and row 1 and their\n total summed lengths equals the length of the next line.\n Treat row 0 as columns and row 1 as indices\n 1) Look for implicit index: there are more columns\n on row 1 than row 0. 
If this is true, assume that row\n 1 lists index columns and row 0 lists normal columns.\n 2) Get index from the columns if it was listed.\n \"\"\"\n orig_names = list(columns)\n columns = list(columns)\n\n try:\n line = self._next_line()\n except StopIteration:\n line = None\n\n try:\n next_line = self._next_line()\n except StopIteration:\n next_line = None\n\n # implicitly index_col=0 b/c 1 fewer column names\n implicit_first_cols = 0\n if line is not None:\n # leave it 0, #2442\n # Case 1\n if self.index_col is not False:\n implicit_first_cols = len(line) - self.num_original_columns\n\n # Case 0\n if next_line is not None:\n if len(next_line) == len(line) + self.num_original_columns:\n # column and index names on diff rows\n self.index_col = lrange(len(line))\n self.buf = self.buf[1:]\n\n for c in reversed(line):\n columns.insert(0, c)\n\n # Update list of original names to include all indices.\n orig_names = list(columns)\n self.num_original_columns = len(columns)\n return line, orig_names, columns\n\n if implicit_first_cols > 0:\n # Case 1\n self._implicit_index = True\n if self.index_col is None:\n self.index_col = lrange(implicit_first_cols)\n\n index_name = None\n\n else:\n # Case 2\n (index_name, columns_,\n self.index_col) = _clean_index_names(columns, self.index_col,\n self.unnamed_cols)\n\n return index_name, orig_names, columns\n\n def _rows_to_cols(self, content):\n col_len = self.num_original_columns\n\n if self._implicit_index:\n col_len += len(self.index_col)\n\n max_len = max(len(row) for row in content)\n\n # Check that there are no rows with too many\n # elements in their row (rows with too few\n # elements are padded with NaN).\n if (max_len > col_len and\n self.index_col is not False and\n self.usecols is None):\n\n footers = self.skipfooter if self.skipfooter else 0\n bad_lines = []\n\n iter_content = enumerate(content)\n content_len = len(content)\n content = []\n\n for (i, l) in iter_content:\n actual_len = len(l)\n\n if actual_len > col_len:\n if self.error_bad_lines or self.warn_bad_lines:\n row_num = self.pos - (content_len - i + footers)\n bad_lines.append((row_num, actual_len))\n\n if self.error_bad_lines:\n break\n else:\n content.append(l)\n\n for row_num, actual_len in bad_lines:\n msg = ('Expected %d fields in line %d, saw %d' %\n (col_len, row_num + 1, actual_len))\n if (self.delimiter and\n len(self.delimiter) > 1 and\n self.quoting != csv.QUOTE_NONE):\n # see gh-13374\n reason = ('Error could possibly be due to quotes being '\n 'ignored when a multi-char delimiter is used.')\n msg += '. 
' + reason\n\n self._alert_malformed(msg, row_num + 1)\n\n # see gh-13320\n zipped_content = list(lib.to_object_array(\n content, min_width=col_len).T)\n\n if self.usecols:\n if self._implicit_index:\n zipped_content = [\n a for i, a in enumerate(zipped_content)\n if (i < len(self.index_col) or\n i - len(self.index_col) in self._col_indices)]\n else:\n zipped_content = [a for i, a in enumerate(zipped_content)\n if i in self._col_indices]\n return zipped_content\n\n def _get_lines(self, rows=None):\n lines = self.buf\n new_rows = None\n\n # already fetched some number\n if rows is not None:\n # we already have the lines in the buffer\n if len(self.buf) >= rows:\n new_rows, self.buf = self.buf[:rows], self.buf[rows:]\n\n # need some lines\n else:\n rows -= len(self.buf)\n\n if new_rows is None:\n if isinstance(self.data, list):\n if self.pos > len(self.data):\n raise StopIteration\n if rows is None:\n new_rows = self.data[self.pos:]\n new_pos = len(self.data)\n else:\n new_rows = self.data[self.pos:self.pos + rows]\n new_pos = self.pos + rows\n\n # Check for stop rows. n.b.: self.skiprows is a set.\n if self.skiprows:\n new_rows = [row for i, row in enumerate(new_rows)\n if not self.skipfunc(i + self.pos)]\n\n lines.extend(new_rows)\n self.pos = new_pos\n\n else:\n new_rows = []\n try:\n if rows is not None:\n for _ in range(rows):\n new_rows.append(next(self.data))\n lines.extend(new_rows)\n else:\n rows = 0\n\n while True:\n new_row = self._next_iter_line(\n row_num=self.pos + rows + 1)\n rows += 1\n\n if new_row is not None:\n new_rows.append(new_row)\n\n except StopIteration:\n if self.skiprows:\n new_rows = [row for i, row in enumerate(new_rows)\n if not self.skipfunc(i + self.pos)]\n lines.extend(new_rows)\n if len(lines) == 0:\n raise\n self.pos += len(new_rows)\n\n self.buf = []\n else:\n lines = new_rows\n\n if self.skipfooter:\n lines = lines[:-self.skipfooter]\n\n lines = self._check_comments(lines)\n if self.skip_blank_lines:\n lines = self._remove_empty_lines(lines)\n lines = self._check_thousands(lines)\n return self._check_decimal(lines)\n\n\ndef _make_date_converter(date_parser=None, dayfirst=False,\n infer_datetime_format=False):\n def converter(*date_cols):\n if date_parser is None:\n strs = _concat_date_cols(date_cols)\n\n try:\n return tools.to_datetime(\n ensure_object(strs),\n utc=None,\n box=False,\n dayfirst=dayfirst,\n errors='ignore',\n infer_datetime_format=infer_datetime_format\n )\n except ValueError:\n return tools.to_datetime(\n parsing.try_parse_dates(strs, dayfirst=dayfirst))\n else:\n try:\n result = tools.to_datetime(\n date_parser(*date_cols), errors='ignore')\n if isinstance(result, datetime.datetime):\n raise Exception('scalar parser')\n return result\n except Exception:\n try:\n return tools.to_datetime(\n parsing.try_parse_dates(_concat_date_cols(date_cols),\n parser=date_parser,\n dayfirst=dayfirst),\n errors='ignore')\n except Exception:\n return generic_parser(date_parser, *date_cols)\n\n return converter\n\n\ndef _process_date_conversion(data_dict, converter, parse_spec,\n index_col, index_names, columns,\n keep_date_col=False):\n def _isindex(colspec):\n return ((isinstance(index_col, list) and\n colspec in index_col) or\n (isinstance(index_names, list) and\n colspec in index_names))\n\n new_cols = []\n new_data = {}\n\n orig_names = columns\n columns = list(columns)\n\n date_cols = set()\n\n if parse_spec is None or isinstance(parse_spec, bool):\n return data_dict, columns\n\n if isinstance(parse_spec, list):\n # list of column lists\n for 
colspec in parse_spec:\n if is_scalar(colspec):\n if isinstance(colspec, int) and colspec not in data_dict:\n colspec = orig_names[colspec]\n if _isindex(colspec):\n continue\n data_dict[colspec] = converter(data_dict[colspec])\n else:\n new_name, col, old_names = _try_convert_dates(\n converter, colspec, data_dict, orig_names)\n if new_name in data_dict:\n raise ValueError('New date column already in dict %s' %\n new_name)\n new_data[new_name] = col\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n elif isinstance(parse_spec, dict):\n # dict of new name to column list\n for new_name, colspec in compat.iteritems(parse_spec):\n if new_name in data_dict:\n raise ValueError('Date column %s already in dict' %\n new_name)\n\n _, col, old_names = _try_convert_dates(converter, colspec,\n data_dict, orig_names)\n\n new_data[new_name] = col\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n data_dict.update(new_data)\n new_cols.extend(columns)\n\n if not keep_date_col:\n for c in list(date_cols):\n data_dict.pop(c)\n new_cols.remove(c)\n\n return data_dict, new_cols\n\n\ndef _try_convert_dates(parser, colspec, data_dict, columns):\n colset = set(columns)\n colnames = []\n\n for c in colspec:\n if c in colset:\n colnames.append(c)\n elif isinstance(c, int) and c not in columns:\n colnames.append(columns[c])\n else:\n colnames.append(c)\n\n new_name = '_'.join(str(x) for x in colnames)\n to_parse = [data_dict[c] for c in colnames if c in data_dict]\n\n new_col = parser(*to_parse)\n return new_name, new_col, colnames\n\n\ndef _clean_na_values(na_values, keep_default_na=True):\n\n if na_values is None:\n if keep_default_na:\n na_values = _NA_VALUES\n else:\n na_values = set()\n na_fvalues = set()\n elif isinstance(na_values, dict):\n old_na_values = na_values.copy()\n na_values = {} # Prevent aliasing.\n\n # Convert the values in the na_values dictionary\n # into array-likes for further use. 
This is also\n # where we append the default NaN values, provided\n # that `keep_default_na=True`.\n for k, v in compat.iteritems(old_na_values):\n if not is_list_like(v):\n v = [v]\n\n if keep_default_na:\n v = set(v) | _NA_VALUES\n\n na_values[k] = v\n na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}\n else:\n if not is_list_like(na_values):\n na_values = [na_values]\n na_values = _stringify_na_values(na_values)\n if keep_default_na:\n na_values = na_values | _NA_VALUES\n\n na_fvalues = _floatify_na_values(na_values)\n\n return na_values, na_fvalues\n\n\ndef _clean_index_names(columns, index_col, unnamed_cols):\n if not _is_index_col(index_col):\n return None, columns, index_col\n\n columns = list(columns)\n\n cp_cols = list(columns)\n index_names = []\n\n # don't mutate\n index_col = list(index_col)\n\n for i, c in enumerate(index_col):\n if isinstance(c, compat.string_types):\n index_names.append(c)\n for j, name in enumerate(cp_cols):\n if name == c:\n index_col[i] = j\n columns.remove(name)\n break\n else:\n name = cp_cols[c]\n columns.remove(name)\n index_names.append(name)\n\n # Only clean index names that were placeholders.\n for i, name in enumerate(index_names):\n if isinstance(name, compat.string_types) and name in unnamed_cols:\n index_names[i] = None\n\n return index_names, columns, index_col\n\n\ndef _get_empty_meta(columns, index_col, index_names, dtype=None):\n columns = list(columns)\n\n # Convert `dtype` to a defaultdict of some kind.\n # This will enable us to write `dtype[col_name]`\n # without worrying about KeyError issues later on.\n if not isinstance(dtype, dict):\n # if dtype == None, default will be np.object.\n default_dtype = dtype or np.object\n dtype = defaultdict(lambda: default_dtype)\n else:\n # Save a copy of the dictionary.\n _dtype = dtype.copy()\n dtype = defaultdict(lambda: np.object)\n\n # Convert column indexes to column names.\n for k, v in compat.iteritems(_dtype):\n col = columns[k] if is_integer(k) else k\n dtype[col] = v\n\n # Even though we have no data, the \"index\" of the empty DataFrame\n # could for example still be an empty MultiIndex. Thus, we need to\n # check whether we have any index columns specified, via either:\n #\n # 1) index_col (column indices)\n # 2) index_names (column names)\n #\n # Both must be non-null to ensure a successful construction. 
Otherwise,\n # we have to create a generic emtpy Index.\n if (index_col is None or index_col is False) or index_names is None:\n index = Index([])\n else:\n data = [Series([], dtype=dtype[name]) for name in index_names]\n index = ensure_index_from_sequences(data, names=index_names)\n index_col.sort()\n\n for i, n in enumerate(index_col):\n columns.pop(n - i)\n\n col_dict = {col_name: Series([], dtype=dtype[col_name])\n for col_name in columns}\n\n return index, columns, col_dict\n\n\ndef _floatify_na_values(na_values):\n # create float versions of the na_values\n result = set()\n for v in na_values:\n try:\n v = float(v)\n if not np.isnan(v):\n result.add(v)\n except (TypeError, ValueError, OverflowError):\n pass\n return result\n\n\ndef _stringify_na_values(na_values):\n \"\"\" return a stringified and numeric for these values \"\"\"\n result = []\n for x in na_values:\n result.append(str(x))\n result.append(x)\n try:\n v = float(x)\n\n # we are like 999 here\n if v == int(v):\n v = int(v)\n result.append(\"%s.0\" % v)\n result.append(str(v))\n\n result.append(v)\n except (TypeError, ValueError, OverflowError):\n pass\n try:\n result.append(int(x))\n except (TypeError, ValueError, OverflowError):\n pass\n return set(result)\n\n\ndef _get_na_values(col, na_values, na_fvalues, keep_default_na):\n \"\"\"\n Get the NaN values for a given column.\n\n Parameters\n ----------\n col : str\n The name of the column.\n na_values : array-like, dict\n The object listing the NaN values as strings.\n na_fvalues : array-like, dict\n The object listing the NaN values as floats.\n keep_default_na : bool\n If `na_values` is a dict, and the column is not mapped in the\n dictionary, whether to return the default NaN values or the empty set.\n\n Returns\n -------\n nan_tuple : A length-two tuple composed of\n\n 1) na_values : the string NaN values for that column.\n 2) na_fvalues : the float NaN values for that column.\n \"\"\"\n\n if isinstance(na_values, dict):\n if col in na_values:\n return na_values[col], na_fvalues[col]\n else:\n if keep_default_na:\n return _NA_VALUES, set()\n\n return set(), set()\n else:\n return na_values, na_fvalues\n\n\ndef _get_col_names(colspec, columns):\n colset = set(columns)\n colnames = []\n for c in colspec:\n if c in colset:\n colnames.append(c)\n elif isinstance(c, int):\n colnames.append(columns[c])\n return colnames\n\n\ndef _concat_date_cols(date_cols):\n if len(date_cols) == 1:\n if compat.PY3:\n return np.array([compat.text_type(x) for x in date_cols[0]],\n dtype=object)\n else:\n return np.array([\n str(x) if not isinstance(x, compat.string_types) else x\n for x in date_cols[0]\n ], dtype=object)\n\n rs = np.array([' '.join(compat.text_type(y) for y in x)\n for x in zip(*date_cols)], dtype=object)\n return rs\n\n\nclass FixedWidthReader(BaseIterator):\n \"\"\"\n A reader of fixed-width lines.\n \"\"\"\n\n def __init__(self, f, colspecs, delimiter, comment, skiprows=None):\n self.f = f\n self.buffer = None\n self.delimiter = '\\r\\n' + delimiter if delimiter else '\\n\\r\\t '\n self.comment = comment\n if colspecs == 'infer':\n self.colspecs = self.detect_colspecs(skiprows=skiprows)\n else:\n self.colspecs = colspecs\n\n if not isinstance(self.colspecs, (tuple, list)):\n raise TypeError(\"column specifications must be a list or tuple, \"\n \"input was a %r\" % type(colspecs).__name__)\n\n for colspec in self.colspecs:\n if not (isinstance(colspec, (tuple, list)) and\n len(colspec) == 2 and\n isinstance(colspec[0], (int, np.integer, type(None))) and\n 
isinstance(colspec[1], (int, np.integer, type(None)))):\n raise TypeError('Each column specification must be '\n '2 element tuple or list of integers')\n\n def get_rows(self, n, skiprows=None):\n \"\"\"\n Read rows from self.f, skipping as specified.\n\n We distinguish buffer_rows (the first <= n lines)\n from the rows returned to detect_colspecs because\n it's simpler to leave the other locations with\n skiprows logic alone than to modify them to deal\n with the fact we skipped some rows here as well.\n\n Parameters\n ----------\n n : int\n Number of rows to read from self.f, not counting\n rows that are skipped.\n skiprows: set, optional\n Indices of rows to skip.\n\n Returns\n -------\n detect_rows : list of str\n A list containing the rows to read.\n\n \"\"\"\n if skiprows is None:\n skiprows = set()\n buffer_rows = []\n detect_rows = []\n for i, row in enumerate(self.f):\n if i not in skiprows:\n detect_rows.append(row)\n buffer_rows.append(row)\n if len(detect_rows) >= n:\n break\n self.buffer = iter(buffer_rows)\n return detect_rows\n\n def detect_colspecs(self, n=100, skiprows=None):\n # Regex escape the delimiters\n delimiters = ''.join(r'\\%s' % x for x in self.delimiter)\n pattern = re.compile('([^%s]+)' % delimiters)\n rows = self.get_rows(n, skiprows)\n if not rows:\n raise EmptyDataError(\"No rows from which to infer column width\")\n max_len = max(map(len, rows))\n mask = np.zeros(max_len + 1, dtype=int)\n if self.comment is not None:\n rows = [row.partition(self.comment)[0] for row in rows]\n for row in rows:\n for m in pattern.finditer(row):\n mask[m.start():m.end()] = 1\n shifted = np.roll(mask, 1)\n shifted[0] = 0\n edges = np.where((mask ^ shifted) == 1)[0]\n edge_pairs = list(zip(edges[::2], edges[1::2]))\n return edge_pairs\n\n def __next__(self):\n if self.buffer is not None:\n try:\n line = next(self.buffer)\n except StopIteration:\n self.buffer = None\n line = next(self.f)\n else:\n line = next(self.f)\n # Note: 'colspecs' is a sequence of half-open intervals.\n return [line[fromm:to].strip(self.delimiter)\n for (fromm, to) in self.colspecs]\n\n\nclass FixedWidthFieldParser(PythonParser):\n \"\"\"\n Specialization that Converts fixed-width fields into DataFrames.\n See PythonParser for details.\n \"\"\"\n\n def __init__(self, f, **kwds):\n # Support iterators, convert to a list.\n self.colspecs = kwds.pop('colspecs')\n PythonParser.__init__(self, f, **kwds)\n\n def _make_reader(self, f):\n self.data = FixedWidthReader(f, self.colspecs, self.delimiter,\n self.comment, self.skiprows)\n" ]
[ [ "pandas.DatetimeIndex", "pandas.compat.iteritems", "pandas.date_range", "pandas.util.testing.assert_numpy_array_equal", "pandas.notna", "numpy.timedelta64", "pandas.bdate_range", "pandas.tseries.offsets.CDay", "numpy.int_", "pandas.Timedelta", "pandas.to_datetime", "pandas.util.testing.assert_index_equal", "pandas.tseries.offsets.BDay", "numpy.array", "pandas.to_timedelta", "pandas.Timestamp" ], [ "pandas.errors.EmptyDataError", "pandas.compat.iteritems", "pandas.io.common._infer_compression", "pandas.compat.map", "numpy.asarray", "pandas.compat.to_str", "pandas.compat.zip", "pandas.core.dtypes.common.is_integer", "pandas._libs.parsers.TextReader", "pandas._libs.tslibs.parsing.try_parse_dates", "pandas.core.dtypes.common.is_list_like", "pandas.io.common._get_handle", "pandas.io.common.is_file_like", "pandas.core.dtypes.common.is_float", "pandas._libs.lib.map_infer", "pandas.compat.itervalues", "pandas.errors.ParserError", "pandas.core.dtypes.common.is_categorical_dtype", "pandas._libs.parsers.sanitize_objects", "pandas.compat.StringIO", "pandas.core.index.RangeIndex", "pandas.core.dtypes.cast.astype_nansafe", "pandas._libs.lib.maybe_convert_numeric", "numpy.isnan", "pandas.io.date_converters.generic_parser", "numpy.where", "pandas.io.common.UTF8Recoder", "pandas.core.dtypes.common.ensure_object", "pandas._libs.lib.infer_dtype", "numpy.zeros", "pandas._libs.lib.to_object_array", "pandas._libs.lib.is_bool", "pandas.core.index.ensure_index_from_sequences", "pandas.io.common._validate_header_arg", "numpy.putmask", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.missing.isna", "pandas.io.common.get_filepath_or_buffer", "pandas.compat.u", "pandas.core.dtypes.common.is_string_dtype", "pandas.util._decorators.Appender", "numpy.roll", "pandas._libs.lib.map_infer_mask", "pandas.core.series.Series", "pandas.io.common.UnicodeReader", "pandas.core.index.MultiIndex.from_tuples", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.index.Index", "pandas.core.dtypes.common.is_object_dtype", "pandas.errors.AbstractMethodError", "pandas.compat.range", "pandas.compat.text_type", "pandas.compat.lrange", "pandas.core.frame.DataFrame", "pandas.core.dtypes.common.is_integer_dtype" ] ]
zmlabe/ExtremeEvents
[ "701c274c074dd2c4ae7c7294ec20f35c64e6ea2b" ]
[ "Scripts/calc_LRP.py" ]
[ "\"\"\"\nFunctions are useful untilities for interpretation of ANN\n \nNotes\n-----\n Author : Zachary Labe\n Date : 22 July 2020\n \nUsage\n-----\n [1] deepTaylorAnalysis(model,XXt,YYt,biasBool,annType,classChunk,startYear)\n [2] def _gradient_descent_for_bwo(cnn_model_object, loss_tensor,\n init_function_or_matrices,\n num_iterations,learning_rate):\n [3] bwo_for_class(cnn_model_object,target_class,init_function_or_matrices,\n num_iterations=DEFAULT_NUM_BWO_ITERATIONS,\n learning_rate=DEFAULT_BWO_LEARNING_RATE)\n [4] optimal_input(model,input_img,target_class,num_iterations=200,\n learning_rate = 0.01)\n\"\"\"\n\n###############################################################################\n###############################################################################\n###############################################################################\n\ndef deepTaylorAnalysis(model,XXt,YYt,biasBool,annType,classChunk,startYear):\n \"\"\"\n Calculate Deep Taylor for LRP\n \"\"\"\n print('<<<< Started deepTaylorAnalysis() >>>>')\n \n ### Import modules\n import numpy as np \n import innvestigate\n import calc_Stats as SSS\n \n ### Define useful functions\n def invert_year_output(ypred,startYear):\n inverted_years = SSS.convert_fuzzyDecade_toYear(ypred,startYear,\n classChunk)\n \n return inverted_years\n \n ### Define prediction error\n yearsUnique = np.unique(YYt)\n percCutoff = 90\n withinYearInc = 2.\n errTolerance = withinYearInc \n if(annType=='class'):\n err = YYt[:,0] - invert_year_output(model.predict(XXt),\n startYear)\n \n ###########################################################################\n ###########################################################################\n ###########################################################################\n ### Create the innvestigate analyzer instance for each sample\n if(annType=='class'):\n model_nosoftmax = innvestigate.utils.model_wo_softmax(model)\n # analyzer = innvestigate.analyzer.relevance_based.relevance_analyzer.LRPAlphaBeta(\n # model_nosoftmax,alpha=1,beta=0,bias=biasBool)\n analyzer = innvestigate.analyzer.relevance_based.relevance_analyzer.LRPZ(model_nosoftmax)\n # analyzer = innvestigate.analyzer.relevance_based.relevance_analyzer.LRPEpsilon(model_nosoftmax, \n # epsilon=1e-07, bias=biasBool)\n\n deepTaylorMaps = np.empty(np.shape(XXt))\n deepTaylorMaps[:] = np.nan\n\n # analyze each input via the analyzer\n for i in np.arange(0,np.shape(XXt)[0]):\n\n # ensure error is small, i.e. 
model was correct\n if(np.abs(err[i])<=errTolerance):\n sample = XXt[i]\n analyzer_output = analyzer.analyze(sample[np.newaxis,...])\n deepTaylorMaps[i] = analyzer_output/np.sum(analyzer_output.flatten())\n\n print('done with Deep Taylor analyzer normalization') \n \n ###########################################################################\n ###########################################################################\n ###########################################################################\n ### Compute the frequency of data at each point and the average relevance \n ### normalized by the sum over the area and the frequency above the 90th \n ### percentile of the map\n yearsUnique = np.unique(YYt)\n summaryDT = np.zeros((len(yearsUnique),np.shape(deepTaylorMaps)[1]))\n summaryDTFreq = np.zeros((len(yearsUnique),np.shape(deepTaylorMaps)[1]))\n summaryNanCount = np.zeros((len(yearsUnique),1))\n\n for i, year in enumerate(yearsUnique):\n ### Years within N years of each year\n j = np.where(np.abs(YYt-year)<=withinYearInc)[0] \n\n ### Average relevance\n a = np.nanmean(deepTaylorMaps[j,:],axis=0)\n summaryDT[i,:] = a[np.newaxis,...]\n\n ### Frequency of non-nans\n nancount = np.count_nonzero(~np.isnan(deepTaylorMaps[j,1]))\n summaryNanCount[i] = nancount\n\n ### Frequency above percentile cutoff\n count = 0\n for k in j:\n b = deepTaylorMaps[k,:]\n if(~np.isnan(b[0])):\n count = count + 1\n pVal = np.percentile(b,percCutoff)\n summaryDTFreq[i,:] = summaryDTFreq[i,:]+np.where(b>=pVal,1,0)\n if(count==0):\n summaryDTFreq[i,:] = 0\n else:\n summaryDTFreq[i,:] = summaryDTFreq[i,:]/count \n \n print('<<<< Completed deepTaylorAnalysis() >>>>') \n return(summaryDT,summaryDTFreq,summaryNanCount)\n\n###############################################################################\n###############################################################################\n###############################################################################\n\ndef _gradient_descent_for_bwo(\n cnn_model_object, loss_tensor, init_function_or_matrices,\n num_iterations, learning_rate):\n \"\"\"\n Does gradient descent (the nitty-gritty part) for backwards optimization.\n :param cnn_model_object: Trained instance of `keras.models.Model`.\n :param loss_tensor: Keras tensor, defining the loss function to be\n minimized.\n :param init_function_or_matrices: Either a function or list of numpy arrays.\n If function, will be used to initialize input matrices. See\n `create_gaussian_initializer` for an example.\n If list of numpy arrays, these are the input matrices themselves. Matrices\n should be processed in the exact same way that training data were processed\n (e.g., normalization method). Matrices must also be in the same order as\n training matrices, and the [q]th matrix in this list must have the same\n shape as the [q]th training matrix.\n :param num_iterations: Number of gradient-descent iterations (number of\n times that the input matrices are adjusted).\n :param learning_rate: Learning rate. 
At each iteration, each input value x\n will be decremented by `learning_rate * gradient`, where `gradient` is\n the gradient of the loss function with respect to x.\n :return: list_of_optimized_input_matrices: length-T list of optimized input\n matrices (numpy arrays), where T = number of input tensors to the model.\n If the input arg `init_function_or_matrices` is a list of numpy arrays\n (rather than a function), `list_of_optimized_input_matrices` will have\n the exact same shape, just with different values.\n \"\"\"\n ### Import modules\n import numpy as np\n import keras.backend as K\n import copy\n\n if isinstance(cnn_model_object.input, list):\n list_of_input_tensors = cnn_model_object.input\n else:\n list_of_input_tensors = [cnn_model_object.input]\n\n num_input_tensors = len(list_of_input_tensors)\n list_of_gradient_tensors = K.gradients(loss_tensor, list_of_input_tensors)\n\n for i in range(num_input_tensors):\n list_of_gradient_tensors[i] /= K.maximum(\n K.sqrt(K.mean(list_of_gradient_tensors[i] ** 2)),\n K.epsilon()\n )\n\n inputs_to_loss_and_gradients = K.function(\n list_of_input_tensors + [K.learning_phase()],\n ([loss_tensor] + list_of_gradient_tensors)\n )\n\n if isinstance(init_function_or_matrices, list):\n list_of_optimized_input_matrices = copy.deepcopy(\n init_function_or_matrices)\n else:\n list_of_optimized_input_matrices = [None] * num_input_tensors\n\n for i in range(num_input_tensors):\n these_dimensions = np.array(\n [1] + list_of_input_tensors[i].get_shape().as_list()[1:],\n dtype=int\n )\n\n list_of_optimized_input_matrices[i] = init_function_or_matrices(\n these_dimensions)\n\n for j in range(num_iterations):\n these_outputs = inputs_to_loss_and_gradients(\n list_of_optimized_input_matrices + [0]\n )\n\n if np.mod(j, 100) == 0:\n print('Loss after {0:d} of {1:d} iterations: {2:.2e}'.format(\n j, num_iterations, these_outputs[0]\n ))\n\n for i in range(num_input_tensors):\n list_of_optimized_input_matrices[i] -= (\n these_outputs[i + 1] * learning_rate\n )\n\n print('Loss after {0:d} iterations: {1:.2e}'.format(\n num_iterations, these_outputs[0]\n ))\n\n return list_of_optimized_input_matrices\n\n###############################################################################\n###############################################################################\n###############################################################################\n\ndef bwo_for_class(\n cnn_model_object, target_class, init_function_or_matrices,\n num_iterations,learning_rate):\n \"\"\"\n Does backwards optimization to maximize probability of target class.\n :param cnn_model_object: Trained instance of `keras.models.Model`.\n :param target_class: Synthetic input data will be created to maximize\n probability of this class.\n :param init_function_or_matrices: See doc for `_gradient_descent_for_bwo`.\n :param num_iterations: Same.\n :param learning_rate: Same.\n :return: list_of_optimized_input_matrices: Same.\n \"\"\"\n ### Import modules\n import numpy as np\n import keras.backend as K\n\n target_class = int(np.round(target_class))\n num_iterations = int(np.round(num_iterations))\n\n assert target_class >= 0\n assert num_iterations > 0\n assert learning_rate > 0.\n assert learning_rate < 1.\n\n num_output_neurons = (\n cnn_model_object.layers[-1].output.get_shape().as_list()[-1]\n )\n\n if num_output_neurons == 1:\n assert target_class <= 1\n\n if target_class == 1:\n loss_tensor = K.mean(\n (cnn_model_object.layers[-1].output[..., 0] - 1) ** 2\n )\n else:\n loss_tensor = K.mean(\n 
cnn_model_object.layers[-1].output[..., 0] ** 2\n )\n else:\n assert target_class < num_output_neurons\n\n loss_tensor = K.mean(\n (cnn_model_object.layers[-1].output[..., target_class] - 1) ** 2\n )\n\n return _gradient_descent_for_bwo(\n cnn_model_object=cnn_model_object, loss_tensor=loss_tensor,\n init_function_or_matrices=init_function_or_matrices,\n num_iterations=num_iterations, learning_rate=learning_rate)\n\n###############################################################################\n###############################################################################\n###############################################################################\n\ndef optimal_input(model,input_img,target_class,num_iterations=200,learning_rate = 0.01):\n \"\"\" \n OI\n \"\"\"\n ### Define modules\n import numpy as np\n import keras.backend as K\n \n ### Need to change the out_loss calculation to use your loss equation\n ### Need to use the target_output variable\n # out_loss = - K.sum(target_output * K.log(model.layers[-1].output))\n out_loss = K.mean(\n (model.layers[-1].output[..., int(target_class)] - 1) ** 2\n )\n\n ### Calculate the gradients at the input layer WRT your output loss\n grad = K.gradients(out_loss, [model.input])[0]\n\n ### Create a function to iterate the loss and gradient\n ### Inputs are an image and the learning phase (0 for false)\n ### Outputs are the loss for the output and gradients WRT input layer\n iterate_fcn = K.function([model.input, K.learning_phase()], \n [out_loss, grad])\n\n for iterVal in np.arange(0,num_iterations):\n\n ### Calculate the loss and the gradients at the input layer based on the \n ### current stage of the input image\n out_loss, out_grad = iterate_fcn([input_img, 0])\n\n ### Take a step along gradient WRT input -- \n ### updates the input slightly towards its optimal input\n input_img -= out_grad*learning_rate\n \n return input_img\n\n###############################################################################\n###############################################################################\n###############################################################################\n" ]
[ [ "numpy.nanmean", "numpy.abs", "numpy.mod", "numpy.arange", "numpy.percentile", "numpy.shape", "numpy.isnan", "numpy.round", "numpy.where", "numpy.unique" ] ]
hdelecki/alpha-zero-general-ut3
[ "1b38aad66c3ac38d815fbd21c34475bfa7573706" ]
[ "ut3/UT3Game.py" ]
[ "from __future__ import print_function\nimport sys\nsys.path.append('..')\nfrom Game import Game\nfrom .UT3Logic import Board\nimport numpy as np\n\n\"\"\"\nGame class implementation for the game of Ultimate TicTacToe.\n\nAuthor: Harrison Delecki, github.com/hdelecki\n\nBased on the OthelloGame by Surag Nair.\n\"\"\"\nclass UT3Game(Game):\n def __init__(self, n=3, conv=True):\n self.conv = conv\n self.n = n\n #self.last_move = None\n\n def getArray(self, board):\n if self.conv:\n global_rep = np.repeat(np.repeat(board.global_pieces, 3, axis=1), 3, axis=0)\n local_rep = board.local_pieces\n play_rep = np.repeat(np.repeat(board.play_map, 3, axis=1), 3, axis=0)\n #valid_rep = np.zeros(local_rep.shape)\n #0valids = board.get_legal_moves(player=1)\n #valid_rep[tuple(valids.T.tolist())] = 1.0\n return np.stack((local_rep, global_rep, play_rep))\n else:\n raise NotImplementedError()\n\n def getBoardChannels(self):\n #return 2\n if self.conv:\n return 3\n else:\n return 1\n\n def getInitBoard(self):\n # return initial board (numpy board)\n #self.last_move = None\n b = Board(self.n)\n return self.getArray(b)\n\n def getBoardSize(self):\n # (a,b) tuple\n return (self.n**2, self.n**2)\n\n def getActionSize(self):\n # return number of actions\n return self.n**4\n\n def getNextState(self, board, player, action):\n # if player takes action on board, return next (board,player)\n # action must be a valid move\n # if action == self.n*self.n:\n # return (board, -player)\n # b = Board(self.n)\n # b.pieces = np.copy(board)\n # move = (int(action/self.n), action%self.n)\n # b.execute_move(move, player)\n # return (b.pieces, -player)\n b = Board(self.n)\n b.local_pieces = np.copy(board[0])\n b.global_pieces = np.copy(board[1][::3, ::3])\n b.play_map = np.copy(board[2][::3, ::3])\n #b.last_move = self.last_move\n move = np.unravel_index(action, (self.n**2, self.n**2))\n #move = int(action/self.n**2), action%self.n**2\n b.execute_move(move, player)\n #self.last_move = b.last_move\n return self.getArray(b), -player\n\n def getValidMoves(self, board, player):\n # return a fixed size binary vector\n #valid = [0]*self.getActionSize()\n valid = np.zeros(self.getActionSize())\n b = Board(self.n)\n b.local_pieces = np.copy(board[0])\n b.global_pieces = np.copy(board[1][::3, ::3])\n b.play_map = np.copy(board[2][::3, ::3])\n valid_coords = b.get_legal_moves(player)\n valid_idx = np.ravel_multi_index(valid_coords.T, (self.n**2, self.n**2))\n valid[valid_idx] = True\n return valid\n\n\n def getGameEnded(self, board, player):\n # return 0 if not ended, 1 if player 1 won, -1 if player 1 lost\n # player = 1\n brd = Board(self.n)\n brd.local_pieces = np.copy(board[0])\n brd.global_pieces = np.copy(board[1][::3, ::3])\n brd.play_map = np.copy(board[2][::3, ::3])\n \n if brd.is_win(1):\n return player\n elif brd.is_win(-1):\n return -player\n elif brd.is_full():\n return brd.draw\n\n # for player in -1, 1:\n # if brd.is_win(player):\n # return player\n # if brd.is_full():\n # return brd.draw\n return 0\n\n def getCanonicalForm(self, board, player):\n # return state if player==1, else return -state if player==-1\n #return np.where(board, player*board, board)\n if player == 1:\n return board\n else:\n board[:2,:,:] *= -1\n return board\n \n def getSymmetries(self, board, pi):\n # rotate, mirror\n assert(len(pi) == self.getActionSize()) # 1 for pass\n pi_board = np.reshape(pi, self.getBoardSize())\n sym, x, y = [], -2, -1\n \n # sym.append((board, pi))\n # return sym\n\n for rot in range(1, 5):\n for flip in True, False:\n newB = 
np.rot90(board, rot, (x, y))\n newPi = np.rot90(pi_board, rot, (x, y))\n if flip:\n newB = np.flip(newB, y)\n newPi = np.flip(newPi, y)\n sym.append((newB, list(newPi.ravel())))\n return sym\n\n def stringRepresentation(self, board):\n return board.tostring()\n\n\n def display(self, board, indent=' '):\n # print('Last Move:')\n # print(board.last_move)\n print('')\n print(indent + ' 0 | 1 | 2 ‖ 3 | 4 | 5 ‖ 6 | 7 | 8')\n print('')\n for n, row in enumerate(board[0]):\n if n:\n if n % 3:\n sep = '---+---+---'\n print(indent + '- ' + sep + '‖' + sep + '‖' + sep)\n else:\n sep = '==========='\n print(indent + '= ' + sep + '#' + sep + '#' + sep)\n row = ' ‖ '.join(' | '.join(map(str, map(int, row[i:i+3]))) for i in range(0, len(row), 3))\n print(indent + str(n) + ' ' + row.replace('-1','O').replace('1','X').replace('0','.'))\n print('')\n\ndef display(board, indent=' '):\n # print('Last Move:')\n # print(board.last_move)\n print('')\n print(indent + ' 0 | 1 | 2 ‖ 3 | 4 | 5 ‖ 6 | 7 | 8')\n print('')\n for n, row in enumerate(board[0]):\n if n:\n if n % 3:\n sep = '---+---+---'\n print(indent + '- ' + sep + '‖' + sep + '‖' + sep)\n else:\n sep = '==========='\n print(indent + '= ' + sep + '#' + sep + '#' + sep)\n row = ' ‖ '.join(' | '.join(map(str, map(int, row[i:i+3]))) for i in range(0, len(row), 3))\n print(indent + str(n) + ' ' + row.replace('-1','O').replace('1','X').replace('0','.'))\n print('')\n\n # @staticmethod\n # def display(board):\n # n = board.shape[0]\n\n # print(\" \", end=\"\")\n # for y in range(n):\n # print (y,\"\", end=\"\")\n # print(\"\")\n # print(\" \", end=\"\")\n # for _ in range(n):\n # print (\"-\", end=\"-\")\n # print(\"--\")\n # for y in range(n):\n # print(y, \"|\",end=\"\") # print the row #\n # for x in range(n):\n # piece = board[y][x] # get the piece to print\n # if piece == -1: print(\"X \",end=\"\")\n # elif piece == 1: print(\"O \",end=\"\")\n # else:\n # if x==n:\n # print(\"-\",end=\"\")\n # else:\n # print(\"- \",end=\"\")\n # print(\"|\")\n\n # print(\" \", end=\"\")\n # for _ in range(n):\n # print (\"-\", end=\"-\")\n # print(\"--\")\n" ]
[ [ "numpy.ravel_multi_index", "numpy.repeat", "numpy.copy", "numpy.rot90", "numpy.flip", "numpy.stack", "numpy.unravel_index" ] ]
AmericaGL/TrashTalk_Dapp
[ "401f17289261b5f537b239e7759dc039d53211e1" ]
[ "opencv-3.3.0/samples/python/mouse_and_match.py" ]
[ "#!/usr/bin/env python\n'''\nmouse_and_match.py [-i path | --input path: default ../data/]\n\nDemonstrate using a mouse to interact with an image:\n Read in the images in a directory one by one\n Allow the user to select parts of an image with a mouse\n When they let go of the mouse, it correlates (using matchTemplate) that patch with the image.\n\n SPACE for next image\n ESC to exit\n'''\n\n# Python 2/3 compatibility\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\n\n# built-in modules\nimport os\nimport sys\nimport glob\nimport argparse\nfrom math import *\n\n\ndrag_start = None\nsel = (0,0,0,0)\n\ndef onmouse(event, x, y, flags, param):\n global drag_start, sel\n if event == cv2.EVENT_LBUTTONDOWN:\n drag_start = x, y\n sel = 0,0,0,0\n elif event == cv2.EVENT_LBUTTONUP:\n if sel[2] > sel[0] and sel[3] > sel[1]:\n patch = gray[sel[1]:sel[3],sel[0]:sel[2]]\n result = cv2.matchTemplate(gray,patch,cv2.TM_CCOEFF_NORMED)\n result = np.abs(result)**3\n val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO)\n result8 = cv2.normalize(result,None,0,255,cv2.NORM_MINMAX,cv2.CV_8U)\n cv2.imshow(\"result\", result8)\n drag_start = None\n elif drag_start:\n #print flags\n if flags & cv2.EVENT_FLAG_LBUTTON:\n minpos = min(drag_start[0], x), min(drag_start[1], y)\n maxpos = max(drag_start[0], x), max(drag_start[1], y)\n sel = minpos[0], minpos[1], maxpos[0], maxpos[1]\n img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)\n cv2.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1)\n cv2.imshow(\"gray\", img)\n else:\n print(\"selection is complete\")\n drag_start = None\n\nif __name__ == '__main__':\n print(__doc__)\n\n parser = argparse.ArgumentParser(description='Demonstrate mouse interaction with images')\n parser.add_argument(\"-i\",\"--input\", default='../data/', help=\"Input directory.\")\n args = parser.parse_args()\n path = args.input\n\n cv2.namedWindow(\"gray\",1)\n cv2.setMouseCallback(\"gray\", onmouse)\n '''Loop through all the images in the directory'''\n for infile in glob.glob( os.path.join(path, '*.*') ):\n ext = os.path.splitext(infile)[1][1:] #get the filename extenstion\n if ext == \"png\" or ext == \"jpg\" or ext == \"bmp\" or ext == \"tiff\" or ext == \"pbm\":\n print(infile)\n\n img=cv2.imread(infile,1)\n if img is None:\n continue\n sel = (0,0,0,0)\n drag_start = None\n gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cv2.imshow(\"gray\",gray)\n if cv2.waitKey() == 27:\n break\n cv2.destroyAllWindows()\n" ]
[ [ "numpy.abs" ] ]
slohani-ai/machine-learning-for-physical-sciences
[ "f10f04d768b8eb0966953d76e6a553d3b11af92f" ]
[ "mlphys/deepqis/Simulator/Distributions.py" ]
[ "\"\"\"\nauthor: Sanjaya Lohani\nemail: [email protected]\nLicence: Apache-2.0\n\"\"\"\n\nimport numpy as np\nimport qiskit.quantum_info as qi\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\n\nclass Haar_State:\n\n def __init__(self, qs):\n self._qs = qs\n\n def pure_states(self, _):\n state = qi.random_statevector(dims=2 ** self._qs)\n state_dm = state.to_operator()\n state_np = state_dm.data\n return state_np\n\n def sample_dm(self, n_size): # K == D in equation (3) in the bias paper\n q_dm = list(map(self.pure_states, range(n_size)))\n q_dm = np.array(q_dm).reshape(n_size, 2 ** self._qs,\n 2 ** self._qs) # [self.n_size, 2 ** self._qs, 2 ** self._qs]\n return q_dm\n\n\nclass Hilbert_Schmidt:\n\n def __init__(self, qs):\n self._qs = qs\n\n def hs_states(self, _):\n dm = qi.random_density_matrix(dims=2 ** self._qs) # defualt is Hilbert-Schmidth\n dm_np = dm.data\n return dm_np\n\n def sample_dm(self, n_size):\n hs_dm = list(map(self.hs_states, range(n_size)))\n hs_dm = np.array(hs_dm).reshape(n_size, 2 ** self._qs, 2 ** self._qs)\n return hs_dm\n\n\nclass Bures:\n\n def __init__(self, qs):\n self._qs = qs\n\n def hs_states(self, _):\n dm = qi.random_density_matrix(dims=2 ** self._qs, method='Bures') # defualt is Hilbert-Schmidth\n dm_np = dm.data\n return dm_np\n\n def sample_dm(self, n_size):\n hs_dm = list(map(self.hs_states, range(n_size)))\n hs_dm = np.array(hs_dm).reshape(n_size, 2 ** self._qs, 2 ** self._qs)\n return hs_dm\n\n\nclass eye_NN:\n\n def __init__(self, qs):\n self._qs = qs\n\n def I_states(self, _):\n state = np.identity(2 ** self._qs)\n return state\n\n def sample_dm(self, n_size): # K == D in equation (3) in the bias paper\n q_dm = list(map(self.I_states, range(n_size)))\n q_dm = np.array(q_dm).reshape(n_size, 2 ** self._qs,\n 2 ** self._qs) # [self.n_size, 2 ** self._qs, 2 ** self._qs]\n return 1 / 4 * q_dm\n\n\nclass HS_Haar:\n\n def __init__(self, qs):\n self._qs = qs\n\n def sample_dm(self, n_size, Haar_to_HS=None): # a fraction for Haar_to_HS. For eg. 10% --> 0.1\n haar_dm = Haar_State(qs=self._qs).sample_dm(n_size=n_size)\n hs_dm = Hilbert_Schmidt(qs=self._qs).sample_dm(n_size=n_size)\n if Haar_to_HS is None:\n a = np.random.uniform(low=0.0, high=1.0, size=[n_size, 1, 1])\n else:\n a = Haar_to_HS\n hs_haar_dm = (1 - a) * hs_dm + a * haar_dm\n return hs_haar_dm\n\n\nclass Mix_eye:\n\n def __init__(self, qs):\n self._qs = qs\n\n def sample_dm(self, n_size, eye_to_mix=None, states='HS'): # a fraction for I_to_Mix. For eg. 
10% --> 0.1\n if states == 'HS':\n mix_dm = Hilbert_Schmidt(qs=self._qs).sample_dm(n_size=n_size)\n if states == 'Haar':\n mix_dm = Haar_State(qs=self._qs).sample_dm(n_size=n_size)\n\n I_dm = eye_NN(qs=self._qs).sample_dm(n_size=n_size)\n if eye_to_mix is None:\n a = np.random.uniform(low=0.0, high=1.0, size=[n_size, 1, 1])\n else:\n a = eye_to_mix\n\n hs_haar_dm = (1 - a) * mix_dm + a * I_dm\n return hs_haar_dm\n\n\nclass MaiAlquierDist_Symmetric:\n\n def __init__(self,\n qs=2,\n alpha=tf.TensorSpec(shape=1, dtype=tf.float32)) -> object:\n self.alpha = alpha\n self._qs = qs\n\n @staticmethod\n def _cast_complex(x):\n return tf.cast(x, tf.complex128)\n\n def sample_alpha(self, n_size=tf.TensorSpec(shape=1, dtype=tf.int64)):\n alpha = tf.repeat(self.alpha, [2 ** self._qs])\n dist = tfp.distributions.Dirichlet(alpha)\n sampled = dist.sample(n_size) # [n_size, self._qs]\n sampled = tf.expand_dims(sampled, axis=-1) # [n_size, self._qs, 1]\n sampled = tf.expand_dims(sampled, axis=-1) # [n_size, self._qs, 1, 1]\n return sampled\n\n def sample_dm(self, n_size, numpy_array=True):\n q_dm = Haar_State(qs=self._qs).sample_dm(n_size=n_size * 2 ** self._qs) # [self.n_size * 2**self._qs,\n # 2 ** self._qs, 2 ** self._qs]\n haar_dm = tf.reshape(q_dm, [n_size, 2 ** self._qs, 2 ** self._qs, 2 ** self._qs]) # [n_size, self._qs,\n # self._qs, self._qs]\n alphas = self.sample_alpha(n_size) # [n_size, self._qs, 1, 1]\n alphas = self._cast_complex(alphas)\n ma_states_array = tf.multiply(alphas, haar_dm) # [n_size, self._qs, self._qs, self._qs]\n ma_states = tf.reduce_sum(ma_states_array,\n axis=1) # [n_size, self._qs --> traced out and dropped, self._qs, self._qs]\n # --> [n_size, self._qs, self._qs]\n if numpy_array:\n ma_states = ma_states.numpy()\n return ma_states\n\n\nclass MaiAlquierDist_Asymmetric:\n\n def __init__(self,\n qs=2,\n k_params=None,\n alpha=[0.1, 0.2, 0.3, 0.4]) -> object:\n self.alpha = alpha\n self._qs = qs\n self.D = 2 ** self._qs\n self.K = self.D\n if k_params is not None:\n self.K = k_params\n\n @staticmethod\n def _cast_complex(x):\n return tf.cast(x, tf.complex128)\n\n def sample_alpha(self, n_size=tf.TensorSpec(shape=1, dtype=tf.int64)):\n # if purity is not None:\n # self.alpha = self.D * (1 - purity) / (self.D * (purity * self.D - 2) + 1)\n # alpha = tf.repeat(self.alpha, [2 ** self._qs])\n dist = tfp.distributions.Dirichlet(self.alpha)\n if isinstance(self.alpha, np.ndarray):\n tf.debugging.assert_equal(self.alpha.ndim, 2, '|The given alpha must be a rank 2 tensor.')\n sampled = dist.sample(1)\n sampled = tf.squeeze(sampled)\n else:\n sampled = dist.sample(n_size) # [n_size, self._qs]\n sampled = tf.expand_dims(sampled, axis=-1) # [n_size, self._qs, 1]\n sampled = tf.expand_dims(sampled, axis=-1) # [n_size, self._qs, 1, 1]\n return sampled\n\n def sample_dm(self, n_size, numpy_array=True):\n q_dm = Haar_State(qs=self._qs).sample_dm(n_size=n_size * self.K) # [self.n_size * 2**self._qs,\n # 2 ** self._qs, 2 ** self._qs]\n haar_dm = tf.reshape(q_dm, [n_size, self.K, 2 ** self._qs, 2 ** self._qs]) # [n_size, self._qs,\n # self._qs, self._qs]\n alphas = self.sample_alpha(n_size) # [n_size, self._qs, 1, 1]\n alphas = self._cast_complex(alphas)\n ma_states_array = tf.multiply(alphas, haar_dm) # [n_size, self._qs, self._qs, self._qs]\n ma_states = tf.reduce_sum(ma_states_array,\n axis=1) # [n_size, self._qs --> traced out and dropped, self._qs, self._qs]\n # --> [n_size, self._qs, self._qs]\n if numpy_array:\n ma_states = ma_states.numpy()\n return ma_states\n\n\nclass 
MaiAlquierDist_Gamma:\n\n def __init__(self,\n qs=tf.TensorSpec(shape=1, dtype=tf.int64),\n alpha=tf.TensorSpec(shape=1, dtype=tf.float32)) -> object:\n self.alpha = alpha\n self._qs = qs\n\n @staticmethod\n def _cast_complex(x):\n return tf.cast(x, tf.complex128)\n\n @tf.function\n def sample_dm(self, n_size=tf.TensorSpec(shape=1, dtype=tf.int64), numpy_array=False):\n self.n_size = n_size\n x = tf.random.normal([self.n_size, 2 * 2 ** self._qs * 2 ** self._qs], 0., 1.)\n Xr = tf.reshape(x[:, :2 ** self._qs * 2 ** self._qs], [self.n_size, 2 ** self._qs, 2 ** self._qs])\n Xi = tf.reshape(x[:, 2 ** self._qs * 2 ** self._qs:], [self.n_size, 2 ** self._qs, 2 ** self._qs])\n Xr = self._cast_complex(Xr)\n Xi = self._cast_complex(Xi)\n X = Xr + 1j * Xi\n W = X / tf.expand_dims(tf.norm(X, axis=1), axis=1)\n # print('shape of W', W.shape)\n if isinstance(self.alpha, float):\n gamma_factor = self._cast_complex(tf.random.gamma([self.n_size, 2 ** self._qs], alpha=self.alpha, beta=1.))\n else:\n g_tensor = tf.vectorized_map(lambda x: tf.random.gamma([2 ** self._qs], x), self.alpha)\n gamma_factor = self._cast_complex(tf.reshape(g_tensor, [-1, 2 ** self._qs]))\n gamma_factor_norm = gamma_factor / tf.expand_dims(tf.reduce_sum(gamma_factor, axis=1), axis=1)\n gama_diag_batch = tf.vectorized_map(lambda x: tf.linalg.diag(x), gamma_factor_norm) # rank 3 tensors\n rho = tf.linalg.matmul(W, tf.linalg.matmul(gama_diag_batch, W, adjoint_b=True))\n return rho\n" ]
[ [ "numpy.random.uniform", "tensorflow.norm", "tensorflow.linalg.diag", "tensorflow.random.gamma", "tensorflow.reshape", "tensorflow.debugging.assert_equal", "tensorflow.multiply", "tensorflow.expand_dims", "tensorflow.repeat", "tensorflow.squeeze", "tensorflow.cast", "tensorflow.linalg.matmul", "tensorflow.TensorSpec", "tensorflow.random.normal", "numpy.array", "tensorflow.reduce_sum", "numpy.identity" ] ]
meichenfang/inferelator
[ "47f8ebcc5f303264a75814897c52026b47c57aef" ]
[ "inferelator/distributed/dask_functions.py" ]
[ "from inferelator.distributed.inferelator_mp import MPControl\nfrom inferelator.regression import base_regression\nfrom inferelator import utils\nimport copy\n\nimport numpy as np\nimport scipy.sparse as sps\nfrom dask import distributed\n\n\"\"\"\nThis package contains the dask-specific multiprocessing functions (these are used in place of map calls to allow the\nmore advanced memory and task tools of dask to be used)\n\"\"\"\n\nDASK_SCATTER_TIMEOUT = 120\n\n\ndef amusr_regress_dask(X, Y, priors, prior_weight, n_tasks, genes, tfs, G, remove_autoregulation=True):\n \"\"\"\n Execute multitask (AMUSR)\n\n :return: list\n Returns a list of regression results that the amusr_regression pileup_data can process\n \"\"\"\n\n assert MPControl.is_dask()\n\n from inferelator.regression.amusr_regression import format_prior, run_regression_EBIC\n DaskController = MPControl.client\n\n # Gets genes, n_tasks, prior_weight, and remove_autoregulation from regress_dask()\n # Other arguments are passed in\n def regression_maker(j, x_df, y_list, prior, tf):\n level = 0 if j % 100 == 0 else 2\n utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=genes[j], i=j, total=G),\n level=level)\n\n gene = genes[j]\n x, y, tasks = [], [], []\n\n if remove_autoregulation:\n tf = [t for t in tf if t != gene]\n else:\n pass\n\n for k, y_data in y_list:\n x.append(x_df[k].get_gene_data(tf)) # list([N, K])\n y.append(y_data)\n tasks.append(k) # [T,]\n\n prior = format_prior(prior, gene, tasks, prior_weight)\n return j, run_regression_EBIC(x, y, tf, tasks, gene, prior)\n\n def response_maker(y_df, i):\n y = []\n gene = genes[i]\n for k in range(n_tasks):\n if gene in y_df[k].gene_names:\n y.append((k, y_df[k].get_gene_data(gene, force_dense=True).reshape(-1, 1)))\n return y\n\n # Scatter common data to workers\n [scatter_x] = DaskController.client.scatter([X], broadcast=True, hash=False)\n [scatter_priors] = DaskController.client.scatter([priors], broadcast=True, hash=False)\n\n # Wait for scattering to finish before creating futures\n distributed.wait(scatter_x, timeout=DASK_SCATTER_TIMEOUT)\n distributed.wait(scatter_priors, timeout=DASK_SCATTER_TIMEOUT)\n\n future_list = [DaskController.client.submit(regression_maker, i, scatter_x, response_maker(Y, i), scatter_priors,\n tfs)\n for i in range(G)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n result_list = process_futures_into_list(future_list)\n\n DaskController.client.cancel(scatter_x)\n DaskController.client.cancel(scatter_priors)\n\n return result_list\n\n\ndef bbsr_regress_dask(X, Y, pp_mat, weights_mat, G, genes, nS):\n \"\"\"\n Execute regression (BBSR)\n\n :return: list\n Returns a list of regression results that the pileup_data can process\n \"\"\"\n assert MPControl.is_dask()\n\n from inferelator.regression import bayes_stats\n DaskController = MPControl.client\n\n def regression_maker(j, x, y, pp, weights):\n level = 0 if j % 100 == 0 else 2\n utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=genes[j], i=j, total=G), level=level)\n data = bayes_stats.bbsr(x, utils.scale_vector(y), pp[j, :].flatten(), weights[j, :].flatten(), nS)\n data['ind'] = j\n return j, data\n\n # Scatter common data to workers\n [scatter_x] = DaskController.client.scatter([X.values], broadcast=True, hash=False)\n [scatter_pp] = DaskController.client.scatter([pp_mat.values], broadcast=True, hash=False)\n [scatter_weights] = DaskController.client.scatter([weights_mat.values], broadcast=True, hash=False)\n\n # Wait for scattering to 
finish before creating futures\n distributed.wait(scatter_x, timeout=DASK_SCATTER_TIMEOUT)\n distributed.wait(scatter_pp, timeout=DASK_SCATTER_TIMEOUT)\n distributed.wait(scatter_weights, timeout=DASK_SCATTER_TIMEOUT)\n\n future_list = [DaskController.client.submit(regression_maker, i, scatter_x,\n Y.get_gene_data(i, force_dense=True).flatten(),\n scatter_pp, scatter_weights)\n for i in range(G)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n result_list = process_futures_into_list(future_list)\n\n DaskController.client.cancel(scatter_x)\n DaskController.client.cancel(scatter_pp)\n DaskController.client.cancel(scatter_weights)\n\n return result_list\n\n\ndef sklearn_regress_dask(X, Y, model, G, genes, min_coef):\n \"\"\"\n Execute regression (SKLearn)\n\n :return: list\n Returns a list of regression results that the pileup_data can process\n \"\"\"\n assert MPControl.is_dask()\n\n from inferelator.regression import sklearn_regression\n DaskController = MPControl.client\n\n def regression_maker(j, x, y):\n level = 0 if j % 100 == 0 else 2\n utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=genes[j], i=j, total=G), level=level)\n data = sklearn_regression.sklearn_gene(x, utils.scale_vector(y), copy.copy(model))\n data['ind'] = j\n return j, data\n\n # Scatter common data to workers\n [scatter_x] = DaskController.client.scatter([X.values], broadcast=True, hash=False)\n\n # Wait for scattering to finish before creating futures\n distributed.wait(scatter_x, timeout=DASK_SCATTER_TIMEOUT)\n\n future_list = [DaskController.client.submit(regression_maker, i, scatter_x,\n Y.get_gene_data(i, force_dense=True).flatten())\n for i in range(G)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n result_list = process_futures_into_list(future_list)\n\n DaskController.client.cancel(scatter_x)\n\n return result_list\n\n\ndef lasso_stars_regress_dask(X, Y, alphas, num_subsamples, random_seed, method, params, G, genes):\n \"\"\"\n Execute regression (LASSO-StARS)\n\n :return: list\n Returns a list of regression results that the pileup_data can process\n \"\"\"\n assert MPControl.is_dask()\n\n from inferelator.regression import stability_selection\n DaskController = MPControl.client\n\n def regression_maker(j, x, y):\n level = 0 if j % 100 == 0 else 2\n utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=genes[j], i=j, total=G), level=level)\n data = stability_selection.stars_model_select(x, utils.scale_vector(y), alphas, num_subsamples=num_subsamples,\n method=method, random_seed=random_seed, **params)\n data['ind'] = j\n return j, data\n\n # Scatter common data to workers\n [scatter_x] = DaskController.client.scatter([X.values], broadcast=True, hash=False)\n\n # Wait for scattering to finish before creating futures\n distributed.wait(scatter_x, timeout=DASK_SCATTER_TIMEOUT)\n\n future_list = [DaskController.client.submit(regression_maker, i, scatter_x,\n Y.get_gene_data(i, force_dense=True).flatten())\n for i in range(G)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n result_list = process_futures_into_list(future_list)\n\n DaskController.client.cancel(scatter_x)\n\n return result_list\n\n\ndef build_mi_array_dask(X, Y, bins, logtype):\n \"\"\"\n Calculate MI into an array with dask (the naive map is very inefficient)\n\n :param X: np.ndarray (n x m1)\n Discrete array of bins\n :param Y: np.ndarray (n x m2)\n Discrete array of bins\n :param bins: int\n The total number of bins that 
were used to make the arrays discrete\n :param logtype: np.log func\n Which log function to use (log2 gives bits, ln gives nats)\n :return mi: np.ndarray (m1 x m2)\n Returns the mutual information array\n \"\"\"\n\n assert MPControl.is_dask()\n\n from inferelator.regression.mi import _calc_mi, _make_table, _make_discrete\n\n # Get a reference to the Dask controller\n DaskController = MPControl.client\n\n m1, m2 = X.shape[1], Y.shape[1]\n\n def mi_make(i, x, y):\n x = _make_discrete(x, bins)\n return i, [_calc_mi(_make_table(x, y[:, j], bins), logtype=logtype) for j in range(m2)]\n\n # Scatter Y to workers and keep track as Futures\n [scatter_y] = DaskController.client.scatter([Y], broadcast=True, hash=False)\n\n # Wait for scattering to finish before creating futures\n distributed.wait(scatter_y, timeout=DASK_SCATTER_TIMEOUT)\n\n # Build an asynchronous list of Futures for each calculation of mi_make\n future_list = [DaskController.client.submit(mi_make, i,\n X[:, i].A.flatten() if sps.isspmatrix(X) else X[:, i].flatten(),\n scatter_y)\n for i in range(m1)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n mi_list = process_futures_into_list(future_list)\n\n # Convert the list of lists to an array\n mi = np.array(mi_list)\n assert (m1, m2) == mi.shape, \"Array {sh} produced [({m1}, {m2}) expected]\".format(sh=mi.shape, m1=m1, m2=m2)\n\n DaskController.client.cancel(scatter_y)\n\n return mi\n\n\ndef process_futures_into_list(future_list, raise_on_error=True, check_results=True):\n \"\"\"\n Take a list of futures and turn them into a list of results\n Results must be of the form i, data (where i is the output order)\n\n :param future_list: A list of executing futures\n :type future_list: list\n :param raise_on_error: Should an error be raised if a job can't be restarted or just move on from it.\n Defaults to True\n :type raise_on_error: bool\n :param check_results: Should the result object be checked (and restarted if there's a problem)\n If False, this will raise an error with the result of a failed future is retrieved.\n Defaults to True.\n :type check_results: bool\n :return output_list: A list of results from the completed futures\n :rtype: list\n \"\"\"\n\n DaskController = MPControl.client\n output_list = [None] * len(future_list)\n complete_gen = distributed.as_completed(future_list)\n\n for finished_future in complete_gen:\n\n DaskController.check_cluster_state()\n\n # Jobs can be cancelled in certain situations\n if check_results and (finished_future.cancelled() or (finished_future.status == \"erred\")):\n error = finished_future.exception()\n utils.Debug.vprint(\"Restarting job (Error: {er})\".format(er=error), level=0)\n\n # Restart cancelled futures and put them back into the work pile\n try:\n DaskController.client.retry(finished_future)\n complete_gen.update([finished_future])\n except KeyError:\n if raise_on_error:\n raise\n\n # In the event of success, get the data\n else:\n i, result_data = finished_future.result()\n output_list[i] = result_data\n finished_future.cancel()\n\n return output_list\n\n" ]
[ [ "numpy.array", "scipy.sparse.isspmatrix" ] ]
sdss/coordio
[ "61f5c962b8e3f335259168c9f8e872b4d3fe25d8" ]
[ "coordio/sky.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author: José Sánchez-Gallego ([email protected])\n# @Date: 2020-08-17\n# @Filename: sky.py\n# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)\n\n# IAU-defined sky coordinate systems and transformations.\n\nimport ctypes\n\nimport numpy\n\nfrom . import sofa\nfrom .coordinate import Coordinate, Coordinate2D, verifySite, verifyWavelength\nfrom .exceptions import CoordinateError, CoordIOError\nfrom .time import Time\nfrom .site import Site\n# from .telescope import Field\nfrom . import defaults\nfrom . import conv\n\n\n__all__ = ['ICRS', 'Observed']\n\n\nclass ICRS(Coordinate2D):\n \"\"\"A representation of ICRS coordinates.\n\n Parameters\n ----------\n value : numpy.ndarray\n A Nx2 Numpy array with the RA and Dec coordinates of the targets.\n epoch : numpy.ndarray\n A 1D array with the epoch of the coordinates for each target,\n as a TDB Julian date (although for most applications the small\n differences between scales will not matter). Defaults to J2000.\n pmra : numpy.ndarray\n A 1D array with the proper motion in the RA axis for the N targets,\n in milliarcsec/yr. Must be a true angle, i.e, it must include the\n ``cos(dec)`` term.\n pmdec : numpy.ndarray\n A 1D array with the proper motion in the RA axis for the N targets,\n in milliarcsec/yr.\n parallax : numpy.ndarray\n A 1D array with the parallax for the N targets, in milliarcsec.\n rvel : numpy.ndarray\n A 1D array with the radial velocity in km/s, positive when receding.\n wavelength : numpy.ndarray\n A 1D array with he observing wavelength, in angstrom.\n Defaults to the value in `defaults.WAVELENGTH` (GFA, sdss-r)\n\n \"\"\"\n\n __extra_arrays__ = ['epoch', 'pmra', 'pmdec', 'parallax', 'rvel', 'wavelength']\n\n def __new__(cls, value, **kwargs):\n\n verifyWavelength(kwargs, len(value), strict=False)\n\n obj = super().__new__(cls, value, **kwargs)\n\n if kwargs.get('epoch', None) is None:\n obj.epoch += defaults.EPOCH\n\n # if kwargs.get('wavelength', None) is None:\n # if hasattr(value, \"wavelength\"):\n # obj.wavelength = value.wavelength\n # else:\n # obj.wavelength += defaults.wavelength\n\n # check if a coordinate was passed that we can just\n # 'cast' into Observed\n if isinstance(value, Coordinate):\n\n if value.coordSysName == 'Observed':\n obj._fromObserved(value)\n\n else:\n raise CoordIOError(\n 'Cannot convert to ICRS from %s'%value.coordSysName\n )\n\n return obj\n\n def _fromObserved(self, obsCoords):\n \"\"\"Converts from `.Observed` coordinates. Epoch is the\n time specifified by the site.\n\n \"\"\"\n\n # We need the epoch to be J2000.0 because that's what iauAtco13 likes.\n # icrs_2000 = icrsCoords.to_epoch(2451545.0, site=self.site)\n\n # rra = numpy.radians(icrs_2000[:, 0])\n # rdec = numpy.radians(icrs_2000[:, 1])\n # rpmra = numpy.radians(icrs_2000.pmra / 1000. / 3600.) / numpy.cos(rdec)\n # rpmdec = numpy.radians(icrs_2000.pmdec / 1000. 
/ 3600.)\n\n rlong = numpy.radians(obsCoords.site.longitude)\n rlat = numpy.radians(obsCoords.site.latitude)\n rZD = numpy.radians(90 - obsCoords[:,0])\n rAz = numpy.radians(obsCoords[:,1])\n wavelength = obsCoords.wavelength / 10000.\n _type = \"A\".encode() # coords are azimuth, zenith dist\n\n time = obsCoords.site.time\n\n utc = time.to_utc()\n utc1 = int(utc)\n utc2 = utc - utc1\n dut1 = time.get_dut1()\n\n _ra = ctypes.c_double()\n _dec = ctypes.c_double()\n\n ra = numpy.zeros(len(obsCoords))\n dec = numpy.zeros(len(obsCoords))\n\n for ii in range(len(rAz)):\n\n sofa.iauAtoc13(\n _type, rAz[ii], rZD[ii], utc1, utc2, dut1,\n rlong, rlat, obsCoords.site.altitude, 0.0, 0.0,\n obsCoords.site.pressure, obsCoords.site.temperature,\n obsCoords.site.rh, wavelength[ii], _ra, _dec\n )\n ra[ii] = numpy.degrees(_ra.value)\n dec[ii] = numpy.degrees(_dec.value)\n\n self[:, 0] = ra\n self[:, 1] = dec\n\n def to_epoch(self, jd, site=None):\n \"\"\"Convert the coordinates to a new epoch.\n\n Parameters\n ----------\n jd : float\n The Julian date, in TAI scale, of the output epoch.\n site : .Site\n The site of the observation. Used to determine the TDB-TT offset.\n If not provided, it assumes longitude and latitude zero.\n\n Returns\n -------\n icrs : `.ICRS`\n A new `.ICRS` object with the coordinates, proper motion, etc. in\n the new epoch.\n\n \"\"\"\n\n rra = numpy.radians(self[:, 0])\n rdec = numpy.radians(self[:, 1])\n rpmra = numpy.radians(self.pmra / 1000. / 3600.) / numpy.cos(rdec)\n rpmdec = numpy.radians(self.pmdec / 1000. / 3600.)\n\n # Using TDB is probably an overkill.\n\n tai = Time(jd, scale='TAI')\n\n if site:\n epoch2 = tai.to_tdb(longitude=site.longitude,\n latitude=site.latitude,\n altitude=site.altitude)\n else:\n epoch2 = tai.to_tdb()\n\n epoch2_1 = int(epoch2)\n epoch2_2 = epoch2 - epoch2_1\n\n ra2 = ctypes.c_double()\n dec2 = ctypes.c_double()\n pmra2 = ctypes.c_double()\n pmdec2 = ctypes.c_double()\n parallax2 = ctypes.c_double()\n rvel2 = ctypes.c_double()\n\n new_icrs = self.copy()\n\n for ii in range(self.shape[0]):\n\n epoch1_1 = float(int(self.epoch[ii]))\n epoch1_2 = self.epoch[ii] - epoch1_1\n\n res = sofa.iauPmsafe(rra[ii], rdec[ii], rpmra[ii], rpmdec[ii],\n self.parallax[ii] / 1000., self.rvel[ii],\n epoch1_1, epoch1_2, epoch2_1, epoch2_2,\n ra2, dec2, pmra2, pmdec2, parallax2, rvel2)\n\n if res > 1 or res < 0:\n raise CoordinateError(f'iauPmsafe return with '\n f'error code {res}.')\n\n new_icrs[ii, :] = numpy.rad2deg([ra2.value, dec2.value])\n new_icrs.pmra[ii] = numpy.rad2deg(pmra2.value) * 3600. * 1000.\n new_icrs.pmra[ii] *= numpy.cos(dec2.value)\n new_icrs.pmdec[ii] = numpy.rad2deg(pmdec2.value) * 3600. * 1000.\n new_icrs.parallax[ii] = parallax2.value * 1000.\n new_icrs.rvel[ii] = rvel2.value\n\n return new_icrs\n\n\nclass Observed(Coordinate2D):\n \"\"\"The observed coordinates of a series of targets.\n\n The array contains the Alt/Az coordinates of the targets. Their RA/Dec\n coordinates can be accessed via the ``ra`` and ``dec`` attributes.\n If `.ICRS` or `.Field` is passed, Alt/Az coordinates are computed.\n\n Parameters\n ----------\n value : numpy.ndarray\n A Nx2 Numpy array with the Alt and Az coordinates of the targets,\n in degrees. Or `.ICRS` instance. 
Or a `.Field` instance.\n wavelength : numpy.ndarray\n A 1D array with he observing wavelength, in angstrom.\n If not explicitly passed, it tries to inheret from value.wavelength,\n if that doesn't exist, it is set to default specified in:\n `defaults.wavelength`\n site : .Site\n The site from where observations will occur, along with the time\n of the observation. Mandatory argument.\n\n Attributes\n -----------\n ra : numpy.ndarray\n Nx1 Numpy array, observed RA in degrees\n dec : numpy.ndarray\n Nx1 Numpy array, observed Dec in degrees\n ha : numpy.ndarray\n Nx1 Numpy array, hour angle in degrees\n pa : numpy.ndarray\n Nx1 Numpy array, position angle in degrees. By SOFA: the angle between\n the direction to the north celestial pole and direction to the zenith.\n range is [-180, 180]. The sign is according to:\n -ha --> -pa, +ha --> +pa\n\n \"\"\"\n\n __extra_arrays__ = ['wavelength']\n __extra_params__ = ['site'] # mandatory\n __computed_arrays__ = ['ra', 'dec', 'ha', 'pa']\n\n def __new__(cls, value, **kwargs):\n # should we do range checks (eg alt < 90)? probably.\n\n verifySite(kwargs)\n\n # if kwargs.get('site', None) is None:\n # raise CoordIOError('Site must be passed to Observed')\n\n # else:\n # site = kwargs.get('site')\n # if not isinstance(site, Site):\n # raise CoordIOError('Must pass Site to Observed')\n # if site.time is None:\n # raise CoordIOError(\n # \"Time of observation must be specified on Site\"\n # )\n\n # should we prefer wavelength passed, or wavelength\n # existing on value (if it does exist). Here preferring passed\n # if kwargs.get('wavelength', None) is None:\n # if hasattr(value, \"wavelength\"):\n # kwargs[\"wavelength\"] = value.wavelength\n verifyWavelength(\n kwargs, len(value), strict=False\n )\n\n obj = super().__new__(cls, value, **kwargs)\n\n # if kwargs.get('wavelength', None) is None:\n # obj.wavelength += defaults.wavelength\n\n\n\n # check if a coordinate was passed that we can just\n # 'cast' into Observed\n if isinstance(value, Coordinate):\n\n if value.coordSysName == 'ICRS':\n obj._fromICRS(value)\n\n elif value.coordSysName == 'Field':\n obj._fromField(value)\n\n else:\n raise CoordIOError(\n 'Cannot convert to Observed from %s'%value.coordSysName\n )\n\n else:\n # raw numpy array supplied compute values\n obj._fromRaw()\n\n return obj\n\n def _fromICRS(self, icrsCoords):\n \"\"\"Converts from ICRS to topocentric observed coordinates for a site.\n Automatically executed after initialization with `.ICRS`.\n\n Computes and sets ra, dec, ha, pa arrays.\n\n Parameters:\n ------------\n icrsCoords : `.ICRS`\n ICRS coordinates from which to convert to observed coordinates\n\n \"\"\"\n\n # eventually move this to coordio.conv?\n\n # Prepare to call iauAtco13\n # Given:\n # rc,dc double ICRS right ascension at J2000.0 (radians)\n # pr double RA proper motion (radians/year)\n # pd double Dec proper motion (radians/year)\n # px double parallax (arcsec)\n # rv double radial velocity (km/s, +ve if receding)\n # utc1 double UTC as a 2-part...\n # utc2 double ...quasi Julian Date\n # dut1 double UT1-UTC (seconds)\n # elong double longitude (radians, east +ve)\n # phi double latitude (geodetic, radians)\n # hm double height above ellipsoid (m, geodetic)\n # xp,yp double polar motion coordinates (radians)\n # phpa double pressure at the observer (hPa = mB)\n # tc double ambient temperature at the observer (deg C)\n # rh double relative humidity at the observer (range 0-1)\n # wl double wavelength (micrometers)\n #\n # Returned:\n # aob double* observed 
azimuth (radians: N=0,E=90)\n # zob double* observed zenith distance (radians)\n # hob double* observed hour angle (radians)\n # dob double* observed declination (radians)\n # rob double* observed right ascension (CIO-based, radians)\n # eo double* equation of the origins (ERA-GST)\n\n # TODO: maybe write this as Cython or C?\n\n # We need the epoch to be J2000.0 because that's what iauAtco13 likes.\n icrs_2000 = icrsCoords.to_epoch(2451545.0, site=self.site)\n\n rra = numpy.radians(icrs_2000[:, 0])\n rdec = numpy.radians(icrs_2000[:, 1])\n rpmra = numpy.radians(icrs_2000.pmra / 1000. / 3600.) / numpy.cos(rdec)\n rpmdec = numpy.radians(icrs_2000.pmdec / 1000. / 3600.)\n\n rlong = numpy.radians(self.site.longitude)\n rlat = numpy.radians(self.site.latitude)\n\n time = self.site.time\n\n utc = time.to_utc()\n utc1 = int(utc)\n utc2 = utc - utc1\n dut1 = time.get_dut1()\n\n az_obs = ctypes.c_double()\n zen_obs = ctypes.c_double()\n ha_obs = ctypes.c_double()\n dec_obs = ctypes.c_double()\n ra_obs = ctypes.c_double()\n eo_obs = ctypes.c_double()\n\n for ii in range(len(rra)):\n\n sofa.iauAtco13(\n rra[ii], rdec[ii], rpmra[ii], rpmdec[ii],\n icrs_2000.parallax[ii] / 1000., icrs_2000.rvel[ii],\n utc1, utc2, dut1,\n rlong, rlat, self.site.altitude, 0.0, 0.0,\n self.site.pressure, self.site.temperature,\n self.site.rh, icrs_2000.wavelength[ii] / 10000.,\n az_obs, zen_obs, ha_obs, dec_obs, ra_obs, eo_obs\n )\n\n altAz = [\n 90 - numpy.rad2deg(zen_obs.value),\n numpy.rad2deg(az_obs.value)\n ]\n self[ii, :] = altAz\n\n self.ra[ii] = numpy.rad2deg(ra_obs.value)\n self.dec[ii] = numpy.rad2deg(dec_obs.value)\n self.ha[ii] = numpy.rad2deg(ha_obs.value)\n\n # compute the pa\n self.pa[ii] = numpy.rad2deg(\n sofa.iauHd2pa(ha_obs.value, dec_obs.value, rlat)\n )\n\n def _fromField(self, fieldCoords):\n \"\"\"Converts from field coordinates to topocentric observed\n coordinates for a site. Automatically executed after initialization\n with `.Field`.\n\n Computes and sets ra, dec, ha, pa arrays.\n\n Parameters:\n ------------\n fieldCoords : `.Field`\n Field coordinates from which to convert to observed coordinates\n\n \"\"\"\n # get field center info\n altCenter, azCenter = fieldCoords.field_center.flatten()\n pa = float(fieldCoords.field_center.pa) # parallactic angle\n\n alt, az = conv.fieldToObserved(\n fieldCoords.x, fieldCoords.y, fieldCoords.z,\n altCenter, azCenter, pa\n )\n\n self[:,0] = alt\n self[:,1] = az\n\n self._fromRaw()\n\n def _fromRaw(self):\n \"\"\"Automatically executed after initialization with\n an Nx2 `numpy.ndarray` of Alt/Az coords.\n\n Computes and sets ra, dec, ha, pa arrays.\n\n \"\"\"\n\n self[:, 1] = self[:, 1] % 360\n\n # compute ra, dec, ha, pa here...\n dec_obs = ctypes.c_double()\n ha_obs = ctypes.c_double()\n rlat = numpy.radians(self.site.latitude)\n rlong = numpy.radians(self.site.longitude)\n ut1 = self.site.time.to_ut1()\n\n for ii, (alt, az) in enumerate(self):\n raz = numpy.radians(az)\n ralt = numpy.radians(alt)\n sofa.iauAe2hd(raz, ralt, rlat, ha_obs, dec_obs)\n self.ha[ii] = numpy.degrees(ha_obs.value)\n self.dec[ii] = numpy.degrees(dec_obs.value)\n self.pa[ii] = numpy.degrees(\n sofa.iauHd2pa(ha_obs.value, dec_obs.value, rlat)\n )\n # earth rotation angle (from SOFA docs)\n # https://www.iausofa.org/2017_0420_C/sofa/sofa_ast_c.pdf\n era = sofa.iauEra00(ut1, 0) # time is sum of the 2 args\n _ra = numpy.degrees(era + rlong - ha_obs.value)\n _ra = _ra % 360 # wrap ra\n\n self.ra[ii] = _ra\n\n\n" ]
[ [ "numpy.degrees", "numpy.radians", "numpy.rad2deg", "numpy.cos" ] ]
arpanmangal/coinaction
[ "488eb4fea833ecf5df65acdf12e55592099efc93" ]
[ "mmaction/models/tenons/backbones/resnet_s3d.py" ]
[ "import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\n\nfrom ....utils.misc import rgetattr, rhasattr\nfrom .resnet import ResNet \nfrom mmcv.cnn import constant_init, kaiming_init\nfrom mmcv.runner import load_checkpoint\n\nfrom ....ops.trajectory_conv_package.traj_conv import TrajConv\nfrom .. import flownets\n\n\nfrom ...registry import BACKBONES\n\ndef conv3x3x3(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1):\n \"3x3x3 convolution with padding\"\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=(temporal_stride, spatial_stride, spatial_stride),\n padding=dilation,\n dilation=dilation,\n bias=False)\n\n\ndef conv1x3x3(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1):\n \"1x3x3 convolution with padding\"\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=(1,3,3),\n stride=(temporal_stride, spatial_stride, spatial_stride),\n padding=(0, dilation, dilation),\n dilation=dilation,\n bias=False)\n\n\ndef conv3x1x1(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1, bias=False):\n \"3x1x1 convolution with padding\"\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=(3,1,1),\n stride=(temporal_stride, spatial_stride, spatial_stride),\n padding=(dilation,0,0),\n dilation=dilation,\n bias=bias)\n\ndef trajconv3x1x1(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1, bias=False):\n \"3x1x1 convolution with padding\"\n return TrajConv(\n in_planes,\n out_planes,\n kernel_size=(3,1,1),\n stride=(temporal_stride, spatial_stride, spatial_stride),\n padding=(dilation,0,0),\n dilation=dilation,\n bias=bias)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self,\n inplanes,\n planes,\n spatial_stride=1,\n temporal_stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n if_inflate=True,\n with_cp=False,\n with_trajectory=False):\n super(BasicBlock, self).__init__()\n self.conv1 = conv1x3x3(inplanes, planes, spatial_stride, 1, dilation)\n self.bn1 = nn.BatchNorm3d(planes)\n self.relu = nn.ReLU(inplace=True)\n \n self.conv2 = conv1x3x3(planes, planes)\n self.bn2 = nn.BatchNorm3d(planes)\n\n self.if_inflate = if_inflate\n\n if self.if_inflate:\n self.conv1_t = conv3x1x1(planes, planes, 1, temporal_stride, dilation, bias=True)\n self.bn1_t = nn.BatchNorm3d(planes)\n if with_trajectory:\n self.conv2_t = trajconv3x1x1(planes, planes, bias=True)\n else:\n self.conv2_t = conv3x1x1(planes, planes, bias=True)\n self.bn2_t = nn.BatchNorm3d(planes)\n\n self.downsample = downsample\n self.spatial_stride = spatial_stride\n self.temporal_stride = temporal_stride\n self.dilation = dilation\n assert not with_cp\n\n self.with_trajectory = with_trajectory\n\n def forward(self, input):\n x, traj_src = input\n\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n if self.if_inflate:\n out = self.conv1_t(out)\n out = self.bn1_t(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n \n if self.if_inflate:\n out = self.relu(out)\n if self.with_trajectory:\n assert traj_src[0] is not None\n out = self.conv2_t(out, traj_src[0])\n else:\n out = self.conv2_t(out)\n out = self.bn2_t(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out, traj_src[1:]\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n spatial_stride=1,\n temporal_stride=1,\n dilation=1,\n 
downsample=None,\n style='pytorch',\n if_inflate=True,\n with_cp=False,\n with_trajectory=False):\n \"\"\"Bottleneck block for ResNet.\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer,\n if it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n super(Bottleneck, self).__init__()\n assert style in ['pytorch', 'caffe']\n self.inplanes = inplanes\n self.planes = planes\n if style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = spatial_stride\n self.conv1_stride_t = 1\n self.conv2_stride_t = temporal_stride\n else:\n self.conv1_stride = spatial_stride\n self.conv2_stride = 1\n self.conv1_stride_t = temporal_stride\n self.conv2_stride_t = 1\n\n self.conv1 = nn.Conv3d(\n inplanes,\n planes,\n kernel_size=1,\n stride=(self.conv1_stride_t, self.conv1_stride, self.conv1_stride),\n bias=False)\n\n self.conv2 = nn.Conv3d(\n planes,\n planes,\n kernel_size=(1,3,3),\n stride=(1, self.conv2_stride, self.conv2_stride),\n padding=(0, dilation, dilation),\n dilation=(1, dilation, dilation),\n bias=False)\n\n self.if_inflate = if_inflate\n if self.if_inflate:\n self.conv2_t = nn.Conv3d(\n planes,\n planes,\n kernel_size=(3,1,1),\n stride=(self.conv2_stride_t,1,1),\n padding=(1,0,0),\n dilation=1,\n bias=True)\n self.bn2_t = nn.BatchNorm3d(planes)\n\n self.bn1 = nn.BatchNorm3d(planes)\n self.bn2 = nn.BatchNorm3d(planes)\n self.conv3 = nn.Conv3d(\n planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.spatial_tride = spatial_stride\n self.temporal_tride = temporal_stride\n self.dilation = dilation\n self.with_cp = with_cp\n\n self.with_trajectory = with_trajectory\n\n def forward(self, x):\n\n def _inner_forward(xx):\n x, traj_src = xx\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n if self.if_inflate:\n if self.with_trajectory:\n assert traj_src is not None\n out = self.conv2_t(out, traj_src[0])\n else:\n out = self.conv2_t(out)\n out = self.bn2_t(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out, traj_src[1:]\n\n if self.with_cp and x.requires_grad:\n out, traj_remains = cp.checkpoint(_inner_forward, x)\n else:\n out, traj_remains = _inner_forward(x)\n\n out = self.relu(out)\n\n return out, traj_remains\n\n\ndef make_res_layer(block,\n inplanes,\n planes,\n blocks,\n spatial_stride=1,\n temporal_stride=1,\n dilation=1,\n style='pytorch',\n inflate_freq=1,\n with_cp=False,\n traj_src_indices=-1):\n traj_src_indices = traj_src_indices if not isinstance(traj_src_indices, int) else (traj_src_indices, ) * blocks\n inflate_freq = inflate_freq if not isinstance(inflate_freq, int) else (inflate_freq, ) * blocks\n assert len(inflate_freq) == blocks\n downsample = None\n if spatial_stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv3d(\n inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=(temporal_stride, spatial_stride, spatial_stride),\n bias=False),\n nn.BatchNorm3d(planes * block.expansion),\n )\n\n layers = []\n layers.append(\n block(\n inplanes,\n planes,\n spatial_stride,\n temporal_stride,\n dilation,\n downsample,\n style=style,\n if_inflate=(inflate_freq[0] == 1),\n with_trajectory=(traj_src_indices[0]>-1),\n with_cp=with_cp))\n inplanes = planes * 
block.expansion\n for i in range(1, blocks):\n layers.append(\n block(inplanes,\n planes,\n 1, 1,\n dilation,\n style=style,\n if_inflate= (inflate_freq[i] == 1),\n with_trajectory=(traj_src_indices[i]>-1),\n with_cp=with_cp))\n\n return nn.Sequential(*layers)\n\n\[email protected]_module\nclass ResNet_S3D(nn.Module):\n \"\"\"ResNet_S3D backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n num_stages (int): Resnet stages, normally 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n not freezing any parameters.\n bn_eval (bool): Whether to set BN layers to eval mode, namely, freeze\n running stats (mean and var).\n bn_frozen (bool): Whether to freeze weight and bias of BN layers.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n \"\"\"\n\n arch_settings = {\n 18: (BasicBlock, (2, 2, 2, 2)),\n 34: (BasicBlock, (3, 4, 6, 3)),\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self,\n depth,\n pretrained=None,\n num_stages=4,\n spatial_strides=(1, 2, 2, 2),\n temporal_strides=(1, 1, 1, 1),\n dilations=(1, 1, 1, 1),\n out_indices=(0, 1, 2, 3),\n conv1_kernel_t=5,\n conv1_stride_t=2,\n pool1_kernel_t=1,\n pool1_stride_t=2,\n use_pool2=True,\n style='pytorch',\n frozen_stages=-1,\n inflate_freq=(1, 1, 1, 1), # For C2D baseline, this is set to -1.\n bn_eval=True,\n bn_frozen=False,\n partial_bn=False,\n with_cp=False,\n with_trajectory=False,\n trajectory_source_indices=-1,\n trajectory_downsample_method='ave',\n conv_bias=0.2):\n super(ResNet_S3D, self).__init__()\n if depth not in self.arch_settings:\n raise KeyError('invalid depth {} for resnet'.format(depth))\n self.depth = depth\n self.pretrained = pretrained\n self.num_stages = num_stages\n assert num_stages >= 1 and num_stages <= 4\n self.spatial_strides = spatial_strides\n self.temporal_strides = temporal_strides\n self.dilations = dilations\n assert len(spatial_strides) == len(temporal_strides) == len(dilations) == num_stages\n self.out_indices = out_indices\n assert max(out_indices) < num_stages\n self.style = style\n self.frozen_stages = frozen_stages\n self.inflate_freqs = inflate_freq if not isinstance(inflate_freq, int) else (inflate_freq, ) * num_stages\n self.bn_eval = bn_eval\n self.bn_frozen = bn_frozen\n self.partial_bn = partial_bn\n self.with_cp = with_cp\n\n self.with_trajectory = with_trajectory\n self.trajectory_source_indices = trajectory_source_indices \\\n if not isinstance(trajectory_source_indices, int) else [trajectory_source_indices, ] * num_stages\n self.trajectory_downsample_method = trajectory_downsample_method\n\n self.conv_bias = conv_bias\n\n self.block, stage_blocks = self.arch_settings[depth]\n self.stage_blocks = stage_blocks[:num_stages]\n for stage in range(num_stages):\n self.trajectory_source_indices[stage] = self.trajectory_source_indices[stage] \\\n if not isinstance(self.trajectory_source_indices[stage], int) else (self.trajectory_source_indices[stage], ) * self.stage_blocks[stage]\n self.inplanes = 64\n\n if conv1_kernel_t > 1:\n self.conv1 = nn.Conv3d(\n 3, 64, 
kernel_size=(1,7,7), stride=(1,2,2), padding=(0,3,3), bias=False)\n self.conv1_t = nn.Conv3d(\n 64, 64, kernel_size=(conv1_kernel_t,1,1), stride=(conv1_stride_t,1,1), padding=((conv1_kernel_t-1)//2,1,1), bias=True)\n self.bn1_t = nn.BatchNorm3d(64)\n else:\n self.conv1 = nn.Conv3d(\n 3, 64, kernel_size=(1,7,7), stride=(conv1_stride_t,2,2), padding=(0,3,3), bias=False)\n self.bn1 = nn.BatchNorm3d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(pool1_kernel_t,3,3), stride=(pool1_stride_t,2,2), padding=(pool1_kernel_t//2,1,1))\n self.use_pool2 = use_pool2\n if self.use_pool2:\n self.pool2 = nn.MaxPool3d(kernel_size=(3,1,1), stride=(2,1,1), padding=(1,0,0))\n\n self.res_layers = []\n for i, num_blocks in enumerate(self.stage_blocks):\n traj_src_indices = self.trajectory_source_indices[i] \\\n if not isinstance(self.trajectory_source_indices[i], int) \\\n else (self.trajectory_source_indices[i], ) * num_blocks\n spatial_stride = spatial_strides[i]\n temporal_stride = temporal_strides[i]\n dilation = dilations[i]\n planes = 64 * 2**i\n res_layer = make_res_layer(\n self.block,\n self.inplanes,\n planes,\n num_blocks,\n spatial_stride=spatial_stride,\n temporal_stride=temporal_stride,\n dilation=dilation,\n style=self.style,\n inflate_freq=self.inflate_freqs[i],\n with_cp=with_cp,\n traj_src_indices=traj_src_indices)\n self.inplanes = planes * self.block.expansion\n layer_name = 'layer{}'.format(i + 1)\n self.add_module(layer_name, res_layer)\n self.res_layers.append(layer_name)\n\n self.feat_dim = self.block.expansion * 64 * 2**(\n len(self.stage_blocks) - 1)\n\n def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = logging.getLogger()\n resnet2d = ResNet(self.depth)\n load_checkpoint(resnet2d, self.pretrained, strict=False, logger=logger)\n for name, module in self.named_modules():\n if isinstance(module, nn.Conv3d) or isinstance(module, TrajConv):\n if rhasattr(resnet2d, name):\n new_weight = rgetattr(resnet2d, name).weight.data.unsqueeze(2).expand_as(module.weight) / module.weight.data.shape[2]\n module.weight.data.copy_(new_weight)\n if hasattr(module, 'bias') and module.bias is not None:\n new_bias = rgetattr(resnet2d, name).bias.data\n module.bias.data.copy_(new_bias)\n else:\n kaiming_init(module, bias=self.conv_bias)\n elif isinstance(module, nn.BatchNorm3d):\n if rhasattr(resnet2d, name):\n for attr in ['weight', 'bias', 'running_mean', 'running_var']:\n setattr(module, attr, getattr(rgetattr(resnet2d, name), attr))\n else:\n constant_init(module, 1)\n elif self.pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n kaiming_init(m, bias=self.conv_bias)\n elif isinstance(m, nn.BatchNorm3d):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x, trajectory_forward=None, trajectory_backward=None):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n outs = []\n for i, layer_name in enumerate(self.res_layers):\n res_layer = getattr(self, layer_name)\n y = []\n for j in self.trajectory_source_indices[i]:\n if j > -1:\n flow_forward = trajectory_forward[j] ## N, 2*T, H, W (..x3y3x4y4..)\n flow_backward = trajectory_backward[j]\n flow_forward = flow_forward.view((flow_forward.size(0), -1, 2, flow_forward.size(2), flow_forward.size(3)))\n flow_backward = flow_backward.view((flow_backward.size(0), -1, 2, flow_backward.size(2), flow_backward.size(3)))\n flow_forward_x, flow_forward_y = torch.split(flow_forward, 1, 2)\n flow_backward_x, 
flow_backward_y = torch.split(flow_backward, 1, 2)\n flow_backward_x = flow_backward_x.flip(1).view((flow_backward_x.size(0), 1, flow_backward_x.size(1),\n flow_backward_x.size(3), flow_backward_x.size(4))) # N,T,1,H,W => N,1,T,H,W\n flow_backward_y = flow_backward_y.flip(1).view((flow_backward_y.size(0), 1, flow_backward_y.size(1),\n flow_backward_y.size(3), flow_backward_y.size(4)))\n flow_forward_x = flow_forward_x.view((flow_forward_x.size(0), 1, flow_forward_x.size(1),\n flow_forward_x.size(3), flow_forward_x.size(4)))\n flow_forward_y = flow_forward_y.view((flow_forward_y.size(0), 1, flow_forward_y.size(1),\n flow_forward_y.size(3), flow_forward_y.size(4)))\n flow_zero = torch.zeros_like(flow_forward_x)\n y.append(torch.cat((flow_backward_y, flow_backward_x, flow_zero, flow_zero, flow_forward_y, flow_forward_x), 1))\n else:\n y.append(None)\n \n x, remains = res_layer((x, y))\n assert len(remains) == 0 ## TODO: delete if check passes\n if i in self.out_indices:\n outs.append(x)\n if self.use_pool2 and i == 0:\n x = self.pool2(x)\n if len(outs) == 1:\n return outs[0]\n else:\n return tuple(outs)\n\n def train(self, mode=True):\n super(ResNet_S3D, self).train(mode)\n if self.bn_eval:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm3d):\n m.eval()\n if self.bn_frozen:\n for params in m.parameters():\n params.requires_grad = False\n if self.partial_bn:\n for i in range(1, self.frozen_stages + 1):\n mod = getattr(self, 'layer{}'.format(i))\n for m in mod.modules():\n if isinstance(m, nn.BatchNorm3d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n if mode and self.frozen_stages >= 0:\n for param in self.conv1.parameters():\n param.requires_grad = False\n for param in self.bn1.parameters():\n param.requires_grad = False\n self.bn1.eval()\n self.bn1.weight.requires_grad = False\n self.bn1.bias.requires_grad = False\n for i in range(1, self.frozen_stages + 1):\n mod = getattr(self, 'layer{}'.format(i))\n mod.eval()\n for param in mod.parameters():\n param.requires_grad = False\n" ]
[ [ "torch.utils.checkpoint.checkpoint", "torch.nn.MaxPool3d", "torch.nn.BatchNorm3d", "torch.split", "torch.zeros_like", "torch.nn.Sequential", "torch.cat", "torch.nn.ReLU", "torch.nn.Conv3d" ] ]
RJPenic/Orpheus
[ "1a795fa732a10d4f16b48cf00808a125548e00d5" ]
[ "orpheus/dataset.py" ]
[ "import torch\nfrom dataclasses import dataclass, field\nfrom typing import List\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torch.nn.utils.rnn import pad_sequence\nimport csv\n\nfrom nltk.corpus import stopwords\nimport random\n\nfrom tqdm import tqdm\n\n@dataclass\nclass Instance:\n label : int\n text : List[List[str]] = field(default_factory = list)\n\n def __init__(self, label : int, text : List[List[str]]):\n self.label = label\n self.text = text\n\nclass LyricsDataset(torch.utils.data.Dataset):\n def __init__(self, instances, max_vocab_size = 30000, max_lines = 30, max_words_per_line = 10, remove_stop_words = False):\n self.instances = instances\n self.stop_words = set(stopwords.words('english'))\n self.stop_words.update(['you\\'re', 'i\\'m', 'she\\'s', 'he\\'s', 'it\\'s', '\\'re', '\\'m', '\\'s'])\n\n self.max_vocab_size = max_vocab_size\n self.max_lines = max_lines\n self.max_words_per_line = max_words_per_line\n self.remove_stop_words = remove_stop_words\n\n self.text_vocab = self.construct_vocab(instances)\n\n def get_subset_vocab(self, indices):\n return self.construct_vocab([self.instances[i] for i in indices])\n\n def construct_vocab(self, instances):\n ct_txt = {}\n\n for instance in instances:\n for line in instance.text:\n for token in line:\n if not (self.remove_stop_words and token in self.stop_words):\n ct_txt[token] = ct_txt.get(token, 0) + 1\n\n return Vocab(ct_txt, self.max_lines, self.max_words_per_line, max_size = self.max_vocab_size)\n\n @staticmethod\n def from_file(filename, labels, take_rates = None, max_lines = 30, max_words_per_line = 10, skip_first_line = True, remove_stop_words = True, max_vocab_size = 30000):\n instances = []\n\n if take_rates is None:\n take_rates = [1.0] * len(labels)\n\n with open(filename) as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\n\n print(\"Loading dataset...\")\n for i, row in tqdm(enumerate(csv_reader)):\n if i == 0 and skip_first_line:\n continue\n\n label = row[5].lower()\n\n if label not in labels:\n continue\n\n if take_rates[labels.index(label)] < random.random():\n continue\n\n instances.append(Instance(\n int(labels.index(label)),\n [line.split() for line in row[6].split('\\n')[:max_lines]]\n ))\n\n print(f'Number of instances : {len(instances)}')\n\n print(\"-- Labels --\")\n for i, l in enumerate(labels):\n print(f'{i} : {l}')\n print(\"------------\")\n\n return LyricsDataset(instances, max_vocab_size, max_lines, max_words_per_line, remove_stop_words)\n\n def __getitem__(self, i):\n return self.text_vocab.encode(self.instances[i].text), self.instances[i].label\n \n def __len__(self):\n return len(self.instances)\n\nclass Vocab:\n def __init__(self, frequencies, max_lines, max_words_per_line, max_size = -1, min_freq = 0,\n special = [\"<PAD>\", \"<UNK>\"]): # maybe add additional special for line padding ???\n self.stoi = {}\n self.max_lines = max_lines\n self.max_words_per_line = max_words_per_line\n\n for s in special:\n self.stoi[s] = len(self.stoi)\n\n sorted_tokens = sorted(frequencies.keys(), key = lambda k: -frequencies[k])\n\n for t in sorted_tokens:\n if min_freq > frequencies[t] or (len(self.stoi) >= max_size and max_size != -1) :\n break\n self.stoi[t.lower()] = len(self.stoi)\n\n def encode(self, text):\n encoded = []\n\n for j, line in enumerate(text):\n if j >= self.max_lines:\n break\n\n temp = []\n for i, token in enumerate(line):\n if i >= self.max_words_per_line:\n break\n\n temp.append(self.stoi.get(token.lower(), 
self.stoi[\"<UNK>\"]))\n \n encoded.append(temp)\n\n return encoded\n\ndef load_vec_file_to_dict(filename):\n with open(filename, encoding=\"utf8\") as f:\n content = f.readlines()\n \n content = [x.strip() for x in content]\n \n vecs = {}\n\n print(\"Loading word vector representation...\")\n for line in tqdm(content):\n elems = line.split()\n vecs[elems[0]] = torch.Tensor([float(n) for n in elems[1:]])\n \n return vecs\n \n \ndef load_vec_repr(vocab, d = 300, file = None, freeze = False):\n emb_mat = torch.randn(len(vocab.stoi), d)\n emb_mat[0] = torch.zeros(d)\n\n if file is not None:\n vecs = load_vec_file_to_dict(file)\n \n for k in vocab.stoi:\n if k in vecs:\n emb_mat[vocab.stoi[k]] = vecs[k]\n\n\n return nn.Embedding.from_pretrained(emb_mat, padding_idx = 0, freeze = freeze)\n\ndef pad_collate_fn(batch, pad_index = 0):\n texts, labels = list(zip(*batch))\n bsz = len(labels)\n\n nums_lines = [len(lines) for lines in texts]\n nums_words = [[len(line) for line in lines] for lines in texts]\n\n max_lines = max(nums_lines)\n max_words = max([max(nw) for nw in nums_words])\n\n texts_tensor = torch.full((bsz, max_lines, max_words), pad_index).long()\n line_lens_tensor = torch.full((bsz, max_lines), pad_index).long()\n\n for i, text in enumerate(texts):\n text_len = nums_lines[i]\n line_lens_tensor[i, :text_len] = torch.LongTensor(nums_words[i])\n for j, line in enumerate(text):\n line_len = nums_words[i][j]\n texts_tensor[i, j, :line_len] = torch.LongTensor(line)\n\n return texts_tensor, torch.LongTensor(labels)\n" ]
[ [ "torch.zeros", "torch.full", "torch.LongTensor", "torch.nn.Embedding.from_pretrained" ] ]
jdailey/solarpanelcount
[ "39643e97d628c9317aca398d28e37ed25472a7f6" ]
[ "train_classification.py" ]
[ "\"\"\"Train the inception-v3 model on Solar Panel Identification dataset.\"\"\"\n\nfrom datetime import datetime\nimport os.path\nimport time\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nimport skimage\nimport skimage.io\nimport skimage.transform\nimport random\nimport pickle\nfrom collections import deque\n\nfrom inception import inception_model as inception\nfrom inception.slim import slim\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('ckpt_save_dir', 'ckpt/inception_classification',\n \"\"\"Directory for saving model checkpoint. \"\"\")\n\ntf.app.flags.DEFINE_string('ckpt_restore_dir', 'ckpt/inception_classification',\n \"\"\"Directory for restoring old model checkpoint. \"\"\")\n\ntf.app.flags.DEFINE_string('pretrained_model_ckpt_path', 'ckpt/inception-v3/model.ckpt-157585',\n \"\"\"If specified, restore this pretrained model \"\"\"\n \"\"\"before beginning any training.\"\"\")\n\ntf.app.flags.DEFINE_string('train_set_dir', 'SPI_train',\n \"\"\"Directory of training set\"\"\")\n\ntf.app.flags.DEFINE_integer('max_steps', 200000,\n \"\"\"Number of batches/steps to run.\"\"\")\n\ntf.app.flags.DEFINE_integer('num_gpus', 1,\n \"\"\"How many GPUs to use.\"\"\")\n\ntf.app.flags.DEFINE_boolean('fine_tune', True,\n \"\"\"If true, start from well-trained model on SPI dataset, else start from\n pretrained model on ImageNet\"\"\")\n\ntf.app.flags.DEFINE_float('initial_learning_rate', 0.001,\n \"\"\"Initial learning rate.\"\"\")\n\ntf.app.flags.DEFINE_float('num_epochs_per_decay', 5.0,\n \"\"\"Epochs after which learning rate decays.\"\"\")\n\ntf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.5,\n \"\"\"Learning rate decay factor.\"\"\")\n\n# basic parameters\nBATCH_SIZE = 32\nIMAGE_SIZE = 299\nNUM_CLASSES = 2\n\n# Constants dictating the learning rate schedule.\nRMSPROP_DECAY = 0.9 # Decay term for RMSProp.\nRMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.\nRMSPROP_EPSILON = 0.1 # Epsilon term for RMSProp.\n\ndef load_image(path):\n # load image and prepocess.\n rotate_angle_list = [0, 90, 180, 270]\n img = skimage.io.imread(path)\n resized_img = skimage.transform.resize(img, (IMAGE_SIZE, IMAGE_SIZE))\n if resized_img.shape[2] != 3:\n resized_img = resized_img[:, :, 0:3]\n rotate_angle = random.choice(rotate_angle_list)\n image = skimage.transform.rotate(resized_img, rotate_angle)\n return image\n\ndef train():\n # load train set list and transform it to queue.\n try:\n with open('train_set_list.pickle', 'r') as f:\n train_set_list = pickle.load(f)\n except:\n raise EnvironmentError('Data list not existed. Please run generate_data_list.py first.')\n random.shuffle(train_set_list)\n train_set_queue = deque(train_set_list)\n train_set_size = len(train_set_list)\n del train_set_list\n print ('Training set built. 
Size: '+str(train_set_size))\n\n # build the tensorflow graph.\n with tf.Graph().as_default() as g:\n\n global_step = tf.get_variable(\n 'global_step', [],\n initializer=tf.constant_initializer(0), trainable=False)\n\n num_batches_per_epoch = train_set_size / BATCH_SIZE\n decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,\n global_step,\n decay_steps,\n FLAGS.learning_rate_decay_factor,\n staircase=True)\n tf.summary.scalar('learning_rate', lr)\n\n # Create an optimizer that performs gradient descent.\n opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,\n momentum=RMSPROP_MOMENTUM,\n epsilon=RMSPROP_EPSILON)\n\n images = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])\n\n labels = tf.placeholder(tf.int32, shape=[BATCH_SIZE])\n\n logits = inception.inference(images, NUM_CLASSES, for_training=True,\n restore_logits=FLAGS.fine_tune,\n scope=None)\n\n inception.loss(logits, labels, batch_size=BATCH_SIZE)\n\n # Assemble all of the losses for the current tower only.\n losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope = None)\n\n # Calculate the total loss for the current tower.\n regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n total_loss = tf.add_n(losses + regularization_losses, name='total_loss')\n\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n\n with tf.control_dependencies([loss_averages_op]):\n total_loss = tf.identity(total_loss)\n\n batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,\n scope=None)\n\n # Calculate the gradients for the batch of data on this ImageNet\n # tower.\n grads = opt.compute_gradients(total_loss)\n\n # Apply gradients.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n tf.summary.histogram(var.op.name + '/gradients', grad)\n\n # Track the moving averages of all trainable variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n inception.MOVING_AVERAGE_DECAY, global_step)\n\n variables_to_average = (tf.trainable_variables() +\n tf.moving_average_variables())\n variables_averages_op = variable_averages.apply(variables_to_average)\n\n # Group all updates to into a single train op.\n batchnorm_updates_op = tf.group(*batchnorm_updates)\n train_op = tf.group(apply_gradient_op, variables_averages_op,\n batchnorm_updates_op)\n\n # Create a saver.\n saver = tf.train.Saver(tf.all_variables())\n\n # Build the summary operation from the last tower summaries.\n summary_op = tf.summary.merge_all()\n\n # Build an initialization operation to run below.\n init = tf.global_variables_initializer()\n\n # open session and initialize\n sess = tf.Session(config=tf.ConfigProto(\n log_device_placement=True))\n sess.run(init)\n\n # restore old 
checkpoint\n if FLAGS.fine_tune:\n checkpoint = tf.train.get_checkpoint_state(FLAGS.ckpt_restore_dir)\n if checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n else:\n print(\"Could not find old network weights\")\n else:\n variables_to_restore = tf.get_collection(\n slim.variables.VARIABLES_TO_RESTORE)\n restorer = tf.train.Saver(variables_to_restore)\n restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)\n print('%s: Pre-trained model restored from %s' %\n (datetime.now(), FLAGS.pretrained_model_checkpoint_path))\n\n summary_writer = tf.summary.FileWriter(\n FLAGS.ckpt_save_dir,\n graph_def=sess.graph.as_graph_def(add_shapes=True))\n\n step = 1\n while step <= FLAGS.max_steps:\n start_time = time.time()\n # construct image batch and label batch for one step train\n minibatch = []\n for count in xrange(0, BATCH_SIZE):\n element = train_set_queue.pop()\n minibatch.append(element)\n train_set_queue.appendleft(element)\n\n image_list = [load_image(d[0]) for d in minibatch]\n label_list = [d[1] for d in minibatch]\n\n image_batch = np.array(image_list)\n label_batch = np.array(label_list)\n\n image_batch = np.reshape(image_batch, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])\n label_batch = np.reshape(label_batch, [BATCH_SIZE])\n\n _, loss_value = sess.run([train_op, total_loss], feed_dict={images: image_batch, labels: label_batch})\n\n duration = time.time() - start_time\n\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step == 1 or step % 10 == 0:\n num_examples_per_step = BATCH_SIZE\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n\n print(format_str % (datetime.now(), step, loss_value,\n examples_per_sec, sec_per_batch))\n\n # shuttle the image list per epoch\n if step % num_batches_per_epoch == 0:\n random.shuffle(train_set_queue)\n\n # write summary periodically\n if step == 1 or step % 100 == 0:\n summary_str = sess.run(summary_op, feed_dict={images: image_batch, labels: label_batch})\n summary_writer.add_summary(summary_str, step)\n\n # Save the model checkpoint periodically.\n if step % 1000 == 0:\n checkpoint_path = os.path.join(FLAGS.ckpt_save_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n\n step += 1\n\n\nif __name__ == '__main__':\n train()\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.app.flags.DEFINE_string", "tensorflow.app.flags.DEFINE_float", "tensorflow.all_variables", "tensorflow.identity", "tensorflow.summary.histogram", "tensorflow.train.ExponentialMovingAverage", "tensorflow.global_variables_initializer", "numpy.reshape", "tensorflow.Graph", "tensorflow.train.exponential_decay", "numpy.isnan", "tensorflow.app.flags.DEFINE_integer", "tensorflow.constant_initializer", "tensorflow.add_n", "tensorflow.train.RMSPropOptimizer", "tensorflow.get_collection", "tensorflow.train.Saver", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.group", "tensorflow.ConfigProto", "tensorflow.control_dependencies", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.train.get_checkpoint_state", "tensorflow.trainable_variables", "numpy.array", "tensorflow.moving_average_variables" ] ]
maheshacherrypick/MLAlgorithms
[ "829c74cf7d79307fc6ca1d849e65b959fb10e5de" ]
[ "mla/metrics/tests/test_metrics.py" ]
[ "from __future__ import division\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_almost_equal\n\nfrom mla.metrics.base import check_data, validate_input\nfrom mla.metrics.metrics import get_metric\n\n\ndef test_data_validation():\n with pytest.raises(ValueError):\n check_data([], 1)\n\n with pytest.raises(ValueError):\n check_data([1, 2, 3], [3, 2])\n\n a, b = check_data([1, 2, 3], [3, 2, 1])\n\n assert np.all(a == np.array([1, 2, 3]))\n assert np.all(b == np.array([3, 2, 1]))\n\n\ndef metric(name):\n return validate_input(get_metric(name))\n\n\ndef test_classification_error():\n f = metric('classification_error')\n assert f([1, 2, 3, 4], [1, 2, 3, 4]) == 0\n assert f([1, 2, 3, 4], [1, 2, 3, 5]) == 0.25\n assert f([1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0]) == (1.0 / 6)\n\n\ndef test_absolute_error():\n f = metric('absolute_error')\n assert f([3], [5]) == [2]\n assert f([-1], [-4]) == [3]\n\n\ndef test_mean_absolute_error():\n f = metric('mean_absolute_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f([1, 2, 3], [3, 2, 1]) == 4 / 3\n\n\ndef test_squared_error():\n f = metric('squared_error')\n assert f([1], [1]) == [0]\n assert f([3], [1]) == [4]\n\n\ndef test_squared_log_error():\n f = metric('squared_log_error')\n assert f([1], [1]) == [0]\n assert f([3], [1]) == [np.log(2) ** 2]\n assert f([np.exp(2) - 1], [np.exp(1) - 1]) == [1.0]\n\n\ndef test_mean_squared_log_error():\n f = metric('mean_squared_log_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f([1, 2, 3, np.exp(1) - 1], [1, 2, 3, np.exp(2) - 1]) == 0.25\n\n\ndef test_root_mean_squared_log_error():\n f = metric('root_mean_squared_log_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f([1, 2, 3, np.exp(1) - 1], [1, 2, 3, np.exp(2) - 1]) == 0.5\n\n\ndef test_mean_squared_error():\n f = metric('mean_squared_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f(range(1, 5), [1, 2, 3, 6]) == 1\n\n\ndef test_root_mean_squared_error():\n f = metric('root_mean_squared_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f(range(1, 5), [1, 2, 3, 5]) == 0.5\n\n\ndef test_multiclass_logloss():\n f = metric('logloss')\n assert_almost_equal(f([1], [1]), 0)\n assert_almost_equal(f([1, 1], [1, 1]), 0)\n assert_almost_equal(f([1], [0.5]), -np.log(0.5))\n" ]
[ [ "numpy.array", "numpy.log", "numpy.exp" ] ]
wikfeldt/intro-to-dl
[ "7fb1fb6c520941143000c5e1b46c48c95db17ed6" ]
[ "day2/tf2-dvc-cnn-evaluate.py" ]
[ "\n# coding: utf-8\n\n# # Dogs-vs-cats classification with CNNs\n# \n# In this notebook, we'll train a convolutional neural network (CNN,\n# ConvNet) to classify images of dogs from images of cats using\n# TensorFlow 2.0 / Keras. This notebook is largely based on the blog\n# post [Building powerful image classification models using very\n# little data]\n# (https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html)\n# by François Chollet.\n# \n# **Note that using a GPU with this notebook is highly recommended.**\n# \n# First, the needed imports.\n\nimport os, datetime, sys\nimport random\nimport pathlib\n\nimport tensorflow as tf\n\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import (Dense, Activation, Dropout, Conv2D,\n Flatten, MaxPooling2D, InputLayer)\nfrom tensorflow.keras.preprocessing.image import (ImageDataGenerator, \n array_to_img, \n img_to_array, load_img)\nfrom tensorflow.keras import applications, optimizers\n\nfrom tensorflow.keras.callbacks import TensorBoard\n\nimport numpy as np\n\nprint('Using Tensorflow version:', tf.__version__,\n 'Keras version:', tf.keras.__version__,\n 'backend:', tf.keras.backend.backend())\n\n# ## Data\n# \n# The test set consists of 22000 images.\n\nif 'DATADIR' in os.environ:\n DATADIR = os.environ['DATADIR']\nelse:\n DATADIR = \"/scratch/project_2003747/data/\"\n\ndatapath = os.path.join(DATADIR, \"dogs-vs-cats/train-2000/\")\n\nnimages = dict()\nnimages['test'] = 22000\n\n# ### Image paths and labels\n\ndef get_paths(dataset):\n data_root = pathlib.Path(datapath+dataset)\n image_paths = list(data_root.glob('*/*'))\n image_paths = [str(path) for path in image_paths]\n image_count = len(image_paths)\n assert image_count == nimages[dataset], \"Found {} images, expected {}\".format(image_count, nimages[dataset])\n return image_paths\n\nimage_paths = dict()\nimage_paths['test'] = get_paths('test')\n\nlabel_names = sorted(item.name for item in pathlib.Path(datapath+'train').glob('*/')\n if item.is_dir())\nlabel_to_index = dict((name, index) for index,name in enumerate(label_names))\n\ndef get_labels(dataset):\n return [label_to_index[pathlib.Path(path).parent.name]\n for path in image_paths[dataset]]\n \nimage_labels = dict()\nimage_labels['test'] = get_labels('test')\n\n# ### Data augmentation\n# \n# We need to resize all test images to a fixed size. Here we'll use\n# 160x160 pixels.\n# \n# Unlike the training images, we do not apply any random\n# transformations to the test images.\n\nINPUT_IMAGE_SIZE = [160, 160, 3]\n\ndef preprocess_image(image, augment):\n image = tf.image.decode_jpeg(image, channels=3)\n if augment:\n image = tf.image.resize(image, [256, 256])\n image = tf.image.random_crop(image, INPUT_IMAGE_SIZE)\n if random.random() < 0.5:\n image = tf.image.flip_left_right(image)\n else:\n image = tf.image.resize(image, INPUT_IMAGE_SIZE[:2])\n image /= 255.0 # normalize to [0,1] range\n return image\n\ndef load_and_augment_image(path, label):\n image = tf.io.read_file(path)\n return preprocess_image(image, True), label\n\ndef load_and_not_augment_image(path, label):\n image = tf.io.read_file(path)\n return preprocess_image(image, False), label\n\n# ### TF Datasets\n# \n# Let's now define our TF Dataset\n# (https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/data/Dataset#class_dataset)\n# for the test data. 
First the Datasets contain the filenames of the\n# images and the corresponding labels.\n\ntest_dataset = tf.data.Dataset.from_tensor_slices((image_paths['test'],\n image_labels['test']))\n\n# We then map() the filenames to the actual image data and decode the images.\n\nBATCH_SIZE = 32\n\ntest_dataset = test_dataset.map(load_and_not_augment_image, num_parallel_calls=10)\ntest_dataset = test_dataset.batch(BATCH_SIZE, drop_remainder=False)\ntest_dataset = test_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n# ### Initialization\n\nif len(sys.argv)<2:\n print('ERROR: model file missing')\n sys.exit()\n \nmodel = load_model(sys.argv[1])\n\nprint(model.summary())\n\n# ### Inference\n\nprint('Evaluating model', sys.argv[1])\nscores = model.evaluate(test_dataset, verbose=2)\nprint(\"Test set %s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.image.resize", "tensorflow.image.random_crop", "tensorflow.image.flip_left_right", "tensorflow.keras.backend.backend", "tensorflow.io.read_file", "tensorflow.image.decode_jpeg", "tensorflow.data.Dataset.from_tensor_slices" ] ]
msohaibalam/grove
[ "8c27a5d12923d6ace57956db6a249e8d01e33f35" ]
[ "grove/tests/jordan_gradient/test_gradient_utils.py" ]
[ "import numpy as np\n\nfrom grove.alpha.jordan_gradient.gradient_utils import binary_to_real, \\\n measurements_to_bf\n\n\ndef test_binary_to_real():\n for sign in [1, -1]:\n decimal_rep = sign * 0.345703125\n binary_rep = str(sign * 0.010110001)\n\n decimal_convert = binary_to_real(binary_rep)\n\n assert(np.isclose(decimal_rep, decimal_convert))\n\n\ndef test_measurements_to_bf():\n measurements = [[1, 0, 0], [1, 0, 0], [1, 1, 0], [1, 0, 0]]\n true_bf = 0.01\n\n bf_from_measurements = measurements_to_bf(measurements)\n\n assert(np.isclose(true_bf, bf_from_measurements))\n" ]
[ [ "numpy.isclose" ] ]
spatialucr/lne
[ "951df7334c1495a133f6fc982eaa1d366f08a68b" ]
[ "LNCE/python/INCS.py" ]
[ "\"\"\"\nIndicators of Neighborhood Change\n\"\"\"\n\nfrom collections import defaultdict\nimport numpy as np\n\ndef _labels_to_neighborhoods(labels):\n \"\"\"Convert a list of labels to neighborhoods dictionary\n Arguments\n ---------\n labels: list of neighborhood labels\n Returns\n -------\n neighborhoods: dictionary\n key is the label for each neighborhood, value is the list of\n area indexes defining that neighborhood\n Examples\n --------\n >>> labels = [1,1,1,2,2,3]\n >>> neighborhoods = _labels_to_neighborhoods(labels)\n >>> neighborhoods[1]\n [0, 1, 2]\n >>> neighborhoods[2]\n [3, 4]\n >>> neighborhoods[3]\n [5]\n \"\"\"\n neighborhoods = defaultdict(list)\n for i, label in enumerate(labels):\n #if label != -9999:\n neighborhoods[label].append(i)\n return neighborhoods\n\n\ndef linc(labels_sequence):\n \"\"\"Local Indicator of Neighborhood Change\n Arguments\n ---------\n labels_sequence: sequence of neighborhood labels (n,t)\n n areas in n periods\n first element is a list of neighborhood labels per area in\n period 0, second element is a list of neighborhood labels\n per area in period 1, and so on for all T periods.\n Returns\n -------\n lincs: array\n local indicator of neighborhood change over all periods\n Notes\n -----\n The local indicator of neighborhood change defined here allows for\n singleton neighborhoods (i.e., neighborhoods composed of a single primitive\n area such as a tract or block.). This is in contrast to the initial\n implementation in :cite:`rey2011` which prohibited singletons.\n Examples\n --------\n Time period 0 has the city defined as four neighborhoods on 10 tracts:\n >>> labels_0 = [1, 1, 1, 1, 2, 2, 3, 3, 3, 4]\n Time period 1 in the same city, with slight change in composition of the four neighborhoods\n >>> labels_1 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]\n >>> res = linc([labels_0, labels_1])\n >>> res[4]\n 1.0\n >>> res[1]\n 0.25\n >>> res[7]\n 0.0\n >>> res[-1]\n 0.0\n And, in period 2, no change\n >>> labels_2 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]\n >>> res = linc([labels_1, labels_2])\n >>> res[0]\n 0.0\n We can pass more than two time periods, and get a \"time-wise global linc\"\n for each unit\n >>> res = linc([labels_0, labels_1, labels_2])\n >>> res[0]\n 0.25\n \"\"\"\n ltn = _labels_to_neighborhoods\n #print(labels_sequence)\n neighborhood_sequences = [ltn(labels) for labels in labels_sequence]\n #print(neighborhood_sequences[0])\n #print(neighborhood_sequences[1])\n ns = neighborhood_sequences\n n_areas = len(labels_sequence[0])\n lincs = np.zeros((n_areas,))\n\n T = len(labels_sequence)\n for i in range(n_areas):\n neighbors = []\n for t in range(T):\n if (labels_sequence[t][i] == -9999): continue\n neighbors.append(set(ns[t][labels_sequence[t][i]]))\n if (len(neighbors) < 2): \n lincs[i] = -9999\n else:\n intersection = set.intersection(*neighbors)\n union = set.union(*neighbors)\n n_union = len(union)\n if n_union == 1: # singleton at all points in time\n lincs[i] = 0.\n else:\n lincs[i] = round(1.0 - ((len(intersection)-1)/(n_union-1)),2)\n #print(\"Tract ID #\", i, \"-----------------------------------\")\t\t\n #print(\"*neighbors=\",*neighbors)\t\t\n #print(\"intersection= \",intersection)\n #print(\"union=\",union)\n #print(\" \")\n #print(\" \") \n return lincs\n\t\nif __name__ == '__main__':\t\n\t #0, 1, 2, 3, 4, 5, 6, 7, 8, 9\n tract70 = [1, 1, 2, 2, 3, 3, 1, 2, 2, 1 ]\t\n tract80 = [1, 1, 1, 3, 3, 3, 3, 2, 2, 3 ]\n tract90 = [1, 1, 3, 3, 2, 2, 3, 2, 2, 3 ]\t\n #0, 1, 2, 3, 4, 5, 6, 7, 8, 9\n #labels_0 = [1, 1, 1, 1, 2, 2, 3, 3, 4, 4] 
\n #labels_1 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]\n #INC_bw_70_80 = linc([tract70, tract80])\n INC_bw_80_90 = linc([tract80, tract90])\n #INC_bw_70_80_90 = linc([tract70, tract80, tract90])\n #print(\"INC_bw_70_80= \",INC_bw_70_80)\n print(\"INC_bw_80_90= \",INC_bw_80_90)\n #print(\"INC_bw_70_80_90= \",INC_bw_70_80_90)\n\n\t #tractID: 0 1 2 3 4 5 6 7 8 9\n#labels_0 = [1, 1, 1, 1, 2, 2, 3, 3, 4, 4] \n#labels_1 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]\n#Res = [0.25, 0.25, 0.25, 0.25, 1.00, 1.00 ,0.5, 0.5, 1.00, 1.00 ]\n" ]
[ [ "numpy.zeros" ] ]
justinpak13/Watch-Analysis
[ "a7ce05c4fef859d66675d801e8a05f3f7e8e8d4e" ]
[ "dashboard.py" ]
[ "import dash\nfrom dash.dependencies import Input, Output\nfrom dash import dash_table\nfrom dash import dcc\nfrom dash import html\nimport pandas as pd\n\n# Import data into pandas\ndf = pd.read_csv(\"data.csv\")\ndf[\"Condition\"] = df[\"Condition Category\"]\ndf = df.drop([\"Condition Category\", \"Missed Prices\", \"Index\", \"SKU\"], axis=1)\n\ndf = df[\n [\n \"Brand\",\n \"Model\",\n \"Reference\",\n \"Year\",\n \"Condition\",\n \"Papers\",\n \"Box\",\n \"Movement\",\n \"Dimensions\",\n \"Gender\",\n \"Case\",\n \"Bracelet\",\n \"Crystal\",\n \"Dial Color\",\n \"Price\",\n \"Features\",\n \"Link\",\n ]\n]\n\napp = dash.Dash(__name__)\n\nmoney = dash_table.FormatTemplate.money(0)\n# App Layout\napp.layout = html.Div(\n [\n # Title\n html.H1(\"Watch Data\", style={\"text-align\": \"center\"}),\n # Dropdowns\n html.Div(\n className=\"row\",\n children=[\n # First dropdown\n html.Div(\n children=[\n html.Label([\"Brand\"], style={\"text-align\": \"center\"},),\n dcc.Dropdown(\n id=\"brand_dropdown\",\n options=[\n {\"label\": i, \"value\": i}\n for i in df[\"Brand\"].sort_values().unique()\n ],\n value=None,\n clearable=True,\n searchable=True,\n ),\n ],\n style=dict(width=\"50%\"),\n ),\n # Second dropdown\n html.Div(\n children=[\n html.Label([\"Model\"], style={\"text-align\": \"center\"},),\n dcc.Dropdown(\n id=\"model_dropdown\",\n value=None, # [![enter image description here][1]][1]\n clearable=True,\n searchable=True,\n ),\n ],\n style=dict(width=\"50%\"),\n ),\n html.Div(\n children=[\n html.Label([\"Price\"], style={\"text-align\": \"center\"},),\n dcc.RangeSlider(\n id=\"range_slider\",\n tooltip={\"placement\": \"bottom\", \"always_visible\": True},\n ),\n ],\n style=dict(width=\"50%\"),\n ),\n ],\n style=dict(display=\"flex\"),\n ),\n html.Br(),\n html.Div(\n [\n dash_table.DataTable(\n id=\"table\",\n filter_action=\"native\",\n sort_action=\"native\",\n style_cell={\"textAlign\": \"left\", \"minWidth\": 110, \"width\": 110},\n style_table={\"minWidth\": \"100%\"},\n style_cell_conditional=[\n {\"if\": {\"column_id\": \"Features\"}, \"textAlign\": \"right\",},\n {\"if\": {\"column_id\": \"Link\"}, \"textAlign\": \"right\"},\n ],\n style_data_conditional=[\n {\n \"if\": {\"row_index\": \"odd\"},\n \"backgroundColor\": \"rgb(220, 220, 220)\",\n }\n ],\n style_header={\n \"backgroundColor\": \"rgb(210, 210, 210)\",\n \"color\": \"black\",\n \"fontWeight\": \"bold\",\n },\n )\n ]\n ),\n ]\n)\n\n# Connecting Dash Components\[email protected](\n [Output(component_id=\"model_dropdown\", component_property=\"options\")],\n [Input(component_id=\"brand_dropdown\", component_property=\"value\")],\n)\ndef update_model(brand_selected):\n\n dff = df[df[\"Brand\"] == brand_selected]\n return [[{\"label\": i, \"value\": i} for i in dff[\"Model\"].sort_values().unique()]]\n\n\[email protected](\n [\n Output(component_id=\"range_slider\", component_property=\"min\"),\n Output(component_id=\"range_slider\", component_property=\"max\"),\n Output(component_id=\"range_slider\", component_property=\"value\"),\n ],\n [\n Input(component_id=\"brand_dropdown\", component_property=\"value\"),\n Input(component_id=\"model_dropdown\", component_property=\"value\"),\n ],\n)\ndef update_slider(brand_selected, model_selected):\n\n dff = df[(df[\"Brand\"] == brand_selected) & (df[\"Model\"] == model_selected)]\n return (\n dff[\"Price\"].min(),\n dff[\"Price\"].max(),\n [dff[\"Price\"].min(), dff[\"Price\"].max()],\n )\n\n\[email protected](\n [\n Output(component_id=\"table\", 
component_property=\"columns\"),\n Output(component_id=\"table\", component_property=\"data\"),\n ],\n [\n Input(component_id=\"brand_dropdown\", component_property=\"value\"),\n Input(component_id=\"model_dropdown\", component_property=\"value\"),\n Input(component_id=\"range_slider\", component_property=\"value\"),\n ],\n)\ndef update_table(brand_selected, model_selected, range):\n if brand_selected is None and model_selected is None:\n dff = df\n elif model_selected is None:\n dff = df[df[\"Brand\"] == brand_selected]\n else:\n dff = df[\n (df[\"Brand\"] == brand_selected)\n & (df[\"Model\"] == model_selected)\n & (df[\"Price\"] >= range[0])\n & (df[\"Price\"] <= range[1])\n ]\n return (\n [\n {\"name\": i, \"id\": i, \"hideable\": True, \"type\": \"numeric\", \"format\": money}\n if i == \"Price\"\n else {\"name\": i, \"id\": i, \"hideable\": True}\n for i in dff.columns\n ],\n dff.to_dict(\"records\"),\n )\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n" ]
[ [ "pandas.read_csv" ] ]
xsir317/AlphaRenju
[ "d5fdcf8d1442e4e43661a4cee88c95d5c25fd45a" ]
[ "policy_value_net.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nAn implementation of the policyValueNet in Tensorflow\nTested in Tensorflow 1.4 and 1.5\n\n@author: Xiang Zhong\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass PolicyValueNet():\n def __init__(self, model_file=None):\n self.model_file = model_file\n self.loss_weight = [1.0,0.1] # policy weight and value weight\n\n #TODO https://github.com/NeymarL/ChineseChess-AlphaZero/blob/distributed/cchess_alphazero/agent/model.py 参考这个来弄一个残差网络,5层据说就很好用了。\n # Define the tensorflow neural network\n # 1. Input:\n self.input_states = tf.placeholder(\n tf.float32, shape=[None, 3, 15, 15])\n self.input_state = tf.transpose(self.input_states, [0, 2, 3, 1])\n # 2. Common Networks Layers\n self.conv1 = tf.layers.conv2d(inputs=self.input_state,\n filters=32, kernel_size=[3, 3],\n padding=\"same\", data_format=\"channels_last\",\n activation=tf.nn.relu)\n self.conv2 = tf.layers.conv2d(inputs=self.conv1, filters=64,\n kernel_size=[3, 3], padding=\"same\",\n data_format=\"channels_last\",\n activation=tf.nn.relu)\n self.conv3 = tf.layers.conv2d(inputs=self.conv2, filters=128,\n kernel_size=[3, 3], padding=\"same\",\n data_format=\"channels_last\",\n activation=tf.nn.relu)\n # 3-1 Action Networks\n self.action_conv = tf.layers.conv2d(inputs=self.conv3, filters=3,\n kernel_size=[1, 1], padding=\"same\",\n data_format=\"channels_last\",\n activation=tf.nn.relu)\n # Flatten the tensor\n self.action_conv_flat = tf.reshape(\n self.action_conv, [-1, 3 * 15 * 15])\n # 3-2 Full connected layer, the output is the log probability of moves\n # on each slot on the board\n self.action_fc = tf.layers.dense(inputs=self.action_conv_flat,\n units=15 * 15,\n activation=tf.nn.log_softmax)\n # 4 Evaluation Networks\n self.evaluation_conv = tf.layers.conv2d(inputs=self.conv3, filters=2,\n kernel_size=[1, 1],\n padding=\"same\",\n data_format=\"channels_last\",\n activation=tf.nn.relu)\n self.evaluation_conv_flat = tf.reshape(\n self.evaluation_conv, [-1, 2 * 15 * 15])\n self.evaluation_fc1 = tf.layers.dense(inputs=self.evaluation_conv_flat,\n units=64, activation=tf.nn.relu)\n # output the score of evaluation on current state\n self.evaluation_fc2 = tf.layers.dense(inputs=self.evaluation_fc1,\n units=1, activation=tf.nn.tanh)\n\n # Define the Loss function\n # 1. Label: the array containing if the game wins or not for each state\n self.labels = tf.placeholder(tf.float32, shape=[None, 1])\n # 2. Predictions: the array containing the evaluation score of each state\n # which is self.evaluation_fc2\n # 3-1. Value Loss function\n self.value_loss = tf.losses.mean_squared_error(self.labels,\n self.evaluation_fc2)\n # 3-2. Policy Loss function\n self.mcts_probs = tf.placeholder(\n tf.float32, shape=[None, 15 * 15])\n self.policy_loss = tf.negative(tf.reduce_mean(\n tf.reduce_sum(tf.multiply(self.mcts_probs, self.action_fc), 1)))\n # 3-3. 
L2 penalty (regularization)\n l2_penalty_beta = 1e-4\n vars = tf.trainable_variables()\n l2_penalty = l2_penalty_beta * tf.add_n(\n [tf.nn.l2_loss(v) for v in vars if 'bias' not in v.name.lower()])\n # 3-4 Add up to be the Loss function\n self.loss = self.loss_weight[0] * self.policy_loss + self.loss_weight[1] * self.value_loss + l2_penalty\n\n # Define the optimizer we use for training\n self.learning_rate = tf.placeholder(tf.float32)\n self.optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate).minimize(self.loss)\n\n # Make a session\n self.session = tf.Session()\n\n # calc policy entropy, for monitoring only\n self.entropy = tf.negative(tf.reduce_mean(\n tf.reduce_sum(tf.exp(self.action_fc) * self.action_fc, 1)))\n\n # Initialize variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n\n # For saving and restoring\n self.saver = tf.train.Saver()\n if self.model_file is not None and tf.train.checkpoint_exists(self.model_file):\n self.restore_model()\n print (\"restore from :\" , self.model_file)\n else:\n print (\"no file to load\")\n\n def policy_value(self, state_batch):\n \"\"\"\n input: a batch of states\n output: a batch of action probabilities and state values\n \"\"\"\n log_act_probs, value = self.session.run(\n [self.action_fc, self.evaluation_fc2],\n feed_dict={self.input_states: state_batch}\n )\n act_probs = np.exp(log_act_probs)\n return act_probs, value\n\n def policy_value_fn(self, board):\n \"\"\"\n input: board\n output: a list of (action, probability) tuples for each available\n action and the score of the board state\n \"\"\"\n legal_positions = board.availables\n current_state = np.ascontiguousarray(board.current_state().reshape(\n -1, 3, 15, 15))\n act_probs, value = self.policy_value(current_state)\n act_probs = zip(legal_positions, act_probs[0][legal_positions])\n return act_probs, value\n\n def train_step(self, state_batch, mcts_probs, winner_batch, lr):\n \"\"\"perform a training step\"\"\"\n winner_batch = np.reshape(winner_batch, (-1, 1))\n loss, entropy, _ = self.session.run(\n [self.loss, self.entropy, self.optimizer],\n feed_dict={self.input_states: state_batch,\n self.mcts_probs: mcts_probs,\n self.labels: winner_batch,\n self.learning_rate: lr})\n return loss, entropy\n\n def save_model(self):\n self.saver.save(self.session, self.model_file)\n\n def restore_model(self):\n self.saver.restore(self.session, self.model_file)\n" ]
[ [ "tensorflow.layers.conv2d", "tensorflow.placeholder", "tensorflow.reshape", "tensorflow.global_variables_initializer", "numpy.reshape", "tensorflow.train.checkpoint_exists", "tensorflow.train.AdamOptimizer", "numpy.exp", "tensorflow.trainable_variables", "tensorflow.multiply", "tensorflow.nn.l2_loss", "tensorflow.exp", "tensorflow.Session", "tensorflow.losses.mean_squared_error", "tensorflow.train.Saver", "tensorflow.transpose", "tensorflow.layers.dense" ] ]
lsb-riken/CUBIC-informatics
[ "e7982072bb5d892f55e86cdf671376ab379b9b29" ]
[ "script/MergeBrain.py" ]
[ "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n\"\"\"Overview:\n Downscale images & cells for altas mapping\n\nUsage:\n MergeBrain.py images PARAM_FILE [-p NUM_CPUS] [--exec <path>]\n MergeBrain.py cells PARAM_FILE\n MergeBrain.py full PARAM_FILE [-p NUM_CPUS] [--exec <path>]\n\nOptions:\n -h --help Show this screen.\n --version Show version.\n -p NUM_CPUS Number of cpus to be used [default: -1](all available).\n --exec <path> Location of the executable [default: ./build/ScaleMerge]\n\"\"\"\n\nimport json, glob, os.path, shutil\nimport tifffile\nimport functools\nfrom docopt import docopt\nimport joblib\nimport subprocess as sp\nimport pandas as pd\nimport numpy as np\n\nfrom HalfBrainCells import HalfBrainCells\nfrom HalfBrainImages import HalfBrainImages\n\n\ndt_scalemerged = np.dtype([\n ('scaled_x','f4'), ('scaled_y', 'f4'), ('scaled_z', 'f4'),\n ('is_valid', 'bool'),\n])\n\ndef run_ScaleMerge(paramfile, mergedfile, path_exec, logfile=None, print_output=True):\n mergedfile_mean,mergedfile_max,mergedfile_min = mergedfile\n cmd = \" \".join([path_exec, paramfile,\n mergedfile_mean,mergedfile_max,mergedfile_min])\n print(\"[*] Executing : {}\".format(cmd))\n out = sp.check_output([path_exec, paramfile,\n mergedfile_mean,mergedfile_max,mergedfile_min])\n if logfile:\n with open(logfile, \"wb\") as f:\n f.write(out)\n else:\n if print_output:\n print(out.decode())\n return\n\nclass WholeBrainImages(object):\n def __init__(self, paramfile, ):\n print(\"\\n[*] Initializing WholeBrain({})\".format(paramfile))\n with open(paramfile) as f:\n self.params = json.load(f)\n\n self.halfbrain_FW = HalfBrainImages(self.params[\"HDoG_paramfile\"][\"FW\"])\n self.halfbrain_RV = HalfBrainImages(self.params[\"HDoG_paramfile\"][\"RV\"])\n\n # asuume scale is equivalent for FW & RV except for direction\n assert abs(self.halfbrain_FW.scale_xy) == abs(self.halfbrain_RV.scale_xy)\n assert abs(self.halfbrain_FW.scale_z) == abs(self.halfbrain_RV.scale_z)\n self.fnames_FW = self.halfbrain_FW.list_fnames_all\n self.fnames_RV = self.halfbrain_RV.list_fnames_all\n self.zs_FW = self.halfbrain_FW.list_zs_all\n self.zs_RV = self.halfbrain_RV.list_zs_all\n self.zs_global_FW = self.halfbrain_FW.list_zs_global_all\n self.zs_global_RV = self.halfbrain_RV.list_zs_global_all\n\n # boundary position\n fname_boundary_FW = self.params[\"merge_info\"][\"boundary_fname\"][\"FW\"]\n fname_boundary_RV = self.params[\"merge_info\"][\"boundary_fname\"][\"RV\"]\n if len(self.zs_FW) > 0:\n self.iz_FW_boundary = self.zs_FW.index(int(fname_boundary_FW))\n else:\n self.iz_FW_boundary = None\n if len(self.zs_RV) > 0:\n self.iz_RV_boundary = self.zs_RV.index(int(fname_boundary_RV))\n else:\n self.iz_RV_boundary = None\n\n print(\"\\t boundary for FW ({}) at i={}\".format(fname_boundary_FW, self.iz_FW_boundary))\n print(\"\\t boundary for RV ({}) at i={}\".format(fname_boundary_RV, self.iz_RV_boundary))\n\n self.skip_z_FW = 1\n self.skip_z_RV = 1\n self.param_header_FW = \"\"\n self.param_header_RV = \"\"\n self.precompute_param_header(is_FW=True)\n self.precompute_param_header(is_FW=False)\n self.bound_z_global_FW = (-np.inf, +np.inf)\n self.bound_z_global_RV = (-np.inf, +np.inf)\n self.merged_depth = None\n\n self.single_mergedfile_mean = os.path.join(self.params[\"dst_basedir\"], \"whole.tif\")\n self.single_mergedfile_max = os.path.join(self.params[\"dst_basedir\"], \"whole_max.tif\")\n self.single_mergedfile_min = os.path.join(self.params[\"dst_basedir\"], \"whole_min.tif\")\n\n def precompute_param_header(self, is_FW):\n if is_FW:\n 
print(\"[*] Precomputng param header for FW\")\n halfbrain = self.halfbrain_FW\n flip_rot_before_info = self.params[\"merge_info\"][\"flip_rot\"][\"FW\"]\n else:\n print(\"[*] Precomputng param header for RV\")\n halfbrain = self.halfbrain_RV\n flip_rot_before_info = self.params[\"merge_info\"][\"flip_rot\"][\"RV\"]\n\n input_image_info = halfbrain.params[\"input_image_info\"]\n flip_rot_after_info = self.params[\"scale_info\"][\"flip_rot\"]\n\n # downscale ratio\n down_scale_xyz = self.params[\"scale_info\"][\"downscale_unit\"]\n downscale_ratio_xy = float(abs(halfbrain.scale_xy)) / down_scale_xyz # [um / um] = dimensionless\n assert down_scale_xyz % halfbrain.scale_z == 0\n downscale_ratio_z = float(abs(halfbrain.scale_z)) / down_scale_xyz # [um / um] = dimensionless\n skip_z = int(down_scale_xyz / abs(halfbrain.scale_z))\n print(\"\\t downscale ratio for xy : {}\".format(downscale_ratio_xy))\n print(\"\\t downscale ratio for z : {} (skip={})\".format(downscale_ratio_z, skip_z))\n\n flip_rot_before = 0\n flip_rot_before += 1 if flip_rot_before_info[\"flipX\"] else 0\n flip_rot_before += 2 if flip_rot_before_info[\"flipY\"] else 0\n flip_rot_before += 4 if flip_rot_before_info[\"rotCCW\"] else 0\n flip_rot_before += 8 if flip_rot_before_info[\"rotCW\"] else 0\n flip_rot_after = 0\n flip_rot_after += 1 if flip_rot_after_info[\"flipX\"] else 0\n flip_rot_after += 2 if flip_rot_after_info[\"flipY\"] else 0\n if flip_rot_before_info[\"rotCCW\"] or flip_rot_before_info[\"rotCW\"]:\n width_loaded = input_image_info[\"height\"]\n height_loaded = input_image_info[\"width\"]\n else:\n width_loaded = input_image_info[\"width\"]\n height_loaded = input_image_info[\"height\"]\n num_xnames = len(halfbrain.list_xnames)\n num_ynames = len(halfbrain.list_ynames)\n param_dict = {\n \"width\": width_loaded,\n \"height\": height_loaded,\n \"num_xnames\": num_xnames,\n \"num_ynames\": num_ynames,\n \"downscale_ratio_xy\": downscale_ratio_xy,\n \"downscale_ratio_z\": downscale_ratio_z,\n \"overlap_left\": input_image_info[\"left_margin\"],\n \"overlap_right\": input_image_info[\"right_margin\"],\n \"overlap_top\": input_image_info[\"top_margin\"],\n \"overlap_bottom\": input_image_info[\"bottom_margin\"],\n \"flip_rot_before\": flip_rot_before,\n \"flip_rot_after\": flip_rot_after,\n \"imgformat\": 1, # bin\n \"showgrid\": 0, # no grid\n }\n\n # compute ScaleMerged parameters for cell coordinate transformation\n # apply transformation as in ScaleMerge\n strip_width = input_image_info[\"width\"] - input_image_info[\"left_margin\"] - input_image_info[\"right_margin\"]\n strip_height = input_image_info[\"height\"] - input_image_info[\"top_margin\"] - input_image_info[\"bottom_margin\"]\n if flip_rot_before_info[\"rotCCW\"] or flip_rot_before_info[\"rotCW\"]:\n strip_width,strip_height = strip_height,strip_width\n # max int less than or equal strip_width * downscale_ratio_xy\n sampled_width = int(strip_width * downscale_ratio_xy)\n sampled_height = int(strip_height * downscale_ratio_xy)\n actual_downscale_ratio_x = sampled_width / strip_width # [pixel / pixel] = dimensionless\n actual_downscale_ratio_y = sampled_height / strip_height # [pixel / pixel] = dimensionless\n kernel_width = strip_width / sampled_width\n kernel_height = strip_height / sampled_height\n merged_width = sampled_width * num_xnames\n merged_height = sampled_height * num_ynames\n\n margin_left = input_image_info[\"left_margin\"] * actual_downscale_ratio_x\n margin_right = input_image_info[\"right_margin\"] * actual_downscale_ratio_x\n 
margin_top = input_image_info[\"top_margin\"] * actual_downscale_ratio_y\n margin_bottom = input_image_info[\"bottom_margin\"] * actual_downscale_ratio_y\n if flip_rot_before_info[\"flipX\"]:\n margin_left,margin_right = margin_right,margin_left\n if flip_rot_before_info[\"flipY\"]:\n margin_top,margin_bottom = margin_bottom,margin_top\n if flip_rot_before_info[\"rotCCW\"]:\n margin_left,margin_top,margin_right,margin_bottom = margin_top,margin_right,margin_bottom,margin_left\n if flip_rot_before_info[\"rotCW\"]:\n margin_left,margin_top,margin_right,margin_bottom = margin_bottom,margin_left,margin_top,margin_right\n if flip_rot_after_info[\"flipX\"]:\n margin_left,margin_right = margin_right,margin_left\n if flip_rot_after_info[\"flipY\"]:\n margin_top,margin_bottom = margin_bottom,margin_top\n print(\"\\t original: {} x {} x ({} x {})\".format(input_image_info[\"width\"], input_image_info[\"height\"], num_xnames, num_ynames))\n print(\"\\t strip: {} x {} x ({} x {})\".format(strip_width, strip_height, num_xnames, num_ynames))\n print(\"\\t sampled: {} x {} x ({} x {})\".format(sampled_width, sampled_height, num_xnames, num_ynames))\n print(\"\\t merged: {} x {}\".format(merged_width, merged_height))\n print(\"\\t actual downscale ratio : {:.7f} x {:.7f}\".format(actual_downscale_ratio_x, actual_downscale_ratio_y))\n print(\"\\t merged_mergin: L:{:.3f} R:{:.3f} T:{:.3f} B:{:.3f}\".format(margin_left,margin_right,margin_top, margin_bottom))\n param_dict.update({\n \"merged_margin_left\": margin_left,\n \"merged_margin_right\": margin_right,\n \"merged_margin_top\": margin_top,\n \"merged_margin_bottom\": margin_bottom,\n \"strip_width\": strip_width,\n \"strip_height\": strip_height,\n \"sampled_width\": sampled_width,\n \"sampled_height\": sampled_height,\n \"actual_downscale_ratio_x\": actual_downscale_ratio_x,\n \"actual_downscale_ratio_y\": actual_downscale_ratio_y,\n \"kernel_width\": kernel_width,\n \"kernel_height\": kernel_height,\n \"merged_width\": merged_width,\n \"merged_height\": merged_height,\n })\n if is_FW:\n self.skip_z_FW = skip_z\n self.param_scalemerge_FW = param_dict\n else:\n self.skip_z_RV = skip_z\n self.param_scalemerge_RV = param_dict\n return\n\n def scalemerge(self, num_cpus=-1, dry_run=False, path_exec=\"./ScaleMerge\"):\n print(\"[*] Starting scalemerge...\")\n # Let's start merging FW & RV using boundary information\n scale_z_FW = self.halfbrain_FW.scale_z\n scale_z_RV = self.halfbrain_RV.scale_z\n if self.params[\"merge_info\"][\"use_at_boundary\"] == \"FW\":\n use_FW_at_boundary = True\n elif self.params[\"merge_info\"][\"use_at_boundary\"] == \"RV\":\n use_FW_at_boundary = False\n else:\n raise TypeError\n\n print(\"\\t FW length: {}\".format(len(self.fnames_FW)))\n print(\"\\t RV length: {}\".format(len(self.fnames_RV)))\n indices_FW = range(len(self.fnames_FW))\n indices_RV = range(len(self.fnames_RV))\n zflip = self.params[\"scale_info\"][\"flip_rot\"][\"flipZ\"]\n print(\"\\t z flip: {}\".format(\"on\" if zflip else \"off\"))\n\n is_halfsize = False\n if len(self.zs_global_FW) > 0:\n zs_global_FW0 = self.zs_global_FW[0]\n zs_global_FW1 = self.zs_global_FW[-1]\n else:\n zs_global_FW0 = None\n zs_global_FW1 = None\n is_halfsize = True\n if len(self.zs_global_RV) > 0:\n zs_global_RV0 = self.zs_global_RV[0]\n zs_global_RV1 = self.zs_global_RV[-1]\n else:\n zs_global_RV0 = None\n zs_global_RV1 = None\n is_halfsize = True\n\n if scale_z_FW * scale_z_RV > 0:\n print(\"[Case 1-4]\")\n print(\"\\t scale_z_FW\", scale_z_FW)\n print(\"\\t zs_global_FW[0]:\", 
zs_global_FW0)\n print(\"\\t zs_global_FW[-1]:\", zs_global_FW1)\n print(\"\\t zs_global_RV[0]:\", zs_global_RV0)\n print(\"\\t zs_global_RV[-1]:\", zs_global_RV1)\n # suppose FW & RV is growing in the same direction,\n # there is 4 scenarios for merging.\n if scale_z_FW > 0 and (is_halfsize or self.zs_global_FW[0] < self.zs_global_RV[0]):\n print(\"->[Case 1]\")\n # [case 1]\n # merged: |-FW-->|--RV-->\n # FW: |-FW---->\n # RV: |---RV-->\n # if halfsize, case2 and case1 comes to the same\n indices_FW_strip = indices_FW[:self.iz_FW_boundary+1][::-1][::self.skip_z_FW][::-1]\n indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV]\n\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[1:]\n else:\n indices_FW_strip = indices_FW_strip[:-1]\n is_FWs = [True for _ in indices_FW_strip] + [False for _ in indices_RV_strip]\n merging_fnames = [self.fnames_FW[i] for i in indices_FW_strip] + [self.fnames_RV[i] for i in indices_RV_strip]\n\n elif scale_z_FW > 0 and self.zs_global_RV[0] < self.zs_global_FW[0]:\n print(\"->[Case 2]\")\n # [case 2]\n # mergped: |-RV-->|--FW-->\n # FW: |---FW-->\n # RV: |-RV---->\n indices_RV_strip = indices_RV[:self.iz_RV_boundary+1][::-1][::self.skip_z_RV][::-1]\n indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW]\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[:-1]\n else:\n indices_FW_strip = indices_FW_strip[1:]\n is_FWs = [False for _ in indices_RV_strip] + [True for _ in indices_FW_strip]\n merging_fnames = [self.fnames_RV[i] for i in indices_RV_strip] + [self.fnames_FW[i] for i in indices_FW_strip]\n\n elif scale_z_FW < 0 and (is_halfsize or self.zs_global_FW[0] < self.zs_global_RV[0]):\n print(\"->[Case 3]\")\n # [case 3] (reverse case 1)\n # merged: |-FW-->|--RV-->\n # FW: <-FW----|\n # RV: <---RV--|\n # if halfsize, case3 and case4 comes to the same\n indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW][::-1]\n indices_RV_strip = indices_RV[:self.iz_RV_boundary+1][::-1][::self.skip_z_RV]\n\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[1:]\n else:\n indices_FW_strip = indices_FW_strip[:-1]\n is_FWs = [True for _ in indices_FW_strip] + [False for _ in indices_RV_strip]\n merging_fnames = [self.fnames_FW[i] for i in indices_FW_strip] + [self.fnames_RV[i] for i in indices_RV_strip]\n\n elif scale_z_FW < 0 and self.zs_global_RV[0] < self.zs_global_FW[0]:\n print(\"->[Case 4]\")\n # [case 4] : reverse case2\n # mergped: |-RV-->|--FW-->\n # FW: <---FW--|\n # RV: <-RV----|\n indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV][::-1]\n indices_FW_strip = indices_FW[:self.iz_FW_boundary+1][::-1][::self.skip_z_FW]\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[:-1]\n else:\n indices_FW_strip = indices_FW_strip[1:]\n is_FWs = [False for _ in indices_RV_strip] + [True for _ in indices_FW_strip]\n merging_fnames = [self.fnames_RV[i] for i in indices_RV_strip] + [self.fnames_FW[i] for i in indices_FW_strip]\n\n else:\n raise TypeError\n elif scale_z_FW * scale_z_RV < 0:\n # suppose FW & RV is growing in the opposite direction,\n # there is 4 scenarios\n print(\"[Case 5-8]\")\n print(\"\\t scale_z_FW\", scale_z_FW)\n print(\"\\t zs_global_FW[0]:\", zs_global_FW0)\n print(\"\\t zs_global_FW[-1]:\", zs_global_FW1)\n print(\"\\t zs_global_RV[0]:\", zs_global_RV0)\n print(\"\\t zs_global_RV[-1]:\", zs_global_RV1)\n if scale_z_FW < 0 and (is_halfsize or self.zs_global_FW[-1] < self.zs_global_RV[0]):\n print(\"->[Case 5]\")\n # [case 5]\n # merged: 
|-FW-->|--RV-->\n # FW: <-FW----|\n # RV: |---RV-->\n indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW][::-1]\n indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV]\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[1:]\n else:\n indices_FW_strip = indices_FW_strip[:-1]\n is_FWs = [True for _ in indices_FW_strip] + [False for _ in indices_RV_strip]\n merging_fnames = [self.fnames_FW[i] for i in indices_FW_strip] + [self.fnames_RV[i] for i in indices_RV_strip]\n\n elif scale_z_FW > 0 and (is_halfsize or self.zs_global_FW[-1] > self.zs_global_RV[0]):\n print(\"->[Case 6]\")\n # [case 6]\n # merged: |-RV-->|--FW-->\n # FW: |---FW-->\n # RV: <-RV----|\n indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV][::-1]\n indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW]\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[:-1]\n else:\n indices_FW_strip = indices_FW_strip[1:]\n is_FWs = [False for _ in indices_RV_strip] + [True for _ in indices_FW_strip]\n merging_fnames = [self.fnames_RV[i] for i in indices_RV_strip] + [self.fnames_FW[i] for i in indices_FW_strip]\n\n elif scale_z_FW > 0 and self.zs_global_FW[-1] < self.zs_global_RV[0]:\n print(\"->[Case 7]\")\n # [case 7] : reverse case5\n raise NotImplementedError\n elif scale_z_FW < 0 and self.zs_global_FW[-1] > self.zs_global_RV[0]:\n print(\"->[Case 8]\")\n # [case 8] : reverse case6\n raise NotImplementedError\n else:\n raise TypeError\n else:\n raise TypeError\n\n # save boundary point for picking valid cell candidates\n if is_halfsize:\n self.bound_z_global_FW = (-np.inf, +np.inf)\n self.bound_z_global_RV = (-np.inf, +np.inf)\n elif is_FWs[0]:\n self.bound_z_global_FW = (-np.inf, self.zs_global_FW[self.iz_FW_boundary])\n self.bound_z_global_RV = (self.zs_global_RV[self.iz_RV_boundary], +np.inf)\n else:\n self.bound_z_global_RV = (-np.inf, self.zs_global_RV[self.iz_RV_boundary])\n self.bound_z_global_FW = (self.zs_global_FW[self.iz_FW_boundary], +np.inf)\n\n self.merged_depth = len(merging_fnames)\n print(\"\\tmerged depth: {}\".format(self.merged_depth))\n if is_FWs[0]:\n self.new_origin_z_global = self.zs_global_FW[indices_FW_strip[0]]\n else:\n self.new_origin_z_global = self.zs_global_RV[indices_RV_strip[0]]\n print(\"\\tnew z_global origin : {}\".format(self.new_origin_z_global))\n\n if zflip:\n is_FWs = is_FWs[::-1]\n merging_fnames = merging_fnames[::-1]\n\n # write paramfiles for each process of ScaleMerge\n total_z_merged = len(merging_fnames)\n mergedfile_mean_basedir = os.path.join(self.params[\"dst_basedir\"], \"zs_mean\")\n mergedfile_max_basedir = os.path.join(self.params[\"dst_basedir\"], \"zs_max\")\n mergedfile_min_basedir = os.path.join(self.params[\"dst_basedir\"], \"zs_min\")\n if not os.path.exists(mergedfile_mean_basedir):\n os.makedirs(mergedfile_mean_basedir)\n if not os.path.exists(mergedfile_max_basedir):\n os.makedirs(mergedfile_max_basedir)\n if not os.path.exists(mergedfile_min_basedir):\n os.makedirs(mergedfile_min_basedir)\n mergedfile_mean_basename = os.path.join(mergedfile_mean_basedir, \"{i:04d}.tif\")\n mergedfile_max_basename = os.path.join(mergedfile_max_basedir, \"{i:04d}.tif\")\n mergedfile_min_basename = os.path.join(mergedfile_min_basedir, \"{i:04d}.tif\")\n mergedfiles = [(\n mergedfile_mean_basename.format(i=i),\n mergedfile_max_basename.format(i=i),\n mergedfile_min_basename.format(i=i),\n )for i in range(total_z_merged)]\n\n paramfiles = [self.write_paramfile(i,is_FW,merging_fname)\n for 
i,(is_FW,merging_fname) in enumerate(zip(is_FWs, merging_fnames))]\n if not dry_run:\n joblib.Parallel(n_jobs=num_cpus, verbose=10)([\n joblib.delayed(run_ScaleMerge)(paramfile,mergedfile, path_exec, print_output=False)\n for paramfile, mergedfile in zip(paramfiles,mergedfiles)\n ])\n print(\"[*] Concatenating tiff images to single tiff({})\".format(self.single_mergedfile_mean))\n img_mergedsingle_mean = np.empty((len(mergedfiles), self.param_scalemerge_FW[\"merged_height\"], self.param_scalemerge_FW[\"merged_width\"]), dtype=np.uint16)\n img_mergedsingle_max = np.empty_like(img_mergedsingle_mean)\n img_mergedsingle_min = np.empty_like(img_mergedsingle_mean)\n\n for i,(mergedfile_mean,mergedfile_max,mergedfile_min) in enumerate(mergedfiles):\n img_mergedsingle_mean[i,:,:] = tifffile.imread(mergedfile_mean)\n img_mergedsingle_max[i,:,:] = tifffile.imread(mergedfile_max)\n img_mergedsingle_min[i,:,:] = tifffile.imread(mergedfile_min)\n tifffile.imsave(self.single_mergedfile_mean, img_mergedsingle_mean)\n tifffile.imsave(self.single_mergedfile_max, img_mergedsingle_max)\n tifffile.imsave(self.single_mergedfile_min, img_mergedsingle_min)\n\n print(\"[*] Deleting temporary tiff images\")\n shutil.rmtree(mergedfile_mean_basedir)\n shutil.rmtree(mergedfile_max_basedir)\n shutil.rmtree(mergedfile_min_basedir)\n else:\n print(\"[*] Skipping ScaleMerge for images\")\n\n for paramfile in paramfiles:\n os.remove(paramfile)\n return\n\n def write_paramfile(self, i, is_FW, merging_fname):\n paramfile = \"/tmp/param_merge_{randomID}_{i:04d}.txt\".format(randomID = np.random.randint(2**31), i=i)\n if is_FW:\n param_dict = self.param_scalemerge_FW\n halfbrain = self.halfbrain_FW\n else:\n param_dict = self.param_scalemerge_RV\n halfbrain = self.halfbrain_RV\n param_text = \"{width}:{height}:{num_xnames}:{num_ynames}:{downscale_ratio_xy}:{overlap_left}:{overlap_right}:{overlap_top}:{overlap_bottom}:{flip_rot_before}:{flip_rot_after}:{imgformat}:{showgrid}\\n\".format(**param_dict)\n\n for yname in halfbrain.list_ynames:\n for xname in halfbrain.list_xnames:\n imagestack = halfbrain.get_imagestack_by_xyname(xname,yname)\n img = imagestack.get_imagefile_by_fname(merging_fname)\n fullpath = img.fullpath if not img.is_dummy else \"\"\n param_text += fullpath + \"\\n\"\n\n with open(paramfile, \"w\") as f:\n f.write(param_text)\n\n return paramfile\n\nclass WholeBrainCells(object):\n def __init__(self, paramfile, wholebrain_images=None, clf=None):\n if wholebrain_images:\n self.wholebrain_images = wholebrain_images\n else:\n self.wholebrain_images = WholeBrainImages(paramfile)\n\n self.halfbrain_cells_FW = HalfBrainCells(\n self.wholebrain_images.params[\"HDoG_paramfile\"][\"FW\"],\n is_FW = True,\n halfbrain_images=self.wholebrain_images.halfbrain_FW,\n clf=clf\n )\n self.halfbrain_cells_RV = HalfBrainCells(\n self.wholebrain_images.params[\"HDoG_paramfile\"][\"RV\"],\n is_FW = False,\n halfbrain_images=self.wholebrain_images.halfbrain_RV,\n clf=clf\n )\n # average mode or not (default: false)\n is_ave_FW = self.halfbrain_cells_FW.halfbrain_images.params[\"HDoG_param\"].get(\"is_ave_mode\", False)\n is_ave_RV = self.halfbrain_cells_RV.halfbrain_images.params[\"HDoG_param\"].get(\"is_ave_mode\", False)\n assert is_ave_FW == is_ave_RV\n self.is_ave = is_ave_FW\n\n def scalemerge(self):\n # should be called after scalemerge()\n print(\"[*] Starting scalemerge for HDoG result...\")\n cellstacks_FW = self.halfbrain_cells_FW.dict_stacks\n cellstacks_RV = self.halfbrain_cells_RV.dict_stacks\n param_scalemerge_FW = 
self.wholebrain_images.param_scalemerge_FW\n param_scalemerge_RV = self.wholebrain_images.param_scalemerge_RV\n # scale and merge\n org_scale_xy_FW = float(abs(self.wholebrain_images.halfbrain_FW.scale_xy))\n org_scale_z_FW = float(abs(self.wholebrain_images.halfbrain_FW.scale_z))\n org_scale_xy_RV = float(abs(self.wholebrain_images.halfbrain_RV.scale_xy))\n org_scale_z_RV = float(abs(self.wholebrain_images.halfbrain_RV.scale_z))\n offset_x_FW = self.wholebrain_images.halfbrain_FW.list_offset_xs[0]\n offset_y_FW = self.wholebrain_images.halfbrain_FW.list_offset_ys[0]\n offset_x_RV = self.wholebrain_images.halfbrain_RV.list_offset_xs[0]\n offset_y_RV = self.wholebrain_images.halfbrain_RV.list_offset_ys[0]\n print(\"\\t offset_FW: {},{},{}\".format(offset_x_FW,offset_y_FW,self.wholebrain_images.new_origin_z_global))\n print(\"\\t offset_RV: {},{},{}\".format(offset_x_RV,offset_y_RV,self.wholebrain_images.new_origin_z_global))\n # flip rot after\n flip_rot_after_info = self.wholebrain_images.params[\"scale_info\"][\"flip_rot\"]\n A_FW = np.zeros((3,3))\n A_FW[:2,:2] = np.array(self.wholebrain_images.halfbrain_FW.params[\"coordinate_info\"][\"affine_global\"])[:2,:2]\n A_FW[2,2] = 1.\n A_FW[np.nonzero(A_FW)] = 1.\n b_FW = np.zeros(3)\n A_RV = np.zeros((3,3))\n A_RV[:2,:2] = np.array(self.wholebrain_images.halfbrain_RV.params[\"coordinate_info\"][\"affine_global\"])[:2,:2]\n A_RV[2,2] = 1.\n A_RV[np.nonzero(A_RV)] = 1.\n b_RV = np.zeros(3)\n if flip_rot_after_info[\"flipX\"]:\n b_FW[0] += param_scalemerge_FW[\"merged_width\"]\n A_FW[0,:] *= -1\n b_RV[0] += param_scalemerge_RV[\"merged_width\"]\n A_RV[0,:] *= -1\n if flip_rot_after_info[\"flipY\"]:\n b_FW[1] += param_scalemerge_FW[\"merged_height\"]\n A_FW[1,:] *= -1\n b_RV[1] += param_scalemerge_RV[\"merged_height\"]\n A_RV[1,:] *= -1\n if flip_rot_after_info[\"flipZ\"]:\n b_FW[2] += self.wholebrain_images.merged_depth\n A_FW[2,:] *= -1\n b_RV[2] += self.wholebrain_images.merged_depth\n A_RV[2,:] *= -1\n\n def process_stack(dst_path, cellstack, bound_z, margin_left, margin_top,\n offset_x, offset_y, offset_z, coeff_x, coeff_y, coeff_z, A, b):\n print(\"[*] Dumping merged data to {}\".format(dst_path))\n if bound_z[0] > bound_z[1]:\n smallest_z,largest_z = bound_z[1],bound_z[0]\n else:\n smallest_z,largest_z = bound_z\n\n data_scaled = np.zeros(cellstack.data_global.shape[0], dtype=dt_scalemerged)\n data_scaled[\"is_valid\"] = np.bitwise_and(\n smallest_z <= cellstack.data_global[\"merged_z\"],\n cellstack.data_global[\"merged_z\"] <= largest_z)\n #print(\"\\tz_range 1: {} - {}\".format(data_valid[\"centroid_z\"].min(), data_valid[\"centroid_z\"].max()))\n centroid_scaled = np.zeros((cellstack.data_global.shape[0],3), dtype=np.float32)\n centroid_scaled[:,0] = (cellstack.data_global[\"merged_x\"] - offset_x) * coeff_x - margin_left\n centroid_scaled[:,1] = (cellstack.data_global[\"merged_y\"] - offset_y) * coeff_y - margin_top\n centroid_scaled[:,2] = (cellstack.data_global[\"merged_z\"] - offset_z) * coeff_z\n #print(\"\\tz_range 2: {} - {}\".format(centroid_scaled[:,2].min(), centroid_scaled[:,2].max()))\n centroid_fliprot = A.dot(centroid_scaled.T).T + b\n data_scaled[\"scaled_x\"] = centroid_fliprot[:,0]\n data_scaled[\"scaled_y\"] = centroid_fliprot[:,1]\n data_scaled[\"scaled_z\"] = centroid_fliprot[:,2]\n #print(\"\\tz_range 3: {} - {}\".format(data_valid[\"centroid_z\"].min(), data_valid[\"centroid_z\"].max()))\n joblib.dump(data_scaled, dst_path, compress=3)\n return np.count_nonzero(data_scaled[\"is_valid\"])\n\n dst_basedir = 
self.wholebrain_images.params[\"dst_basedir\"]\n dst_basedir_FW = os.path.join(dst_basedir,\"FW\")\n dst_basedir_RV = os.path.join(dst_basedir,\"RV\")\n if not os.path.exists(dst_basedir_FW):\n os.makedirs(dst_basedir_FW)\n if not os.path.exists(dst_basedir_RV):\n os.makedirs(dst_basedir_RV)\n # Note: parallelizable loop\n dict_num_cells = {}\n for xyname,cellstack in cellstacks_FW.items():\n if cellstack.is_dummy: continue\n dst_path = os.path.join(dst_basedir_FW, \"{}_{}.pkl\".format(xyname[1],xyname[0]))\n num_cells = process_stack(dst_path, cellstack,\n self.wholebrain_images.bound_z_global_FW,\n param_scalemerge_FW[\"merged_margin_left\"],\n param_scalemerge_FW[\"merged_margin_top\"],\n offset_x_FW, offset_y_FW, self.wholebrain_images.new_origin_z_global,\n param_scalemerge_FW[\"actual_downscale_ratio_x\"] / org_scale_xy_FW,\n param_scalemerge_FW[\"actual_downscale_ratio_y\"] / org_scale_xy_FW,\n param_scalemerge_FW[\"downscale_ratio_z\"] / org_scale_z_FW,\n A_FW, b_FW)\n dict_num_cells[dst_path] = num_cells\n\n for xyname,cellstack in cellstacks_RV.items():\n if cellstack.is_dummy: continue\n dst_path = os.path.join(dst_basedir_RV, \"{}_{}.pkl\".format(xyname[1],xyname[0]))\n num_cells = process_stack(dst_path, cellstack,\n self.wholebrain_images.bound_z_global_RV,\n param_scalemerge_RV[\"merged_margin_left\"],\n param_scalemerge_RV[\"merged_margin_top\"],\n offset_x_RV, offset_y_RV, self.wholebrain_images.new_origin_z_global,\n param_scalemerge_RV[\"actual_downscale_ratio_x\"] / org_scale_xy_RV,\n param_scalemerge_RV[\"actual_downscale_ratio_y\"] / org_scale_xy_RV,\n param_scalemerge_RV[\"downscale_ratio_z\"] / org_scale_z_RV,\n A_RV, b_RV)\n dict_num_cells[dst_path] = num_cells\n\n # saving information\n joblib.dump(dict_num_cells, os.path.join(dst_basedir, \"info.pkl\"), compress=3)\n return\n\ndef main():\n args = docopt(__doc__)\n\n wb_images = WholeBrainImages(args[\"PARAM_FILE\"])\n\n if args[\"images\"]:\n wb_images.scalemerge(num_cpus=int(args[\"-p\"]), dry_run=False, path_exec=args[\"--exec\"])\n elif args[\"cells\"]:\n wb_images.scalemerge(num_cpus=int(args[\"-p\"]), dry_run=True, path_exec=args[\"--exec\"])\n wb_cells = WholeBrainCells(args[\"PARAM_FILE\"], wholebrain_images=wb_images)\n wb_cells.scalemerge()\n elif args[\"full\"]:\n wb_images.scalemerge(num_cpus=int(args[\"-p\"]), dry_run=False, path_exec=args[\"--exec\"])\n wb_cells = WholeBrainCells(args[\"PARAM_FILE\"], wholebrain_images=wb_images)\n wb_cells.scalemerge()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.zeros", "numpy.dtype", "numpy.count_nonzero", "numpy.empty_like", "numpy.nonzero", "numpy.array", "numpy.random.randint", "numpy.bitwise_and" ] ]
neulab/xnmt
[ "d93f8f3710f986f36eb54e9ff3976a6b683da2a4" ]
[ "xnmt/models/retrievers.py" ]
[ "import numpy as np\n\nfrom xnmt.settings import settings\nimport xnmt\nfrom xnmt import batchers, expression_seqs\nfrom xnmt.models import base as models\nfrom xnmt.persistence import serializable_init, Serializable\n\nif xnmt.backend_dynet:\n import dynet as dy\n\n##### A class for retrieval databases\n# This file contains databases used for retrieval.\n# At the moment it includes only a standard database that keeps all of the things\n# to be retrieved in a list.\n\nclass StandardRetrievalDatabase(Serializable):\n \"\"\"This is a database to be used for retrieval. Its database member\"\"\"\n\n yaml_tag = \"!StandardRetrievalDatabase\"\n\n @serializable_init\n def __init__(self, reader, database_file, dev_id_file=None, test_id_file=None):\n self.reader = reader\n self.database_file = database_file\n self.data = list(reader.read_sents(database_file))\n self.indexed = []\n self.dev_id_file = dev_id_file\n self.test_id_file = test_id_file\n\n def __getitem__(self, indices):\n trg_examples, trg_masks = batchers.pad([self.data[index] for index in indices])\n return batchers.mark_as_batch(trg_examples), trg_masks\n\n##### The actual retriever class\nclass Retriever(models.ConditionedModel, models.GeneratorModel):\n \"\"\"\n A template class implementing a retrieval model.\n \"\"\"\n\n def calc_loss(self, src, db_idx):\n \"\"\"Calculate loss based on a database index.\n\n Args:\n src: The source input.\n db_idx: The correct index in the database to be retrieved.\n Returns:\n An expression representing the loss.\n \"\"\"\n raise NotImplementedError('calc_loss must be implemented for Retriever subclasses')\n\n def index_database(self, indices=None):\n \"\"\"A function that can be called before actually performing retrieval.\n\n This will perform any necessary pre-processing to make retrieval more efficient.\n If the model is updated, assume that the indexing result is stale and no longer applicable.\n \"\"\"\n pass\n\n def generate(self, src):\n \"\"\"Perform retrieval, trying to get the sentence that most closely matches in the database.\n\n Args:\n src: The source.\n i: Id of the input\n Returns:\n The ID of the example that most closely matches in the database.\n \"\"\"\n raise NotImplementedError('generate must be implemented for Retriever subclasses')\n\n def initialize_generator(self, **kwargs):\n candidates = None\n if kwargs[\"candidate_id_file\"] is not None:\n with open(kwargs[\"candidate_id_file\"], \"r\") as f:\n candidates = sorted({int(x):1 for x in f}.keys())\n self.index_database(candidates)\n\[email protected]_dynet\nclass DotProductRetriever(Retriever, Serializable):\n \"\"\"\n A retriever trains using max-margin methods.\n \"\"\"\n\n yaml_tag = '!DotProductRetriever'\n\n @serializable_init\n def __init__(self, src_embedder, src_encoder, trg_embedder, trg_encoder, database, loss_direction=\"forward\"):\n \"\"\"Constructor.\n\n Args:\n src_embedder: A word embedder for the source language\n src_encoder: An encoder for the source language\n trg_embedder: A word embedder for the target language\n trg_encoder: An encoder for the target language\n database: A database of things to retrieve\n \"\"\"\n self.src_embedder = src_embedder\n self.src_encoder = src_encoder\n self.trg_embedder = trg_embedder\n self.trg_encoder = trg_encoder\n self.database = database\n self.loss_direction = loss_direction\n\n def exprseq_pooling(self, exprseq):\n # Reduce to vector\n exprseq = expression_seqs.ExpressionSequence(expr_tensor=exprseq.mask.add_to_tensor_expr(exprseq.as_tensor(),-1e10), 
mask=exprseq.mask)\n if exprseq.expr_tensor is not None:\n if len(exprseq.expr_tensor.dim()[0]) > 1:\n return dy.max_dim(exprseq.expr_tensor, d=1)\n else:\n return exprseq.expr_tensor\n else:\n return dy.emax(exprseq.expr_list)\n\n def calc_loss(self, src, db_idx, src_mask=None, trg_mask=None):\n src_embeddings = self.src_embedder.embed_sent(src, mask=src_mask)\n self.src_encoder.set_input(src)\n src_encodings = self.exprseq_pooling(self.src_encoder.transduce(src_embeddings))\n trg_batch, trg_mask = self.database[db_idx]\n # print(\"trg_mask=\\n\",trg_mask)\n trg_encodings = self.encode_trg_example(trg_batch, mask=trg_mask)\n dim = trg_encodings.dim()\n trg_reshaped = dy.reshape(trg_encodings, (dim[0][0], dim[1]))\n # ### DEBUG\n # trg_npv = trg_reshaped.npvalue()\n # for i in range(dim[1]):\n # print(\"--- trg_reshaped {}: {}\".format(i,list(trg_npv[:,i])))\n # ### DEBUG\n prod = dy.transpose(src_encodings) * trg_reshaped\n # ### DEBUG\n # prod_npv = prod.npvalue()\n # for i in range(dim[1]):\n # print(\"--- prod {}: {}\".format(i,list(prod_npv[0].transpose()[i])))\n # ### DEBUG\n id_range = list(range(len(db_idx)))\n # This is ugly:\n if self.loss_direction == \"forward\":\n prod = dy.transpose(prod)\n loss = dy.sum_batches(dy.hinge_batch(prod, id_range))\n elif self.loss_direction == \"bidirectional\":\n prod = dy.reshape(prod, (len(db_idx), len(db_idx)))\n loss = dy.sum_elems(\n dy.hinge_dim(prod, id_range, d=0) + dy.hinge_dim(prod, id_range, d=1))\n else:\n raise RuntimeError(\"Illegal loss direction {}\".format(self.loss_direction))\n\n return loss\n\n def index_database(self, indices=None):\n # Create the inverted index if necessary\n if indices is None:\n indices = range(len(self.database.data))\n self.database.inverted_index = None\n else:\n self.database.inverted_index = indices\n # Actually index everything\n self.database.indexed = []\n for index in indices:\n item = self.database.data[int(index)]\n dy.renew_cg(immediate_compute=settings.IMMEDIATE_COMPUTE, check_validity=settings.CHECK_VALIDITY)\n self.database.indexed.append(self.encode_trg_example(item).npvalue())\n # ### DEBUG\n # for i, x in enumerate(self.database.indexed):\n # print(\"--- database {}: {}\".format(i,list(x)))\n # ### DEBUG\n self.database.indexed = np.stack(self.database.indexed, axis=1)\n\n def encode_trg_example(self, example, mask=None):\n embeddings = self.trg_embedder.embed_sent(example, mask=mask)\n self.trg_encoder.set_input(example)\n encodings = self.exprseq_pooling(self.trg_encoder.transduce(embeddings))\n return encodings\n\n def generate(self, src, return_type=\"idxscore\", nbest=10):\n src_embedding = self.src_embedder.embed_sent(src)\n self.src_encoder.set_input(src)\n src_encoding = dy.transpose(self.exprseq_pooling(self.src_encoder.transduce(src_embedding))).npvalue()\n scores = np.dot(src_encoding, self.database.indexed)\n # print(\"--- scores: {}\".format(list(scores[0])))\n kbest = np.argsort(scores, axis=1)[0,-nbest:][::-1]\n # print(\"--- kbest: {}\".format(kbest))\n ids = kbest if self.database.inverted_index is None else [self.database.inverted_index[x] for x in kbest]\n\n if return_type == \"idxscore\":\n return [(i,scores[0,x]) for i, x in zip(ids, kbest)]\n elif return_type == \"idx\":\n return list(ids)\n elif return_type == \"score\":\n return [scores[0,x] for x in kbest]\n else:\n raise RuntimeError(\"Illegal return_type to retrieve: {}\".format(return_type))\n\n" ]
[ [ "numpy.stack", "numpy.dot", "numpy.argsort" ] ]
VamshikShetty/Neural-Style
[ "44806accdfb9bb98aa15d3145563bf6759e9e604" ]
[ "Fast Style Transfer/TensorFlow/train_fast_style_transfer.py" ]
[ "\nimport tensorflow as tf\ntf.reset_default_graph() \n\n\nfrom keras.applications.vgg19 import VGG19\nimport os \n\nfrom tensorflow.python.keras.preprocessing import image as kp_image\nfrom keras.models import Model\nfrom keras.layers import Dense, BatchNormalization,Dropout,concatenate \nfrom keras import backend as K\nfrom keras.models import Model,load_model,model_from_json #,evaluate_generator\nfrom keras import losses\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D,Flatten,GlobalAveragePooling2D\n\nimport numpy as np\nfrom PIL import Image\nimport cv2\nimport scipy\nimport transform_network as TNET\nfrom loss import Loss, get_VGG19\n\ncontent_layers = ['block3_conv3']\nstyle_layers = ['block1_conv1','block2_conv2', 'block3_conv3', 'block4_conv3']\n\nnum_content_layers = len(content_layers)\nnum_style_layers = len(style_layers)\n\n\nseed = 791\ntf.set_random_seed(seed)\nnp.random.seed(seed)\n\ncontent_dir = 'content/'\nstyle_image = 'udnie.jpg'\n\n\nheight = 352\nwidth = 352\n\ndef load_img(path_to_img, expand = True, img_shape=(height,width)):\n \n img = scipy.misc.imread(path_to_img)\n\n img = scipy.misc.imresize(img, img_shape)\n img = img.astype(\"float32\")\n if expand:\n img = np.expand_dims(img, axis=0)\n \n img = tf.keras.applications.vgg19.preprocess_input(img)\n\n return img\n\ndef load_batch(image_paths):\n x = []\n for image_path in image_paths:\n img = load_img(image_path, False)\n\n x.append(img)\n\n x = np.asarray(x)\n return x\n\n\ndef deprocess_img(processed_img, shape):\n x = processed_img.copy()\n if len(x.shape) == 4:\n x = np.squeeze(x, 0)\n assert len(x.shape) == 3, (\"Input to deprocess image must be an image of \"\n \"dimension [1, height, width, channel] or [height, width, channel]\")\n if len(x.shape) != 3:\n raise ValueError(\"Invalid input to deprocessing image\")\n \n # perform the inverse of the preprocessiing step\n x[:, :, 0] += 103.939\n x[:, :, 1] += 116.779\n x[:, :, 2] += 123.68\n\n\n\n x = np.clip(x, 0, 255).astype('uint8')\n img = scipy.misc.imresize(x, shape)\n return img\n\n\ndef run_fast_style_transfer(content_training_images, style_image_path, epochs, batch_size, content_weight=0.6, style_weight=0.4, total_variation_weight = 1e-5): \n\n with tf.Session() as sess:\n K.set_session(sess)\n\n \n input_batch = tf.placeholder(tf.float32, shape=(None, height, width, 3), name=\"input_batch\")\n init_image = TNET.get_TransformNet('transform_network', input_batch)\n\n loss = Loss(init_image, content_layers, style_layers)\n\n\n content_loss = loss.content_loss(input_batch)\n\n \n style_var = load_img(style_image_path)\n\n\n\n style_var = tf.Variable(style_var)\n style_loss = loss.style_loss(style_var)\n \n\n tv_loss = loss.tv_loss(init_image)\n\n total_loss = style_weight*style_loss + content_weight*content_loss + total_variation_weight*tv_loss\n\n\n transform_net = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='transform_network')\n opt = tf.train.AdamOptimizer(learning_rate=0.0005, beta1=0.9, epsilon=1e-08).minimize(total_loss, var_list=[transform_net])\n\n\n #sess.run(tf.variables_initializer(var_list=[input_batch]))\n \n sess.run(tf.global_variables_initializer())\n # saver = tf.train.Saver()\n\n Tnet_saver = tf.train.Saver(transform_net)\n\n # loading the weights again because tf.global_variables_initializer() resets the weights\n loss.load_weights_to_vgg19(\"vgg_weights/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5\")\n # 
init_image.load_weights('0-transform_network.h5')\n\n\n dir_model = \"weights/\"+style_image.split('.')[0]+\"_weights/\"\n if not os.path.exists(dir_model):\n os.makedirs(dir_model)\n \n # Tnet_saver.restore(sess, dir_model+\"model.ckpt\")\n\n\n for i in range(epochs):\n\n avg_loss = 0\n avg_cnt = 1\n\n for j in range(0, int(len(content_training_images)/batch_size)):\n\n batch = load_batch(content_training_images[j: j+batch_size])\n\n temp = sess.run([total_loss, style_loss, content_loss, tv_loss, init_image, opt],feed_dict={input_batch:batch})\n\n print('epoch: ',i,'batch: ',j,' loss: ', temp[:4], 'avg loss: ', avg_loss )\n\n avg_loss = (avg_loss*(avg_cnt-1) + temp[0] )/avg_cnt\n avg_cnt += 1\n\n\n if j%50==0: # and i%50==0:\n image = deprocess_img(temp[4][2], batch[2].shape[:-1])\n cv2.imwrite(str(i)+'-'+str(j)+'-temp.jpg',image)\n if i==0:\n image_ori = deprocess_img(batch[2], batch[2].shape[:-1])\n cv2.imwrite(str(i)+'-'+str(j)+'-temp-orgi.jpg',image_ori)\n\n\n # if (i+1)%100==0:\n print('\\n Data Saved ... ')\n Tnet_saver.save(sess, dir_model+\"model.ckpt\")\n\n sess.close()\n\n\n\ncontent_training_images = os.listdir(content_dir) # http://cocodataset.org/#download 2017 val images [5k/1GB]\nfor i in range(len(content_training_images)):\n content_training_images[i] = content_dir+content_training_images[i]\n\n#print(content_training_images)\nrun_fast_style_transfer(content_training_images, style_image, epochs=5, batch_size=6)\n#cv2.imwrite(str(num_iterations)+'-'+save_name,best)\n\n" ]
[ [ "tensorflow.placeholder", "numpy.squeeze", "tensorflow.global_variables_initializer", "tensorflow.get_collection", "tensorflow.train.AdamOptimizer", "numpy.random.seed", "scipy.misc.imresize", "numpy.asarray", "numpy.expand_dims", "tensorflow.set_random_seed", "numpy.clip", "tensorflow.Session", "tensorflow.Variable", "tensorflow.keras.applications.vgg19.preprocess_input", "tensorflow.train.Saver", "tensorflow.reset_default_graph", "scipy.misc.imread" ] ]
Aarthif-Nawaz/Home3Circuit
[ "3e2d71fe7a839a7fd820d18fbef9bb31fde608e0" ]
[ "homestayAdmin/FinancialForecasting.py" ]
[ "import pickle\r\nimport warnings\r\nimport itertools\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nwarnings.filterwarnings(\"ignore\")\r\nplt.style.use('fivethirtyeight')\r\nimport pandas as pd\r\nimport statsmodels.api as sm\r\nimport matplotlib\r\nimport mysql.connector\r\nimport os\r\nimport sys\r\n\r\nmatplotlib.rcParams['axes.labelsize'] = 14\r\nmatplotlib.rcParams['xtick.labelsize'] = 12\r\nmatplotlib.rcParams['ytick.labelsize'] = 12\r\nmatplotlib.rcParams['text.color'] = 'k'\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n passwd=\"\",\r\n database=\"homestay\"\r\n)\r\n\r\n\r\ndf = pd.read_csv(\"Forecasting.csv\", parse_dates=[1] ,header=0)\r\n\r\nname = sys.argv[1]\r\nprint(name)\r\nhomestay = df.loc[df['Name'] == str(name)]\r\n# homestay['Date'].min(), homestay['Date'].max()\r\n#\r\n# homestay = homestay.sort_values('Date')\r\n# homestay.isnull().sum()\r\n#\r\n# homestay = homestay.groupby('Date')['Sales'].sum().reset_index()\r\nhomestay.index = homestay['Date']\r\ny = homestay['Sales'].resample('MS').mean()\r\ny.plot(figsize=(15, 6))\r\nfrom pandas.plotting import autocorrelation_plot\r\nautocorrelation_plot(homestay['Sales'])\r\n\r\nfrom statsmodels.graphics.tsaplots import plot_pacf\r\nplot_pacf(homestay['Sales'], lags=15)\r\n\r\n\r\nfrom statsmodels.tsa.arima_model import ARIMA, ARIMAResults\r\n\r\nmodel = ARIMA(df['Sales'], order=(1,0,1))\r\nmodel_fit = model.fit()\r\nprint(model_fit.summary())\r\nresiduals = model_fit.resid\r\nresiduals.plot()\r\nprint(residuals.describe())\r\noutput = model_fit.forecast()\r\nprint(output)\r\nprint(model_fit.forecast(5)[0])\r\ntrain_size = int(df.shape[0]*0.7)\r\ntrain, test = df.Sales[0:train_size], df.Sales[train_size:]\r\ndata = train\r\npredict =[]\r\nfor t in test:\r\n model = ARIMA(data, order=(0,0,1))\r\n model_fit = model.fit()\r\n y = model_fit.forecast()\r\n print(y[0][0])\r\n predict.append(y[0][0])\r\n data = np.append(data, t)\r\n data = pd.Series(data)\r\n\r\nfrom sklearn.metrics import mean_squared_error\r\nmse = mean_squared_error(test.values, predict)\r\n# print(mse)\r\n# print(predict)\r\nmodel_fit.save('model.pkl')\r\nloaded = ARIMAResults.load('model.pkl')\r\nvalues = loaded.predict()\r\nmycursor = mydb.cursor()\r\ndelsql = \"truncate table price\"\r\n\r\nmycursor.execute(delsql)\r\n\r\n\r\n\r\nimport datetime\r\ntoday = datetime.datetime.today()\r\ndatem = datetime.datetime(today.year, today.month,1)\r\n\r\nfrom dateutil.rrule import rrule, MONTHLY\r\nfrom datetime import datetime\r\n\r\n\r\n\r\n\r\ndef months(start_month, start_year, end_month, end_year):\r\n start = datetime(start_year, start_month, 1)\r\n end = datetime(end_year, end_month, 1)\r\n return [(d.month, d.year) for d in rrule(MONTHLY, dtstart=start, until=end)]\r\n\r\nvalue = sys.argv[2]\r\nyear = today.year\r\nif(today.month + int(value) > 12):\r\n year = year +1\r\n predict_month = months(today.month, today.year, ((int(value) + 5)-12), year)\r\nelse:\r\n predict_month = months(today.month,today.year,(int(value)+5),year)\r\nlist = []\r\nfor j in predict_month:\r\n list.append(j[0])\r\nprint(list)\r\nfor i in range (1,len(values)):\r\n if(i <= int(value)):\r\n if (today.month + i <= 12):\r\n year = today.year\r\n mycursor.execute(\"INSERT INTO price(month , price) VALUES (%s,%s)\", (str(list[i])+\"-\"+str(year), float(values[i])))\r\n else:\r\n year = today.year+1\r\n mycursor.execute(\"INSERT INTO price(month , price) VALUES (%s,%s)\", (str(list[i]) + \"-\" + str(year), 
float(values[i])))\r\nmydb.commit()\r\n\r\n\r\n\r\n\r\n# p = d = q = range(0, 2)\r\n# pdq = list(itertools.product(p, d, q))\r\n# seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]\r\n# print('Examples of parameter combinations for Seasonal ARIMA...')\r\n# print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))\r\n# print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))\r\n# print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))\r\n# print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))\r\n#\r\n# for param in pdq:\r\n# for param_seasonal in seasonal_pdq:\r\n# try:\r\n# mod = sm.tsa.statespace.SARIMAX(y,order=param,seasonal_order=param_seasonal,enforce_stationarity=False,enforce_invertibility=False)\r\n# results = mod.fit()\r\n# #print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))\r\n# except:\r\n# continue\r\n# mod = sm.tsa.statespace.SARIMAX(y,order=(1, 1, 1),seasonal_order=(1, 1, 0, 2),enforce_stationarity=False,enforce_invertibility=False)\r\n# results = mod.fit()\r\n# #print(results.summary().tables[1])\r\n# # results.plot_diagnostics(figsize=(16, 8))\r\n# # plt.show()\r\n# pred = results.get_prediction(start=pd.to_datetime('2020-01-01'), dynamic=False)\r\n# pred_ci = pred.conf_int()\r\n# ax = y['2020-01-07':].plot(label='observed')\r\n# pred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))\r\n# ax.fill_between(pred_ci.index,\r\n# pred_ci.iloc[:, 0],\r\n# pred_ci.iloc[:, 1], color='k', alpha=.2)\r\n# ax.set_xlabel('Date')\r\n# ax.set_ylabel('Sales')\r\n# plt.legend()\r\n# plt.show()\r\n#\r\n# y_forecasted = pred.predicted_mean\r\n# y_truth = y['2020-01-07':]\r\n# mse = ((y_forecasted - y_truth) ** 2).mean()\r\n# print('The Mean Squared Error of our forecasts is {}'.format(round(mse, 2)))\r\n#\r\n# pred_uc = results.get_forecast(steps=100)\r\n# pred_ci = pred_uc.conf_int()\r\n# ax = y.plot(label='observed', figsize=(14, 7))\r\n# pred_uc.predicted_mean.plot(ax=ax, label='Forecast')\r\n# ax.fill_between(pred_ci.index,pred_ci.iloc[:, 0],pred_ci.iloc[:, 1], color='k', alpha=.25)\r\n# ax.set_xlabel('Date')\r\n# ax.set_ylabel('Sales')\r\n# plt.legend()\r\n# plt.show()\r\n" ]
[ [ "matplotlib.pyplot.style.use", "pandas.Series", "numpy.append", "sklearn.metrics.mean_squared_error", "pandas.read_csv", "pandas.plotting.autocorrelation_plot" ] ]
eherr/anim_utils
[ "2274b86ff410c8f6feb588626cbf83664382abca" ]
[ "anim_utils/motion_editing/fabrik_chain.py" ]
[ "#!/usr/bin/env python\n#\n# Copyright 2019 DFKI GmbH.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the\n# following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n# USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n https://www.sciencedirect.com/science/article/pii/S1524070311000178?via%3Dihub\n\n based on the pseudocode by Renzo Poddighe\n https://project.dke.maastrichtuniversity.nl/robotlab/wp-content/uploads/Bachelor-thesis-Renzo-Poddighe.pdf\n\"\"\"\nimport math\nimport numpy as np\nfrom transformations import quaternion_inverse, quaternion_multiply, quaternion_from_matrix, euler_from_quaternion\nfrom .analytical_inverse_kinematics import calculate_limb_joint_rotation, calculate_limb_root_rotation, to_local_coordinate_system\n\ndef sign(x):\n return 1 if x >= 0 else -1\n\ndef quaternion_to_av(q):\n \"\"\" according to lee 2000\n the purely imaginary quaternion is identical to the angular velocity\n the sign of the real part gives the direction\n Since the unit quaternion space is folded by the antipodal equivalence,\n the angular velocity is twice as fast\n \"\"\"\n return 2 * np.array(q[1:]) * sign(q[0])\n\ndef normalize(v):\n return v/ np.linalg.norm(v)\n\ndef get_quaternion_delta(a, b):\n return quaternion_multiply(quaternion_inverse(b), a)\n\n\ndef quaternion_from_axis_angle(axis, angle):\n q = [1,0,0,0]\n q[1] = axis[0] * math.sin(angle / 2)\n q[2] = axis[1] * math.sin(angle / 2)\n q[3] = axis[2] * math.sin(angle / 2)\n q[0] = math.cos(angle / 2)\n return normalize(q)\n\n\n\ndef get_offset_quat(a, b):\n a_len = np.linalg.norm(a)\n b_len = np.linalg.norm(b)\n if a_len > 0 and b_len > 0:\n q = quaternion_from_vector_to_vector(a/a_len,b/b_len)\n q /= np.linalg.norm(q)\n return q\n else:\n return [1,0,0,0]\n\ndef quaternion_from_vector_to_vector(a, b):\n \"\"\"src: http://stackoverflow.com/questions/1171849/finding-quaternion-representing-the-rotation-from-one-vector-to-another\n http://wiki.ogre3d.org/Quaternion+and+Rotation+Primer\"\"\"\n\n v = np.cross(a, b)\n w = np.sqrt((np.linalg.norm(a) ** 2) * (np.linalg.norm(b) ** 2)) + np.dot(a, b)\n q = np.array([w, v[0], v[1], v[2]])\n if np.dot(q,q) != 0:\n return q/ np.linalg.norm(q)\n else:\n idx = np.nonzero(a)[0]\n q = np.array([0, 0, 0, 0])\n q[1 + ((idx + 1) % 2)] = 1 # [0, 0, 1, 0] for a rotation of 180 around y axis\n return q\n\ndef quaternion_from_vector_to_vector2(a, b):\n \"\"\"http://math.stackexchange.com/questions/293116/rotating-one-3d-vector-to-another\"\"\"\n if np.array_equiv(a, b):\n return [1, 0, 0, 0]\n\n axis = normalize(np.cross(a, b))\n dot = np.dot(a, b)\n if dot >= 1.0:\n return [1, 0, 0, 0]\n 
angle = math.acos(dot)\n q = quaternion_from_axis_angle(axis, angle)\n return q\n\ndef to_local_cos(skeleton, node_name, frame, q):\n # bring into parent coordinate system\n pm = skeleton.nodes[node_name].get_global_matrix(frame)[:3,:3]\n inv_p = quaternion_inverse(quaternion_from_matrix(pm))\n inv_p /= np.linalg.norm(inv_p)\n return quaternion_multiply(inv_p, q)\n\n\ndef orient_joint_to_target(skeleton, node, frame, src_pos, target_pos):\n if skeleton.nodes[node].parent is None:\n parent_pos = [0, 0, 0]\n else:\n parent_pos = skeleton.nodes[node].parent.get_global_position(frame)\n src_dir = normalize(src_pos - parent_pos)\n target_dir = normalize(target_pos - parent_pos)\n delta_q = quaternion_from_vector_to_vector(src_dir, target_dir)\n return normalize(delta_q)\n\n\nclass FABRIKBone(object):\n def __init__(self, name, child):\n self.name = name\n self.child = child\n self.position = np.array([0, 0, 0], np.float) # position of joint\n self.length = 0\n self.is_root = False\n self.is_leaf = False\n\n\nROOT_OFFSET = np.array([0,0,0], np.float)\n\nclass FABRIKChain(object):\n def __init__(self, skeleton, bones, node_order, tolerance=0.01, delta_tolerance=0.0001, max_iter=500, frame_offset=3, root_offset=ROOT_OFFSET, activate_constraints=False):\n self.skeleton = skeleton\n self.bones = bones\n self.node_order = node_order\n self.reversed_node_order = list(reversed(node_order))\n self.tolerance = tolerance\n self.max_iter = max_iter\n self.target = None\n self.root_pos = None\n self.chain_length = 0\n self.root_offset = root_offset\n self.activate_constraints = activate_constraints\n self.frame_offset = frame_offset\n self.delta_tolerance = delta_tolerance\n\n def set_positions_from_frame(self, frame, parent_length):\n self.skeleton.clear_cached_global_matrices()\n for idx, node in enumerate(self.node_order):\n p = self.skeleton.nodes[node].get_global_position(frame, use_cache=True)\n #print(\"pos \", node, p)\n self.bones[node].position = p\n if idx ==0:\n self.root_pos = p\n self.chain_length = 0\n for node in self.node_order:\n next_node = self.bones[node].child\n if next_node is None:\n break\n d = np.linalg.norm(self.bones[next_node].position - self.bones[node].position)\n self.bones[node].length = d\n #print(\"length \",node, d)\n self.chain_length += d\n self.parent_length = parent_length\n\n def target_is_reachable(self):\n dist = np.linalg.norm(self.target - self.root_pos)\n #print(\"unreachable\", dist, self.chain_length)\n return dist < self.chain_length+ self.parent_length\n\n def run(self, frame, target):\n self.target = target\n self.set_positions_from_frame(frame, 0)\n self.solve()\n return self.get_joint_parameters()\n\n def run_partial(self, frame, target):\n self.target = target\n self.set_positions_from_frame(frame, 0)\n self.solve()\n return self.set_partial_joint_parameters(frame)\n\n def run_partial_with_constraints(self, frame, target):\n self.target = target\n self.set_positions_from_frame(frame, 0)\n self.solve_partial(frame)\n return self.set_partial_joint_parameters(frame)\n\n def run_with_constraints(self, frame, target):\n self.target = target\n self.set_positions_from_frame(frame, 0)\n self.solve_with_constraints()\n return self.get_joint_parameters()\n\n def solve(self):\n if not self.target_is_reachable():\n print(\"unreachable\")\n # if unreachable orient joints to target\n self.orient_to_target()\n else:\n print(\"reachable\")\n # if reachable perform forward and backward reaching until tolerance is reached\n iter = 0\n distance = self.get_error()\n while 
distance > self.tolerance and iter< self.max_iter:\n self.backward()\n self.forward()\n iter+=1\n distance = self.get_error()\n print(\"iter\",iter, distance)\n\n def solve_with_constraints(self):\n if not self.target_is_reachable():\n print(\"unreachable\")\n # if unreachable orient joints to target\n self.orient_to_target()\n else:\n print(\"reachable\")\n # if reachable perform forward and backward reaching until tolerance is reached\n iter = 0\n distance = self.get_error()\n while distance > self.tolerance and iter < self.max_iter:\n self.backward()\n self.forward()\n self.apply_constraints()\n iter+=1\n distance = self.get_error()\n print(\"iter\",iter, distance)\n\n def solve_partial(self, frame):\n if not self.target_is_reachable():\n print(\"unreachable\")\n # if unreachable orient joints to target\n self.orient_to_target()\n else:\n print(\"reachable\")\n # if reachable perform forward and backward reaching until tolerance is reached\n iter = 0\n distance = self.get_error()\n while distance > self.tolerance and iter < self.max_iter:\n self.backward()\n self.forward()\n\n self.set_partial_joint_parameters(frame)\n self.set_positions_from_frame(frame, 0)\n\n iter += 1\n distance = self.get_error()\n print(\"iter\", iter, distance)\n\n def get_error(self):\n end_effector = self.node_order[-1]\n return np.linalg.norm(self.bones[end_effector].position - self.target)\n\n def orient_to_target(self):\n for idx, node in enumerate(self.node_order[:-1]):\n next_node = self.bones[node].child\n if next_node is None:\n print(\"Error: none at \",node)\n break\n r = np.linalg.norm(self.target - self.bones[node].position)\n if r > 0:\n l = self.bones[node].length / r\n self.bones[next_node].position = (1 - l) * self.bones[node].position + l * self.target\n\n def backward(self):\n end_effector = self.node_order[-1]\n self.bones[end_effector].position = np.array(self.target)\n n_points = len(self.node_order)\n for idx in range(n_points - 2, -1, -1):\n node = self.node_order[idx]\n next_node = self.bones[node].child\n r = np.linalg.norm(self.bones[next_node].position - self.bones[node].position)\n if r > 0:\n l = self.bones[node].length / r\n self.bones[node].position = (1 - l) * self.bones[next_node].position + l * self.bones[node].position\n\n def forward(self):\n root_node = self.node_order[0]\n self.bones[root_node].position = self.root_pos\n for idx, node in enumerate(self.node_order[:-1]): #for p_idx in range(0, self.n_points - 1, 1):\n #next_node = self.node_order[idx + 1]\n next_node = self.bones[node].child\n r = np.linalg.norm(self.bones[next_node].position - self.bones[node].position)\n if r > 0:\n l = self.bones[node].length / r\n self.bones[next_node].position = l * self.bones[next_node].position + (1 - l) * self.bones[node].position\n\n def apply_constraints(self):\n frame = self.get_joint_parameters()\n self.set_positions_from_frame(frame, 0)\n return\n\n def get_joint_parameters(self):\n n_joints = len(self.node_order) - 1\n frame = np.zeros(n_joints*4+3)\n o = 3\n prev_point = self.root_offset\n for idx, node in enumerate(self.node_order[:-1]):\n #for node in self.skeleton.animated_joints:\n next_node = self.bones[node].child\n q = self.get_global_rotation(node, next_node)\n frame[o:o + 4] = to_local_cos(self.skeleton, node, frame, q)\n if self.skeleton.nodes[node].joint_constraint is not None and self.activate_constraints:\n self.apply_constraint_with_swing(node, frame, o)\n prev_point = self.bones[next_node].position\n o += 4\n return frame\n\n def 
set_partial_joint_parameters(self, frame):\n o = self.frame_offset\n for idx, node in enumerate(self.node_order[:-1]):\n next_node = self.bones[node].child\n q = self.get_global_rotation_non_cos(node, next_node, frame)\n frame[o:o + 4] = to_local_coordinate_system(self.skeleton,frame, node, q)\n if self.skeleton.nodes[node].joint_constraint is not None and self.activate_constraints:\n self.apply_constraint_with_swing(node, frame, o)\n o += 4\n return frame\n\n def apply_constraint_with_swing(self, node, frame, o, eps=0.01):\n old_q = np.array(frame[o:o + 4])\n\n #remove twist rotation\n swing_q, twist_q = self.skeleton.nodes[node].joint_constraint.split(old_q)\n frame[o:o + 4] = twist_q\n\n # apply swing_q to parent\n parent_q = np.array(frame[o - 4:o])\n new_parent_q = quaternion_multiply(parent_q, swing_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n new_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n diff = np.linalg.norm(new_node_pos - self.target)\n\n # calculate rotation fix if necessary\n if diff > eps:\n delta_q = orient_joint_to_target(self.skeleton, node, frame, new_node_pos, self.target)\n aligned_parent_q = quaternion_multiply(delta_q, new_parent_q)\n aligned_parent_q = normalize(aligned_parent_q)\n frame[o - 4:o] = aligned_parent_q\n\n def apply_constraint_with_swing_global(self, node, frame, o, eps=0.01):\n old_q = np.array(frame[o:o + 4])\n next_node = self.bones[node].child\n parent_m = self.skeleton.nodes[node].get_global_matrix(frame)[:3, :3]\n node_m = self.skeleton.nodes[next_node].get_global_matrix(frame)[:3, :3]\n node_q = quaternion_from_matrix(node_m)\n node_q = normalize(node_q)\n # remove twist rotation\n swing_q, twist_q = self.skeleton.nodes[node].joint_constraint.split_global(parent_m, node_q)\n frame[o:o + 4] = twist_q\n\n # apply swing_q to parent\n parent_q = np.array(frame[o - 4:o])\n new_parent_q = quaternion_multiply(parent_q, swing_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n new_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n diff = np.linalg.norm(new_node_pos - self.target)\n return\n # calculate rotation fix if necessary\n if diff > eps:\n delta_q = orient_joint_to_target(self.skeleton, node, frame, new_node_pos, self.target)\n aligned_parent_q = quaternion_multiply(delta_q, new_parent_q)\n aligned_parent_q = normalize(aligned_parent_q)\n frame[o - 4:o] = aligned_parent_q\n\n\n def apply_constraint_with_swing_and_lee(self, node, frame, o, eps=0.01):\n old_q = frame[o:o + 4]\n old_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n delta_q, new_q = self.skeleton.nodes[node].joint_constraint.split(old_q)\n frame[o:o + 4] = new_q\n new_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n diff = np.linalg.norm(new_node_pos - old_node_pos)\n if diff > eps:\n parent_q = frame[o - 4:o]\n new_parent_q = quaternion_multiply(parent_q, delta_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n new_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n diff = np.linalg.norm(new_node_pos -old_node_pos)\n if diff > eps:\n parent_q = frame[o - 4:o]\n root = None\n if self.skeleton.nodes[node].parent is not None:\n root = self.skeleton.nodes[node].parent.node_name\n end_effector = self.skeleton.nodes[node].children[0].node_name\n print(\"apply lee tolani\",root, node, end_effector, diff)\n local_axis = 
self.skeleton.nodes[node].joint_constraint.axis\n #frame[o:o + 4] = calculate_limb_joint_rotation(self.skeleton, root, node, end_effector, local_axis, frame, self.target)\n delta_q = calculate_limb_root_rotation(self.skeleton, root, end_effector, frame, self.target)\n new_parent_q = quaternion_multiply(parent_q, delta_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n\n def apply_constraint_with_swing2(self, node, parent_node, frame, o):\n next_node = self.bones[node].child\n target_global_m = self.skeleton.nodes[next_node].get_global_matrix(frame)[:3,:3]\n old_q = frame[o:o + 4]\n delta_q, new_q = self.skeleton.nodes[node].joint_constraint.split(old_q)\n frame[o:o + 4] = new_q\n new_global_m = self.skeleton.nodes[next_node].get_global_matrix(frame)[:3,:3]\n delta_global_m = np.dot(np.linalg.inv(new_global_m), target_global_m)\n actual_delta_q = normalize(quaternion_from_matrix(delta_global_m))\n parent_q = frame[o - 4:o]\n new_parent_q = quaternion_multiply(actual_delta_q, parent_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n\n\n\n def apply_constraint_with_vector(self, node, parent_node, frame, o):\n next_node = self.bones[node].child\n old_pos = self.bones[next_node].position\n old_q = frame[o:o + 4]\n twist_q, swing_q = self.skeleton.nodes[node].joint_constraint.split(old_q)\n frame[o:o + 4] = swing_q\n\n # get global delta quaternion to apply on parent\n parent_pos = self.skeleton.nodes[parent_node].get_global_position(frame)\n next_node_pos = self.skeleton.nodes[next_node].get_global_position(frame)\n position_delta = np.linalg.norm(old_pos-next_node_pos)\n print(\"position delta\", position_delta)\n if position_delta < 0.001:\n return\n desired_offset = normalize(old_pos - parent_pos)\n offset = normalize(next_node_pos - parent_pos)\n delta_q = quaternion_from_vector_to_vector(offset, desired_offset)\n print(\"deltaq\", parent_node, node, next_node, next_node_pos, old_pos, twist_q, swing_q)\n # apply global delta on parent\n if True:\n global_m = self.skeleton.nodes[parent_node].get_global_matrix(frame)\n global_parent_q = normalize(quaternion_from_matrix(global_m))\n new_parent_q = quaternion_multiply(global_parent_q, delta_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = to_local_cos(self.skeleton, parent_node, frame, new_parent_q)\n else:\n local_parent_q = frame[o - 4:o]\n local_delta = to_local_cos(self.skeleton, parent_node, frame, delta_q)\n\n new_parent_q = quaternion_multiply(local_parent_q, local_delta)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n print(new_parent_q, local_parent_q, local_delta, delta_q)\n\n\n\n def get_global_rotation(self, node, next_node):\n \"\"\" FIXME works only when identity frame coordinate system is the same as the offset \"\"\"\n\n #print(\"set\", node, next_node)\n target = self.bones[next_node].position - self.bones[node].position\n next_offset = np.array(self.skeleton.nodes[next_node].offset)\n target_len = np.linalg.norm(target)\n if target_len > 0:\n target /= target_len\n next_offset /= np.linalg.norm(next_offset)\n\n # 1. sum over offsets of static nodes\n local_offset = self.get_child_offset(node, next_node)\n actual_offset = next_offset + local_offset\n actual_offset /= np.linalg.norm(actual_offset) # actual_offset = [0.5, 0.5,0]\n # 2. 
get global rotation\n q = quaternion_from_vector_to_vector(actual_offset, target)\n return q\n\n else:\n #print(\"skip\", target_len, self.bones[next_node].position)\n return [1, 0, 0, 0]\n\n def get_global_rotation_non_cos(self, node, next_node, frame):\n target_position = self.bones[next_node].position\n root_pos = self.skeleton.nodes[node].get_global_position(frame)\n #src_dir = np.dot(m, offset)\n end_effector_pos = self.skeleton.nodes[next_node].get_global_position(frame)\n src_delta = end_effector_pos - root_pos\n src_dir = src_delta / np.linalg.norm(src_delta)\n\n target_delta = target_position - root_pos\n target_dir = target_delta / np.linalg.norm(target_delta)\n\n #q = quaternion_from_vector_to_vector(offset, target_dir)\n q = quaternion_from_vector_to_vector2(src_dir, target_dir)\n q = normalize(q)\n return q\n\n\n def get_child_offset(self, node, child_node):\n \"\"\"\n \"\"\"\n actual_offset = np.array([0, 0, 0], np.float)\n while node is not None and self.skeleton.nodes[node].children[0].node_name != child_node:\n local_offset = np.array(self.skeleton.nodes[node].children[0].offset)\n local_offset /= np.linalg.norm(local_offset)\n actual_offset = actual_offset + local_offset\n node = self.skeleton.nodes[node].children[0].node_name\n if len(self.skeleton.nodes[node].children) < 1:\n node = None\n\n return actual_offset\n\n def get_joint_parameters_global(self):\n n_joints = len(self.node_order)-1\n frame = np.zeros(n_joints*4+3)\n o = self.frame_offset\n for idx, node in enumerate(self.node_order[:-1]):\n offset = np.array(self.skeleton.nodes[node].children[0].offset)\n offset /= np.linalg.norm(offset)\n next_node = self.bones[node].child\n if idx == 0:\n if self.skeleton.root == node:\n dir_vector = self.bones[next_node].position\n dir_vector_len = np.linalg.norm(dir_vector)\n if dir_vector_len > 0 and np.linalg.norm(offset) > 0:\n dir_vector /= dir_vector_len\n q = quaternion_from_vector_to_vector(offset, dir_vector)\n frame[o:o + 4] = q\n else:\n print(\"work around\", offset,dir_vector_len, node)\n frame[o:o + 4] = [1, 0, 0, 0]\n else:\n print(\"work root around\")\n frame[o:o + 4] = [1, 0, 0, 0]\n\n else:\n q = self.get_global_rotation(node, next_node)\n frame[o:o+4] =q\n o += 4\n print(frame)\n return frame\n\n def get_global_positions(self):\n position_dict = dict()\n for node in self.node_order:\n position_dict[node] = self.bones[node].position\n return position_dict\n\n\n def get_end_effector_position(self):\n root_node = self.node_order[-1]\n return self.bones[root_node].position\n\n def get_next_nodes(self, next_nodes):\n for idx, n in enumerate(self.node_order):\n if idx+1 < len(self.node_order):\n next_nodes[n] = self.node_order[idx+1]\n else:\n next_nodes[n] = None\n" ]
[ [ "numpy.array_equiv", "numpy.zeros", "numpy.linalg.inv", "numpy.cross", "numpy.nonzero", "numpy.array", "numpy.dot", "numpy.linalg.norm" ] ]
apayeur/GIF-Ca
[ "7ba9e715d79aa3a733f417f7dfce81842041e7ec" ]
[ "Summary_DynamicIV.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# List separate experiments in separate folder\n# data_folders_for_separate_experiments = ['tenth_set']\ndata_folders_for_separate_experiments = ['seventh_set', 'eighth_set', 'ninth_set', 'tenth_set']\n\n# For all experiments, extract the cell names\nCellNames = {}\nfor experiment_folder in data_folders_for_separate_experiments:\n folder_path = './' + experiment_folder + '/'\n CellNames[experiment_folder] = [name for name in os.listdir(folder_path) if\n os.path.isdir(folder_path + name) and '_5HT' in name]\nCellNames['eighth_set'].remove('DRN165_5HT') # problematic cell\nCellNames['eighth_set'].remove('DRN094_5HT') # problematic cell\nCellNames['eighth_set'].remove('DRN156_5HT') # problematic cell\nCellNames['seventh_set'].remove('DRN543_5HT') # problematic cell\nCellNames['ninth_set'].remove('DRN654_5HT') # problematic cell\nCellNames['tenth_set'].remove('DRN656_5HT') # problematic cell\n\n\ndata = np.array([[0,0,0,0,0]])\nfor experiment_folder in data_folders_for_separate_experiments:\n for cell_name in CellNames[experiment_folder]:\n path_data = '../../../Dropbox/Recherches/Raphe/GIF-Ca/Results/' + cell_name + '/'\n path_results = '../../../Dropbox/Recherches/Raphe/GIF-Ca/Results/'\n data = np.concatenate((data, (np.loadtxt(path_data + 'params_IV.dat', delimiter='\\n')).reshape((1,5))), axis=0)\n\nEL = data[1:,0]\ntaum = data[1:,1]\nDeltaV = data[1:,2]\nV_T = data[1:,3]\nC = data[1:,4]\n\n\n\n\n\nfig = plt.figure(1, figsize=(8,3))\n#fig.suptitle('EIF model parameters for 5-HT neurons', y=0.99)\nax1 = fig.add_subplot(141)\nax1.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off') # labels along the bottom edge are off\nax1.boxplot(EL, showmeans=True)\nplt.ylabel(r'$E_L$ (mV)')\nax2 = fig.add_subplot(142)\nax2.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off') # labels along the bottom edge are of\nplt.ylabel(r'$\\tau_m$ (ms)')\nax2.boxplot(taum, showmeans=True)\nax3 = fig.add_subplot(143)\nax3.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off') # labels along the bottom edge are off\nplt.ylabel(r'$\\Delta V$ (mV)')\nax3.boxplot(DeltaV, showmeans=True)\nax4 = fig.add_subplot(144)\nax4.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off') # labels along the bottom edge are off\nplt.ylabel(r'$V_T$ (mV)')\nax4.boxplot(V_T, showmeans=True)\nfig.tight_layout()\nplt.savefig(path_results+'DynamicIV_Params5HT.png', format='png')\nplt.close(fig)\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "numpy.array", "numpy.loadtxt" ] ]
jkochNU/scqubits
[ "9a405759665b96284e9a449188935cd06b42d580" ]
[ "scqubits/core/zeropi.py" ]
[ "# zeropi.py\n#\n# This file is part of scqubits.\n#\n# Copyright (c) 2019, Jens Koch and Peter Groszkowski\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n############################################################################\n\nimport numpy as np\nfrom scipy import sparse\n\nimport scqubits.core.constants as constants\nimport scqubits.utils.plotting as plot\nfrom scqubits.core.discretization import Grid1d, GridSpec\nfrom scqubits.core.qubit_base import QubitBaseClass\nfrom scqubits.core.storage import WaveFunctionOnGrid\nfrom scqubits.utils.misc import is_numerical, key_in_grid1d\nfrom scqubits.utils.spectrum_utils import standardize_phases, order_eigensystem\n\n\n# -Symmetric 0-pi qubit, phi discretized, theta in charge basis---------------------------------------------------------\n\nclass ZeroPi(QubitBaseClass):\n r\"\"\"Zero-Pi Qubit\n\n | [1] Brooks et al., Physical Review A, 87(5), 052306 (2013). http://doi.org/10.1103/PhysRevA.87.052306\n | [2] Dempster et al., Phys. Rev. B, 90, 094518 (2014). http://doi.org/10.1103/PhysRevB.90.094518\n | [3] Groszkowski et al., New J. Phys. 20, 043053 (2018). https://doi.org/10.1088/1367-2630/aab7cd\n\n Zero-Pi qubit without coupling to the `zeta` mode, i.e., no disorder in `EC` and `EL`,\n see Eq. (4) in Groszkowski et al., New J. Phys. 20, 043053 (2018),\n\n .. math::\n\n H &= -2E_\\text{CJ}\\partial_\\phi^2+2E_{\\text{C}\\Sigma}(i\\partial_\\theta-n_g)^2\n +2E_{C\\Sigma}dC_J\\,\\partial_\\phi\\partial_\\theta\n -2E_\\text{J}\\cos\\theta\\cos(\\phi-\\varphi_\\text{ext}/2)+E_L\\phi^2\\\\\n &\\qquad +2E_\\text{J} + E_J dE_J \\sin\\theta\\sin(\\phi-\\phi_\\text{ext}/2).\n\n Formulation of the Hamiltonian matrix proceeds by discretization of the `phi` variable, and using charge basis for\n the `theta` variable.\n\n Parameters\n ----------\n EJ: float\n mean Josephson energy of the two junctions\n EL: float\n inductive energy of the two (super-)inductors\n ECJ: float\n charging energy associated with the two junctions\n EC: float or None\n charging energy of the large shunting capacitances; set to `None` if `ECS` is provided instead\n dEJ: float\n relative disorder in EJ, i.e., (EJ1-EJ2)/EJavg\n dCJ: float\n relative disorder of the junction capacitances, i.e., (CJ1-CJ2)/CJavg\n ng: float\n offset charge associated with theta\n flux: float\n magnetic flux through the circuit loop, measured in units of flux quanta (h/2e)\n grid: Grid1d object\n specifies the range and spacing of the discretization lattice\n ncut: int\n charge number cutoff for `n_theta`, `n_theta = -ncut, ..., ncut`\n ECS: float, optional\n total charging energy including large shunting capacitances and junction capacitances; may be provided instead\n of EC\n truncated_dim: int, optional\n desired dimension of the truncated quantum system\n \"\"\"\n\n def __init__(self, EJ, EL, ECJ, EC, ng, flux, grid, ncut, dEJ=0, dCJ=0, ECS=None, truncated_dim=None):\n self.EJ = EJ\n self.EL = EL\n self.ECJ = ECJ\n\n if EC is None and ECS is None:\n raise ValueError(\"Argument missing: must either provide EC or ECS\")\n if EC and ECS:\n raise ValueError(\"Argument error: can only provide either EC or ECS\")\n if EC:\n self.EC = EC\n else:\n self.EC = 1 / (1 / ECS - 1 / self.ECJ)\n\n self.dEJ = dEJ\n self.dCJ = dCJ\n self.ng = ng\n self.flux = flux\n self.grid = grid\n self.ncut = ncut\n self.truncated_dim = truncated_dim\n self._sys_type = '0-pi'\n self._evec_dtype = np.complex_\n 
self._default_grid = Grid1d(-np.pi / 2, 3 * np.pi / 2, 100) # for theta, needed for plotting wavefunction\n\n def _evals_calc(self, evals_count):\n hamiltonian_mat = self.hamiltonian()\n evals = sparse.linalg.eigsh(hamiltonian_mat, k=evals_count, return_eigenvectors=False, which='SA')\n return np.sort(evals)\n\n def _esys_calc(self, evals_count):\n hamiltonian_mat = self.hamiltonian()\n evals, evecs = sparse.linalg.eigsh(hamiltonian_mat, k=evals_count, return_eigenvectors=True, which='SA')\n # TODO consider normalization of zeropi wavefunctions\n # evecs /= np.sqrt(self.grid.grid_spacing())\n evals, evecs = order_eigensystem(evals, evecs)\n return evals, evecs\n\n def get_ECS(self):\n return 1 / (1 / self.EC + 1 / self.ECJ)\n\n def set_ECS(self, value):\n raise ValueError(\"It's not possible to directly set ECS. Instead one can set EC or ECJ,\\nor use \"\n \"set_EC_via_ECS() to update EC indirectly.\")\n\n ECS = property(get_ECS, set_ECS)\n\n def set_EC_via_ECS(self, ECS):\n \"\"\"Helper function to set `EC` by providing `ECS`, keeping `ECJ` constant.\"\"\"\n self.EC = 1 / (1 / ECS - 1 / self.ECJ)\n\n def hilbertdim(self):\n \"\"\"Returns Hilbert space dimension\"\"\"\n return self.grid.pt_count * (2 * self.ncut + 1)\n\n def potential(self, phi, theta):\n \"\"\"\n Parameters\n ----------\n phi: float\n theta: float\n\n Returns\n -------\n float\n value of the potential energy evaluated at phi, theta\n \"\"\"\n return (-2.0 * self.EJ * np.cos(theta) * np.cos(phi - 2.0 * np.pi * self.flux / 2.0)\n + self.EL * phi ** 2 + 2.0 * self.EJ\n + self.EJ * self.dEJ * np.sin(theta) * np.sin(phi - 2.0 * np.pi * self.flux / 2.0))\n\n def sparse_kinetic_mat(self):\n \"\"\"\n Kinetic energy portion of the Hamiltonian.\n TODO: update this method to use single-variable operator methods\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the kinetic energy operator\n \"\"\"\n pt_count = self.grid.pt_count\n dim_theta = 2 * self.ncut + 1\n identity_phi = sparse.identity(pt_count, format='csc', dtype=np.complex_)\n identity_theta = sparse.identity(dim_theta, format='csc', dtype=np.complex_)\n\n kinetic_matrix_phi = self.grid.second_derivative_matrix(prefactor=-2.0 * self.ECJ)\n\n diag_elements = 2.0 * self.ECS * np.square(np.arange(-self.ncut + self.ng, self.ncut + 1 + self.ng))\n kinetic_matrix_theta = sparse.dia_matrix((diag_elements, [0]), shape=(dim_theta, dim_theta)).tocsc()\n\n kinetic_matrix = (sparse.kron(kinetic_matrix_phi, identity_theta, format='csc')\n + sparse.kron(identity_phi, kinetic_matrix_theta, format='csc'))\n\n kinetic_matrix -= 2.0 * self.ECS * self.dCJ * self.i_d_dphi_operator() * self.n_theta_operator()\n return kinetic_matrix\n\n def sparse_potential_mat(self):\n \"\"\"\n Potential energy portion of the Hamiltonian.\n TODO: update this method to use single-variable operator methods\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the potential energy operator\n \"\"\"\n pt_count = self.grid.pt_count\n grid_linspace = self.grid.make_linspace()\n dim_theta = 2 * self.ncut + 1\n\n phi_inductive_vals = self.EL * np.square(grid_linspace)\n phi_inductive_potential = sparse.dia_matrix((phi_inductive_vals, [0]), shape=(pt_count, pt_count)).tocsc()\n phi_cos_vals = np.cos(grid_linspace - 2.0 * np.pi * self.flux / 2.0)\n phi_cos_potential = sparse.dia_matrix((phi_cos_vals, [0]), shape=(pt_count, pt_count)).tocsc()\n phi_sin_vals = np.sin(grid_linspace - 2.0 * np.pi * self.flux / 2.0)\n phi_sin_potential = sparse.dia_matrix((phi_sin_vals, [0]), 
shape=(pt_count, pt_count)).tocsc()\n\n theta_cos_potential = (-self.EJ\n * (sparse.dia_matrix(([1.0] * dim_theta, [-1]), shape=(dim_theta, dim_theta)) +\n sparse.dia_matrix(([1.0] * dim_theta, [1]), shape=(dim_theta, dim_theta)))).tocsc()\n potential_mat = (sparse.kron(phi_cos_potential, theta_cos_potential, format='csc')\n + sparse.kron(phi_inductive_potential, self._identity_theta(), format='csc')\n + 2 * self.EJ * sparse.kron(self._identity_phi(), self._identity_theta(), format='csc'))\n potential_mat += (self.EJ * self.dEJ * sparse.kron(phi_sin_potential, self._identity_theta(), format='csc')\n * self.sin_theta_operator())\n return potential_mat\n\n def hamiltonian(self):\n \"\"\"Calculates Hamiltonian in basis obtained by discretizing phi and employing charge basis for theta.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the potential energy operator\n \"\"\"\n return self.sparse_kinetic_mat() + self.sparse_potential_mat()\n\n def sparse_d_potential_d_flux_mat(self):\n r\"\"\"Calculates a of the potential energy w.r.t flux, at the current value of flux,\n as stored in the object.\n\n The flux is assumed to be given in the units of the ratio \\Phi_{ext}/\\Phi_0.\n So if \\frac{\\partial U}{ \\partial \\Phi_{\\rm ext}}, is needed, the expression returned\n by this function, needs to be multiplied by 1/\\Phi_0.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the derivative of the potential energy\n \"\"\"\n op_1 = sparse.kron(self._sin_phi_operator(x=- 2.0 * np.pi * self.flux / 2.0),\n self._cos_theta_operator(), format='csc')\n op_2 = sparse.kron(self._cos_phi_operator(x=- 2.0 * np.pi * self.flux / 2.0),\n self._sin_theta_operator(), format='csc')\n return - 2.0 * np.pi * self.EJ * op_1 - np.pi * self.EJ * self.dEJ * op_2\n\n def d_hamiltonian_d_flux(self):\n r\"\"\"Calculates a derivative of the Hamiltonian w.r.t flux, at the current value of flux,\n as stored in the object.\n\n The flux is assumed to be given in the units of the ratio \\Phi_{ext}/\\Phi_0.\n So if \\frac{\\partial H}{ \\partial \\Phi_{\\rm ext}}, is needed, the expression returned\n by this function, needs to be multiplied by 1/\\Phi_0.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the derivative of the Hamiltonian\n \"\"\"\n return self.sparse_d_potential_d_flux_mat()\n\n def _identity_phi(self):\n r\"\"\"\n Identity operator acting only on the `\\phi` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n pt_count = self.grid.pt_count\n return sparse.identity(pt_count, format='csc')\n\n def _identity_theta(self):\n r\"\"\"\n Identity operator acting only on the `\\theta` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n dim_theta = 2 * self.ncut + 1\n return sparse.identity(dim_theta, format='csc')\n\n def i_d_dphi_operator(self):\n r\"\"\"\n Operator :math:`i d/d\\varphi`.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n return sparse.kron(self.grid.first_derivative_matrix(prefactor=1j), self._identity_theta(), format='csc')\n\n def _phi_operator(self):\n r\"\"\"\n Operator :math:`\\varphi`, acting only on the `\\varphi` Hilbert subspace.\n\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n pt_count = self.grid.pt_count\n\n phi_matrix = sparse.dia_matrix((pt_count, pt_count), dtype=np.complex_)\n diag_elements = self.grid.make_linspace()\n phi_matrix.setdiag(diag_elements)\n return phi_matrix\n\n def phi_operator(self):\n r\"\"\"\n Operator :math:`\\varphi`.\n\n Returns\n 
-------\n scipy.sparse.csc_matrix\n \"\"\"\n return sparse.kron(self._phi_operator(), self._identity_theta(), format='csc')\n\n def n_theta_operator(self):\n r\"\"\"\n Operator :math:`n_\\theta`.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n dim_theta = 2 * self.ncut + 1\n diag_elements = np.arange(-self.ncut, self.ncut + 1)\n n_theta_matrix = sparse.dia_matrix((diag_elements, [0]), shape=(dim_theta, dim_theta)).tocsc()\n return sparse.kron(self._identity_phi(), n_theta_matrix, format='csc')\n\n def _sin_phi_operator(self, x=0):\n r\"\"\"\n Operator :math:`\\sin(\\phi + x)`, acting only on the `\\phi` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n pt_count = self.grid.pt_count\n\n vals = np.sin(self.grid.make_linspace() + x)\n sin_phi_matrix = sparse.dia_matrix((vals, [0]), shape=(pt_count, pt_count)).tocsc()\n return sin_phi_matrix\n\n def _cos_phi_operator(self, x=0):\n r\"\"\"\n Operator :math:`\\cos(\\phi + x)`, acting only on the `\\phi` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n pt_count = self.grid.pt_count\n\n vals = np.cos(self.grid.make_linspace() + x)\n cos_phi_matrix = sparse.dia_matrix((vals, [0]), shape=(pt_count, pt_count)).tocsc()\n return cos_phi_matrix\n\n def _cos_theta_operator(self):\n r\"\"\"\n Operator :math:`\\cos(\\theta)`, acting only on the `\\theta` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n dim_theta = 2 * self.ncut + 1\n cos_theta_matrix = 0.5 * (sparse.dia_matrix(([1.0] * dim_theta, [-1]), shape=(dim_theta, dim_theta)) +\n sparse.dia_matrix(([1.0] * dim_theta, [1]), shape=(dim_theta, dim_theta))).tocsc()\n return cos_theta_matrix\n\n def cos_theta_operator(self):\n r\"\"\"\n Operator :math:`\\cos(\\theta)`.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n return sparse.kron(self._identity_phi(), self._cos_phi_operator(), format='csc')\n\n def _sin_theta_operator(self):\n r\"\"\"\n Operator :math:`\\sin(\\theta)`, acting only on the `\\theta` Hilbert space.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n dim_theta = 2 * self.ncut + 1\n sin_theta_matrix = (-0.5 * 1j\n * (sparse.dia_matrix(([1.0] * dim_theta, [1]), shape=(dim_theta, dim_theta)) -\n sparse.dia_matrix(([1.0] * dim_theta, [-1]), shape=(dim_theta, dim_theta))).tocsc())\n return sin_theta_matrix\n\n def sin_theta_operator(self):\n r\"\"\"\n Operator :math:`\\sin(\\theta)`.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n return sparse.kron(self._identity_phi(), self._sin_theta_operator(), format='csc')\n\n def plot_potential(self, theta_grid=None, contour_vals=None, **kwargs):\n \"\"\"Draw contour plot of the potential energy.\n\n Parameters\n ----------\n theta_grid: Grid1d, optional\n used for setting a custom grid for theta; if None use self._default_grid\n contour_vals: list, optional\n **kwargs:\n plotting parameters\n \"\"\"\n theta_grid = self._try_defaults(theta_grid)\n\n x_vals = self.grid.make_linspace()\n y_vals = theta_grid.make_linspace()\n return plot.contours(x_vals, y_vals, self.potential, contour_vals=contour_vals, **kwargs)\n\n def wavefunction(self, esys=None, which=0, theta_grid=None):\n \"\"\"Returns a zero-pi wave function in `phi`, `theta` basis\n\n Parameters\n ----------\n esys: ndarray, ndarray\n eigenvalues, eigenvectors\n which: int, optional\n index of desired wave function (default value = 0)\n theta_grid: Grid1d, optional\n used for setting a custom grid for theta; if None use self._default_grid\n\n Returns\n -------\n 
WaveFunctionOnGrid object\n \"\"\"\n evals_count = max(which + 1, 3)\n if esys is None:\n _, evecs = self.eigensys(evals_count)\n else:\n _, evecs = esys\n\n theta_grid = self._try_defaults(theta_grid)\n dim_theta = 2 * self.ncut + 1\n state_amplitudes = evecs[:, which].reshape(self.grid.pt_count, dim_theta)\n\n # Calculate psi_{phi, theta} = sum_n state_amplitudes_{phi, n} A_{n, theta}\n # where a_{n, theta} = 1/sqrt(2 pi) e^{i n theta}\n n_vec = np.arange(-self.ncut, self.ncut + 1)\n theta_vec = theta_grid.make_linspace()\n a_n_theta = np.exp(1j * np.outer(n_vec, theta_vec)) / (2 * np.pi) ** 0.5\n wavefunc_amplitudes = np.matmul(state_amplitudes, a_n_theta).T\n wavefunc_amplitudes = standardize_phases(wavefunc_amplitudes)\n\n grid2d = GridSpec(np.asarray([[self.grid.min_val, self.grid.max_val, self.grid.pt_count],\n [theta_grid.min_val, theta_grid.max_val, theta_grid.pt_count]]))\n return WaveFunctionOnGrid(grid2d, wavefunc_amplitudes)\n\n def plot_wavefunction(self, esys=None, which=0, theta_grid=None, mode='abs', zero_calibrate=True, **kwargs):\n \"\"\"Plots 2d phase-basis wave function.\n\n Parameters\n ----------\n esys: ndarray, ndarray\n eigenvalues, eigenvectors as obtained from `.eigensystem()`\n which: int, optional\n index of wave function to be plotted (default value = (0)\n theta_grid: Grid1d, optional\n used for setting a custom grid for theta; if None use self._default_grid\n mode: str, optional\n choices as specified in `constants.MODE_FUNC_DICT` (default value = 'abs_sqr')\n zero_calibrate: bool, optional\n if True, colors are adjusted to use zero wavefunction amplitude as the neutral color in the palette\n **kwargs:\n plot options\n\n Returns\n -------\n Figure, Axes\n \"\"\"\n theta_grid = self._try_defaults(theta_grid)\n\n amplitude_modifier = constants.MODE_FUNC_DICT[mode]\n wavefunc = self.wavefunction(esys, theta_grid=theta_grid, which=which)\n wavefunc.amplitudes = amplitude_modifier(wavefunc.amplitudes)\n return plot.wavefunction2d(wavefunc, zero_calibrate=zero_calibrate, **kwargs)\n\n def set_params_from_dict(self, meta_dict):\n \"\"\"Set object parameters by given metadata dictionary\n\n Parameters\n ----------\n meta_dict: dict\n \"\"\"\n for param_name, param_value in meta_dict.items():\n if key_in_grid1d(param_name):\n setattr(self.grid, param_name, param_value)\n elif is_numerical(param_value):\n setattr(self, param_name, param_value)\n\n @classmethod\n def create_from_dict(cls, meta_dict):\n \"\"\"Set object parameters by given metadata dictionary\n\n Parameters\n ----------\n meta_dict: dict\n \"\"\"\n filtered_dict = {}\n grid_dict = {}\n for param_name, param_value in meta_dict.items():\n if key_in_grid1d(param_name):\n grid_dict[param_name] = param_value\n elif is_numerical(param_value):\n filtered_dict[param_name] = param_value\n\n grid = Grid1d(**grid_dict)\n filtered_dict['grid'] = grid\n return cls(**filtered_dict)\n" ]
[ [ "scipy.sparse.dia_matrix", "numpy.matmul", "scipy.sparse.kron", "scipy.sparse.linalg.eigsh", "numpy.cos", "numpy.asarray", "numpy.arange", "numpy.sort", "numpy.sin", "numpy.square", "scipy.sparse.identity", "numpy.outer" ] ]
MarcinKonowalczyk/scikit-learn
[ "8b18d4cbfc3a10ce85decec292d30470c69f40d7", "8b18d4cbfc3a10ce85decec292d30470c69f40d7" ]
[ "examples/miscellaneous/plot_isotonic_regression.py", "examples/ensemble/plot_bias_variance.py" ]
[ "\"\"\"\n===================\nIsotonic Regression\n===================\n\nAn illustration of the isotonic regression on generated data (non-linear\nmonotonic trend with homoscedastic uniform noise).\n\nThe isotonic regression algorithm finds a non-decreasing approximation of a\nfunction while minimizing the mean squared error on the training data. The\nbenefit of such a non-parametric model is that it does not assume any shape for\nthe target function besides monotonicity. For comparison a linear regression is\nalso presented.\n\nThe plot on the right-hand side shows the model prediction function that\nresults from the linear interpolation of thresholds points. The thresholds\npoints are a subset of the training input observations and their matching\ntarget values are computed by the isotonic non-parametric fit.\n\n\"\"\"\nprint(__doc__)\n\n# Author: Nelle Varoquaux <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# License: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.isotonic import IsotonicRegression\nfrom sklearn.utils import check_random_state\n\nn = 100\nx = np.arange(n)\nrs = check_random_state(0)\ny = rs.randint(-50, 50, size=(n,)) + 50.0 * np.log1p(np.arange(n))\n\n# %%\n# Fit IsotonicRegression and LinearRegression models:\n\nir = IsotonicRegression(out_of_bounds=\"clip\")\ny_ = ir.fit_transform(x, y)\n\nlr = LinearRegression()\nlr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression\n\n# %%\n# Plot results:\n\nsegments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]\nlc = LineCollection(segments, zorder=0)\nlc.set_array(np.ones(len(y)))\nlc.set_linewidths(np.full(n, 0.5))\n\nfig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 6))\n\nax0.plot(x, y, \"C0.\", markersize=12)\nax0.plot(x, y_, \"C1.-\", markersize=12)\nax0.plot(x, lr.predict(x[:, np.newaxis]), \"C2-\")\nax0.add_collection(lc)\nax0.legend((\"Training data\", \"Isotonic fit\", \"Linear fit\"), loc=\"lower right\")\nax0.set_title(\"Isotonic regression fit on noisy data (n=%d)\" % n)\n\nx_test = np.linspace(-10, 110, 1000)\nax1.plot(x_test, ir.predict(x_test), \"C1-\")\nax1.plot(ir.X_thresholds_, ir.y_thresholds_, \"C1.\", markersize=12)\nax1.set_title(\"Prediction function (%d thresholds)\" % len(ir.X_thresholds_))\n\nplt.show()\n\n# %%\n# Note that we explicitly passed `out_of_bounds=\"clip\"` to the constructor of\n# `IsotonicRegression` to control the way the model extrapolates outside of the\n# range of data observed in the training set. This \"clipping\" extrapolation can\n# be seen on the plot of the decision function on the right-hand.\n", "\"\"\"\n============================================================\nSingle estimator versus bagging: bias-variance decomposition\n============================================================\n\nThis example illustrates and compares the bias-variance decomposition of the\nexpected mean squared error of a single estimator against a bagging ensemble.\n\nIn regression, the expected mean squared error of an estimator can be\ndecomposed in terms of bias, variance and noise. On average over datasets of\nthe regression problem, the bias term measures the average amount by which the\npredictions of the estimator differ from the predictions of the best possible\nestimator for the problem (i.e., the Bayes model). 
The variance term measures\nthe variability of the predictions of the estimator when fit over different\ninstances LS of the problem. Finally, the noise measures the irreducible part\nof the error which is due the variability in the data.\n\nThe upper left figure illustrates the predictions (in dark red) of a single\ndecision tree trained over a random dataset LS (the blue dots) of a toy 1d\nregression problem. It also illustrates the predictions (in light red) of other\nsingle decision trees trained over other (and different) randomly drawn\ninstances LS of the problem. Intuitively, the variance term here corresponds to\nthe width of the beam of predictions (in light red) of the individual\nestimators. The larger the variance, the more sensitive are the predictions for\n`x` to small changes in the training set. The bias term corresponds to the\ndifference between the average prediction of the estimator (in cyan) and the\nbest possible model (in dark blue). On this problem, we can thus observe that\nthe bias is quite low (both the cyan and the blue curves are close to each\nother) while the variance is large (the red beam is rather wide).\n\nThe lower left figure plots the pointwise decomposition of the expected mean\nsquared error of a single decision tree. It confirms that the bias term (in\nblue) is low while the variance is large (in green). It also illustrates the\nnoise part of the error which, as expected, appears to be constant and around\n`0.01`.\n\nThe right figures correspond to the same plots but using instead a bagging\nensemble of decision trees. In both figures, we can observe that the bias term\nis larger than in the previous case. In the upper right figure, the difference\nbetween the average prediction (in cyan) and the best possible model is larger\n(e.g., notice the offset around `x=2`). In the lower right figure, the bias\ncurve is also slightly higher than in the lower left figure. In terms of\nvariance however, the beam of predictions is narrower, which suggests that the\nvariance is lower. Indeed, as the lower right figure confirms, the variance\nterm (in green) is lower than for single decision trees. Overall, the bias-\nvariance decomposition is therefore no longer the same. The tradeoff is better\nfor bagging: averaging several decision trees fit on bootstrap copies of the\ndataset slightly increases the bias term but allows for a larger reduction of\nthe variance, which results in a lower overall mean squared error (compare the\nred curves int the lower figures). The script output also confirms this\nintuition. The total error of the bagging ensemble is lower than the total\nerror of a single decision tree, and this difference indeed mainly stems from a\nreduced variance.\n\nFor further details on bias-variance decomposition, see section 7.3 of [1]_.\n\nReferences\n----------\n\n.. [1] T. Hastie, R. Tibshirani and J. Friedman,\n \"Elements of Statistical Learning\", Springer, 2009.\n\n\"\"\"\nprint(__doc__)\n\n# Author: Gilles Louppe <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.tree import DecisionTreeRegressor\n\n# Settings\nn_repeat = 50 # Number of iterations for computing expectations\nn_train = 50 # Size of the training set\nn_test = 1000 # Size of the test set\nnoise = 0.1 # Standard deviation of the noise\nnp.random.seed(0)\n\n# Change this for exploring the bias-variance decomposition of other\n# estimators. 
This should work well for estimators with high variance (e.g.,\n# decision trees or KNN), but poorly for estimators with low variance (e.g.,\n# linear models).\nestimators = [\n (\"Tree\", DecisionTreeRegressor()),\n (\"Bagging(Tree)\", BaggingRegressor(DecisionTreeRegressor())),\n]\n\nn_estimators = len(estimators)\n\n\n# Generate data\ndef f(x):\n x = x.ravel()\n\n return np.exp(-(x ** 2)) + 1.5 * np.exp(-((x - 2) ** 2))\n\n\ndef generate(n_samples, noise, n_repeat=1):\n X = np.random.rand(n_samples) * 10 - 5\n X = np.sort(X)\n\n if n_repeat == 1:\n y = f(X) + np.random.normal(0.0, noise, n_samples)\n else:\n y = np.zeros((n_samples, n_repeat))\n\n for i in range(n_repeat):\n y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)\n\n X = X.reshape((n_samples, 1))\n\n return X, y\n\n\nX_train = []\ny_train = []\n\nfor i in range(n_repeat):\n X, y = generate(n_samples=n_train, noise=noise)\n X_train.append(X)\n y_train.append(y)\n\nX_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)\n\nplt.figure(figsize=(10, 8))\n\n# Loop over estimators to compare\nfor n, (name, estimator) in enumerate(estimators):\n # Compute predictions\n y_predict = np.zeros((n_test, n_repeat))\n\n for i in range(n_repeat):\n estimator.fit(X_train[i], y_train[i])\n y_predict[:, i] = estimator.predict(X_test)\n\n # Bias^2 + Variance + Noise decomposition of the mean squared error\n y_error = np.zeros(n_test)\n\n for i in range(n_repeat):\n for j in range(n_repeat):\n y_error += (y_test[:, j] - y_predict[:, i]) ** 2\n\n y_error /= n_repeat * n_repeat\n\n y_noise = np.var(y_test, axis=1)\n y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2\n y_var = np.var(y_predict, axis=1)\n\n print(\n \"{0}: {1:.4f} (error) = {2:.4f} (bias^2) \"\n \" + {3:.4f} (var) + {4:.4f} (noise)\".format(\n name, np.mean(y_error), np.mean(y_bias), np.mean(y_var), np.mean(y_noise)\n )\n )\n\n # Plot figures\n plt.subplot(2, n_estimators, n + 1)\n plt.plot(X_test, f(X_test), \"b\", label=\"$f(x)$\")\n plt.plot(X_train[0], y_train[0], \".b\", label=\"LS ~ $y = f(x)+noise$\")\n\n for i in range(n_repeat):\n if i == 0:\n plt.plot(X_test, y_predict[:, i], \"r\", label=r\"$\\^y(x)$\")\n else:\n plt.plot(X_test, y_predict[:, i], \"r\", alpha=0.05)\n\n plt.plot(X_test, np.mean(y_predict, axis=1), \"c\", label=r\"$\\mathbb{E}_{LS} \\^y(x)$\")\n\n plt.xlim([-5, 5])\n plt.title(name)\n\n if n == n_estimators - 1:\n plt.legend(loc=(1.1, 0.5))\n\n plt.subplot(2, n_estimators, n_estimators + n + 1)\n plt.plot(X_test, y_error, \"r\", label=\"$error(x)$\")\n plt.plot(X_test, y_bias, \"b\", label=\"$bias^2(x)$\"),\n plt.plot(X_test, y_var, \"g\", label=\"$variance(x)$\"),\n plt.plot(X_test, y_noise, \"c\", label=\"$noise(x)$\")\n\n plt.xlim([-5, 5])\n plt.ylim([0, 0.1])\n\n if n == n_estimators - 1:\n\n plt.legend(loc=(1.1, 0.5))\n\nplt.subplots_adjust(right=0.75)\nplt.show()\n" ]
[ [ "sklearn.utils.check_random_state", "sklearn.isotonic.IsotonicRegression", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "numpy.full", "matplotlib.collections.LineCollection", "numpy.linspace" ], [ "matplotlib.pyplot.legend", "numpy.zeros", "numpy.var", "matplotlib.pyplot.figure", "numpy.random.seed", "sklearn.tree.DecisionTreeRegressor", "numpy.exp", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "numpy.random.normal", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "matplotlib.pyplot.subplot", "numpy.random.rand", "matplotlib.pyplot.ylim", "numpy.sort", "matplotlib.pyplot.plot", "numpy.mean" ] ]
weihaoxie/FaceX-Zoo
[ "db0b087e4f4d28152e172d6c8d3767a8870733b4" ]
[ "addition_module/DSDG/DUM/utils.py" ]
[ "import os\nimport numpy as np\nimport torch\nimport shutil\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport sklearn\nfrom sklearn import metrics\nfrom sklearn.metrics import roc_curve, auc\nimport pdb\n\n\nclass AvgrageMeter(object):\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef get_threshold(score_file):\n with open(score_file, 'r') as file:\n lines = file.readlines()\n\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n angle = float(tokens[0])\n # pdb.set_trace()\n type = int(tokens[1])\n data.append({'map_score': angle, 'label': type})\n if type == 1:\n num_real += 1\n else:\n num_fake += 1\n\n min_error = count # account ACER (or ACC)\n min_threshold = 0.0\n min_ACC = 0.0\n min_ACER = 0.0\n min_APCER = 0.0\n min_BPCER = 0.0\n\n for d in data:\n threshold = d['map_score']\n\n type1 = len([s for s in data if s['map_score'] <= threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > threshold and s['label'] == 0])\n\n ACC = 1 - (type1 + type2) / count\n APCER = type2 / num_fake\n BPCER = type1 / num_real\n ACER = (APCER + BPCER) / 2.0\n\n if ACER < min_error:\n min_error = ACER\n min_threshold = threshold\n min_ACC = ACC\n min_ACER = ACER\n min_APCER = APCER\n min_BPCER = min_BPCER\n\n # print(min_error, min_threshold)\n return min_threshold, min_ACC, min_APCER, min_BPCER, min_ACER\n\n\ndef test_threshold_based(threshold, score_file):\n with open(score_file, 'r') as file:\n lines = file.readlines()\n\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n angle = float(tokens[0])\n type = int(tokens[1])\n data.append({'map_score': angle, 'label': type})\n if type == 1:\n num_real += 1\n else:\n num_fake += 1\n\n type1 = len([s for s in data if s['map_score'] <= threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > threshold and s['label'] == 0])\n\n ACC = 1 - (type1 + type2) / count\n APCER = type2 / num_fake\n BPCER = type1 / num_real\n ACER = (APCER + BPCER) / 2.0\n\n return ACC, APCER, BPCER, ACER\n\n\ndef get_err_threhold(fpr, tpr, threshold):\n RightIndex = (tpr + (1 - fpr) - 1)\n right_index = np.argmax(RightIndex)\n best_th = threshold[right_index]\n err = fpr[right_index]\n\n differ_tpr_fpr_1 = tpr + fpr - 1.0\n\n right_index = np.argmin(np.abs(differ_tpr_fpr_1))\n best_th = threshold[right_index]\n err = fpr[right_index]\n\n # print(err, best_th)\n return err, best_th\n\n\n# def performances(dev_scores, dev_labels, test_scores, test_labels):\ndef performances(map_score_val_filename, map_score_test_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = float(tokens[1]) # label = int(tokens[1])\n 
val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n val_err, val_threshold = get_err_threhold(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n val_APCER = type2 / num_fake\n val_BPCER = type1 / num_real\n val_ACER = (val_APCER + val_BPCER) / 2.0\n\n # test \n with open(map_score_test_filename, 'r') as file2:\n lines = file2.readlines()\n test_scores = []\n test_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = float(tokens[1]) # label = int(tokens[1])\n test_scores.append(score)\n test_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n # test based on val_threshold \n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n print([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n print([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n test_ACC = 1 - (type1 + type2) / count\n test_APCER = type2 / num_fake\n test_BPCER = type1 / num_real\n test_ACER = (test_APCER + test_BPCER) / 2.0\n\n # test based on test_threshold \n fpr_test, tpr_test, threshold_test = roc_curve(test_labels, test_scores, pos_label=1)\n err_test, best_test_threshold = get_err_threhold(fpr_test, tpr_test, threshold_test)\n\n type1 = len([s for s in data if s['map_score'] <= best_test_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > best_test_threshold and s['label'] == 0])\n\n test_threshold_ACC = 1 - (type1 + type2) / count\n test_threshold_APCER = type2 / num_fake\n test_threshold_BPCER = type1 / num_real\n test_threshold_ACER = (test_threshold_APCER + test_threshold_BPCER) / 2.0\n\n return val_threshold, best_test_threshold, val_ACC, val_ACER, test_ACC, test_APCER, test_BPCER, test_ACER, test_threshold_ACER\n\n\ndef performances_SiW_EER(map_score_val_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = int(tokens[1])\n val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n val_err, val_threshold = get_err_threhold(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n val_APCER = type2 / num_fake\n val_BPCER = type1 / num_real\n val_ACER = (val_APCER + val_BPCER) / 2.0\n\n return val_threshold, val_ACC, val_APCER, val_BPCER, val_ACER\n\n\ndef performances_SiWM_EER(map_score_val_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = 
file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = int(tokens[1])\n val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n val_err, val_threshold = get_err_threhold(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n val_APCER = type2 / num_fake\n val_BPCER = type1 / num_real\n val_ACER = (val_APCER + val_BPCER) / 2.0\n\n return val_threshold, val_err, val_ACC, val_APCER, val_BPCER, val_ACER\n\n\ndef get_err_threhold_CASIA_Replay(fpr, tpr, threshold):\n RightIndex = (tpr + (1 - fpr) - 1)\n right_index = np.argmax(RightIndex)\n best_th = threshold[right_index]\n err = fpr[right_index]\n\n differ_tpr_fpr_1 = tpr + fpr - 1.0\n\n right_index = np.argmin(np.abs(differ_tpr_fpr_1))\n best_th = threshold[right_index]\n err = fpr[right_index]\n\n # print(err, best_th)\n return err, best_th, right_index\n\n\ndef performances_CASIA_Replay(map_score_val_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = float(tokens[1]) # int(tokens[1])\n val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n val_err, val_threshold, right_index = get_err_threhold_CASIA_Replay(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n print([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n print([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n\n FRR = 1 - tpr # FRR = 1 - TPR\n\n HTER = (fpr + FRR) / 2.0 # error recognition rate & reject recognition rate\n\n return val_ACC, fpr[right_index], FRR[right_index], HTER[right_index], val_threshold\n\n\ndef performances_ZeroShot(map_score_val_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = int(tokens[1])\n val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n auc_val = metrics.auc(fpr, tpr)\n\n val_err, val_threshold, right_index = get_err_threhold_CASIA_Replay(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n\n FRR = 
1 - tpr # FRR = 1 - TPR\n\n HTER = (fpr + FRR) / 2.0 # error recognition rate & reject recognition rate\n\n return val_ACC, auc_val, HTER[right_index]\n\n\ndef count_parameters_in_MB(model):\n return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if \"auxiliary\" not in name) / 1e6\n\n\ndef save_checkpoint(state, is_best, save):\n filename = os.path.join(save, 'checkpoint.pth.tar')\n torch.save(state, filename)\n if is_best:\n best_filename = os.path.join(save, 'model_best.pth.tar')\n shutil.copyfile(filename, best_filename)\n\n\ndef save(model, model_path):\n torch.save(model.state_dict(), model_path)\n\n\ndef load(model, model_path):\n model.load_state_dict(torch.load(model_path))\n\n\ndef drop_path(x, drop_prob):\n if drop_prob > 0.:\n keep_prob = 1. - drop_prob\n mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1, 1).bernoulli_(keep_prob))\n x.div_(keep_prob)\n x.mul_(mask)\n return x\n\n\ndef create_exp_dir(path, scripts_to_save=None):\n if not os.path.exists(path):\n os.mkdir(path)\n print('Experiment dir : {}'.format(path))\n\n if scripts_to_save is not None:\n os.mkdir(os.path.join(path, 'scripts'))\n for script in scripts_to_save:\n dst_file = os.path.join(path, 'scripts', os.path.basename(script))\n shutil.copyfile(script, dst_file)\n" ]
[ [ "torch.load", "sklearn.metrics.roc_curve", "sklearn.metrics.auc", "torch.save", "numpy.abs", "numpy.argmax" ] ]
fancyerii/voicebook
[ "def82da8577086d0361643a05fec2463006533a9" ]
[ "chapter_2_collection/diarize.py" ]
[ "'''\n================================================ \n## VOICEBOOK REPOSITORY ## \n================================================ \n\nrepository name: voicebook \nrepository version: 1.0 \nrepository link: https://github.com/jim-schwoebel/voicebook \nauthor: Jim Schwoebel \nauthor contact: [email protected] \ndescription: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts. \nlicense category: opensource \nlicense: Apache 2.0 license \norganization name: NeuroLex Laboratories, Inc. \nlocation: Seattle, WA \nwebsite: https://neurolex.ai \nrelease date: 2018-09-28 \n\nThis code (voicebook) is hereby released under a Apache 2.0 license license. \n\nFor more information, check out the license terms below. \n\n================================================ \n## LICENSE TERMS ## \n================================================ \n\nCopyright 2018 NeuroLex Laboratories, Inc. \n\nLicensed under the Apache License, Version 2.0 (the \"License\"); \nyou may not use this file except in compliance with the License. \nYou may obtain a copy of the License at \n\n http://www.apache.org/licenses/LICENSE-2.0 \n\nUnless required by applicable law or agreed to in writing, software \ndistributed under the License is distributed on an \"AS IS\" BASIS, \nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \nSee the License for the specific language governing permissions and \nlimitations under the License. \n\n================================================ \n## SERVICE STATEMENT ## \n================================================ \n\nIf you are using the code written for a larger project, we are \nhappy to consult with you and help you with deployment. Our team \nhas >10 world experts in Kafka distributed architectures, microservices \nbuilt on top of Node.js / Python / Docker, and applying machine learning to \nmodel speech and text data. \n\nWe have helped a wide variety of enterprises - small businesses, \nresearchers, enterprises, and/or independent developers. \n\nIf you would like to work with us let us know @ [email protected]. 
\n\n================================================ \n## DIARIZE.PY ## \n================================================ \n\nThis function takes in a speech sample and diarizes it for 2 speakers.\n\nThe output files are stored in a folder structure with Speaker A and Speaker B.\n\nIt is assumed to be a 2 speaker diarization problem.\n\nThe output .zip file is named filename[0:-4]+'diarization.zip' and contains:\n\n--->filename[0:-4]+'.json'\n--> speaker 1 folder\n --> speaker 1 sections (multiple .wav files)\n --> speaker 1 stiched togetehr (single .wav file)\n--> speaker 2 folder\n --> speaker 2 sections (multiple .wav files)\n --> speaker 2 stich (single .wav file)\n\nDiarization is done with the pyaudioanalysis3 library.\n'''\n\nimport os, json, importlib, scipy, shutil, ffmpy, time, sys, getpass, zipfile\nimport speech_recognition as sr_audio\nfrom pydub import AudioSegment\nimport numpy as np \n\nif 'pyAudioAnalysis3' not in os.listdir():\n os.system(\"git clone [email protected]:NeuroLexDiagnostics/pyAudioAnalysis3.git\")\n \nsys.path.append(os.getcwd()+'/pyAudioAnalysis3')\n\nimport audioTrainTest as aT\nimport audioBasicIO \nimport audioFeatureExtraction as aF\nimport audioSegmentation as aS\n\n##INITIALIZE FUNCTIONS FOR DIARIZATION\n####################################################################################\n\ndef exportfile(newAudio,time1,time2,filename,i,speaknum):\n #Exports to a wav file in the current path.\n newAudio2 = newAudio[time1:time2]\n print('making '+filename[0:-4]+'_'+str(speaknum)+'_'+str(i)+'_'+str(time1/1000)+'_'+str(time2/1000)+'.wav')\n newAudio2.export(filename[0:-4]+'_'+str(speaknum)+'_'+str(i)+'_'+str(time1/1000)+'_'+str(time2/1000)+'.wav', format=\"wav\")\n\n return filename[0:-4]+'_'+str(speaknum)+'_'+str(i)+'_'+str(time1/1000)+'_'+str(time2/1000)+'.wav'\n\ndef stitchtogether(dirlist,dirloc,filename):\n try:\n #assumes already in proper directory \n for i in range(len(dirlist)):\n if i ==0:\n sound=AudioSegment.from_wav(dirloc+'/'+str(dirlist[i]))\n else:\n sound=sound+AudioSegment.from_wav(dirloc+'/'+str(dirlist[i]))\n sound.export(dirloc+'/'+filename, format=\"wav\")\n\n except:\n print('error stitching...')\n\ndef stereo2mono(audiodata,filename):\n newaudiodata = list()\n \n for i in range(len(audiodata)):\n d = audiodata[i][0]/2 + audiodata[i][1]/2\n newaudiodata.append(d)\n \n return np.array(newaudiodata, dtype='int16')\n #to apply this function, SR=sample rate usually 44100\n #wavfile.write(newfilename, sr, newaudiodata)\n\ndef convertformat(filename):\n newfilename=filename[0:-4]+'.wav'\n ff = ffmpy.FFmpeg(\n inputs={filename:None},\n outputs={newfilename: None}\n )\n ff.run()\n\n return newfilename\n\ndef zipdir(path, ziph):\n # ziph is zipfile handle\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file))\n\ndef transcribe_audio_google(filename):\n # transcribe the audio (note this is only done if a voice sample)\n r=sr_audio.Recognizer()\n with sr_audio.AudioFile(filename) as source:\n audio = r.record(source) \n text=r.recognize_google_cloud(audio)\n\n return text \n\ndef transcribe_audio_sphinx(filename):\n # transcribe the audio (note this is only done if a voice sample)\n r=sr_audio.Recognizer()\n with sr_audio.AudioFile(filename) as source:\n audio = r.record(source) \n text=r.recognize_sphinx(audio)\n print('transcript: '+text)\n \n return text\n\n##GO TO HOST DIRECTORY AND BEGIN BULK PROCESSING 
\n####################################################################################\n\n#host directory in app is likely /usr/app/...\nhostdir=os.getcwd()\ncurdir=os.listdir()\n\n#now create some folders if they have not already been created \nincoming_dir=hostdir+'/diarize-incoming/'\nprocessed_dir=hostdir+'/diarize-processed/'\n\ntry:\n os.chdir(incoming_dir)\n curdir=os.listdir()\n if 'data' not in curdir:\n #this is necessary for diarnization\n shutil.copytree(hostdir+'/pyaudioanalysis3/data/',os.getcwd()+'/data/')\nexcept:\n os.mkdir(incoming_dir)\n os.chdir(incoming_dir)\n curdir=os.listdir()\n if 'data' not in curdir:\n #this is necessary for diarization \n shutil.copytree(hostdir+'/pyaudioanalysis3/data/',os.getcwd()+'/data/')\n\ntry:\n os.chdir(processed_dir)\nexcept:\n os.mkdir(processed_dir)\n\n#change to incoming directory to look for samples\nos.chdir(incoming_dir)\n\n#initialize sleep time for worker (default is 1 second)\nsleeptime=1\n\n# now initialize process list with files already in the directory\nprocesslist=os.listdir()\nconvertformat_list=list()\n\n#error counts will help us debug later\nerrorcount=0\nprocesscount=0\n\n#initialize t for infinite loop\nt=1\n\n#infinite loop for worker now begins with while loop...\n\nwhile t>0:\n\n #go to incoming directory\n os.chdir(incoming_dir)\n listdir=os.listdir()\n print(listdir)\n\n #try statement to avoid errors\n try:\n if listdir==['.DS_Store'] or listdir == ['data'] or listdir==['data','.DS_Store'] or listdir==[]:\n #pass if no files are processible\n print('no files found...')\n \n else:\n #look for any files that have not been previously in the directory\n for i in range(len(listdir)):\n if listdir[i]=='.DS_Store' or listdir[i]=='data':\n pass\n \n else:\n #convert format if not .wav\n if listdir[i][-4:] != '.wav':\n filename=convertformat(listdir[i])\n os.remove(listdir[i])\n else:\n filename=listdir[i]\n \n #log start time for later \n start_time=time.time()\n \n if filename not in processlist:\n print('processing '+filename)\n processlist.append(listdir[i])\n filesize=os.path.getsize(filename)\n \n if filesize > int(500):\n #if over 20 minute of audio collected (10.580MB), assume 2 speakers \n\n shutil.copy(incoming_dir+filename,hostdir+'/pyaudioanalysis3/data/'+filename)\n \n g=aS.speakerDiarization(filename,2,mtSize=2.0,mtStep=0.2,stWin=0.05,LDAdim=35, PLOT=False)\n\n s0seg=list()\n s1seg=list()\n allseg=list()\n\n for i in range(len(g)-1):\n if i==0:\n start=i/5.0\n else:\n if g[i]==g[i+1]:\n pass\n #continue where left off to find start length, 20 milliseconds \n else:\n if g[i+1]==0:\n end=i/5.0\n s1seg.append([start,end])\n allseg.append([0,[start,end]])\n start=(i+1)/5.0\n \n elif g[i+1]==1:\n end=i/5.0\n s0seg.append([start,end])\n allseg.append([1, [start,end]])\n start=(i+1)/5.0\n \n else:\n print('error')\n\n #now save this data in individual segments\n newAudio = AudioSegment.from_wav(filename)\n diarizedir=os.getcwd()+'/'+filename[0:-4]+'_diarization'\n\n try:\n os.mkdir(diarizedir)\n os.chdir(diarizedir)\n except:\n os.chdir(diarizedir)\n\n #copy file to this directory and delete from other directory\n shutil.move(incoming_dir+filename,os.getcwd()+'/'+filename)\n\n #diarize speaker 1 \n print('diarizing speaker 1')\n curdir=os.getcwd()\n newdir1=curdir+'/1'\n\n try:\n os.mkdir(newdir1)\n os.chdir(newdir1)\n except:\n os.chdir(newdir1)\n \n for i in range(len(s0seg)):\n filename2=filename[0:-4]+'_speaker_1'+str(i)+'.wav'\n print(('making file @ %s to %s')%(str(s0seg[i][0]),str(s0seg[i][1])))\n 
exportfile(newAudio,s0seg[i][0]*1000,s0seg[i][1]*1000,filename,i,1)\n\n curdir=os.getcwd()\n listdir=os.listdir(curdir)\n removedfilelist1=list()\n keptfilelist1=list()\n\n for i in range(len(listdir)):\n if os.path.getsize(listdir[i]) < 300000:\n removedfile=[listdir[i], os.path.getsize(listdir[i])]\n removedfilelist1.append(removedfile)\n os.remove(listdir[i])\n else:\n keptfile=[listdir[i],os.path.getsize(listdir[i])]\n keptfilelist1.append(keptfile)\n\n #speaker 1 stitched size\n s1stitchedsize=0\n for i in range(len(keptfilelist1)):\n s1stitchedsize=s1stitchedsize+int(keptfilelist1[i][1])\n \n #speaker 2 \n os.chdir(diarizedir)\n curdir=os.getcwd()\n newdir2=curdir+'/2'\n\n try:\n os.mkdir(newdir2)\n os.chdir(newdir2)\n except:\n os.chdir(newdir2)\n \n print('diarizing speaker 2')\n for i in range(len(s1seg)):\n filename2=filename[0:-4]+'_speaker_2'+str(i)+'.wav'\n print(('making file @ %s to %s')%(str(s1seg[i][0]),str(s1seg[i][1])))\n exportfile(newAudio,s1seg[i][0]*1000,s1seg[i][1]*1000,filename,i,2)\n\n curdir=os.getcwd()\n listdir=os.listdir(curdir)\n removedfilelist2=list()\n keptfilelist2=list()\n\n ##now delete files that are less than 300 KB \n for i in range(len(listdir)):\n if os.path.getsize(listdir[i]) < 300000:\n removedfile=[listdir[i], os.path.getsize(listdir[i])]\n removedfilelist2.append(removedfile)\n os.remove(listdir[i])\n else:\n keptfile=[listdir[i],os.path.getsize(listdir[i])]\n keptfilelist2.append(keptfile)\n\n #speaker 2 stitched size\n s2stitchedsize=0\n for i in range(len(keptfilelist2)):\n s2stitchedsize=s2stitchedsize+int(keptfilelist2[i][1])\n\n # all segments \n os.chdir(diarizedir)\n curdir=os.getcwd()\n newdir3=curdir+'/all'\n\n try:\n os.mkdir(newdir3)\n os.chdir(newdir3)\n except:\n os.chdir(newdir3)\n\n print('transcribing session')\n master_transcript=open('transcript.txt','w')\n\n for i in range(len(allseg)):\n print(('making file @ %s to %s')%(str(allseg[i][1][0]),str(allseg[i][1][1])))\n filename2=str(i)+'_'+str(allseg[i][0])+'.wav'\n filename2=exportfile(newAudio,allseg[i][1][0]*1000,allseg[i][1][1]*1000,filename,i,2)\n new_filename=str(i)+'_'+str(allseg[i][0])+'.wav'\n os.rename(filename2,new_filename)\n os.system('ffmpeg -i %s -ac 1 -acodec pcm_s16le -ar 16000 %s -y'%(new_filename,new_filename))\n\n if i == 0:\n speaker='102334'\n\n try:\n try:\n transcript=transcribe_audio_google(new_filename)\n except:\n transcript=transcribe_audio_sphinx(new_filename)\n\n if str(allseg[i][0]) != speaker:\n speaker=str(allseg[i][0])\n master_transcript.write('\\n\\nspeaker %s: %s '%(str(allseg[i][0]), transcript))\n print('\\n\\nspeaker %s: %s '%(str(allseg[i][0]), transcript))\n else:\n speaker=str(allseg[i][0])\n master_transcript.write('%s'%(transcript))\n print(transcript)\n \n except:\n print('failed transcript')\n\n master_transcript.close()\n transcript=open('transcript.txt').read()\n\n #calculate processing time\n end_time=time.time()\n processtime=end_time-start_time \n\n #this is the .json serializable diarization\n os.chdir(diarizedir)\n \n data={\n 'filename':filename,\n 'file location':diarizedir,\n 'file size':filesize,\n 'processing time':processtime,\n 'processcount':processcount,\n 'errorcount':errorcount,\n 'data':list(g),\n 'master transcript': transcript,\n 'allseg': allseg,\n 'speaker 1':s0seg,\n 'speaker 2':s1seg,\n 'speaker 1 kept segments':keptfilelist1,\n 'speaker 1 stitched size':s1stitchedsize,\n 'speaker 1 folder location':newdir1,\n 'speaker 2 kept segments':keptfilelist2,\n 'speaker 2 stitched size':s2stitchedsize,\n 'speaker 
2 folder location':newdir2,\n 'speaker 1 deleted segments':removedfilelist1,\n 'speaker 2 deleted segments':removedfilelist2,\n }\n\n #write to json \n os.chdir(diarizedir)\n with open(filename[0:-4]+'.json', 'w') as f:\n json.dump(data, f)\n f.close()\n\n #read the db\n g=json.loads(open(filename[0:-4]+'.json').read())\n keptlist1=g['speaker 1 kept segments']\n keptloc1=g['speaker 1 folder location']\n filelist1=list()\n for i in range(len(keptlist1)):\n filelist1.append(str(keptlist1[i][0]))\n\n keptlist2=g['speaker 2 kept segments']\n keptloc2=g['speaker 2 folder location']\n filelist2=list()\n for i in range(len(keptlist2)):\n filelist2.append(str(keptlist2[i][0]))\n\n #save stitch to locations where segments are \n os.chdir(keptloc1)\n try:\n print('stitching to location 1: ' + keptloc1)\n print(filelist1)\n stitchtogether(filelist1,keptloc1,'stitched_1.wav')\n except:\n print('error stitching 1')\n\n #save stitch to locations where segments are\n os.chdir(keptloc2)\n try:\n print('stiching to location 2: ' + keptloc2)\n print(filelist2)\n stitchtogether(filelist2,keptloc2,'stitched_2.wav')\n except:\n print('error stitching 2')\n \n #go back to the incoming dir folder for further processing \n os.chdir(incoming_dir)\n\n #zip the entire directory into a .zip file and move to processed_dir folder\n shutil.make_archive(filename[0:-4]+'_diarization','zip',filename[0:-4]+'_diarization/') \n shutil.move(incoming_dir+filename[0:-4]+'_diarization.zip',processed_dir+filename[0:-4]+'_diarization.zip')\n\n #delete the directory using shutil\n shutil.rmtree(filename[0:-4]+'_diarization')\n\n #update processcount\n processcount=processcount+1\n\n else:\n errorcount=errorcount+1\n os.remove(filename)\n print('skipping file, need to resample (too small size)')\n \n #sleep to avoid server overhead\n print('sleeping...')\n time.sleep(sleeptime)\n except:\n print('error')\n print('sleeping...')\n errorcount=errorcount+1\n time.sleep(sleeptime)\n" ]
[ [ "numpy.array" ] ]
yliu1229/CPCTR
[ "66fcd336ee69fd18b322853f195c5b65b4a046b7" ]
[ "CPCTrans/main.py" ]
[ "import os\nimport sys\nimport time\nimport re\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom tensorboardX import SummaryWriter\nimport matplotlib.pyplot as plt\n\nplt.switch_backend('agg')\n\nsys.path.append('../Utils')\nfrom CPCTrans.dataset_3d import *\nfrom CPCTrans.model_3d import *\nfrom Backbone.resnet import neq_load_customized\nfrom Utils.augmentation import *\nfrom Utils.utils import AverageMeter, save_checkpoint, denorm, calc_topk_accuracy\n\nimport torch\nimport torch.optim as optim\nfrom torch.utils import data\nfrom torchvision import datasets, models, transforms\nimport torchvision.utils as vutils\n\ntorch.backends.cudnn.benchmark = True\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--net', default='resnet18', type=str)\nparser.add_argument('--model', default='cpc-trans', type=str)\nparser.add_argument('--dataset', default='ucf101', type=str)\nparser.add_argument('--num_seq', default=8, type=int, help='number of video blocks')\nparser.add_argument('--pred_step', default=3, type=int)\nparser.add_argument('--ds', default=3, type=int, help='frame downsampling rate')\nparser.add_argument('--batch_size', default=16, type=int)\nparser.add_argument('--lr', default=1e-3, type=float, help='learning rate')\nparser.add_argument('--wd', default=1e-5, type=float, help='weight decay')\nparser.add_argument('--resume', default='', type=str, help='path of model to resume')\nparser.add_argument('--pretrain', default='', type=str, help='path of pretrained model')\nparser.add_argument('--epochs', default=300, type=int, help='number of total epochs to run')\nparser.add_argument('--start_epoch', default=1, type=int, help='manual epoch number (useful on restarts)')\nparser.add_argument('--gpu', default='0', type=str)\nparser.add_argument('--print_freq', default=200, type=int, help='frequency of printing output during training')\nparser.add_argument('--reset_lr', action='store_true', help='Reset learning rate when resume training?')\nparser.add_argument('--prefix', default='tmp', type=str, help='prefix of checkpoint filename')\nparser.add_argument('--train_what', default='all', type=str)\nparser.add_argument('--img_dim', default=128, type=int)\n\n\ndef main():\n torch.manual_seed(0)\n np.random.seed(0)\n global args;\n\n args = parser.parse_args()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu)\n global cuda;\n cuda = torch.device('cuda')\n\n ### CPC with TransformerEncoder model ###\n if args.model == 'cpc-trans':\n model = CPC_Trans(sample_size=args.img_dim,\n num_seq=args.num_seq,\n network=args.net,\n pred_step=args.pred_step)\n else:\n raise ValueError('wrong model!')\n\n model = model.to(cuda)\n global criterion;\n criterion = nn.CrossEntropyLoss()\n\n params = model.parameters()\n optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.wd)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)\n args.old_lr = None\n\n best_acc = 0\n global iteration;\n iteration = 0\n\n ### restart training ###\n if args.resume:\n if os.path.isfile(args.resume):\n args.old_lr = float(re.search('_lr(.+?)_', args.resume).group(1))\n print(\"=> loading resumed checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))\n args.start_epoch = checkpoint['epoch']\n iteration = checkpoint['iteration']\n best_acc = checkpoint['best_acc']\n model.load_state_dict(checkpoint['state_dict'])\n if not args.reset_lr: # if didn't reset lr, load old optimizer\n 
optimizer.load_state_dict(checkpoint['optimizer'])\n else:\n print('==== Change lr from %f to %f ====' % (args.old_lr, args.lr))\n print(\"=> loaded resumed checkpoint '{}' (epoch {}) with best_acc {}\".format(args.resume, checkpoint['epoch'], best_acc))\n else:\n print(\"[Warning] no checkpoint found at '{}'\".format(args.resume))\n\n if args.pretrain:\n if os.path.isfile(args.pretrain):\n print(\"=> loading pretrained checkpoint '{}'\".format(args.pretrain))\n checkpoint = torch.load(args.pretrain, map_location=torch.device('cpu'))\n model = neq_load_customized(model, checkpoint['state_dict'])\n print(\"=> loaded pretrained checkpoint '{}' (epoch {})\"\n .format(args.pretrain, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.pretrain))\n\n ### load data ###\n if args.dataset == 'ucf101': # designed for ucf101, short size=256, rand crop to 224x224 then scale to 128x128\n transform = transforms.Compose([\n RandomHorizontalFlip(consistent=True),\n RandomCrop(size=224, consistent=True),\n Scale(size=(args.img_dim, args.img_dim)),\n RandomGray(consistent=False, p=0.5),\n ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=1.0),\n ToTensor(),\n Normalize()\n ])\n elif args.dataset == 'k400': # designed for kinetics400, short size=150, rand crop to 128x128\n transform = transforms.Compose([\n RandomSizedCrop(size=args.img_dim, consistent=True, p=1.0),\n RandomHorizontalFlip(consistent=True),\n RandomGray(consistent=False, p=0.5),\n ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=1.0),\n ToTensor(),\n Normalize()\n ])\n\n train_loader = get_data(transform, 'train')\n val_loader = get_data(transform, 'val')\n\n # setup tools\n global de_normalize;\n de_normalize = denorm()\n global img_path;\n img_path, model_path = set_path(args)\n global writer_train\n try: # old version\n writer_val = SummaryWriter(log_dir=os.path.join(img_path, 'val'))\n writer_train = SummaryWriter(log_dir=os.path.join(img_path, 'train'))\n except: # v1.7\n writer_val = SummaryWriter(logdir=os.path.join(img_path, 'val'))\n writer_train = SummaryWriter(logdir=os.path.join(img_path, 'train'))\n\n print('-- start main loop --')\n\n ### main loop ###\n for epoch in range(args.start_epoch, args.epochs):\n train_loss, train_acc, train_accuracy_list = train(train_loader, model, optimizer, epoch)\n val_loss, val_acc, val_accuracy_list = validate(val_loader, model, epoch)\n scheduler.step()\n print('\\t Epoch: ', epoch, 'with lr: ', scheduler.get_last_lr())\n\n # save curve\n writer_train.add_scalar('global/loss', train_loss, epoch)\n writer_train.add_scalar('global/accuracy', train_acc, epoch)\n writer_val.add_scalar('global/loss', val_loss, epoch)\n writer_val.add_scalar('global/accuracy', val_acc, epoch)\n writer_train.add_scalar('accuracy/top1', train_accuracy_list[0], epoch)\n writer_train.add_scalar('accuracy/top3', train_accuracy_list[1], epoch)\n writer_train.add_scalar('accuracy/top5', train_accuracy_list[2], epoch)\n writer_val.add_scalar('accuracy/top1', val_accuracy_list[0], epoch)\n writer_val.add_scalar('accuracy/top3', val_accuracy_list[1], epoch)\n writer_val.add_scalar('accuracy/top5', val_accuracy_list[2], epoch)\n\n # save check_point\n is_best = val_acc > best_acc;\n best_acc = max(val_acc, best_acc)\n save_checkpoint({'epoch': epoch + 1,\n 'net': args.net,\n 'state_dict': model.state_dict(),\n 'best_acc': best_acc,\n 'optimizer': optimizer.state_dict(),\n 'iteration': iteration},\n is_best, filename=os.path.join(model_path, 
'epoch%s.pth.tar' % str(epoch + 1)), keep_all=False)\n\n print('Training from ep %d to ep %d finished' % (args.start_epoch, args.epochs))\n\n\n\ndef process_output(mask):\n '''task mask as input, compute the target for contrastive loss'''\n (B, NP, SQ, B2, NS, _) = mask.size() # [B, P, SQ, B, N, SQ]\n target = mask == 1\n target = target * 1\n target.requires_grad = False\n return target, (B, B2, NS, NP, SQ)\n\n\ndef train(data_loader, model, optimizer, epoch):\n losses = AverageMeter()\n accuracy = AverageMeter()\n accuracy_list = [AverageMeter(), AverageMeter(), AverageMeter()]\n model.train()\n global iteration\n\n for idx, input_seq in enumerate(data_loader):\n tic = time.time()\n input_seq = input_seq.to(cuda)\n B = input_seq.size(0)\n [score_, mask_] = model(input_seq)\n # visualize\n if (iteration == 0) or (iteration == args.print_freq):\n if B > 2: input_seq = input_seq[0:2, :]\n writer_train.add_image('input_seq',\n de_normalize(vutils.make_grid(\n input_seq.view(-1, 3, args.img_dim, args.img_dim),\n nrow=args.num_seq)),\n iteration)\n del input_seq\n\n if idx == 0: target_, (_, B2, NS, NP, SQ) = process_output(mask_)\n\n score_flattened = score_.view(B * NP * SQ, B2 * NS * SQ)\n target_flattened = target_.contiguous().view(B * NP * SQ, B2 * NS * SQ)\n target_flattened = target_flattened.argmax(dim=1)\n\n loss = criterion(score_flattened, target_flattened)\n top1, top3, top5 = calc_topk_accuracy(score_flattened, target_flattened, (1, 3, 5))\n\n accuracy_list[0].update(top1.item(), B)\n accuracy_list[1].update(top3.item(), B)\n accuracy_list[2].update(top5.item(), B)\n\n losses.update(loss.item(), B)\n accuracy.update(top1.item(), B)\n\n del score_\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n del loss\n\n if idx % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Loss {loss.val:.6f} ({loss.local_avg:.4f})\\t'\n 'Acc: top1 {3:.4f}; top3 {4:.4f}; top5 {5:.4f} T:{6:.2f}\\t'.format(\n epoch, idx, len(data_loader), top1, top3, top5, time.time() - tic, loss=losses))\n\n writer_train.add_scalar('local/loss', losses.val, iteration)\n writer_train.add_scalar('local/accuracy', accuracy.val, iteration)\n\n iteration += 1\n\n return losses.local_avg, accuracy.local_avg, [i.local_avg for i in accuracy_list]\n\n\ndef validate(data_loader, model, epoch):\n losses = AverageMeter()\n accuracy = AverageMeter()\n accuracy_list = [AverageMeter(), AverageMeter(), AverageMeter()]\n model.eval()\n\n with torch.no_grad():\n for idx, input_seq in tqdm(enumerate(data_loader), total=len(data_loader)):\n input_seq = input_seq.to(cuda)\n B = input_seq.size(0)\n [score_, mask_] = model(input_seq)\n del input_seq\n\n if idx == 0: target_, (_, B2, NS, NP, SQ) = process_output(mask_)\n\n score_flattened = score_.view(B * NP * SQ, B2 * NS * SQ)\n target_flattened = target_.contiguous().view(B * NP * SQ, B2 * NS * SQ)\n target_flattened = target_flattened.argmax(dim=1)\n\n loss = criterion(score_flattened, target_flattened)\n top1, top3, top5 = calc_topk_accuracy(score_flattened, target_flattened, (1, 3, 5))\n\n losses.update(loss.item(), B)\n accuracy.update(top1.item(), B)\n\n accuracy_list[0].update(top1.item(), B)\n accuracy_list[1].update(top3.item(), B)\n accuracy_list[2].update(top5.item(), B)\n\n print('[{0}/{1}] Loss {loss.local_avg:.4f}\\t'\n 'Acc: top1 {2:.4f}; top3 {3:.4f}; top5 {4:.4f} \\t'.format(\n epoch, args.epochs, *[i.avg for i in accuracy_list], loss=losses))\n return losses.local_avg, accuracy.local_avg, [i.local_avg for i in accuracy_list]\n\n\ndef 
get_data(transform, mode='train'):\n print('Loading data for \"%s\" ...' % mode)\n if args.dataset == 'k400':\n pass\n elif args.dataset == 'ucf101':\n dataset = UCF101_3d(mode=mode,\n transform=transform,\n num_seq=args.num_seq,\n downsample=args.ds,\n which_split=3)\n else:\n raise ValueError('dataset not supported')\n\n sampler = data.RandomSampler(dataset)\n\n if mode == 'train':\n data_loader = data.DataLoader(dataset,\n batch_size=args.batch_size,\n sampler=sampler,\n shuffle=False,\n num_workers=2,\n pin_memory=True,\n drop_last=True)\n elif mode == 'val':\n data_loader = data.DataLoader(dataset,\n batch_size=args.batch_size,\n sampler=sampler,\n shuffle=False,\n num_workers=2,\n pin_memory=True,\n drop_last=True)\n print('\"%s\" dataset size: %d' % (mode, len(dataset)))\n return data_loader\n\n\ndef set_path(args):\n if args.resume:\n exp_path = os.path.dirname(os.path.dirname(args.resume))\n else:\n exp_path = 'log_{args.prefix}/{args.dataset}-{args.img_dim}_{0}_{args.model}_\\\nbs{args.batch_size}_lr{1}_seq{args.num_seq}_pred{args.pred_step}_ds{args.ds}_\\\ntrain-{args.train_what}{2}'.format(\n 'r%s' % args.net[6::], \\\n args.old_lr if args.old_lr is not None else args.lr, \\\n '_pt=%s' % args.pretrain.replace('/', '-') if args.pretrain else '', \\\n args=args)\n img_path = os.path.join(exp_path, 'img')\n model_path = os.path.join(exp_path, 'model')\n if not os.path.exists(img_path): os.makedirs(img_path)\n if not os.path.exists(model_path): os.makedirs(model_path)\n return img_path, model_path\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.utils.data.DataLoader", "matplotlib.pyplot.switch_backend", "torch.manual_seed", "numpy.random.seed", "torch.no_grad", "torch.optim.Adam", "torch.optim.lr_scheduler.StepLR", "torch.utils.data.RandomSampler", "torch.device" ] ]
AgnesYichenFeng/Attention
[ "b70b8882871cb74f1533265fc13ccfad7bea3550" ]
[ "text_eval.py" ]
[ "import numpy as np\n\ndef ids2str(encoder, ids, num_reserved):\n if num_reserved:\n if np.any(np.where(ids==1)[0]):\n eos = np.where(ids==1)[0]\n ids = ids[:eos[0]] \n reserved_tokens = np.where(ids < num_reserved)[0]\n \n if reserved_totkens.size > 0:\n split_locations = np.unioj1d(reserved_tokens, reserved_tokens + 1)\n ids_list = np.split(ids, split_locations)\n text_list = [\n \"<%d>\" &\n i if len(i) == 1 and i < num_reserved else encoder.decode(i.tolist())\n for i in ids_list\n ]\n return \" \".join(test_list)\n \n return encoder.decode(ids.flatten().tolist())\n \n \n \n " ]
[ [ "numpy.where", "numpy.split", "numpy.unioj1d" ] ]
JoshuaEbenezer/cwgan
[ "5f6a9a0bb8760bf85a9a28e25b29a149f3a4a7ca" ]
[ "util/visualizer.py" ]
[ "import numpy as np\nimport os\nimport sys\nimport ntpath\nimport time\nfrom . import util, html\nfrom subprocess import Popen, PIPE\nfrom scipy.misc import imresize\n\nif sys.version_info[0] == 2:\n VisdomExceptionBase = Exception\nelse:\n VisdomExceptionBase = ConnectionError\n\n\ndef save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):\n \"\"\"Save images to the disk.\n\n Parameters:\n webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details)\n visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs\n image_path (str) -- the string is used to create image paths\n aspect_ratio (float) -- the aspect ratio of saved images\n width (int) -- the images will be resized to width x width\n\n This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.\n \"\"\"\n image_dir = webpage.get_image_dir()\n short_path = ntpath.basename(image_path[0])\n name = os.path.splitext(short_path)[0]\n\n webpage.add_header(name)\n ims, txts, links = [], [], []\n\n for label, im_data in visuals.items():\n im = util.tensor2im(im_data)\n image_name = '%s_%s.png' % (name, label)\n save_path = os.path.join(image_dir, image_name)\n h, w, _ = im.shape\n if aspect_ratio > 1.0:\n im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')\n if aspect_ratio < 1.0:\n im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')\n util.save_image(im, save_path)\n\n ims.append(image_name)\n txts.append(label)\n links.append(image_name)\n webpage.add_images(ims, txts, links, width=width)\n\n\nclass Visualizer():\n \"\"\"This class includes several functions that can display/save images and print/save logging information.\n\n It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the Visualizer class\n\n Parameters:\n opt -- stores all the experiment flags; needs to be a subclass of BaseOptions\n Step 1: Cache the training/test options\n Step 2: connect to a visdom server\n Step 3: create an HTML object for saveing HTML filters\n Step 4: create a logging file to store training losses\n \"\"\"\n self.opt = opt # cache the option\n self.display_id = opt.display_id\n self.use_html = opt.isTrain and not opt.no_html\n self.win_size = opt.display_winsize\n self.name = opt.name\n self.port = opt.display_port\n self.saved = False\n if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>\n import visdom\n self.ncols = opt.display_ncols\n self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env,raise_exceptions=True,proxies={'http': '172.16.2.30:8080', 'https': '172.16.2.30:8080'})\n\n\n if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/\n self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')\n self.img_dir = os.path.join(self.web_dir, 'images')\n print('create web directory %s...' 
% self.web_dir)\n util.mkdirs([self.web_dir, self.img_dir])\n # create a logging file to store training losses\n self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write('================ Training Loss (%s) ================\\n' % now)\n\n def reset(self):\n \"\"\"Reset the self.saved status\"\"\"\n self.saved = False\n\n def create_visdom_connections(self):\n \"\"\"If the program could not connect to Visdom server, this function will start a new server at port < self.port > \"\"\"\n cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port\n print('\\n\\nCould not connect to Visdom server. \\n Trying to start a server....')\n print('Command: %s' % cmd)\n Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\n def display_current_results(self, visuals, epoch, save_result):\n \"\"\"Display current results on visdom; save current results to an HTML file.\n\n Parameters:\n visuals (OrderedDict) - - dictionary of images to display or save\n epoch (int) - - the current epoch\n save_result (bool) - - if save the current results to an HTML file\n \"\"\"\n if self.display_id > 0: # show images in the browser using visdom\n ncols = self.ncols\n if ncols > 0: # show all the images in one visdom panel\n ncols = min(ncols, len(visuals))\n h, w = next(iter(visuals.values())).shape[:2]\n table_css = \"\"\"<style>\n table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}\n table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}\n </style>\"\"\" % (w, h) # create a table css\n # create a table of images.\n title = self.name\n label_html = ''\n label_html_row = ''\n images = []\n idx = 0\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n label_html_row += '<td>%s</td>' % label\n images.append(image_numpy.transpose([2, 0, 1]))\n idx += 1\n if idx % ncols == 0:\n label_html += '<tr>%s</tr>' % label_html_row\n label_html_row = ''\n white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255\n while idx % ncols != 0:\n images.append(white_image)\n label_html_row += '<td></td>'\n idx += 1\n if label_html_row != '':\n label_html += '<tr>%s</tr>' % label_html_row\n try:\n self.vis.images(images, nrow=ncols, win=self.display_id + 1,\n padding=2, opts=dict(title=title + ' images'))\n label_html = '<table>%s</table>' % label_html\n self.vis.text(table_css + label_html, win=self.display_id + 2,\n opts=dict(title=title + ' labels'))\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n else: # show each image in a separate visdom panel;\n idx = 1\n try:\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),\n win=self.display_id + idx)\n idx += 1\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.\n self.saved = True\n # save images to the disk\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))\n util.save_image(image_numpy, img_path)\n\n # update website\n webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)\n for n in range(epoch, 0, -1):\n webpage.add_header('epoch [%d]' % n)\n ims, txts, links = [], [], []\n\n for label, 
image_numpy in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = 'epoch%.3d_%s.png' % (n, label)\n ims.append(img_path)\n txts.append(label)\n links.append(img_path)\n webpage.add_images(ims, txts, links, width=self.win_size)\n webpage.save()\n\n def plot_current_losses(self, epoch, counter_ratio, losses):\n \"\"\"display the current losses on visdom display: dictionary of error labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n \"\"\"\n if not hasattr(self, 'plot_data'):\n self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}\n self.plot_data['X'].append(epoch + counter_ratio)\n self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])\n try:\n self.vis.line(\n X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),\n Y=np.array(self.plot_data['Y']),\n opts={\n 'title': self.name + ' loss over time',\n 'legend': self.plot_data['legend'],\n 'xlabel': 'epoch',\n 'ylabel': 'loss'},\n win=self.display_id)\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n # losses: same format as |losses| of plot_current_losses\n def print_current_losses(self, epoch, iters, losses, t_comp, t_data):\n \"\"\"print current losses on console; also save the losses to the disk\n\n Parameters:\n epoch (int) -- current epoch\n iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n t_comp (float) -- computational time per data point (normalized by batch_size)\n t_data (float) -- data loading time per data point (normalized by batch_size)\n \"\"\"\n message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)\n for k, v in losses.items():\n message += '%s: %.3f ' % (k, v)\n\n print(message) # print the message\n with open(self.log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message) # save the message\n" ]
[ [ "numpy.array" ] ]
pavlin-policar/ALRA
[ "fab8d7661bf2a2179b40e68fb4f022015c700252" ]
[ "ALRA/ALRA.py" ]
[ "import logging\nimport numpy as np\nfrom fbpca import pca\nfrom scipy.stats import norm\n\nfrom .sparseutils import nonzero_mean, nonzero_std, find_zeroed_indices\n\nlog = logging.getLogger(\"ALRA\")\n\n\ndef choose_k(X, k=100, pval_thresh=1e-10, noise_start=80, n_iter=2):\n if k > min(X.shape):\n raise ValueError(\n f\"`k` must be smaller than `min(N, M)`. Maximum value \"\n f\"can be {min(X.shape)} but `{k}` given\"\n )\n\n if noise_start > k - 5:\n raise ValueError(\"At least 5 singular values must be considered noise.\")\n\n U, s, Va = pca(X, k=k, n_iter=n_iter, raw=True)\n\n differences = np.diff(s)\n\n mean = np.mean(differences[noise_start - 1 :])\n std = np.std(differences[noise_start - 1 :], ddof=1)\n\n probabilities = norm.pdf(differences, loc=mean, scale=std)\n\n k = np.max(np.argwhere(probabilities < pval_thresh)) + 1\n\n return k\n\n\ndef ALRA(X, k=None, n_iter=10):\n \"\"\"Adaptively-thresholded Low Rank Approximation.\n\n Parameters\n ----------\n X: array_like\n k: int\n n_iter: int\n\n Returns\n -------\n np.array\n\n \"\"\"\n if k is None:\n k = choose_k(X)\n log.info(f\"No `k` given. Automatically determined `k={k}`.\")\n\n # Compute the SVD and compute the rank-k reconstruction\n U, s, Va = pca(X, k=k, n_iter=n_iter, raw=True)\n X_rank_k = U * s @ Va\n\n X_rank_k = np.ma.masked_array(X_rank_k)\n\n # Find the absolute values of the minimum expression levels for each gene\n minimum_expressions = np.abs(np.min(X_rank_k, axis=0))\n # Zero out all expressions with values below the gene minimum value\n X_rank_k[X_rank_k <= minimum_expressions] = np.ma.masked\n\n # Rescale the expressions so the first two moments match the original matrix\n X_mean, X_std = nonzero_mean(X, axis=0), nonzero_std(X, axis=0, ddof=1)\n X_rk_mean, X_rk_std = X_rank_k.mean(axis=0), X_rank_k.std(axis=0, ddof=1)\n\n scale = X_std / X_rk_std\n translate = -X_rk_mean * scale + X_mean\n\n scale_columns = ~np.isnan(X_std) & ~np.isnan(X_rk_std)\n X_rank_k[:, scale_columns] *= scale[scale_columns]\n X_rank_k[:, scale_columns] += translate[scale_columns]\n\n # Values can become negative during rescaling, so we zero those out\n X_rank_k[X_rank_k < 0] = np.ma.masked\n\n # Restore potentially zeroed out expression values which appeared in the\n # original expression matrix. Where both values are non-zero, prefer the\n # rank-k approximation\n zeroed_out_indices = find_zeroed_indices(X_rank_k, X)\n X_rank_k[zeroed_out_indices] = X[zeroed_out_indices]\n\n log.info(\n f\"{len(zeroed_out_indices[0])} original expression values were \"\n f\"zeroed out during imputation and restored to original values.\"\n )\n\n X_rank_k = X_rank_k.filled(0)\n\n return X_rank_k\n" ]
[ [ "numpy.ma.masked_array", "numpy.argwhere", "numpy.diff", "scipy.stats.norm.pdf", "numpy.min", "numpy.isnan", "numpy.std", "numpy.mean" ] ]
ChriPo92/tensorpack
[ "45d2155850d3870bbf110c94c73508c707e1ae42" ]
[ "examples/FasterRCNN/eval.py" ]
[ "# -*- coding: utf-8 -*-\n# File: eval.py\n\nimport itertools\nimport sys\nimport os\nimport json\nimport numpy as np\nfrom collections import namedtuple\nfrom concurrent.futures import ThreadPoolExecutor\nfrom contextlib import ExitStack\nimport cv2\nimport pycocotools.mask as cocomask\nimport tqdm\nimport tensorflow as tf\n\nfrom tensorpack.callbacks import Callback\nfrom tensorpack.tfutils.common import get_tf_version_tuple\nfrom tensorpack.utils import logger\nfrom tensorpack.utils.utils import get_tqdm\n\nfrom common import CustomResize, clip_boxes\nfrom data import get_eval_dataflow, get_eval_dataflow_YCBV\nfrom dataset import DetectionDataset\nfrom config import config as cfg\n\ntry:\n import horovod.tensorflow as hvd\nexcept ImportError:\n pass\n\n\nDetectionResult = namedtuple(\n 'DetectionResult',\n ['box', 'score', 'class_id', 'mask'])\n\"\"\"\nbox: 4 float\nscore: float\nclass_id: int, 1~NUM_CLASS\nmask: None, or a binary image of the original image shape\n\"\"\"\n\n\ndef _paste_mask(box, mask, shape):\n \"\"\"\n Args:\n box: 4 float\n mask: MxM floats\n shape: h,w\n Returns:\n A uint8 binary image of hxw.\n \"\"\"\n # int() is floor\n # box fpcoor=0.0 -> intcoor=0.0\n x0, y0 = list(map(int, box[:2] + 0.5))\n # box fpcoor=h -> intcoor=h-1, inclusive\n x1, y1 = list(map(int, box[2:] - 0.5)) # inclusive\n x1 = max(x0, x1) # require at least 1x1\n y1 = max(y0, y1)\n\n w = x1 + 1 - x0\n h = y1 + 1 - y0\n\n # rounding errors could happen here, because masks were not originally computed for this shape.\n # but it's hard to do better, because the network does not know the \"original\" scale\n mask = (cv2.resize(mask, (w, h)) > 0.5).astype('uint8')\n ret = np.zeros(shape, dtype='uint8')\n ret[y0:y1 + 1, x0:x1 + 1] = mask\n return ret\n\n\ndef predict_image(img, model_func):\n \"\"\"\n Run detection on one image, using the TF callable.\n This function should handle the preprocessing internally.\n\n Args:\n img: an image\n model_func: a callable from the TF model.\n It takes image and returns (boxes, probs, labels, [masks])\n\n Returns:\n [DetectionResult]\n \"\"\"\n\n orig_shape = img.shape[:2]\n resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)\n resized_img = resizer.augment(img)\n scale = np.sqrt(resized_img.shape[0] * 1.0 / img.shape[0] * resized_img.shape[1] / img.shape[1])\n boxes, probs, labels, *masks = model_func(resized_img)\n boxes = boxes / scale\n # boxes are already clipped inside the graph, but after the floating point scaling, this may not be true any more.\n boxes = clip_boxes(boxes, orig_shape)\n\n if masks:\n # has mask\n full_masks = [_paste_mask(box, mask, orig_shape)\n for box, mask in zip(boxes, masks[0])]\n masks = full_masks\n else:\n # fill with none\n masks = [None] * len(boxes)\n\n results = [DetectionResult(*args) for args in zip(boxes, probs, labels.tolist(), masks)]\n return results\n\n\ndef predict_dataflow(df, model_func, tqdm_bar=None):\n \"\"\"\n Args:\n df: a DataFlow which produces (image, image_id)\n model_func: a callable from the TF model.\n It takes image and returns (boxes, probs, labels, [masks])\n tqdm_bar: a tqdm object to be shared among multiple evaluation instances. 
If None,\n will create a new one.\n\n Returns:\n list of dict, in the format used by\n `DetectionDataset.eval_or_save_inference_results`\n \"\"\"\n df.reset_state()\n all_results = []\n with ExitStack() as stack:\n # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323\n if tqdm_bar is None:\n tqdm_bar = stack.enter_context(get_tqdm(total=df.size()))\n for img, img_id in df:\n results = predict_image(img, model_func)\n for r in results:\n # int()/float() to make it json-serializable\n res = {\n 'image_id': img_id,\n 'category_id': int(r.class_id),\n 'bbox': [round(float(x), 4) for x in r.box],\n 'score': round(float(r.score), 4),\n }\n\n # also append segmentation to results\n if r.mask is not None:\n rle = cocomask.encode(\n np.array(r.mask[:, :, None], order='F'))[0]\n rle['counts'] = rle['counts'].decode('ascii')\n res['segmentation'] = rle\n all_results.append(res)\n tqdm_bar.update(1)\n return all_results\n\n\ndef multithread_predict_dataflow(dataflows, model_funcs):\n \"\"\"\n Running multiple `predict_dataflow` in multiple threads, and aggregate the results.\n\n Args:\n dataflows: a list of DataFlow to be used in :func:`predict_dataflow`\n model_funcs: a list of callable to be used in :func:`predict_dataflow`\n\n Returns:\n list of dict, in the format used by\n `DetectionDataset.eval_or_save_inference_results`\n \"\"\"\n num_worker = len(model_funcs)\n assert len(dataflows) == num_worker\n if num_worker == 1:\n return predict_dataflow(dataflows[0], model_funcs[0])\n kwargs = {'thread_name_prefix': 'EvalWorker'} if sys.version_info.minor >= 6 else {}\n with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, \\\n tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar:\n futures = []\n for dataflow, pred in zip(dataflows, model_funcs):\n futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar))\n all_results = list(itertools.chain(*[fut.result() for fut in futures]))\n return all_results\n\n# TODO: Make this runnable with YCBV\nclass EvalCallback(Callback):\n \"\"\"\n A callback that runs evaluation once a while.\n It supports multi-gpu evaluation.\n \"\"\"\n\n _chief_only = False\n\n def __init__(self, eval_dataset, in_names, out_names, output_dir):\n self._eval_dataset = eval_dataset\n self._in_names, self._out_names = in_names, out_names\n self._output_dir = output_dir\n\n def _setup_graph(self):\n num_gpu = cfg.TRAIN.NUM_GPUS\n if cfg.TRAINER == 'replicated':\n # TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750\n buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]\n\n # Use two predictor threads per GPU to get better throughput\n self.num_predictor = num_gpu if buggy_tf else num_gpu * 2\n self.predictors = [self._build_predictor(k % num_gpu) for k in range(self.num_predictor)]\n self.dataflows = [get_eval_dataflow_YCBV(self._eval_dataset,\n shard=k, num_shards=self.num_predictor)\n for k in range(self.num_predictor)]\n else:\n # Only eval on the first machine.\n # Alternatively, can eval on all ranks and use allgather, but allgather sometimes hangs\n self._horovod_run_eval = hvd.rank() == hvd.local_rank()\n if self._horovod_run_eval:\n self.predictor = self._build_predictor(0)\n self.dataflow = get_eval_dataflow_YCBV(self._eval_dataset,\n shard=hvd.local_rank(), num_shards=hvd.local_size())\n\n self.barrier = hvd.allreduce(tf.random_normal(shape=[1]))\n\n def _build_predictor(self, idx):\n return self.trainer.get_predictor(self._in_names, self._out_names, device=idx)\n\n def 
_before_train(self):\n eval_period = cfg.TRAIN.EVAL_PERIOD\n self.epochs_to_eval = set()\n for k in itertools.count(1):\n if k * eval_period > self.trainer.max_epoch:\n break\n self.epochs_to_eval.add(k * eval_period)\n self.epochs_to_eval.add(self.trainer.max_epoch)\n logger.info(\"[EvalCallback] Will evaluate every {} epochs\".format(eval_period))\n\n def _eval(self):\n logdir = self._output_dir\n if cfg.TRAINER == 'replicated':\n all_results = multithread_predict_dataflow(self.dataflows, self.predictors)\n else:\n filenames = [os.path.join(\n logdir, 'outputs{}-part{}.json'.format(self.global_step, rank)\n ) for rank in range(hvd.local_size())]\n\n if self._horovod_run_eval:\n local_results = predict_dataflow(self.dataflow, self.predictor)\n fname = filenames[hvd.local_rank()]\n with open(fname, 'w') as f:\n json.dump(local_results, f)\n self.barrier.eval()\n if hvd.rank() > 0:\n return\n all_results = []\n for fname in filenames:\n with open(fname, 'r') as f:\n obj = json.load(f)\n all_results.extend(obj)\n os.unlink(fname)\n\n output_file = os.path.join(\n logdir, '{}-outputs{}.json'.format(self._eval_dataset, self.global_step))\n\n scores = DetectionDataset().eval_or_save_inference_results(\n all_results, self._eval_dataset, output_file)\n for k, v in scores.items():\n self.trainer.monitors.put_scalar(k, v)\n\n def _trigger_epoch(self):\n if self.epoch_num in self.epochs_to_eval:\n logger.info(\"Running evaluation ...\")\n self._eval()\n" ]
[ [ "numpy.sqrt", "tensorflow.random_normal", "numpy.array", "numpy.zeros" ] ]
netoferraz/oeuanalitico-posts
[ "fc5799a2c77de1133f4f3f6b9f048b0fb3de6ba7" ]
[ "oeuanalitico-posts/nfe/preprocessing/functions.py" ]
[ "import pandas as pd\nfrom pathlib import Path\nfrom collections import Counter\nimport datetime\nfrom collections import defaultdict\nfrom faker import Factory\nimport faker\nfrom preprocessing.nfeProvider import Invoice\nimport csv\n\n\ndef convert_to_numeric(num):\n \"\"\"\n Converte strings que representam valores monetários em Reais (R$) para\n o padrão americano.\n \"\"\"\n num = num.strip()\n if num != \"\":\n num = num.replace(',', '.')\n count_dot = num.count('.')\n if count_dot >= 2:\n while count_dot >= 2:\n # armazena o index da primeira ocorrência da string ponto\n slice_index = num.index('.')\n # faz um slice baseado na localizacao desse index\n new_str = num[0:slice_index] + num[slice_index + 1:]\n num = new_str\n count_dot = num.count('.')\n return float(num)\n else:\n return float(num)\n else:\n return 0.0\n\n\ndef identify_encoding(filename: str) -> str:\n \"\"\"\n Identifica o encoding do arquivo filename retornando uma string com o nome do encoding.\n\n Atributos:\n filename: é o path (full ou relativo) do arquivo a ser analisado.\n \"\"\"\n try:\n encoding = 'utf8'\n with open(filename, \"r\", encoding=encoding) as file:\n _ = file.readlines()\n except UnicodeDecodeError:\n encoding = 'latin1'\n with open(filename, \"r\", encoding=encoding) as file:\n _ = file.readlines()\n finally:\n return encoding\n\n\ndef report_pkl_into_csv(filename, foldername, logger):\n \"\"\"\n Produz um relatório do status do arquivo tabular gerado a partir dos arquivos .pkl\n\n Atributos:\n filename: é o nome do arquivo .csv que será analisado.\n foldername: é o nome da sub pasta dos arquivos pkl dentro de ./data-storage/validacao/\n \"\"\"\n # VERIFICA SE ALGUM ARQUIVO .pkl NÃO FORAM PROCESSADOS.\n df = pd.read_csv(f\"./tabular-data/{filename}.csv\", sep=';', encoding='latin1')\n lista_chaves_processadas = set(df['nf_chave'].unique())\n pkl_folder = Path(f\"./data-storage/validacao/{foldername}\")\n pkl_folder = set(pkl_folder.rglob(\"*.pkl\"))\n pkl_folder = set([f.name[:-4][-44:] for f in pkl_folder])\n num_arquivos_diff = lista_chaves_processadas.difference(pkl_folder)\n if len(num_arquivos_diff) == 0:\n logger.debug(f\"Todos os arquivos .pkl foram processados. 
Ao todo foram processados {df['nf_chave'].nunique()} notas fiscais.\\n\")\n else:\n logger.critical(f\"Não foram processados {len(num_arquivos_diff)} arquivos.\\n\")\n for f in num_arquivos_diff:\n logger.critical(f\"Arquivo {f} não foi processado.\\n\")\n # VALIDAÇÃO SE HÁ ARQUIVOS DUPLICADOS\n files_check = Path(f\"./data-storage/validacao/{foldername}\")\n files_check = list(files_check.rglob(\"*.pkl\"))\n files_check = [f.name[:-4][-44:] for f in files_check]\n a = Counter()\n for f in files_check:\n a[f] += 1\n for chave, count in a.items():\n if count > 1:\n logger.critical(f\"CHAVE REPETIDA: {chave} # {count}\")\n # VERIFICA SE HÁ ALGUMA INCONSISTÊNCIA NOS VALORES DOS PRODUTOS E DA NOTA FISCAL\n df['prod_valor_liquido'] = df.apply(lambda x: x['prod_valor'] - x['prod_valor_desconto'], axis='columns')\n check_valor_nota_valores = df.groupby(\"nf_chave\")['prod_valor_liquido'].sum().sort_values(ascending=False)\n inconsistencia_count = 0\n container = {}\n for chave, valor in zip(check_valor_nota_valores.index, check_valor_nota_valores.values):\n validacao = df.loc[df['nf_chave'] == chave, 'nf_valor'].values[0]\n valor = round(valor, 2)\n chave = chave.replace(\"-\", \"\").replace(\".\", \"\").replace(\"/\", \"\")\n if validacao != valor:\n inconsistencia_count += 1\n diff_produtos = round(valor - validacao, 2)\n container[chave] = diff_produtos\n logger.critical(f\"{chave} => Valor Nota: R${validacao} @ Valor Produtos: R${valor} @ Diferença: R${diff_produtos}\\n\")\n\n\ndef normalize_ncm(ncm: str) -> str:\n \"\"\"\n Normaliza a string que representa o código NCM\n\n Atributos:\n ncm : string que representa o código NCM\n \"\"\"\n if len(ncm) != 8:\n ncm = \"0\" + ncm\n return ncm\n\n\ndef get_ncm_values():\n \"\"\"\n Função que retorna um dicionário contendo uma lista de macro categorias e os codigos\n NCM associados a cada um deles.\n \"\"\"\n sheet_names = [\n 'CARNES E OVOS',\n 'HORTIFRUTI',\n 'LIMPEZA',\n 'HIGIENE',\n 'LATICINIOS E DERIVADOS',\n 'BEBIDAS',\n 'PET',\n 'PADARIA',\n 'CEREAIS_GRAOS_SEMENTES',\n 'DOCES',\n 'VESTUARIO',\n 'FARINACEOS',\n 'MASSAS',\n 'TEMPEROS_MOLHOS',\n 'OUTROS'\n ]\n categorias_ncm = {}\n for sheet in sheet_names:\n df = pd.read_excel(\"./data/others/compilado_ncm_mercado_mod.xlsx\", sheet_name=sheet, dtype={'cod_ncm': str})\n df['cod_ncm'] = df['cod_ncm'].astype(str)\n df['cod_ncm'] = df['cod_ncm'].apply(normalize_ncm)\n categorias_ncm[sheet] = df['cod_ncm'].unique().tolist()\n return categorias_ncm\n\n\ndef get_weekday(value: int):\n \"\"\"\n Recebe um INT representando um datetime e retorna uma string com o dia da semana.\n\n Atributos:\n value: Inteiro representando um timestamp\n \"\"\"\n convert_int_to_day = {\n 0: 'Segunda-Feira',\n 1: 'Terça-Feira',\n 2: 'Quarta-Feira',\n 3: 'Quinta-Feira',\n 4: 'Sexta-Feira',\n 5: 'Sábado',\n 6: 'Domingo'\n }\n weekday = datetime.datetime.utcfromtimestamp(value / 1e9).weekday()\n return convert_int_to_day[weekday]\n\n\ndef logging_report(report, list_required_fields, logger):\n f = Path(report['tables'][0]['source'])\n map_columns_to_number = report['tables'][0]['headers']\n map_columns_to_number = {i: col for col, i in zip(map_columns_to_number, range(1, len(map_columns_to_number)))}\n fields_required_error = {f: False for f in list_required_fields}\n num_errors = report['error-count']\n if report['valid']:\n logger.debug(f\"Arquivo: {f.name} válido pelo schema.\\n\")\n return True, num_errors\n else:\n lista_errors = report['tables'][0]['errors']\n if 0 < num_errors < 1000:\n logger.debug(f\"Arquivo {f.name} 
não validado com {num_errors} erros.\")\n for erro in lista_errors:\n for feature, valor in erro.items():\n if feature == 'code':\n if valor == 'required-constraint':\n # identify which column is null\n col = map_columns_to_number[erro['column-number']]\n # change validation status of this feature\n if not fields_required_error[col]:\n fields_required_error[col] = True\n line = erro['row-number']\n logger.critical(f\"{f.name} @ Linha {line} possui {col} sem valor atribuído.\")\n elif valor == 'enumerable-constraint':\n col = map_columns_to_number[erro['column-number']]\n line = erro['row-number']\n logger.critical(f\"{f.name} @ Linha {line} e Coluna {col} erro: {erro['message']} \")\n else:\n try:\n col = map_columns_to_number[erro['column-number']]\n except: # o erro associado não é referente a uma coluna\n try:\n line = erro['row-number']\n logger.critical(f\"{f.name} @ Linha {line} : {erro['message']}\")\n except KeyError:\n logger.critical(f\"{f.name} @ {erro['message']}\")\n return False, num_errors\n\n\ndef anonymize_rows(rows):\n \"\"\"\n Rows is an iterable of dictionaries that contain name and\n email fields that need to be anonymized.\n\n \"\"\"\n # Load the faker and its providers\n faker = Factory.create(\"pt_BR\")\n faker.add_provider(Invoice)\n\n # Create mappings of names & emails to faked names & emails.\n # https://stackoverflow.com/questions/18066837/passing-a-parameter-to-objects-created-by-defaultdict\n nfecod = defaultdict(lambda: faker.nfce(**{'uf_code': 'DF'}))\n cpf = defaultdict(faker.cpf)\n nome = defaultdict(faker.name)\n endereco = defaultdict(faker.address)\n bairro = defaultdict(faker.bairro)\n municipio = defaultdict(faker.city)\n telefone = defaultdict(faker.phone_number)\n uf = defaultdict(faker.state_abbr)\n pais = defaultdict(faker.country)\n email = defaultdict(faker.email)\n\n # Iterate over the rows and yield anonymized rows.\n for row in rows:\n # Replace the name and email fields with faked fields.\n row['nf_chave'] = nfecod[row['nf_chave']]\n row['dest_cpf'] = cpf[row['dest_cpf']]\n row['dest_rz'] = nome[row['dest_rz']]\n row['dest_endereco'] = endereco[row['dest_endereco']]\n row['dest_bairro'] = bairro[row['dest_bairro']]\n row['dest_municipio'] = municipio[row['dest_municipio']]\n row['dest_telefone'] = telefone[row['dest_telefone']]\n row['dest_uf'] = uf[row['dest_uf']]\n row['dest_pais'] = pais[row['dest_pais']]\n row['dest_email'] = email[row['dest_email']]\n\n # Yield the row back to the caller\n yield row\n\n\ndef anonymize(source, target):\n \"\"\"\n The source argument is a path to a CSV file containing data to anonymize,\n while target is a path to write the anonymized CSV data to.\n \"\"\"\n # https://pymotw.com/2/csv/\n PARTIAL_SOURCE_DATA = Path(\"./tabular-data/\") / f\"{source}\"\n PARTIAL_DEST_DATA = Path(\"./tabular-data/\") / f\"{target}\"\n csv.register_dialect('semicolon', delimiter=';')\n with open(PARTIAL_SOURCE_DATA, 'r') as f:\n with open(PARTIAL_DEST_DATA, 'w') as o:\n # Use the DictReader to easily extract fields\n reader = csv.DictReader(f, dialect='semicolon')\n writer = csv.DictWriter(o, reader.fieldnames, dialect='semicolon')\n # write col names\n writer.writeheader()\n # Read and anonymize data, writing to target file.\n for row in anonymize_rows(reader):\n writer.writerow(row)\n\n\ndef subseting_data(dataframe: pd.core.frame.DataFrame, rootname: str):\n \"\"\"\n Salva um arquivo .csv com um subset das features originais\n \"\"\"\n dataframe = dataframe[['nf_dia_semana', 'nf_chave', 'nf_valor', 'em_rz',\n 
'em_nomeFantasia', 'em_cnpj', 'em_endereco', 'em_bairro', 'em_cep', 'em_municipio',\n 'em_telefone', 'em_uf', 'em_pais', 'em_inscricao_estadual', 'em_inscricao_municipal',\n 'em_cnae_fiscal', 'dest_rz', 'dest_cpf', 'dest_endereco', 'dest_bairro', 'dest_municipio',\n 'dest_telefone', 'dest_uf', 'dest_pais', 'dest_inscricao_estadual', 'dest_email', 'prod_nome',\n 'prod_quantidade', 'prod_unidade', 'prod_valor', 'prod_codigo_produto', 'prod_codigo_ncm',\n 'prod_categoria_ncm', 'prod_cfop', 'prod_valor_desconto', 'prod_valor_tributos',\n 'prod_codigo_ean_cmc', 'prod_valor_unitario_cmc', 'prod_valor_unitario_trib', 'prod_unidade_trib']]\n dataframe.to_csv(f\"./tabular-data/PRE_ANONY_{rootname}.csv\", sep=';', encoding='latin1', index=True)\n" ]
[ [ "pandas.read_csv", "pandas.read_excel" ] ]
turnergarrow/galpy
[ "7132eddbf2dab491fe137790e31eacdc604b0534" ]
[ "tests/test_actionAngleTorus.py" ]
[ "from __future__ import print_function, division\nimport os\nimport sys\nimport pytest\nimport warnings\nimport numpy\nfrom galpy.util import galpyWarning\nfrom test_actionAngle import reset_warning_registry\n_TRAVIS= bool(os.getenv('TRAVIS'))\nPY2= sys.version < '3'\n# Print all galpyWarnings always for tests of warnings\nwarnings.simplefilter(\"always\",galpyWarning)\n\n#Basic sanity checking: circular orbit should have constant R, zero vR, vT=vc\ndef test_actionAngleTorus_basic():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import MWPotential, rl, vcirc, \\\n FlattenedPowerPotential, PlummerPotential\n tol= -4.\n jr= 10.**-10.\n jz= 10.**-10.\n aAT= actionAngleTorus(pot=MWPotential)\n # at R=1, Lz=1\n jphi= 1.\n angler= numpy.linspace(0.,2.*numpy.pi,101)\n anglephi= numpy.linspace(0.,2.*numpy.pi,101)+1.\n anglez= numpy.linspace(0.,2.*numpy.pi,101)+2.\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n assert numpy.all(numpy.fabs(RvR[0]-rl(MWPotential,jphi)) < 10.**tol), \\\n 'circular orbit does not have constant radius for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \\\n 'circular orbit does not have zero radial velocity for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[2]-vcirc(MWPotential,rl(MWPotential,jphi))) < 10.**tol), \\\n 'circular orbit does not have constant vT=vc for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \\\n 'circular orbit does not have zero vertical height for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \\\n 'circular orbit does not have zero vertical velocity for actionAngleTorus'\n # at Lz=1.5, using Plummer\n tol= -3.25\n pp= PlummerPotential(normalize=1.)\n aAT= actionAngleTorus(pot=pp)\n jphi= 1.5\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n assert numpy.all(numpy.fabs(RvR[0]-rl(pp,jphi)) < 10.**tol), \\\n 'circular orbit does not have constant radius for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \\\n 'circular orbit does not have zero radial velocity for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[2]-vcirc(pp,rl(pp,jphi))) < 10.**tol), \\\n 'circular orbit does not have constant vT=vc for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \\\n 'circular orbit does not have zero vertical height for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \\\n 'circular orbit does not have zero vertical velocity for actionAngleTorus'\n # at Lz=0.5, using FlattenedPowerPotential\n tol= -4.\n fp= FlattenedPowerPotential(normalize=1.)\n aAT= actionAngleTorus(pot=fp)\n jphi= 0.5\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n assert numpy.all(numpy.fabs(RvR[0]-rl(fp,jphi)) < 10.**tol), \\\n 'circular orbit does not have constant radius for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \\\n 'circular orbit does not have zero radial velocity for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[2]-vcirc(fp,rl(fp,jphi))) < 10.**tol), \\\n 'circular orbit does not have constant vT=vc for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \\\n 'circular orbit does not have zero vertical height for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \\\n 'circular orbit does not have zero vertical velocity for actionAngleTorus'\n return None\n\n#Basic sanity checking: close-to-circular orbit should have freq. 
= epicycle freq.\ndef test_actionAngleTorus_basic_freqs():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import epifreq, omegac, verticalfreq, rl, \\\n JaffePotential, PowerSphericalPotential, HernquistPotential\n tol= -3.\n jr= 10.**-6.\n jz= 10.**-6.\n jp= JaffePotential(normalize=1.)\n aAT= actionAngleTorus(pot=jp)\n # at Lz=1\n jphi= 1.\n om= aAT.Freqs(jr,jphi,jz)\n assert numpy.fabs((om[0]-epifreq(jp,rl(jp,jphi)))/om[0]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'\n assert numpy.fabs((om[1]-omegac(jp,rl(jp,jphi)))/om[1]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'\n assert numpy.fabs((om[2]-verticalfreq(jp,rl(jp,jphi)))/om[2]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'\n # at Lz=1.5, w/ different potential\n pp= PowerSphericalPotential(normalize=1.)\n aAT= actionAngleTorus(pot=pp)\n jphi= 1.5\n om= aAT.Freqs(jr,jphi,jz)\n assert numpy.fabs((om[0]-epifreq(pp,rl(pp,jphi)))/om[0]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'\n assert numpy.fabs((om[1]-omegac(pp,rl(pp,jphi)))/om[1]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'\n assert numpy.fabs((om[2]-verticalfreq(pp,rl(pp,jphi)))/om[2]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'\n # at Lz=0.5, w/ different potential\n tol= -2.5 # appears more difficult\n hp= HernquistPotential(normalize=1.)\n aAT= actionAngleTorus(pot=hp)\n jphi= 0.5\n om= aAT.Freqs(jr,jphi,jz)\n assert numpy.fabs((om[0]-epifreq(hp,rl(hp,jphi)))/om[0]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'\n assert numpy.fabs((om[1]-omegac(hp,rl(hp,jphi)))/om[1]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'\n assert numpy.fabs((om[2]-verticalfreq(hp,rl(hp,jphi)))/om[2]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'\n return None\n\n#Test that orbit from actionAngleTorus is the same as an integrated orbit\ndef test_actionAngleTorus_orbit():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import MWPotential2014\n from galpy.orbit import Orbit\n # Set up instance\n aAT= actionAngleTorus(pot=MWPotential2014,tol=10.**-5.)\n jr,jphi,jz= 0.05,1.1,0.025\n # First calculate frequencies and the initial RvR\n RvRom= aAT.xvFreqs(jr,jphi,jz,\n numpy.array([0.]),\n numpy.array([1.]),\n numpy.array([2.]))\n om= RvRom[1:]\n # Angles along an orbit\n ts= numpy.linspace(0.,100.,1001)\n angler= ts*om[0]\n anglephi= 1.+ts*om[1]\n anglez= 2.+ts*om[2]\n # Calculate the orbit using actionAngleTorus\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n # Calculate the orbit using orbit integration\n orb= Orbit([RvRom[0][0,0],RvRom[0][0,1],RvRom[0][0,2],\n RvRom[0][0,3],RvRom[0][0,4],RvRom[0][0,5]])\n orb.integrate(ts,MWPotential2014)\n # Compare\n tol= -3.\n assert numpy.all(numpy.fabs(orb.R(ts)-RvR[0]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in R'\n assert numpy.all(numpy.fabs(orb.vR(ts)-RvR[1]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in vR'\n assert numpy.all(numpy.fabs(orb.vT(ts)-RvR[2]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in vT'\n assert numpy.all(numpy.fabs(orb.z(ts)-RvR[3]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in z'\n assert 
numpy.all(numpy.fabs(orb.vz(ts)-RvR[4]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in vz'\n assert numpy.all(numpy.fabs((orb.phi(ts)-RvR[5]+numpy.pi) % (2.*numpy.pi) -numpy.pi) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in phi'\n return None\n\n# Test that actionAngleTorus w/ interp pot gives same freqs as regular pot\n# Doesn't work well: TM aborts because our interpolated forces aren't\n# consistent enough with the potential for TM's taste, but we test that it at\n# at least works somewhat\ndef test_actionAngleTorus_interppot_freqs():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import LogarithmicHaloPotential, interpRZPotential\n lp= LogarithmicHaloPotential(normalize=1.)\n ip= interpRZPotential(RZPot=lp,\n interpPot=True,\n interpDens=True,interpRforce=True,interpzforce=True,\n enable_c=True)\n aAT= actionAngleTorus(pot=lp)\n aATi= actionAngleTorus(pot=ip)\n jr,jphi,jz= 0.05,1.1,0.02\n om= aAT.Freqs(jr,jphi,jz)\n omi= aATi.Freqs(jr,jphi,jz)\n assert numpy.fabs((om[0]-omi[0])/om[0]) < 0.2, 'Radial frequency computed using the torus machine does not agree between potential and interpolated potential'\n assert numpy.fabs((om[1]-omi[1])/om[1]) < 0.2, 'Azimuthal frequency computed using the torus machine does not agree between potential and interpolated potential'\n assert numpy.fabs((om[2]-omi[2])/om[2]) < 0.8, 'Vertical frequency computed using the torus machine does not agree between potential and interpolated potential'\n return None\n\n#Test the actionAngleTorus against an isochrone potential: actions\ndef test_actionAngleTorus_Isochrone_actions():\n from galpy.potential import IsochronePotential\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleIsochrone\n ip= IsochronePotential(normalize=1.,b=1.2)\n aAI= actionAngleIsochrone(ip=ip)\n tol= -6.\n aAT= actionAngleTorus(pot=ip,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.])\n anglephi= numpy.array([numpy.pi])\n anglez= numpy.array([numpy.pi/2.])\n # Calculate position from aAT\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n # Calculate actions from aAI\n ji= aAI(*RvR)\n djr= numpy.fabs((ji[0]-jr)/jr)\n dlz= numpy.fabs((ji[1]-jphi)/jphi)\n djz= numpy.fabs((ji[2]-jz)/jz)\n assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djr*100.)\n assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (dlz*100.) 
\n assert djz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djz*100.)\n return None\n\n#Test the actionAngleTorus against an isochrone potential: frequencies and angles\ndef test_actionAngleTorus_Isochrone_freqsAngles():\n from galpy.potential import IsochronePotential\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleIsochrone\n ip= IsochronePotential(normalize=1.,b=1.2)\n aAI= actionAngleIsochrone(ip=ip)\n tol= -6.\n aAT= actionAngleTorus(pot=ip,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)\n angler= angler % (2.*numpy.pi)\n anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)\n anglephi= anglephi % (2.*numpy.pi)\n anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)\n anglez= anglez % (2.*numpy.pi)\n # Calculate position from aAT\n RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)\n # Calculate actions, frequencies, and angles from aAI\n ws= aAI.actionsFreqsAngles(*RvRom[0].T)\n dOr= numpy.fabs((ws[3]-RvRom[1]))\n dOp= numpy.fabs((ws[4]-RvRom[2]))\n dOz= numpy.fabs((ws[5]-RvRom[3]))\n dar= numpy.fabs((ws[6]-angler))\n dap= numpy.fabs((ws[7]-anglephi))\n daz= numpy.fabs((ws[8]-anglez))\n dar[dar > numpy.pi]-= 2.*numpy.pi\n dar[dar < -numpy.pi]+= 2.*numpy.pi\n dap[dap > numpy.pi]-= 2.*numpy.pi\n dap[dap < -numpy.pi]+= 2.*numpy.pi\n daz[daz > numpy.pi]-= 2.*numpy.pi\n daz[daz < -numpy.pi]+= 2.*numpy.pi\n assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)\n assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.) \n assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)\n assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for ar at %f' % (numpy.nanmax(dar))\n assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for aphi at %f' % (numpy.nanmax(dap))\n assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for az at %f' % (numpy.nanmax(daz))\n return None\n\n#Test the actionAngleTorus against a Staeckel potential: actions\ndef test_actionAngleTorus_Staeckel_actions():\n from galpy.potential import KuzminKutuzovStaeckelPotential\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleStaeckel\n delta= 1.2\n kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)\n aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)\n tol= -3.\n aAT= actionAngleTorus(pot=kp,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.])\n anglephi= numpy.array([numpy.pi])\n anglez= numpy.array([numpy.pi/2.])\n # Calculate position from aAT\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n # Calculate actions from aAI\n ji= aAS(*RvR)\n djr= numpy.fabs((ji[0]-jr)/jr)\n dlz= numpy.fabs((ji[1]-jphi)/jphi)\n djz= numpy.fabs((ji[2]-jz)/jz)\n assert djr < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (djr*100.)\n assert dlz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (dlz*100.) 
\n assert djz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (djz*100.)\n return None\n\n#Test the actionAngleTorus against an isochrone potential: frequencies and angles\ndef test_actionAngleTorus_Staeckel_freqsAngles():\n from galpy.potential import KuzminKutuzovStaeckelPotential\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleStaeckel\n delta= 1.2\n kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)\n aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)\n tol= -3.\n aAT= actionAngleTorus(pot=kp,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)\n angler= angler % (2.*numpy.pi)\n anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)\n anglephi= anglephi % (2.*numpy.pi)\n anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)\n anglez= anglez % (2.*numpy.pi)\n # Calculate position from aAT\n RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)\n # Calculate actions, frequencies, and angles from aAI\n ws= aAS.actionsFreqsAngles(*RvRom[0].T)\n dOr= numpy.fabs((ws[3]-RvRom[1]))\n dOp= numpy.fabs((ws[4]-RvRom[2]))\n dOz= numpy.fabs((ws[5]-RvRom[3]))\n dar= numpy.fabs((ws[6]-angler))\n dap= numpy.fabs((ws[7]-anglephi))\n daz= numpy.fabs((ws[8]-anglez))\n dar[dar > numpy.pi]-= 2.*numpy.pi\n dar[dar < -numpy.pi]+= 2.*numpy.pi\n dap[dap > numpy.pi]-= 2.*numpy.pi\n dap[dap < -numpy.pi]+= 2.*numpy.pi\n daz[daz > numpy.pi]-= 2.*numpy.pi\n daz[daz < -numpy.pi]+= 2.*numpy.pi\n assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)\n assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.) 
\n assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)\n assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for ar at %f' % (numpy.nanmax(dar))\n assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for aphi at %f' % (numpy.nanmax(dap))\n assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for az at %f' % (numpy.nanmax(daz))\n return None\n\n#Test the actionAngleTorus against a general potential w/ actionAngleIsochroneApprox: actions\ndef test_actionAngleTorus_isochroneApprox_actions():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleIsochroneApprox\n aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)\n tol= -2.5\n aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.])\n anglephi= numpy.array([numpy.pi])\n anglez= numpy.array([numpy.pi/2.])\n # Calculate position from aAT\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n # Calculate actions from aAIA\n ji= aAIA(*RvR)\n djr= numpy.fabs((ji[0]-jr)/jr)\n dlz= numpy.fabs((ji[1]-jphi)/jphi)\n djz= numpy.fabs((ji[2]-jz)/jz)\n assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (djr*100.)\n assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (dlz*100.) \n assert djz < 10.**tol, 'actionAngleTorus and actionAngleMWPotential2014 applied to MWPotential2014 potential disagree for Jr at %f%%' % (djz*100.)\n return None\n\n#Test the actionAngleTorus against a general potential w/ actionAngleIsochrone: frequencies and angles\ndef test_actionAngleTorus_isochroneApprox_freqsAngles():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleIsochroneApprox\n aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)\n tol= -3.5\n aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,21)\n angler= angler % (2.*numpy.pi)\n anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,21)\n anglephi= anglephi % (2.*numpy.pi)\n anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,21)\n anglez= anglez % (2.*numpy.pi)\n # Calculate position from aAT\n RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)\n # Calculate actions, frequencies, and angles from aAI\n ws= aAIA.actionsFreqsAngles(*RvRom[0].T)\n dOr= numpy.fabs((ws[3]-RvRom[1]))\n dOp= numpy.fabs((ws[4]-RvRom[2]))\n dOz= numpy.fabs((ws[5]-RvRom[3]))\n dar= numpy.fabs((ws[6]-angler))\n dap= numpy.fabs((ws[7]-anglephi))\n daz= numpy.fabs((ws[8]-anglez))\n dar[dar > numpy.pi]-= 2.*numpy.pi\n dar[dar < -numpy.pi]+= 2.*numpy.pi\n dap[dap > numpy.pi]-= 2.*numpy.pi\n dap[dap < -numpy.pi]+= 2.*numpy.pi\n daz[daz > numpy.pi]-= 2.*numpy.pi\n daz[daz < -numpy.pi]+= 2.*numpy.pi\n assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)\n assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Ophi at %f%%' % 
(numpy.nanmax(dOp)*100.) \n assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)\n assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for ar at %f' % (numpy.nanmax(dar))\n assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for aphi at %f' % (numpy.nanmax(dap))\n assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for az at %f' % (numpy.nanmax(daz))\n return None\n\n# Test that the frequencies returned by hessianFreqs are the same as those returned by Freqs\ndef test_actionAngleTorus_hessian_freqs():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n fO= aAT.Freqs(jr,jphi,jz)[:3]\n hO= aAT.hessianFreqs(jr,jphi,jz)[1:4]\n assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods Freqs and hessianFreqs return different frequencies'\n return None\n\n# Test that the Hessian is approximately symmetric\ndef test_actionAngleTorus_hessian_symm():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n h= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]\n assert numpy.all(numpy.fabs((h-h.T)/h) < 0.03), 'actionAngleTorus Hessian is not symmetric'\n return None\n\n# Test that the Hessian is approximately correct\ndef test_actionAngleTorus_hessian_linear():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n h= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]\n dj= numpy.array([0.02,0.005,-0.01])\n do_fromhessian= numpy.dot(h,dj)\n O= numpy.array(aAT.Freqs(jr,jphi,jz)[:3])\n do= numpy.array(aAT.Freqs(jr+dj[0],jphi+dj[1],jz+dj[2])[:3])-O\n assert numpy.all(numpy.fabs((do_fromhessian-do)/O)< 0.001), 'actionAngleTorus Hessian does not return good approximation to dO/dJ'\n return None\n\n# Test that the frequencies returned by xvJacobianFreqs are the same as those returned by Freqs\ndef test_actionAngleTorus_jacobian_freqs():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n fO= aAT.Freqs(jr,jphi,jz)[:3]\n hO= aAT.xvJacobianFreqs(jr,jphi,jz,\n numpy.array([0.]),numpy.array([1.]),\n numpy.array([2.]))[3:6]\n assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods Freqs and xvJacobianFreqs return different frequencies'\n return None\n\n# Test that the Hessian returned by xvJacobianFreqs are the same as those returned by hessianFreqs\ndef test_actionAngleTorus_jacobian_hessian():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n fO= aAT.hessianFreqs(jr,jphi,jz)[0]\n hO= aAT.xvJacobianFreqs(jr,jphi,jz,\n numpy.array([0.]),numpy.array([1.]),\n numpy.array([2.]))[2]\n assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods hessianFreqs and 
xvJacobianFreqs return different Hessians'\n return None\n\n# Test that the xv returned by xvJacobianFreqs are the same as those returned by __call__\ndef test_actionAngleTorus_jacobian_xv():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.,1.])\n anglephi= numpy.array([1.,2.])\n anglez= numpy.array([2.,3.])\n fO= aAT(jr,jphi,jz,angler,anglephi,anglez)\n hO= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)[0]\n assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods __call__ and xvJacobianFreqs return different xv'\n return None\n\n# Test that the determinant of the Jacobian returned by xvJacobianFreqs is close to 1/R (should be 1 for rectangular coordinates, 1/R for cylindrical\ndef test_actionAngleTorus_jacobian_detone():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014,dJ=0.0001)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.,1.])\n anglephi= numpy.array([1.,2.])\n anglez= numpy.array([2.,3.])\n jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)\n assert numpy.fabs(jf[0][0,0]*numpy.fabs(numpy.linalg.det(jf[1][0]))-1) < 0.01, 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not have the expected determinant'\n assert numpy.fabs(jf[0][1,0]*numpy.fabs(numpy.linalg.det(jf[1][1]))-1) < 0.01, 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not have the expected determinant'\n return None\n\n# Test that Jacobian returned by xvJacobianFreqs is approximately correct\ndef test_actionAngleTorus_jacobian_linear():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.5])\n anglephi= numpy.array([1.])\n anglez= numpy.array([2.])\n jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)\n xv= aAT(jr,jphi,jz,angler,anglephi,anglez)\n dja= 2.*numpy.array([0.001,0.002,0.003,-0.002,0.004,0.002])\n xv_direct= aAT(jr+dja[0],jphi+dja[1],jz+dja[2],\n angler+dja[3],anglephi+dja[4],anglez+dja[5])\n xv_fromjac= xv+numpy.dot(jf[1],dja)\n assert numpy.all(numpy.fabs((xv_fromjac-xv_direct)/xv_direct) < 0.01), 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not appear to be correct'\n return None\n\n#Test error when potential is not implemented in C\ndef test_actionAngleTorus_nocerr():\n from galpy.actionAngle import actionAngleTorus\n from test_potential import BurkertPotentialNoC\n bp= BurkertPotentialNoC()\n try:\n aAT= actionAngleTorus(pot=bp)\n except RuntimeError: pass\n else:\n raise AssertionError(\"actionAngleTorus initialization with potential w/o C should have given a RuntimeError, but didn't\")\n return None\n\n#Test error when potential is not axisymmetric\ndef test_actionAngleTorus_nonaxierr():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import TriaxialNFWPotential\n np= TriaxialNFWPotential(normalize=1.,b=0.9)\n try:\n aAT= actionAngleTorus(pot=np)\n except RuntimeError: pass\n else:\n raise AssertionError(\"actionAngleTorus initialization with non-axisymmetric potential should have given a RuntimeError, but didn't\")\n return None\n\n# Test the Autofit torus warnings\ndef test_actionAngleTorus_AutoFitWarning():\n from galpy.potential import LogarithmicHaloPotential\n from 
galpy.actionAngle import actionAngleTorus\n lp= LogarithmicHaloPotential(normalize=1.,q=0.9)\n aAT= actionAngleTorus(pot=lp,tol=10.**-8.)\n # These should give warnings\n jr, jp, jz= 0.27209033, 1.80253892, 0.6078445\n ar, ap, az= numpy.array([1.95732492]), numpy.array([6.16753224]), \\\n numpy.array([4.08233059])\n #Turn warnings into errors to test for them\n import warnings\n with warnings.catch_warnings(record=True) as w:\n if PY2: reset_warning_registry('galpy')\n warnings.simplefilter(\"always\",galpyWarning)\n aAT(jr,jp,jz,ar,ap,az)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\",galpyWarning)\n aAT.xvFreqs(jr,jp,jz,ar,ap,az)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\",galpyWarning)\n aAT.Freqs(jr,jp,jz)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\",galpyWarning)\n aAT.hessianFreqs(jr,jp,jz)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\",galpyWarning)\n aAT.xvJacobianFreqs(jr,jp,jz,ar,ap,az)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n return None\n\ndef test_MWPotential_warning_torus():\n # Test that using MWPotential throws a warning, see #229\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import MWPotential\n if PY2: reset_warning_registry('galpy')\n warnings.simplefilter(\"error\",galpyWarning)\n try:\n aAA= actionAngleTorus(pot=MWPotential)\n except: pass\n else:\n raise AssertionError(\"actionAngleTorus with 
MWPotential should have thrown a warning, but didn't\")\n #Turn warnings back into warnings\n warnings.simplefilter(\"always\",galpyWarning)\n return None\n\n" ]
[ [ "numpy.fabs", "numpy.nanmax", "numpy.linalg.det", "numpy.all", "numpy.array", "numpy.dot", "numpy.linspace" ] ]
abefukasawa/datascience_course
[ "ee0a505134383034e09020d9b1de18904d9b2665" ]
[ "03-machine-learning-tabular-crossection/05 - Data_Compression/05_Data_Compression/solutions/solution_01.py" ]
[ "# performing preprocessing part \nfrom sklearn.preprocessing import StandardScaler \nsc = StandardScaler() \n \nX_train = sc.fit_transform(X_train) \nX_test = sc.transform(X_test) " ]
[ [ "sklearn.preprocessing.StandardScaler" ] ]
Rupii/Machine-Learning
[ "2b00698815efb04346d5cb980b68af76f27a5ca6" ]
[ "Regression/multiple_linear_regression.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 24 23:18:54 2018\n\n@author: Rupesh\n\"\"\"\n\n\n\n# Multiple Linear Regression\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nplt.style.use(\"ggplot\")\n# loading dependies\n\ndf = pd.read_csv(\"50_Startups.csv\")\ndf.head()\nX = df.iloc[:, :-1].values\ny = df.iloc[:, 4].values\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nX_cat = LabelEncoder()\nX[:, 3] = X_cat.fit_transform(X[:, 3])\n\nonehot = OneHotEncoder(categorical_features = [3])\nX = onehot.fit_transform(X).toarray()\n# avoiding the dummy variable trap\nX = X[:, 1:]\n\n\n# train test split\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)\n\n# model\nfrom sklearn.linear_model import LinearRegression\n\nreg = LinearRegression()\nreg.fit(X_train, y_train)\n\n# predict\n\ny_pred = reg.predict(X_test)\nimport skl" ]
[ [ "matplotlib.pyplot.style.use", "pandas.read_csv", "sklearn.linear_model.LinearRegression", "sklearn.cross_validation.train_test_split", "sklearn.preprocessing.LabelEncoder", "sklearn.preprocessing.OneHotEncoder" ] ]
argonde/codeSkills
[ "939feb29102a2e47a8c2c3047d3f77dd75b1465d" ]
[ "py/webScrapper/getSyntax.py" ]
[ "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# auth: Ruben López Vázquez\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom IPython.core.display import clear_output\nfrom random import randint\nimport pandas as pd\nimport csv\nimport time as t\nimport sqlite3\nimport sys\nimport os\n\n\ndef is_digit(check_input):\n \"\"\"\n function to check whether the input is an integer digit\n returns : bool\n \"\"\"\n if check_input.isdigit():\n return True\n return False\n\n\ndef create_table():\n \"\"\"\n pass an SQL statement to the database, to create a new table with a Q&A (two fields) schema\n in case that table does not exist yet\n \"\"\"\n c.execute('CREATE TABLE IF NOT EXISTS SqlSyntax (Inquiry TEXT, Code TEXT)')\n conn.commit()\n\n\ndef data_entry(data):\n \"\"\"\n pass an SQL statement to the database, to insert new values to the table\n values are always in pairs, to write a line in compliance with the relational data scheme\n \"\"\"\n insert = \"INSERT INTO SqlSyntax (Inquiry, Code) VALUES (?, ?)\"\n c.executemany(insert, [data, ])\n conn.commit()\n\n\ndef web_data_html_scrap(page, lang):\n # Output lists\n title = []\n value = []\n exception_dict = {}\n\n # User requests to get a response from a page\n r = requests.get(page)\n # Use bs to parse the response\n soup = BeautifulSoup(r.text, 'html.parser')\n\n # Create a list of links, i.e. urls to scrap\n url_links = []\n a = 1\n try:\n # Select all links in 'div' objects from the index page\n for link in soup.findAll('a'):\n url_links.append(url + str(link.get('href')))\n # log all url links to file for inspection, then hash the BREAK command\n a += 1\n if a < len(soup.findAll('a')):\n continue\n else:\n with open(wd + f\"all_{lang}_url_links.csv\", \"w\") as f:\n write_file = csv.writer(f)\n write_file.writerow(url_links)\n # Customise the url_links slice and then hash the line below\n break\n print(\"all links found: \" + str(len(url_links)))\n re = 1\n\n def try_despite_exceptions(links, req):\n print(\"Looking into list of urls ...\")\n urls = links[req:len(links)]\n print('There are ' + str(len(urls)) + ' requests left')\n # Preparing the monitoring of the loop\n start_time = t.time()\n # Make a get request\n for u in urls:\n try:\n r2 = requests.get(u)\n # Pause the loop\n t.sleep(randint(1, 3))\n\n # Monitor the requests\n elapsed_time = t.time() - start_time\n print('Request:{}; Frequency: {} requests/s'.format(req, req / elapsed_time))\n clear_output(wait=True)\n\n # parse the data\n html_soup = BeautifulSoup(r2.text, 'html.parser')\n container = html_soup.find('div', class_='answer_info_holder_outer')\n title.append(container.find('div', class_='answer_info_title').text)\n value.append(container.find('textarea', class_='code_mirror_code').text)\n req += 1\n # in case any of the attribute references on previous lines above is not available\n except AttributeError as e:\n print(\"Error while fetching data, resuming ...\")\n # catch the exception and log it on the dictionary opened for exceptions\n exception_dict[u] = e\n req += 1\n # as long as there are links left to visit: da Capo\n if req < len(links):\n try_despite_exceptions(links, req)\n # no links left, then do nothing\n else:\n pass\n\n url_links = url_links[10:-15]\n if re < len(url_links):\n return try_despite_exceptions(url_links, re)\n else:\n pass\n\n except AttributeError as err:\n print(\"Error while fetching urls in blocks:\", err)\n\n finally:\n # log files, always useful for monitoring operations\n with open(wd + f\"log_{lang}_code_column.csv\", \"a\") as 
f:\n write_file = csv.writer(f)\n write_file.writerow(value)\n\n with open(wd + f\"log_{lang}_title_column.csv\", \"a\") as g:\n write_file = csv.writer(g)\n write_file.writerow(title)\n\n with open(wd + f\"log_{lang}_exceptions.csv\", \"a\") as h:\n write_file = csv.writer(h)\n write_file.writerow(exception_dict)\n\n # collect raw data into a data.frame\n py_syntax = {'Inquiry': title,\n 'Code': value\n }\n df = pd.DataFrame(py_syntax, columns=['Inquiry', 'Code'])\n\n # Create table and populate it\n create_table()\n print(\"Database created on SQLite\")\n for idx, rows in df.iterrows():\n # create a table\n row = [rows['Inquiry'], rows['Code']]\n data_entry(row)\n if conn:\n # close cursor\n c.close()\n # close connection\n conn.close()\n print(\"The SQLite connection is closed\")\n\n\n#####################################################################################################################\n# Gather input data from user\nprint(\"\\nThis script will collect data from public posts on well-known Q&A sites like stackoverflow.com\\n\"\n \" These are the available programming languages:\\n\")\n\n# variables on the contents of the menu:\nlangList = ['sql\\t\\t', 'javascript', 'python\\t', 'r\\t\\t', 'matlab\\t', 'shell\\t']\nlangShort = ['SQL', 'JS', 'PY', 'R', 'ML', 'SH']\nlanguage = ['SQL: a domain-specific declarative language for managing data in relational databases',\n 'JS: an imperative, high-level, event-driven programming language used in front end development',\n 'PY: an interpreted, imperative, high-level programming language, often used for scripting',\n 'R: a programming language used for numeric/statistical computing, graphics and data analysis',\n 'ML: a programming language used for numeric computing, plotting of functions and implementing algorithms',\n 'SH: a scripting language or program that automates the execution of tasks on a runtime system']\nn = len(langList)\noptions = {}\n\n# create a dictionary of options to later ensure the user inputs a valid choice\nfor index, item in enumerate(langList):\n print(f'{index} : {item} \\t--> {language[index]}.')\n options[str(item)] = str(language[index])\n\n# create variables to extract input from the while loop\noption = ''\noptDir = ''\noptName = ''\n# while the input is not within the range of acceptable answers keep looping\nwhile option not in options.keys():\n # ask the user for the programming language she/he is interested\n option = input(\"Choose a language to collect syntax and usage examples on a DB.\\n\"\n \"Select an index number from the menu above, else quit with 'q': \")\n # if case the user wants to quit\n if option == 'q':\n print('\\nTransaction cancelled\\n')\n print(f'O data downloaded')\n sys.exit()\n\n # if not 'q', the input from the user must be a digit (integer number)\n elif is_digit(option):\n # catch an error is the integer number provided is out of the range of available options\n try:\n # turn the index into short and long names for the chosen language\n m = int(option)\n langShort = langShort[m]\n optName = str(langList[m]).strip()\n # confirm the user wants to download the data, as well as the language option\n usr_input = input(f'You chose {optName}, do you want to continue? 
[y/n]: ').lower()\n\n # if the choice is confirm, ask for a subdirectory where to save the database (none: present dir)\n if usr_input == 'y':\n optDir = input('Enter a subdirectory where to save the data: ').strip(\"/\")\n break\n\n # If the user does not confirm his/her choice, then quit\n else:\n print('\\nTransaction cancelled\\n')\n print(f'O data downloaded')\n sys.exit()\n\n # index error: the digit provided is out of the range provided by the index\n except IndexError:\n print(f'\\nThere are {n} choices. Please choose only one of them.\\n')\n # If the user does not want to quit 'q', but has not supplied a digit, remind the correct input\n else:\n print(f'\\nYou must please enter one of the integer numbers indexing your choice.\\n')\n\n# Local variables\nurl = \"https://www.codegrepper.com/code-examples/\"+optName\nwd = os.path.dirname(os.path.abspath(__file__))+'/'+optDir+'/'\nos.chdir(wd)\n# prepare to establish a connection to an sqlite database\nconn = sqlite3.connect(f'SQLite_{langShort}_Syntax.db')\n# create a cursor to operate on the database\nc = conn.cursor()\nprint(\"Successfully Connected to SQLite\")\n\n# send a query to confirm, that the connection was successful\nsqlite_select_Query = \"select sqlite_version();\"\nc.execute(sqlite_select_Query)\nrecord = c.fetchall()\n# here is proof that it was successful\nprint(\"SQLite Database Version is: \", record, '\\n')\n\n# begin data collection by calling the python function web_data_html_scrap()\nprint(f'Collecting list of links to urls ...\\n')\nprint(f'Getting data from the web on {optName}!\\n')\n\nweb_data_html_scrap(url, langShort)\n" ]
[ [ "pandas.DataFrame" ] ]
byronyi/tensor2tensor
[ "b93fc036fdbacfddcadad8fb781f5b670533384e" ]
[ "tensor2tensor/trax/layers/core.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trax layers library.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport operator as op\n\nfrom jax import lax\n\nimport numpy as onp\nfrom six.moves import reduce\nfrom tensor2tensor.trax import backend\nfrom tensor2tensor.trax.backend import numpy as np\nfrom tensor2tensor.trax.layers import base\n\n# Following the convention used in Keras and tf.layers, we use CamelCase for the\n# names of layer constructors, like Conv and Relu, while using snake_case for\n# other functions, like lax.conv and relu. To allow this, we disable below.\n# pylint: disable=invalid-name\n\n\n# Initializers.\n\n\ndef RandomNormalInitializer(stddev=1e-2):\n \"\"\"An initializer function for random normal coefficients.\"\"\"\n def init(shape, rng):\n return (stddev * backend.random.normal(rng, shape)).astype('float32')\n return init\n\n\ndef GlorotNormalInitializer(out_dim=0, in_dim=1, scale=onp.sqrt(2)):\n \"\"\"An initializer function for random Glorot-scaled coefficients.\"\"\"\n def init(shape, rng):\n fan_in, fan_out = shape[in_dim], shape[out_dim]\n size = onp.prod(onp.delete(shape, [in_dim, out_dim]))\n std = scale / np.sqrt((fan_in + fan_out) / 2. 
* size)\n return (std * backend.random.normal(rng, shape)).astype('float32')\n return init\n\n\ndef XavierUniformInitializer(out_dim=0, in_dim=1):\n \"\"\"An initializer function for random uniform xavier-scaled coefficients.\"\"\"\n def init(shape, rng):\n fan_in, fan_out = shape[in_dim], shape[out_dim]\n std = np.sqrt(2.0 / (fan_in + fan_out))\n a = np.sqrt(3.0) * std\n return backend.random.uniform(rng, shape, minval=-a, maxval=a)\n return init\n\n\ndef one_hot(x, size, dtype=np.float32):\n \"\"\"Make a n+1 dim one-hot array from n dim int-categorical array.\"\"\"\n return np.array(x[..., np.newaxis] == np.arange(size), dtype)\n\n\n# Layers.\n\n\[email protected]()\ndef Relu(x, **unused_kwargs):\n return np.maximum(x, 0.)\n\n\[email protected]()\ndef Tanh(x, **unused_kwargs):\n return np.tanh(x)\n\n\[email protected]()\ndef Exp(x, **unused_kwargs):\n return np.exp(x)\n\n\[email protected]()\ndef LogSoftmax(x, params, axis=-1, **kwargs):\n \"\"\"Apply log softmax to x: log-normalize along the given axis.\"\"\"\n del params, kwargs\n return x - backend.logsumexp(x, axis, keepdims=True)\n\n\[email protected]()\ndef Softmax(x, params, axis=-1, **kwargs):\n \"\"\"Apply softmax to x: exponentiate and normalize along the given axis.\"\"\"\n del params, kwargs\n return np.exp(x - backend.logsumexp(x, axis, keepdims=True))\n\n\[email protected]()\ndef Softplus(x, **unused_kwargs):\n return np.logaddexp(x, 0.)\n\n\nclass Dense(base.Layer):\n \"\"\"Layer constructor function for a dense (fully-connected) layer.\"\"\"\n\n def __init__(self, units,\n kernel_initializer=GlorotNormalInitializer(),\n bias_initializer=RandomNormalInitializer(1e-6)):\n super(Dense, self).__init__()\n self._units = units\n self._kernel_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n\n def call(self, x, params, **kwargs):\n del kwargs\n w, b = params\n return np.dot(x, w) + b\n\n def output_shape(self, input_shape):\n return tuple(input_shape[:-1]) + (self._units,)\n\n def new_parameters(self, input_shape, rng):\n w = self._kernel_initializer((input_shape[-1], self._units), rng)\n b = self._bias_initializer((self._units,), rng)\n return (w, b)\n\n\nclass Embedding(base.Layer):\n \"\"\"Layer constructor function for an embedding layer.\"\"\"\n\n def __init__(self, feature_depth, vocab_size,\n kernel_initializer=XavierUniformInitializer()):\n super(Embedding, self).__init__()\n self._feature_depth = feature_depth\n self._vocab_size = vocab_size\n self._kernel_initializer = kernel_initializer\n\n def call(self, x, params, **kwargs):\n del kwargs\n return np.take(params, x, axis=0)\n\n def output_shape(self, input_shape):\n return tuple(input_shape) + (self._feature_depth,)\n\n def new_parameters(self, input_shape, rng):\n return self._kernel_initializer(\n (self._vocab_size, self._feature_depth), rng)\n\n\ndef padtype_to_pads(in_shape, window_shape, window_strides, padding):\n \"\"\"Convert padding string to list of pairs of pad values.\"\"\"\n padding = padding.upper()\n if padding == 'SAME':\n out_shape = onp.ceil(\n onp.true_divide(in_shape, window_strides)).astype(int)\n pad_sizes = [max((out_size - 1) * stride + window_shape - in_size, 0)\n for out_size, stride, window_shape, in_size\n in zip(out_shape, window_strides, window_shape, in_shape)]\n return [(pad_size // 2, pad_size - pad_size // 2)\n for pad_size in pad_sizes]\n elif padding == 'VALID':\n return [(0, 0)] * len(in_shape)\n else:\n msg = 'Unknown padding type: {}.'\n raise TypeError(msg.format(padding))\n\n\nclass 
Conv(base.Layer):\n \"\"\"Layer constructor function for a general convolution layer.\"\"\"\n\n def __init__(self, filters, kernel_size, strides=None, padding='VALID',\n dimension_numbers=('NHWC', 'HWIO', 'NHWC'),\n kernel_initializer=None,\n bias_initializer=RandomNormalInitializer(1e-6)):\n super(Conv, self).__init__()\n self._filters = filters\n self._kernel_size = kernel_size\n self._padding = padding\n self._dimension_numbers = dimension_numbers\n self._lhs_spec, self._rhs_spec, self._out_spec = dimension_numbers\n self._one = (1,) * len(kernel_size)\n self._strides = strides or self._one\n self._bias_initializer = bias_initializer\n rhs_spec = self._rhs_spec\n self._kernel_initializer = kernel_initializer or GlorotNormalInitializer(\n rhs_spec.index('O'), rhs_spec.index('I'))\n\n def call(self, x, params=(), **kwargs):\n del kwargs\n w, b = params\n return lax.conv_general_dilated(\n x, w, self._strides, self._padding, self._one, self._one,\n self._dimension_numbers) + b\n\n def _kernel_shape(self, input_shape):\n \"\"\"Helper to calculate the kernel shape.\"\"\"\n kernel_size_iter = iter(self._kernel_size)\n return [self._filters if c == 'O' else\n input_shape[self._lhs_spec.index('C')] if c == 'I' else\n next(kernel_size_iter) for c in self._rhs_spec]\n\n def _conv_shape_tuple(self, lhs_shape, rhs_shape, strides, pads):\n \"\"\"Compute the shape of a conv given input shapes in canonical order.\"\"\"\n if isinstance(pads, str):\n pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)\n if len(pads) != len(lhs_shape) - 2:\n msg = 'Wrong number of explicit pads for conv: expected {}, got {}.'\n raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))\n lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads)))\n out_space = onp.floor_divide(\n onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1\n out_space = onp.maximum(0, out_space)\n out_shape = (lhs_shape[0], rhs_shape[0]) + tuple(out_space)\n return tuple(out_shape)\n\n def _conv_general_permutations(self, dimension_numbers):\n \"\"\"Utility for convolution dimension permutations relative to Conv HLO.\"\"\"\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n lhs_char, rhs_char, out_char = ('N', 'C'), ('O', 'I'), ('N', 'C')\n charpairs = (lhs_char, rhs_char, out_char)\n for i, (a, b) in enumerate(charpairs):\n if not (dimension_numbers[i].count(a) == 1 and\n dimension_numbers[i].count(b) == 1):\n msg = ('convolution dimension_numbers[{}] must contain the characters '\n '\"{}\" and \"{}\" exatly once, got {}.')\n raise TypeError(msg.format(i, a, b, dimension_numbers[i]))\n if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):\n msg = ('convolution dimension_numbers[{}] cannot have duplicate '\n 'characters, got {}.')\n raise TypeError(msg.format(i, dimension_numbers[i]))\n if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==\n set(out_spec) - set(out_char)):\n msg = ('convolution dimension_numbers elements must each have the same '\n 'set of spatial characters, got {}.')\n raise TypeError(msg.format(dimension_numbers))\n\n def getperm(spec, charpair):\n spatial = (i for i, c in enumerate(spec) if c not in charpair)\n if spec is not rhs_spec:\n spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))\n return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)\n\n lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)\n return lhs_perm, rhs_perm, out_perm\n\n def _conv_general_shape_tuple(self, lhs_shape, rhs_shape, window_strides,\n padding, 
dimension_numbers):\n \"\"\"Generalized computation of conv shape.\"\"\"\n lhs_perm, rhs_perm, out_perm = self._conv_general_permutations(\n dimension_numbers)\n lhs_trans = onp.take(lhs_shape, lhs_perm)\n rhs_trans = onp.take(rhs_shape, rhs_perm)\n out_trans = self._conv_shape_tuple(\n lhs_trans, rhs_trans, window_strides, padding)\n return tuple(onp.take(out_trans, onp.argsort(out_perm)))\n\n def output_shape(self, input_shape):\n kernel_shape = self._kernel_shape(input_shape)\n return self._conv_general_shape_tuple(\n input_shape, kernel_shape,\n self._strides, self._padding, self._dimension_numbers)\n\n def new_parameters(self, input_shape, rng):\n kernel_shape = self._kernel_shape(input_shape)\n bias_shape = [self._filters if c == 'C' else 1 for c in self._out_spec]\n bias_shape = tuple(itertools.dropwhile(lambda x: x == 1, bias_shape))\n w = self._kernel_initializer(kernel_shape, rng)\n b = self._bias_initializer(bias_shape, rng)\n return (w, b)\n\n\n# Flatten.\ndef _flatten_output_shape(input_shape, num_axis_to_keep=1):\n \"\"\"Output shape of a flatten layer.\"\"\"\n if num_axis_to_keep >= len(input_shape):\n raise ValueError(\n \"num_axis_to_keep[%d] should be less than input's rank[%d]\" %\n (num_axis_to_keep, len(input_shape)))\n return tuple(input_shape[:num_axis_to_keep]) + (\n reduce(op.mul, input_shape[num_axis_to_keep:], 1),)\n\n\[email protected](output_shape=_flatten_output_shape)\ndef Flatten(x, params, num_axis_to_keep=1, **kwargs):\n del params, kwargs\n return np.reshape(x, (x.shape[:num_axis_to_keep] + (-1,)))\n\n\n# Batch normalization.\ndef _batch_norm_new_params(input_shape, rng, axis=(0, 1, 2),\n center=True, scale=True, **kwargs):\n \"\"\"Helper to initialize batch norm params.\"\"\"\n del rng, kwargs\n axis = (axis,) if np.isscalar(axis) else axis\n shape = tuple(d for i, d in enumerate(input_shape) if i not in axis)\n beta = np.zeros(shape, dtype='float32') if center else ()\n gamma = np.ones(shape, dtype='float32') if scale else ()\n return (beta, gamma)\n\n\[email protected](new_parameters=_batch_norm_new_params)\ndef BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5,\n center=True, scale=True, **unused_kwargs):\n \"\"\"Layer construction function for a batch normalization layer.\"\"\"\n mean = np.mean(x, axis, keepdims=True)\n # Fast but less numerically-stable variance calculation than np.var.\n m1 = np.mean(x**2, axis, keepdims=True)\n var = m1 - mean**2\n z = (x - mean) / np.sqrt(var + epsilon)\n\n # Expand the parameters to have the right axes.\n beta, gamma = params\n # TODO(phawkins): np.expand_dims should accept an axis tuple.\n # (https://github.com/numpy/numpy/issues/12290)\n ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x)))\n beta = beta[ed]\n gamma = gamma[ed]\n\n # Return the z rescaled by the parameters if requested.\n if center and scale:\n return gamma * z + beta\n if center:\n return z + beta\n if scale:\n return gamma * z\n return z\n\n\n# Pooling.\ndef _pooling_output_shape(input_shape, pool_size=(2, 2),\n strides=None, padding='VALID'):\n \"\"\"Helper: compute the output shape for the pooling layer.\"\"\"\n dims = (1,) + pool_size + (1,) # NHWC\n spatial_strides = strides or (1,) * len(pool_size)\n strides = (1,) + spatial_strides + (1,)\n pads = padtype_to_pads(input_shape, dims, strides, padding)\n operand_padded = onp.add(input_shape, onp.add(*zip(*pads)))\n t = onp.floor_divide(onp.subtract(operand_padded, dims), strides) + 1\n return tuple(t)\n\n\ndef _pooling_general(inputs, reducer, init_val, 
rescaler=None,\n pool_size=(2, 2), strides=None, padding='VALID'):\n \"\"\"Helper: general pooling computation used in pooling layers later.\"\"\"\n spatial_strides = strides or (1,) * len(pool_size)\n rescale = rescaler(pool_size, spatial_strides, padding) if rescaler else None\n dims = (1,) + pool_size + (1,) # NHWC\n strides = (1,) + spatial_strides + (1,)\n out = lax.reduce_window(inputs, init_val, reducer, dims, strides, padding)\n return rescale(out, inputs) if rescale else out\n\n\[email protected](output_shape=_pooling_output_shape)\ndef MaxPool(x, params, pool_size=(2, 2), strides=None, padding='VALID', **kw):\n del params, kw\n return _pooling_general(x, lax.max, -np.inf, pool_size=pool_size,\n strides=strides, padding=padding)\n\n\[email protected](output_shape=_pooling_output_shape)\ndef SumPool(x, params, pool_size=(2, 2), strides=None, padding='VALID', **kw):\n del params, kw\n return _pooling_general(x, lax.add, 0., pool_size=pool_size,\n strides=strides, padding=padding)\n\n\ndef _normalize_by_window_size(dims, spatial_strides, padding):\n def rescale(outputs, inputs):\n one = np.ones(inputs.shape[1:-1], dtype=inputs.dtype)\n window_sizes = lax.reduce_window(\n one, 0., lax.add, dims, spatial_strides, padding)\n return outputs / window_sizes[..., np.newaxis]\n return rescale\n\n\[email protected](output_shape=_pooling_output_shape)\ndef AvgPool(x, params, pool_size=(2, 2), strides=None, padding='VALID', **kw):\n del params, kw\n return _pooling_general(x, lax.add, 0., _normalize_by_window_size,\n pool_size, strides=strides, padding=padding)\n\n\[email protected]()\ndef Dropout(x, params, rate=0.0, mode='train', rng=None, **kwargs):\n \"\"\"Layer construction function for a dropout layer with given rate.\"\"\"\n del params, kwargs\n if rng is None:\n msg = ('Dropout layer requires apply_fun to be called with a rng keyword '\n 'argument. That is, instead of `Dropout(params, inputs)`, call '\n 'it like `Dropout(params, inputs, rng=key)`.')\n raise ValueError(msg)\n if rate >= 1.0:\n raise ValueError('Dropout rate (%f) must be lower than 1.' % rate)\n if mode == 'train' and rate > 0.0:\n keep = backend.random.bernoulli(rng, 1.0 - rate, x.shape)\n return np.where(keep, x / (1.0 - rate), 0)\n else:\n return x\n\n\[email protected]()\ndef Div(x, params, divisor=1.0, **kwargs):\n del params, kwargs\n return x / divisor\n\n\n# Mean.\ndef _mean_output_shape(input_shape, axis=-1, keepdims=False):\n shape1 = list(input_shape)[:axis] # Shape before axis.\n shape2 = list(input_shape)[axis:][1:] # Shape after axis.\n mid_shape = [1] if keepdims else []\n return tuple(shape1 + mid_shape + shape2)\n\n\[email protected](output_shape=_mean_output_shape)\ndef Mean(x, params, axis=-1, keepdims=False, **kwargs):\n del params, kwargs\n return np.mean(x, axis=axis, keepdims=keepdims)\n\n\[email protected]()\ndef ShiftRight(x, **unused_kwargs):\n \"\"\"Layer to shift the tensor to the right by padding on axis 1.\"\"\"\n pad_widths = [(0, 0), (1, 0)]\n pad_widths += [(0, 0) for _ in range(len(x.shape) - 2)]\n padded = np.pad(x, pad_widths, mode='constant')\n return padded[:, :-1, ...]\n" ]
[ [ "numpy.true_divide", "numpy.subtract", "numpy.take", "numpy.maximum", "numpy.argsort", "numpy.delete", "numpy.sqrt" ] ]
PawelRosikiewicz/SkinDiagnosticAI
[ "7cc7b7a9ccd4103095a7548e7b99de4988858356" ]
[ "src/utils/image_augmentation.py" ]
[ "# ********************************************************************************** #\n# #\n# Project: FastClassAI workbecnch # \n# # \n# Author: Pawel Rosikiewicz #\n# Contact: prosikiewicz_gmail.com #\n# #\n# This notebook is a part of Skin AanaliticAI development kit, created #\n# for evaluation of public datasets used for skin cancer detection with #\n# large number of AI models and data preparation pipelines. #\n# # \n# License: MIT #\n# Copyright (C) 2021.01.30 Pawel Rosikiewicz #\n# https://opensource.org/licenses/MIT # \n# #\n# ********************************************************************************** #\n\n\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os # allow changing, and navigating files and folders, \nimport sys\nimport re # module to use regular expressions, \nimport glob # lists names in folders that match Unix shell patterns\nimport random # functions that use and generate random numbers\n\nimport numpy as np # support for multi-dimensional arrays and matrices\nimport pandas as pd # library for data manipulation and analysis\nimport seaborn as sns # advance plots, for statistics, \nimport matplotlib.pyplot as plt # for making plots, \nimport matplotlib as mpl # to get some basif functions, heping with plot mnaking \nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport scipy.stats as stats # library for statistics and technical programming, \nimport tensorflow.keras as keras \n\nfrom PIL import Image, ImageDraw\nfrom IPython.display import display\nfrom tensorflow.keras import backend as K # used for housekeeping of tf models,\n\nimport matplotlib.patches as mpatches\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\n\n\n# Function ................................................................................\n\ndef create_augmented_images(*, external_generator, augm_img_nr=10, paramsforgenerator=\"\"):\n \"\"\" \n Function that takes pictures in a batch, provided with keras generators\n and uses another generator.\n Secondarly, this function can be used to create dataframe with data on images in image batch\n if, augm_img_nr is set 0, \n \n external_generator : iterator, based on keras image generator\n the function was designed to work with all images in a given dataset\n provided as one batch,\n \n augm_img_nr : the number of augment images that will be created \n for each image, if augm_img_nr=0, no augmented images will be created, \n but both array, and dataframe will be returned, \n \n paramsforgenerator : dictionary, with parameters for image generator,\n used for image augmentation, \n \n Returns : numpy array with img batch, [?, pixel_size, pixel_size, 3]\n pandas dataframe, with rows corresponding to each image in the batch, \n and following columns: \n class = foldername in data directory, imagename= original image name, \n imgtype={'raw', 'aug'}, imgidnumber=0, foir raw, >=1 for augmented images\n \"\"\"\n\n # extract one batch with all images in a given dataset\n img_batch, batch_labels = next(external_generator)\n\n #.. 
create df, with class, image and image type names\n \"\"\" I will use this df, to create, new file with subdirectories, \n and save raw and augmented images with proper names\n \"\"\"\n img_filenames = pd.Series(external_generator.filenames).str.split(pat=\"/\", expand=True)\n img_filenames = pd.concat([img_filenames, pd.Series([\"raw\"]*img_filenames.shape[0]), pd.Series([0]*img_filenames.shape[0])], axis=1)\n img_filenames.columns = [\"classname\", \"imgname\", \"imgtype\", \"imgidnumber\" ]\n\n # in case, I just wish to use that function to get everythign in the same format, but not to generate augmented images\n if augm_img_nr==0: \n pass\n \n if augm_img_nr>0:\n \n # Create generator for image augmentation\n datagen = ImageDataGenerator(**paramsforgenerator)\n datagen.fit(img_batch)\n\n #.. prepare iterator, that will return all figures in a batch, one by one, \n # augm_datagen.fit(img_batch)\n datagen_iter = datagen.flow(img_batch, batch_size=1, shuffle=False) \n\n\n # Create n augmented figures for each image in gthe batch, \n aug_img_filenames = list()\n for i in range(augm_img_nr):\n for j in range(img_batch.shape[0]):\n # create augmented figure, and add to new batch\n one_img = datagen_iter.next()\n if i+j==0: \n batch_img_augm = one_img\n else: \n batch_img_augm = np.r_[batch_img_augm, one_img]\n\n # save name and id for that image\n aug_img_filenames.append({\n \"classname\" : img_filenames.iloc[j,0],\n \"imgname\": img_filenames.iloc[j,1], \n \"imgtype\": \"aug\",\n \"imgidnumber\": i+1}) \n \n # create new batch and df with labels and filenames to return,\n img_filenames = pd.concat([img_filenames,pd.DataFrame(aug_img_filenames)], axis=0, sort=False).reset_index(drop=True)\n img_batch = np.r_[img_batch, batch_img_augm]\n \n #print(img_filenames.shape, img_batch.shape)\n return img_batch, img_filenames\n \n \n \n \n \n# Function ................................................................................\n \ndef save_augmented_images(*,\n datasetname, img_batch, batch_info, savedir, verbose=False):\n\n \"\"\"\n 1) creates save directory, with subdirectories for saving classified images\n 2) saves images as png, that were stored in img_batch\n \n datasetname : str, eg {\"test\", \"train\"}\n img_batch. 
: numpy array [?, pixel_nr, pixel_nr, 3], contains rgb pictures \n on scale [0-255]\n batch_info : data frame with info on each image in img_batch\n created with create_augmented_images()\n savedir : full path to directory, where all classes should be stored, \n verbose : default = False, \n \"\"\"\n\n # check if savedir exist, if not create it\n try: os.chdir(savedir)\n except: os.mkdir(savedir)\n\n # create directories with provided datasetname\n os.chdir(savedir)\n try: os.mkdir(datasetname)\n except: pass\n\n # create directories for each class\n os.chdir(os.path.join(savedir, datasetname))\n for dirname in list(batch_info.classname.unique()):\n try: os.mkdir(dirname)\n except: pass \n\n # save each images in img_batch with proper name in corresponing class/directory\n for i in range(img_batch.shape[0]):\n img_info = batch_info.iloc[i,:]\n\n # img name\n if img_info.imgtype==\"raw\":\n img_name = f\"{img_info.imgtype}_{img_info.imgname}\"\n if img_info.imgtype!=\"raw\":\n img_name = f\"{img_info.imgtype}{img_info.imgidnumber}_{img_info.imgname}\"\n\n # saving, \n try:\n mpl.image.imsave(os.path.join(savedir, datasetname, img_info.classname, img_name), \n np.array(img_batch[i], dtype=int)\n ) # [0-255] must be int, \n except: \n pass\n\n # info,\n if verbose==True:\n print(f\"{img_batch.shape[0]} images were saved\")\n print(f\"in {savedir}\")\n print(f\"in following files for each classe: {list(batch_info.classname.unique())}\")\n\n" ]
[ [ "numpy.array", "pandas.Series", "pandas.DataFrame", "tensorflow.keras.preprocessing.image.ImageDataGenerator" ] ]
Xlinford/ContrastiveSeg
[ "79eec700d2efdaad4da8cf0c07674107e72078da" ]
[ "lib/models/backbones/resnet/wide_resnet_models.py" ]
[ "from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport math\r\nimport torch.nn as nn\r\nfrom collections import OrderedDict\r\nfrom functools import partial\r\n\r\nfrom lib.models.tools.module_helper import ModuleHelper \r\n\r\n\r\n\r\nclass GlobalAvgPool2d(nn.Module):\r\n def __init__(self):\r\n \"\"\"Global average pooling over the input's spatial dimensions\"\"\"\r\n super(GlobalAvgPool2d, self).__init__()\r\n\r\n def forward(self, inputs):\r\n in_size = inputs.size()\r\n return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)\r\n\r\n\r\nclass IdentityResidualBlock(nn.Module):\r\n def __init__(self,\r\n in_channels,\r\n channels,\r\n stride=1,\r\n dilation=1,\r\n groups=1,\r\n bn_type=None,\r\n dropout=None):\r\n \"\"\"Configurable identity-mapping residual block\r\n\r\n Parameters\r\n ----------\r\n in_channels : int\r\n Number of input channels.\r\n channels : list of int\r\n Number of channels in the internal feature maps. Can either have two or three elements: if three construct\r\n a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then\r\n `3 x 3` then `1 x 1` convolutions.\r\n stride : int\r\n Stride of the first `3 x 3` convolution\r\n dilation : int\r\n Dilation to apply to the `3 x 3` convolutions.\r\n groups : int\r\n Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with\r\n bottleneck blocks.\r\n bn_type : callable\r\n Function to create normalization / activation Module.\r\n dropout: callable\r\n Function to create Dropout Module.\r\n \"\"\"\r\n super(IdentityResidualBlock, self).__init__()\r\n\r\n # Check parameters for inconsistencies\r\n if len(channels) != 2 and len(channels) != 3:\r\n raise ValueError(\"channels must contain either two or three values\")\r\n if len(channels) == 2 and groups != 1:\r\n raise ValueError(\"groups > 1 are only valid if len(channels) == 3\")\r\n\r\n is_bottleneck = len(channels) == 3\r\n need_proj_conv = stride != 1 or in_channels != channels[-1]\r\n\r\n self.bn1 = ModuleHelper.BNReLU(in_channels, bn_type=bn_type)\r\n if not is_bottleneck:\r\n layers = [\r\n (\"conv1\", nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False,\r\n dilation=dilation)),\r\n (\"bn2\", ModuleHelper.BNReLU(channels[0], bn_type=bn_type)),\r\n (\"conv2\", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False,\r\n dilation=dilation))\r\n ]\r\n if dropout is not None:\r\n layers = layers[0:2] + [(\"dropout\", dropout())] + layers[2:]\r\n else:\r\n layers = [\r\n (\"conv1\", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)),\r\n (\"bn2\", ModuleHelper.BNReLU(channels[0], bn_type=bn_type)),\r\n (\"conv2\", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False,\r\n groups=groups, dilation=dilation)),\r\n (\"bn3\", ModuleHelper.BNReLU(channels[1], bn_type=bn_type)),\r\n (\"conv3\", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False))\r\n ]\r\n if dropout is not None:\r\n layers = layers[0:4] + [(\"dropout\", dropout())] + layers[4:]\r\n self.convs = nn.Sequential(OrderedDict(layers))\r\n\r\n if need_proj_conv:\r\n self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False)\r\n\r\n def forward(self, x):\r\n if hasattr(self, \"proj_conv\"):\r\n bn1 = self.bn1(x)\r\n shortcut = self.proj_conv(bn1)\r\n else:\r\n shortcut = x.clone()\r\n bn1 = 
self.bn1(x)\r\n\r\n out = self.convs(bn1)\r\n out.add_(shortcut)\r\n\r\n return out\r\n\r\n\r\nclass WiderResNetA2(nn.Module):\r\n def __init__(self,\r\n structure=[3, 3, 6, 3, 1, 1],\r\n bn_type=None,\r\n classes=0,\r\n dilation=True):\r\n \"\"\"Wider ResNet with pre-activation (identity mapping) blocks\r\n\r\n This variant uses down-sampling by max-pooling in the first two blocks and by strided convolution in the others.\r\n\r\n Parameters\r\n ----------\r\n structure : list of int\r\n Number of residual blocks in each of the six modules of the network.\r\n bn_type : callable\r\n Function to create normalization / activation Module.\r\n classes : int\r\n If not `0` also include global average pooling and a fully-connected layer with `classes` outputs at the end\r\n of the network.\r\n dilation : bool\r\n If `True` apply dilation to the last three modules and change the down-sampling factor from 32 to 8.\r\n \"\"\"\r\n super(WiderResNetA2, self).__init__()\r\n self.structure = structure\r\n self.dilation = dilation\r\n\r\n if len(structure) != 6:\r\n raise ValueError(\"Expected a structure with six values\")\r\n\r\n # Initial layers\r\n self.mod1 = nn.Sequential(OrderedDict([\r\n (\"conv1\", nn.Conv2d(3, 64, 3, stride=1, padding=1, bias=False))\r\n ]))\r\n\r\n # Groups of residual blocks\r\n in_channels = 64\r\n channels = [(128, 128), (256, 256), (512, 512), (512, 1024), (512, 1024, 2048), (1024, 2048, 4096)]\r\n for mod_id, num in enumerate(structure):\r\n # Create blocks for module\r\n blocks = []\r\n for block_id in range(num):\r\n if not dilation:\r\n dil = 1\r\n stride = 2 if block_id == 0 and 2 <= mod_id <= 4 else 1\r\n else:\r\n if mod_id == 3:\r\n dil = 2\r\n elif mod_id > 3:\r\n dil = 4\r\n else:\r\n dil = 1\r\n stride = 2 if block_id == 0 and mod_id == 2 else 1\r\n\r\n if mod_id == 4:\r\n drop = None\r\n elif mod_id == 5:\r\n drop = None\r\n else:\r\n drop = None\r\n\r\n blocks.append((\r\n \"block%d\" % (block_id + 1),\r\n IdentityResidualBlock(in_channels, channels[mod_id], bn_type=bn_type, stride=stride, dilation=dil,\r\n dropout=drop)\r\n ))\r\n\r\n # Update channels and p_keep\r\n in_channels = channels[mod_id][-1]\r\n\r\n # Create module\r\n if mod_id < 2:\r\n self.add_module(\"pool%d\" % (mod_id + 2), nn.MaxPool2d(3, stride=2, padding=1, ceil_mode=True))\r\n self.add_module(\"mod%d\" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))\r\n\r\n self.bn_out = ModuleHelper.BNReLU(in_channels, bn_type=bn_type)\r\n\r\n\r\n def forward(self, img):\r\n tuple_features = list()\r\n out = self.mod1(img)\r\n out = self.mod2(self.pool2(out))\r\n out = self.mod3(self.pool3(out))\r\n out = self.mod4(out)\r\n tuple_features.append(out)\r\n out = self.mod5(out)\r\n tuple_features.append(out)\r\n out = self.mod6(out)\r\n tuple_features.append(out)\r\n out = self.mod7(out)\r\n out = self.bn_out(out)\r\n tuple_features.append(out)\r\n return tuple_features\r\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.Conv2d" ] ]
bbreton3/glove_tf_21
[ "16b18bdb2d41c104dcd9159c0a760336bb5fd4d1" ]
[ "tests/test_preprocessing_glove.py" ]
[ "from glove_tf_21.utils.file_utils import save_labels\n\nimport numpy as np\nimport os\n\n\ndef test_cooc_count(preprocessing_glove, ix_sequences_full, cooc_dict):\n output_cooc = dict()\n for ix_seq in ix_sequences_full:\n output_cooc = preprocessing_glove.cooc_count(output_cooc, ix_seq)\n\n assert len(output_cooc) == len(cooc_dict)\n\n for key, val in cooc_dict.items():\n assert np.allclose(output_cooc[key], val)\n\n\ndef test_cooc_dict_to_sparse(preprocessing_glove_fit, cooc_dict, cooc_matrix_sparse):\n sparse_cooc_mat = preprocessing_glove_fit.cooc_dict_to_sparse(cooc_dict)\n assert np.sum(sparse_cooc_mat != cooc_matrix_sparse) == 0.0\n\n\ndef test_glove_formatter(preprocessing_glove, cooc_matrix_sparse, cooc_rows, cooc_cols, cooc_data):\n test_cooc_rows, test_cooc_cols, test_cooc_data = preprocessing_glove.glove_formatter(cooc_matrix_sparse)\n\n assert np.allclose(test_cooc_rows, cooc_rows)\n assert np.allclose(test_cooc_cols, cooc_cols)\n assert np.allclose(test_cooc_data, cooc_data)\n\n\ndef test_get_labels(preprocessing_glove_fit, vocab):\n assert preprocessing_glove_fit.get_labels() == vocab\n\n\ndef test_get_cooc_mat(preprocessing_glove_fit, corpus_file_path, cooc_matrix_sparse, temp_folder_path):\n test_cooc_matrix_sparse = preprocessing_glove_fit.get_cooc_mat(corpus_file_path)\n assert np.sum(test_cooc_matrix_sparse != cooc_matrix_sparse) == 0.0\n\n empty_file_path = os.path.join(temp_folder_path, \"empty_file.txt\")\n save_labels([\"\"], empty_file_path)\n assert np.sum(preprocessing_glove_fit.get_cooc_mat(empty_file_path)) == 0.0\n\n os.remove(empty_file_path)\n\n\ndef test_call(preprocessing_glove_fit):\n\n cooc_rows, cooc_cols, cooc_data, cooc = preprocessing_glove_fit()\n\n assert len(cooc_rows) == 40\n assert len(cooc_cols) == 40\n assert len(cooc_data) == 40\n" ]
[ [ "numpy.allclose", "numpy.sum" ] ]
CitizenScienceInAstronomyWorkshop/pyIBCC
[ "35215648f3361689e374780182f39182eddda64f" ]
[ "python/tests/ibcc_test.py" ]
[ "'''\nCreated on 8 Apr 2015\n\n@author: edwin\n'''\nimport unittest\nimport ibcc\nimport logging\nimport numpy as np\nfrom dynibcc import DynIBCC\nfrom ibcc_balanced import BalancedIBCC\n\ndef check_accuracy(pT, target_acc, goldfile='./data/gold_verify.csv'):\n # check values are in tolerance range\n gold = np.genfromtxt(goldfile)\n decisions = np.round(pT[:,1]) \n errors = np.abs(gold-decisions)\n nerrors = np.nansum(errors)\n acc = np.round(1 - (nerrors/float(np.sum(np.isfinite(gold)))), decimals=5)\n logging.info( \"accuracy is %f, nerrors=%i\" % (acc, nerrors))\n assert acc==target_acc\n\ndef check_accuracy_multi(pT, target_acc, goldfile='./data/gold5_verify.csv'):\n # check values are in tolerance range\n gold = np.genfromtxt(goldfile)\n nerrors = 0\n for j in range(pT.shape[1]):\n decisions = np.round(pT[:,j])\n goldj = gold==j \n errors = np.abs(goldj-decisions)\n errors = errors[goldj]\n nerrors += np.nansum(errors)\n acc = np.round(1 - (nerrors/float(np.sum(np.isfinite(gold)))), decimals=5)\n logging.info( \"accuracy is %f, nerrors=%i\" % (acc, nerrors))\n assert acc==target_acc\n \ndef check_outputsize(pT, combiner, shape=(2,2,5), ptlength=100):\n # check output has right number of data points\n assert pT.shape[0]==ptlength\n logging.info(\"Alpha shape: \" + str(combiner.alpha.shape))\n assert combiner.alpha.shape == shape\n \nclass Test(unittest.TestCase):\n def testSparseList_noGold(self):\n configFile = './config/sparse_nogold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.82, goldfile='./data/gold_mixed_verify.csv')\n \n def testTable_noGold(self):\n # Crowdlabels contains some NaNs and some -1s.\n configFile = './config/table_nogold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.82) \n \n def testSparseList_withGold(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95)\n \n def testTable_withGold(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95) \n \n def testSparseList_shortGold(self):\n #Gold labels is shorter than the no. crowd-labelled data points\n configFile = './config/sparse_shortgold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95)\n \n def testTable_shortGold(self):\n #Gold labels is shorter than the no. crowd-labelled data points\n configFile = './config/table_shortgold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95) \n \n def testSparseList_shortGoldMatrix(self):\n #Gold labels is shorter than the no. crowd-labelled data points\n configFile = './config/sparse_shortgoldmat.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.94, goldfile='./data/gold_mixed_verify.csv')\n \n def testTable_shortGoldMatrix(self):\n #Gold labels is shorter than the no. 
crowd-labelled data points\n configFile = './config/table_shortgoldmat.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.94, goldfile='./data/gold_mixed_verify.csv') \n \n def testSparseList_withGold_5classes(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1)\n \n def testTable_withGold_5classes(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1) \n \n def testSparseList_lowerbound(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold_lowerbound.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95)\n \n def testTable_lowerbound_5classes(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5_lowerbound.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1) \n \n# Dynamic IBCC---------------------------------------------------------------------------------------------------------\n \n def test_SparseList_withGold_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner, (2,2,375))\n check_accuracy(pT, 0.93)\n \n def test_Table_withGold_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner,(2,2,500))\n check_accuracy(pT, 0.94) \n \n def test_SparseList_withGold_5classes_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner,(5,5,500))\n check_accuracy_multi(pT, 1)\n \n def test_Table_withGold_5classes_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner, (5,5,500))\n check_accuracy_multi(pT, 1) \n \n def test_SparseList_lowerbound_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold_lowerbound.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner, (2,2,375))\n check_accuracy(pT, 0.93)\n \n def test_Table_lowerbound_5classes_dyn(self):\n #Gold labels is longer than the no. 
crowd-labelled data points\n configFile = './config/table_gold5_lowerbound.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner, (5,5,500))\n check_accuracy_multi(pT, 1) \n \n# BALANCED IBCC -------------------------------------------------------------------------------------------------------\n \n def testSparseList_balanced(self):\n configFile = './config/sparse_nogold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=BalancedIBCC)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.82, goldfile='./data/gold_mixed_verify.csv')\n \n def testTable_withGold_5classes_balanced(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=BalancedIBCC)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1) \n \n# OPTIMIZATION --------------------------------------------------------------------------------------------------------\n\n def testSparseList_opt(self):\n configFile = './config/sparse_nogold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None, optimise_hyperparams=True)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.82, goldfile='./data/gold_mixed_verify.csv')\n \n def testTable_withGold_5classes_opt(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None, optimise_hyperparams=True)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1) \n \n# SCORES NOT FROM 0 ---------------------------------------------------------------------------------------------------\n \n def testSparseList_scores(self):\n configFile = './config/sparse_nogold_mixscores.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.82, goldfile='./data/gold_mixed_verify.csv')\n\n# SETUP ETC. ----------------------------------------------------------------------------------------------------------\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n logging.info(\"TEST: \"+self._testMethodName)\n\n# TODO list -----------------------------------------------------------------------------------------------------------\n# Add tests for when scores are not consecutive from 0\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testSparseList']\n unittest.main()" ]
[ [ "numpy.abs", "numpy.nansum", "numpy.round", "numpy.genfromtxt", "numpy.isfinite" ] ]
klarman-cell-observatory/PowerAnalysisForSpatialOmics
[ "257e5663bb5476c7d9a22230741b5507fd621352" ]
[ "scripts/random_self_pref_cluster.py" ]
[ "from glob import glob\nimport numpy as np\nimport scipy.sparse as sparse\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport operator\nfrom spatialpower.tissue_generation import assign_labels\nfrom spatialpower.tissue_generation import visualization\n\nresults_dir = './results/motif_detection/'\nadj_mat_list = np.sort(glob(results_dir + 'blank_graph_network*.npy'))\npos_mat_list = np.sort(glob(results_dir + 'blank_graph_positions*.npy'))\n\ndim = 300\n\n##RANDOM##\ncell_type_probabilities = np.ones(10) * 0.1\nneighborhood_probabilities = np.ones((10,10)) * 0.1 \nn_cell_types = len(cell_type_probabilities)\n\nfor ii in range(0, len(adj_mat_list)):\n A = np.load(adj_mat_list[ii])\n C = np.load(pos_mat_list[ii])\n \n j = adj_mat_list[ii].split('_')[-1].split('.')[0]\n # Blank assignment structure\n n_cell_types = len(cell_type_probabilities)\n position_dict = dict()\n for i in range(0, C.shape[0]):\n position_dict[i] = C[i, :]\n\n graph = nx.from_numpy_matrix(A)\n node_id_list = list(graph.nodes)\n attribute_dict = dict(zip(node_id_list, [-1 for i in graph.nodes]))\n \n attribute_dict = assign_labels.heuristic_assignment(graph, cell_type_probabilities, neighborhood_probabilities, 'region', dim, position_dict)\n observed_cell_type_dist, kl = assign_labels.check_cell_type_dist(n_cell_types, attribute_dict, cell_type_probabilities)\n observed_neighborhood_dist, kl_neighbor = assign_labels.check_neighborhood_dist(n_cell_types, attribute_dict, neighborhood_probabilities, graph, 1)\n B = assign_labels.build_assignment_matrix(attribute_dict, n_cell_types)\n np.save(results_dir + 'random_B_' + str(j) + '.npy', B)\n\n visualization.make_vor(dim, attribute_dict, position_dict, n_cell_types, results_dir, 'random_B_' + str(j), node_id_list)\n\n## High Self Preference ##\n'''cell_type_probabilities = [0.03, 0.11, 0.11, 0.11, 0.11, 0.11, 0.11, 0.10, 0.11, 0.10]\nneighborhood_probabilities = np.array([[0.50, 0.06, 0.06, 0.06, 0.06, 0.06, 0.05, 0.05, 0.05, 0.05],\n [0.06, 0.11, 0.11, 0.11, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.06, 0.11, 0.11, 0.11, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.06, 0.11, 0.11, 0.11, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.06, 0.11, 0.11, 0.11, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.06, 0.10, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11],\n [0.05, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11, 0.11],\n [0.05, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11, 0.11],\n [0.05, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11, 0.11],\n [0.05, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11, 0.11]])\nn_cell_types = len(cell_type_probabilities)\n\nfor ii in range(0, len(adj_mat_list)):\n A = np.load(adj_mat_list[ii])\n C = np.load(pos_mat_list[ii])\n j = adj_mat_list[ii].split('_')[-1].split('.')[0]\n\n # Blank assignment structure\n n_cell_types = len(cell_type_probabilities)\n position_dict = dict()\n for i in range(0, C.shape[0]):\n position_dict[i] = C[i, :]\n\n graph = nx.from_numpy_matrix(A)\n node_id_list = list(graph.nodes)\n attribute_dict = dict(zip(node_id_list, [-1 for i in graph.nodes]))\n \n attribute_dict = assign_labels.heuristic_assignment(graph, cell_type_probabilities, neighborhood_probabilities, 'region', dim, position_dict)\n\n preferred_node_type = 0 \n for i in list(graph.nodes):\n if attribute_dict[i] == preferred_node_type:\n #print(i)\n graph_distance = 1\n neighborhood = nx.ego_graph(graph, i, radius = graph_distance)\n neighborhood_nodes = list(neighborhood.nodes)\n\n # Now set the remaining probabilities in the region. 
\n\n for node in neighborhood_nodes:\n if node != i:\n attribute_dict[node] = assign_labels.sample_cell_type(neighborhood_probabilities[preferred_node_type])\n else:\n continue\n\n observed_cell_type_dist, kl = assign_labels.check_cell_type_dist(n_cell_types, attribute_dict, cell_type_probabilities)\n observed_neighborhood_dist, kl_neighbor = assign_labels.check_neighborhood_dist(n_cell_types, attribute_dict, neighborhood_probabilities, graph, 1)\n B = assign_labels.build_assignment_matrix(attribute_dict, n_cell_types)\n np.save(results_dir + 'selfpref_B_' + str(j) + '.npy', B)\n\n visualization.make_vor(dim, attribute_dict, position_dict, n_cell_types, results_dir, 'selfpref_B_' + str(j), node_id_list)'''\n\n## 3 Cell Motif ##\ncell_type_probabilities = [0.04, 0.04, 0.04, 0.13, 0.13, 0.13, 0.12, 0.12, 0.13, 0.12]\nneighborhood_probabilities = np.array([[0.15, 0.40, 0.15, 0.05, 0.05, 0.04, 0.04, 0.04, 0.04, 0.04],\n [0.40, 0.06, 0.40, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],\n [0.15, 0.40, 0.15, 0.05, 0.05, 0.04, 0.04, 0.04, 0.04, 0.04],\n [0.05, 0.02, 0.05, 0.13, 0.12, 0.13, 0.13, 0.13, 0.12, 0.12],\n [0.05, 0.02, 0.05, 0.12, 0.13, 0.13, 0.12, 0.12, 0.13, 0.13],\n [0.04, 0.02, 0.04, 0.13, 0.13, 0.13, 0.12, 0.13, 0.13, 0.13],\n [0.04, 0.02, 0.04, 0.13, 0.12, 0.12, 0.13, 0.13, 0.14, 0.13],\n [0.04, 0.02, 0.04, 0.13, 0.12, 0.13, 0.13, 0.12, 0.14, 0.13],\n [0.04, 0.02, 0.04, 0.12, 0.13, 0.13, 0.14, 0.14, 0.12, 0.12],\n [0.04, 0.02, 0.04, 0.12, 0.13, 0.13, 0.13, 0.13, 0.12, 0.14]])\nn_cell_types = len(cell_type_probabilities)\n\nfor ii in range(0, len(adj_mat_list)):\n A = np.load(adj_mat_list[ii])\n C = np.load(pos_mat_list[ii])\n j = adj_mat_list[ii].split('_')[-1].split('.')[0]\n\n # Blank assignment structure\n n_cell_types = len(cell_type_probabilities)\n position_dict = dict()\n for i in range(0, C.shape[0]):\n position_dict[i] = C[i, :]\n\n graph = nx.from_numpy_matrix(A)\n node_id_list = list(graph.nodes)\n attribute_dict = dict(zip(node_id_list, [-1 for i in graph.nodes]))\n \n attribute_dict = assign_labels.heuristic_assignment(graph, cell_type_probabilities, neighborhood_probabilities, 'region', dim, position_dict)\n\n #preferred_node_type = 0 \n for i in list(graph.nodes):\n if ((attribute_dict[i] == 0) or (attribute_dict[i] == 1) or (attribute_dict[i] == 2)):\n #print(i)\n graph_distance = 1\n neighborhood = nx.ego_graph(graph, i, radius = graph_distance)\n neighborhood_nodes = list(neighborhood.nodes)\n\n # Now set the remaining probabilities in the region. \n\n for node in neighborhood_nodes:\n if node != i:\n attribute_dict[node] = assign_labels.sample_cell_type(neighborhood_probabilities[attribute_dict[i]])\n else:\n continue\n\n observed_cell_type_dist, kl = assign_labels.check_cell_type_dist(n_cell_types, attribute_dict, cell_type_probabilities)\n observed_neighborhood_dist, kl_neighbor = assign_labels.check_neighborhood_dist(n_cell_types, attribute_dict, neighborhood_probabilities, graph, 1)\n B = assign_labels.build_assignment_matrix(attribute_dict, n_cell_types)\n np.save(results_dir + '3cellmotif_B_' + str(j) + '.npy', B)\n\n visualization.make_vor(dim, attribute_dict, position_dict, n_cell_types, results_dir, '3cellmotif_B_' + str(j), node_id_list)" ]
[ [ "numpy.array", "numpy.ones", "numpy.load" ] ]
JiachengLi1995/UCTopic
[ "3875f2afbf6b99dfce2d5b5cd930976049746d41" ]
[ "topic_modeling/utils.py" ]
[ "import json\nimport torch\nfrom tqdm import tqdm\nfrom .consts import ARGS, DEVICE, TOKENIZER\n\n\ndef read_data(path):\n\tdata = []\n\twith open(path, encoding='utf8') as f:\n\t\tfor line in f:\n\t\t\tline = json.loads(line)\n\t\t\tdata.append(line)\n\n\treturn data\n\ndef batchify(sentence_dict, phrase_list_sampled, batch_size=32):\n\n\tbatches = []\n\tpointer = 0\n\ttotal_num = len(phrase_list_sampled)\n\twhile pointer < total_num:\n\t\ttext_batch = []\n\t\tspan_batch = []\n\n\t\tfor data_line in phrase_list_sampled[pointer:pointer+batch_size]:\n\n\t\t\tsent_id, start, end, phrase_lemma = data_line\n\t\t\ttext = sentence_dict[sent_id]\n\n\t\t\ttext_batch.append(text)\n\t\t\tspan_batch.append([(start, end)])\n\n\t\tbatches.append((text_batch, span_batch))\n\t\tpointer += batch_size\n\n\treturn batches\n\n\ndef get_features(sentence_dict, phrase_list, model, return_prob=False):\n\n\tall_features = []\n\n\tif return_prob:\n\t\tall_probs = []\n\n\tfor batch in tqdm(batchify(sentence_dict, phrase_list, ARGS.batch_size), ncols=100, desc='Generate all features...'):\n\n\t\ttext_batch, span_batch = batch\n\n\t\tinputs = TOKENIZER(text_batch, entity_spans=span_batch, padding=True, add_prefix_space=True, return_tensors=\"pt\")\n\n\t\tfor k,v in inputs.items():\n\t\t\tinputs[k] = v.to(DEVICE)\n\n\t\twith torch.no_grad():\n\t\t\tluke_outputs, entity_pooling = model(**inputs)\n\n\t\tif return_prob:\n\t\t\tmodel_prob = model.get_cluster_prob(entity_pooling)\n\n\t\t\tall_probs.append(model_prob.detach().cpu())\n\n\t\t\n\t\tall_features.append(entity_pooling.detach().cpu())\n\n\tall_features = torch.cat(all_features, dim=0)\n\tif return_prob:\n\t\tall_probs = torch.cat(all_probs, dim=0)\n\t\treturn all_features, all_probs\n\n\treturn all_features\n\n\ndef get_probs(sentence_dict, phrase_list, model):\n\n\tall_probs = []\n\n\tfor batch in tqdm(batchify(sentence_dict, phrase_list, ARGS.batch_size), ncols=100, desc='Generate all features...'):\n\n\t\ttext_batch, span_batch = batch\n\n\t\tinputs = TOKENIZER(text_batch, entity_spans=span_batch, padding=True, add_prefix_space=True, return_tensors=\"pt\")\n\n\t\tfor k,v in inputs.items():\n\t\t\tinputs[k] = v.to(DEVICE)\n\n\t\twith torch.no_grad():\n\t\t\tluke_outputs, entity_pooling = model(**inputs)\n\n\t\tmodel_prob = model.get_cluster_prob(entity_pooling)\n\n\t\tall_probs.append(model_prob.detach().cpu())\n\n\tall_probs = torch.cat(all_probs, dim=0)\n\treturn all_probs\n\n\n\n\ndef get_all_phrase_bert_features(sentence_dict, phrase_list, model):\n\n\tall_features = []\n\n\twith torch.no_grad():\n\n\t\tfor batch in tqdm(batchify(sentence_dict, phrase_list, ARGS.batch_size), ncols=100, desc='Generate all features...'):\n\n\t\t\ttext_batch, span_batch = batch\n\n\t\t\tphrase_list = []\n\t\t\tfor text, span in zip(text_batch, span_batch):\n\n\t\t\t\tspan = span[0]\n\t\t\t\tstart, end = span\n\t\t\t\tphrase_list.append(text[start:end])\n\n\t\t\trepr_list = model.encode(phrase_list)\n\n\t\t\tall_features+=list(repr_list)\n\n\tall_features = torch.FloatTensor(all_features)\n\t\n\treturn all_features" ]
[ [ "torch.FloatTensor", "torch.no_grad", "torch.cat" ] ]
dquigley-warwick/matador
[ "729e97efb0865c4fff50af87555730ff4b7b6d91" ]
[ "matador/hull/phase_diagram.py" ]
[ "# coding: utf-8\n# Distributed under the terms of the MIT License.\n\n\"\"\" This submodule implements the base PhaseDiagram creator that interfaces\nwith QueryConvexHull and EnsembleHull.\n\n\"\"\"\n\n\nfrom traceback import print_exc\nimport bisect\n\nimport scipy.spatial\nimport numpy as np\n\nfrom matador.utils.hull_utils import (\n barycentric2cart, vertices2plane, vertices2line, FakeHull, is_point_in_triangle\n)\nfrom matador.utils.chem_utils import get_formula_from_stoich\nfrom matador.utils.cursor_utils import get_array_from_cursor, display_results, set_cursor_from_array\n\nEPS = 1e-12\n\n\nclass PhaseDiagram:\n \"\"\" This class encapsulates the actual phase data, e.g. the actual\n energy and compositions found to be stable.\n\n Attributes:\n structures (numpy.ndarray): the array passed to init used to\n make the hull, with the first (num_species-1) columns\n containing normalised concentrations, and the final column\n containing formation energy.\n convex_hull (scipy.spatial.ConvexHull): the actual convex hull\n returned by SciPy.\n formation_key (list): index/key specification of formation energy\n per atom from top level of each document.\n\n \"\"\"\n def __init__(self, cursor, formation_key, dimension):\n \"\"\" Compute the convex hull of data passed, to retrieve hull\n distances and thus stable structures.\n\n Parameters:\n cursor (list[dict]): list of matador documents to make\n phase diagram from.\n formation_key (str or list): location of the formation energy\n inside each document, either a single key or iterable of\n keys to use with `recursive_get`.\n\n \"\"\"\n self._dimension = dimension\n self.cursor = cursor\n self.formation_key = formation_key\n\n structures = np.hstack((\n get_array_from_cursor(cursor, 'concentration').reshape(len(cursor), dimension-1),\n get_array_from_cursor(cursor, self.formation_key).reshape(len(cursor), 1)))\n\n # define self._structure_slice as the filtered array of points actually used to create the convex hull\n # which can include/exclude points from the passed structures. This array is the one indexed by\n # vertices/simplices in ConvexHull\n\n if self._dimension == 3:\n # add a point \"above\" the hull\n # for simple removal of extraneous vertices (e.g. top of 2D hull)\n dummy_point = [0.333, 0.333, 1e5]\n # if ternary, use all structures, not just those with negative eform for compatibility reasons\n self._structure_slice = np.vstack((structures, dummy_point))\n else:\n # filter out those with positive formation energy, to reduce expense computing hull\n self._structure_slice = structures[np.where(structures[:, -1] <= 0 + EPS)]\n\n # filter out \"duplicates\" in _structure_slice\n # this prevents breakages if no structures are on the hull and chempots are duplicated\n # but it might be faster to hardcode this case individually\n self._structure_slice = np.unique(self._structure_slice, axis=0)\n\n # if we only have the chempots (or worse) with negative formation energy, don't even make the hull\n if len(self._structure_slice) <= dimension:\n if len(self._structure_slice) < dimension:\n raise RuntimeError('No chemical potentials on hull... 
either mysterious use of custom chempots, or worry!')\n self.convex_hull = FakeHull()\n else:\n try:\n self.convex_hull = scipy.spatial.ConvexHull(self._structure_slice)\n except scipy.spatial.qhull.QhullError:\n print(self._structure_slice)\n print('Error with QHull, plotting formation energies only...')\n print_exc()\n self.convex_hull = FakeHull()\n\n # remove vertices that have positive formation energy\n filtered_vertices = [vertex for vertex in self.convex_hull.vertices if self._structure_slice[vertex, -1] <= 0 + EPS]\n bad_simplices = set()\n for ind, simplex in enumerate(self.convex_hull.simplices):\n for vertex in simplex:\n if vertex not in filtered_vertices:\n bad_simplices.add(ind)\n\n filtered_simplices = [simplex for ind, simplex in enumerate(self.convex_hull.simplices) if ind not in bad_simplices]\n\n self.convex_hull = FakeHull()\n self.convex_hull.points = self._structure_slice\n self.convex_hull.vertices = list(filtered_vertices)\n self.convex_hull.simplices = list(filtered_simplices)\n\n self.hull_dist = self.get_hull_distances(structures, precompute=True)\n set_cursor_from_array(self.cursor, self.hull_dist, 'hull_distance')\n self.structures = structures\n self.stable_structures = [doc for doc in self.cursor if doc['hull_distance'] < EPS]\n\n def __str__(self):\n \"\"\" Print underlying phase diagram. \"\"\"\n return display_results(self.cursor,\n hull=True,\n colour=False,\n energy_key=self.formation_key,\n sort=False,\n return_str=True)\n\n def get_hull_distances(self, structures, precompute=False, **kwargs):\n \"\"\" Returns array of distances to pre-computed binary or ternary\n hull, from array containing concentrations and energies.\n\n Parameters:\n structures (numpy.ndarray): N x n array of concentrations and\n enthalpies for N structures, with up to 2 columns of\n concentrations and the last column containing the\n structure's formation enthalpy.\n\n Keyword arguments:\n precompute (bool): whether or not to bootstrap hull\n distances from previously computed values at the same\n stoichiometry.\n\n Returns:\n numpy.ndarray: N-dim array storing distances to\n the hull for N structures,\n\n \"\"\"\n\n if precompute:\n # dict with formula keys, containing tuple of pre-computed enthalpy/atom and hull distance\n cached_formula_dists = dict()\n cache_hits = 0\n cache_misses = 0\n\n if isinstance(structures, list):\n structures = np.asarray(structures)\n\n # if only chem pots on hull, dist = energy\n if len(self._structure_slice) == self._dimension:\n hull_dist = np.ones((len(structures)))\n hull_dist = structures[:, -1]\n\n # if binary hull, do binary search\n elif self._dimension == 2:\n tie_line_comp = self._structure_slice[self.convex_hull.vertices, 0]\n tie_line_energy = self._structure_slice[self.convex_hull.vertices, -1]\n tie_line_comp = np.asarray(tie_line_comp)\n tie_line_energy = tie_line_energy[np.argsort(tie_line_comp)]\n tie_line_comp = tie_line_comp[np.argsort(tie_line_comp)]\n\n hull_dist = np.empty((len(structures)))\n hull_dist.fill(np.nan)\n if precompute:\n for ind, _ in enumerate(structures):\n formula = get_formula_from_stoich(self.cursor[ind]['stoichiometry'], sort=True, tex=False)\n if formula in cached_formula_dists:\n hull_dist[ind] = (structures[ind, -1] - cached_formula_dists[formula][0] +\n cached_formula_dists[formula][1])\n cache_hits += 1\n else:\n i = bisect.bisect_left(tie_line_comp, structures[ind, 0])\n gradient, intercept = vertices2line([[tie_line_comp[i-1], tie_line_energy[i-1]],\n [tie_line_comp[i], tie_line_energy[i]]])\n # 
calculate hull_dist\n hull_dist[ind] = structures[ind, -1] - (gradient * structures[ind, 0] + intercept)\n cached_formula_dists[formula] = (structures[ind, -1], hull_dist[ind])\n cache_misses += 1\n else:\n for ind, _ in enumerate(structures):\n i = bisect.bisect_left(tie_line_comp, structures[ind, 0])\n gradient, intercept = vertices2line([[tie_line_comp[i-1], tie_line_energy[i-1]],\n [tie_line_comp[i], tie_line_energy[i]]])\n # calculate hull_dist\n hull_dist[ind] = structures[ind, -1] - (gradient * structures[ind, 0] + intercept)\n\n # if ternary, use barycentric coords\n elif self._dimension == 3:\n # loop through structures and find which plane they correspond to\n # using barycentric coordinates, if a formula has already been\n # computed then calculate delta relative to that and skip\n self.convex_hull.planes = [[self._structure_slice[vertex] for vertex in simplex]\n for simplex in self.convex_hull.simplices]\n structures_finished = [False] * len(structures)\n hull_dist = np.empty(len(structures))\n hull_dist.fill(np.nan)\n cart_planes_inv = []\n planes_height_fn = []\n for ind, plane in enumerate(self.convex_hull.planes):\n cart_planes = barycentric2cart(plane).T\n cart_planes[-1, :] = 1\n # if projection of triangle in 2D is a line, do binary search\n if np.linalg.det(cart_planes) == 0:\n cart_planes_inv.append(None)\n planes_height_fn.append(None)\n else:\n cart_planes_inv.append(np.linalg.inv(cart_planes))\n planes_height_fn.append(vertices2plane(plane))\n for idx, structure in enumerate(structures):\n for ind, plane in enumerate(self.convex_hull.planes):\n if cart_planes_inv[ind] is None:\n continue\n if precompute and get_formula_from_stoich(self.cursor[idx]['stoichiometry'], sort=True,\n tex=False) in cached_formula_dists:\n formula = get_formula_from_stoich(self.cursor[idx]['stoichiometry'], sort=True, tex=False)\n if formula in cached_formula_dists:\n cache_hits += 1\n hull_dist[idx] = (structures[idx, -1] - cached_formula_dists[formula][0] +\n cached_formula_dists[formula][1])\n structures_finished[idx] = True\n\n elif is_point_in_triangle(structure, cart_planes_inv[ind], preprocessed_triangle=True):\n structures_finished[idx] = True\n hull_dist[idx] = planes_height_fn[ind](structure)\n if precompute:\n cached_formula_dists[\n get_formula_from_stoich(self.cursor[idx]['stoichiometry'], sort=True,\n tex=False)] = (structure[-1], hull_dist[idx])\n cache_misses += 1\n break\n\n # mask values very close to 0 with 0\n hull_dist[np.where(np.abs(hull_dist) < EPS)] = 0\n\n failed_structures = []\n for ind, structure in enumerate(structures_finished):\n if not structure:\n failed_structures.append(ind)\n\n if failed_structures:\n raise RuntimeError('There were issues calculating the hull distance for {} structures.'\n .format(len(failed_structures)))\n\n # otherwise, set to zero until proper N-d distance can be implemented\n else:\n raise NotImplementedError(\n \"Unable to compute {dimension}-dimensional hull distances (yet) \"\n \"consider breaking your phase diagram into a pseudo-ternary or pseudo-binary system.\"\n )\n\n if np.isnan(hull_dist).any():\n raise RuntimeError(f\"Some hull distances failed, found NaNs at {np.isnan(hull_dist, where=True)}\")\n\n return hull_dist\n" ]
[ [ "numpy.vstack", "numpy.linalg.inv", "numpy.linalg.det", "numpy.argsort", "numpy.asarray", "numpy.abs", "numpy.isnan", "numpy.where", "numpy.unique" ] ]
atlas-calo-ml/GraphNets4Pions_LLNL
[ "fb25259124711526cc4110461f09db1d03a669f9" ]
[ "train_multiOut_weightedRegress.py" ]
[ "import numpy as np\nimport os\nimport sys\nimport glob\nimport uproot as ur\nimport matplotlib.pyplot as plt\nimport time\nimport seaborn as sns\nimport tensorflow as tf\nfrom graph_nets import utils_np\nfrom graph_nets import utils_tf\nfrom graph_nets.graphs import GraphsTuple\nimport sonnet as snt\nimport argparse\nimport yaml\nimport logging\nimport tensorflow as tf\n\nfrom modules.mpdatagen import MPGraphDataGenerator\nimport modules.multiOutBlock_wWeightedRegress as models\nsns.set_context('poster')\n\nif __name__==\"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--config', default='configs/default.yaml')\n args = parser.parse_args()\n\n config = yaml.load(open(args.config))\n\n data_config = config['data']\n model_config = config['model']\n train_config = config['training']\n\n data_dir = data_config['data_dir']\n num_train_files = data_config['num_train_files']\n num_val_files = data_config['num_val_files']\n batch_size = data_config['batch_size']\n shuffle = data_config['shuffle']\n num_procs = data_config['num_procs']\n preprocess = data_config['preprocess']\n output_dir = data_config['output_dir']\n already_preprocessed = data_config['already_preprocessed']\n\n concat_input = model_config['concat_input']\n\n epochs = train_config['epochs']\n learning_rate = train_config['learning_rate']\n alpha = train_config['alpha']\n os.environ['CUDA_VISIBLE_DEVICES'] = str(train_config['gpu'])\n log_freq = train_config['log_freq']\n save_dir = train_config['save_dir'] + '/Block_'+time.strftime(\"%Y%m%d_%H%M\")+'_'+args.config.replace('.yaml','').split('/')[-1]\n os.makedirs(save_dir, exist_ok=True)\n yaml.dump(config, open(save_dir + '/config.yaml', 'w'))\n\n logging.basicConfig(level=logging.INFO, \n format='%(message)s', \n filename=save_dir + '/output.log')\n logging.info('Using config file {}'.format(args.config))\n # logging.info('Running training for {} with concant_input: {}\\n'.format(particle_type, concat_input))\n\n pi0_files = np.sort(glob.glob(data_dir+'*graphs.v01*/*pi0*/*root'))\n pion_files = np.sort(glob.glob(data_dir+'*graphs.v01*/*pion*/*root'))\n train_start = 10\n train_end = train_start + num_train_files\n val_end = train_end + num_val_files\n pi0_train_files = pi0_files[train_start:train_end]\n pi0_val_files = pi0_files[train_end:val_end]\n pion_train_files = pion_files[train_start:train_end]\n pion_val_files = pion_files[train_end:val_end]\n\n train_output_dir = None\n val_output_dir = None\n \n # Get Data\n if preprocess:\n train_output_dir = output_dir + '/train/'\n val_output_dir = output_dir + '/val/'\n\n if already_preprocessed:\n train_files = np.sort(glob.glob(train_output_dir+'*.p'))[:num_train_files]\n val_files = np.sort(glob.glob(val_output_dir+'*.p'))[:num_val_files]\n\n pi0_train_files = train_files\n pi0_val_files = val_files\n pion_train_files = None\n pion_val_files = None\n\n\n train_output_dir = None\n val_output_dir = None\n\n data_gen_train = MPGraphDataGenerator(pi0_file_list=pi0_train_files,\n pion_file_list=pion_train_files,\n cellGeo_file=data_dir+'graph_examples/cell_geo.root',\n batch_size=batch_size,\n shuffle=shuffle,\n num_procs=num_procs,\n preprocess=preprocess,\n output_dir=train_output_dir)\n\n data_gen_val = MPGraphDataGenerator(pi0_file_list=pi0_val_files,\n pion_file_list=pion_val_files,\n cellGeo_file=data_dir+'graph_examples/cell_geo.root',\n batch_size=batch_size,\n shuffle=shuffle,\n num_procs=num_procs,\n preprocess=preprocess,\n output_dir=val_output_dir)\n\n if preprocess and not 
already_preprocessed:\n exit()\n\n # Optimizer.\n #optimizer = snt.optimizers.Adam(learning_rate)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n\n model = models.MultiOutBlockWeightedRegressModel(global_output_size=1, num_outputs=2, model_config=model_config)\n\n training_loss_epoch = []\n training_loss_regress_epoch = []\n training_loss_class_epoch = []\n val_loss_epoch = []\n val_loss_regress_epoch = []\n val_loss_class_epoch = []\n \n checkpoint = tf.train.Checkpoint(module=model)\n checkpoint_prefix = os.path.join(save_dir, 'latest_model')\n latest = tf.train.latest_checkpoint(save_dir)\n if latest is not None:\n checkpoint.restore(latest)\n else:\n checkpoint.save(checkpoint_prefix)\n\n def convert_to_tuple(graphs):\n nodes = []\n edges = []\n globals = []\n senders = []\n receivers = []\n n_node = []\n n_edge = []\n offset = 0\n\n for graph in graphs:\n nodes.append(graph['nodes'])\n edges.append(graph['edges'])\n globals.append([graph['globals']])\n senders.append(graph['senders'] + offset)\n receivers.append(graph['receivers'] + offset)\n n_node.append(graph['nodes'].shape[:1])\n n_edge.append(graph['edges'].shape[:1])\n\n offset += len(graph['nodes'])\n\n nodes = tf.convert_to_tensor(np.concatenate(nodes))\n edges = tf.convert_to_tensor(np.concatenate(edges))\n globals = tf.convert_to_tensor(np.concatenate(globals))\n senders = tf.convert_to_tensor(np.concatenate(senders))\n receivers = tf.convert_to_tensor(np.concatenate(receivers))\n n_node = tf.convert_to_tensor(np.concatenate(n_node))\n n_edge = tf.convert_to_tensor(np.concatenate(n_edge))\n\n graph = GraphsTuple(\n nodes=nodes,\n edges=edges,\n globals=globals,\n senders=senders,\n receivers=receivers,\n n_node=n_node,\n n_edge=n_edge\n )\n\n return graph\n \n def get_batch(data_iter):\n for graphs, targets in data_iter:\n graphs = convert_to_tuple(graphs)\n targets = tf.convert_to_tensor(targets)\n \n yield graphs, targets\n\n samp_graph, samp_target = next(get_batch(data_gen_train.generator()))\n data_gen_train.kill_procs()\n graph_spec = utils_tf.specs_from_graphs_tuple(samp_graph, True, True, True)\n \n mae_loss = tf.keras.losses.MeanAbsoluteError()\n bce_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\n def loss_fn(targets, regress_preds, class_preds):\n regress_loss = mae_loss(targets[:,:1], regress_preds)\n class_loss = bce_loss(targets[:,1:], class_preds)\n combined_loss = alpha*regress_loss + (1 - alpha)*class_loss \n return regress_loss, class_loss, combined_loss\n\n @tf.function(input_signature=[graph_spec, tf.TensorSpec(shape=[None,2], dtype=tf.float32)])\n def train_step(graphs, targets):\n with tf.GradientTape() as tape:\n regress_output, class_output = model(graphs)\n regress_preds = regress_output.globals\n class_preds = class_output.globals\n regress_loss, class_loss, loss = loss_fn(targets, regress_preds, class_preds)\n \n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n return regress_loss, class_loss, loss\n\n @tf.function(input_signature=[graph_spec, tf.TensorSpec(shape=[None,2], dtype=tf.float32)])\n def val_step(graphs, targets):\n regress_output, class_output = model(graphs)\n regress_preds = regress_output.globals\n class_preds = class_output.globals\n regress_loss, class_loss, loss = loss_fn(targets, regress_preds, class_preds)\n\n return regress_loss, class_loss, loss, regress_preds, class_preds\n\n curr_loss = 1e5\n for e in range(epochs):\n\n logging.info('\\nStarting epoch: {}'.format(e))\n 
print('\\nStarting epoch: {}'.format(e))\n epoch_start = time.time()\n\n training_loss = []\n training_loss_regress = []\n training_loss_class = []\n val_loss = []\n val_loss_regress = []\n val_loss_class = []\n\n # Train\n logging.info('Training...')\n i = 1\n for graph_data_tr, targets_tr in get_batch(data_gen_train.generator()):#train_iter):\n start = time.time()\n #if i==1:\n losses_tr_rg, losses_tr_cl, losses_tr = train_step(graph_data_tr, targets_tr)\n end = time.time()\n\n training_loss.append(losses_tr.numpy())\n training_loss_regress.append(losses_tr_rg.numpy())\n training_loss_class.append(losses_tr_cl.numpy())\n\n if not (i-1)%log_freq:\n logging.info('Iter: {:04d}, Tr_loss_mean: {:.4f}, Tr_loss_rg_mean: {:.4f}, Tr_loss_cl_mean: {:.4f}, Took {:.3f}secs'. \\\n format(i, \n np.mean(training_loss), \n np.mean(training_loss_regress), \n np.mean(training_loss_class), \n end-start))\n # logging.info('Took {:.3f} secs'.format(end-start))\n \n i += 1 \n\n training_loss_epoch.append(training_loss)\n training_loss_regress_epoch.append(training_loss_regress)\n training_loss_class_epoch.append(training_loss_class)\n training_end = time.time()\n\n # validate\n logging.info('\\nValidation...')\n i = 1\n all_targets = []\n all_outputs = []\n for graph_data_val, targets_val in get_batch(data_gen_val.generator()):#val_iter):\n start = time.time()\n losses_val_rg, losses_val_cl, losses_val, regress_vals, class_vals = val_step(graph_data_val, targets_val)\n end = time.time()\n\n targets_val = targets_val.numpy()\n regress_vals = regress_vals.numpy()\n class_vals = class_vals.numpy()\n\n targets_val[:,0] = 10**targets_val[:,0]\n regress_vals = 10**regress_vals\n # targets_val[:,1] = 1 / (1 + np.exp(targets_val[:,1]))\n class_vals = tf.math.sigmoid(class_vals) # 1 / (1 + np.exp(class_vals))\n\n output_vals = np.hstack([regress_vals, class_vals])\n\n val_loss.append(losses_val.numpy())\n val_loss_regress.append(losses_val_rg.numpy())\n val_loss_class.append(losses_val_cl.numpy())\n\n all_targets.append(targets_val)\n all_outputs.append(output_vals)\n\n if not (i-1)%log_freq:\n logging.info('Iter: {:04d}, Val_loss_mean: {:.4f}, Val_loss_rg_mean: {:.4f}, Val_loss_cl_mean: {:.4f}, Took {:.3f}secs'. \\\n format(i, \n np.mean(val_loss), \n np.mean(val_loss_regress), \n np.mean(val_loss_class), \n end-start))\n # logging.info('Took {:.3f} secs'.format(end-start))\n \n i += 1\n\n epoch_end = time.time()\n\n\n all_targets = np.concatenate(all_targets)\n all_outputs = np.concatenate(all_outputs)\n \n val_loss_epoch.append(val_loss)\n val_loss_regress_epoch.append(val_loss_regress)\n val_loss_class_epoch.append(val_loss_class)\n \n np.savez(save_dir+'/losses', \n training=training_loss_epoch, validation=val_loss_epoch,\n training_regress=training_loss_regress_epoch, validation_regress=val_loss_regress_epoch,\n training_class=training_loss_class_epoch, validation_class=val_loss_class_epoch,\n )\n # checkpoint.save(checkpoint_prefix)\n \n val_mins = int((epoch_end - training_end)/60)\n val_secs = int((epoch_end - training_end)%60)\n training_mins = int((training_end - epoch_start)/60)\n training_secs = int((training_end - epoch_start)%60)\n\n logging.info('\\nEpoch {} ended\\nTraining: {:2d}:{:02d}\\nValidation: {:2d}:{:02d}'. \\\n format(e, training_mins, training_secs, val_mins, val_secs))\n print('\\nEpoch {} ended\\nTraining: {:2d}:{:02d}\\nValidation: {:2d}:{:02d}'. 
\\\n format(e, training_mins, training_secs, val_mins, val_secs))\n\n if np.mean(val_loss)<curr_loss:\n logging.info('\\nLoss decreased from {:.4f} to {:.4f}'.format(curr_loss, np.mean(val_loss)))\n logging.info('Checkpointing and saving predictions to:\\n{}'.format(save_dir))\n print('\\nLoss decreased from {:.4f} to {:.4f}'.format(curr_loss, np.mean(val_loss)))\n print('Checkpointing and saving predictions to:\\n{}'.format(save_dir))\n curr_loss = np.mean(val_loss)\n np.savez(save_dir+'/predictions', \n targets=all_targets, \n outputs=all_outputs)\n checkpoint.save(checkpoint_prefix)\n else: \n logging.info('\\nLoss didnt decrease from {:.4f}'.format(curr_loss))\n print('\\nLoss didnt decrease from {:.4f}'.format(curr_loss))\n\n if not (e+1)%20:\n optimizer.learning_rate = optimizer.learning_rate/10\n logging.info('\\nLearning rate decreased to: {:.3e}'.format(optimizer.learning_rate.value()))\n print('\\nLearning rate decreased to: {:.3e}'.format(optimizer.learning_rate.value()))\n\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "numpy.savez", "tensorflow.keras.losses.MeanAbsoluteError", "tensorflow.train.latest_checkpoint", "tensorflow.GradientTape", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.math.sigmoid", "tensorflow.convert_to_tensor", "numpy.hstack", "tensorflow.TensorSpec", "tensorflow.train.Checkpoint", "numpy.concatenate", "numpy.mean" ] ]
gabrieldesousah/awsmetric2csv
[ "adcc8f42a8c1b5eba8a1f7413c24165d2e7d65ff" ]
[ "utils.py" ]
[ "import boto3\nimport datetime\nimport numpy as np\n\ncsv_headers = {\n 'ec2': [\n 'name',\n 'instance',\n 'type',\n 'hypervisor',\n 'virtualization_type',\n 'architecture',\n 'ebs_optimized',\n 'image_id',\n 'key_name',\n 'metric',\n 'low',\n 'high',\n 'ave',\n 'median',\n 'launch_time',\n 'subnet_id',\n 'vpc_id'\n ], 'rds': [\n 'instance',\n 'type',\n 'engine',\n 'engine_version',\n 'license_model',\n 'multi_az',\n 'publicly_accessible',\n 'allocated_storage',\n 'storage_type',\n 'storage_encrypted',\n 'metric',\n 'low',\n 'high',\n 'ave',\n 'median',\n 'launch_time'\n ]}\n\n# create boto clients\ncw = boto3.client('cloudwatch')\nec2 = boto3.resource('ec2')\nrds = boto3.client('rds')\n\n\ndef get_all_instances(resource):\n if resource == 'ec2':\n return ec2.instances.filter(\n Filters=[\n {'Name': 'instance-state-name', 'Values': ['running']}])\n elif resource == 'rds':\n result = rds.describe_db_instances()\n return result['DBInstances']\n else:\n return None\n\n\ndef get_metric(resource, id, period, days, metric='CPUUtilization'):\n # get current time\n now = datetime.datetime.now()\n\n # identify dimension name\n if resource == 'ec2':\n dimension_name = 'InstanceId'\n elif resource == 'rds':\n dimension_name = 'DBInstanceIdentifier'\n else:\n return None\n\n # get metric statistics\n return cw.get_metric_statistics(\n Namespace='AWS/%s' % resource.upper(),\n MetricName=metric,\n Dimensions=[{\n 'Name': dimension_name,\n 'Value': id\n }],\n StartTime=now - datetime.timedelta(days=days),\n EndTime=now,\n Period=period,\n Statistics=['Maximum'],\n Unit='Percent'\n )\n\n\ndef process_metric(result):\n # get all datapoints and add to list\n item_list = []\n for datapoint in result['Datapoints']:\n item_list.append(float(datapoint['Maximum']))\n\n # on empty datapoints, append zero to avoid zero-size array error\n if len(item_list) == 0:\n item_list.append(0)\n\n # return a numpy array\n return np.array(item_list)\n\n\ndef write_to_csv(resource, csvwriter, instance, item_list_arr):\n if resource == 'ec2':\n # get instance name\n if instance.tags:\n name_dict = next(\n (i for i in instance.tags if i['Key'] == 'Name'),\n None)\n else:\n name_dict = None\n\n # write data rows\n csvwriter.writerow([\n '' if name_dict is None else name_dict.get('Value'),\n instance.id,\n instance.instance_type,\n instance.hypervisor,\n instance.virtualization_type,\n instance.architecture,\n instance.ebs_optimized,\n instance.image_id,\n instance.key_name,\n 'CPUUtilization',\n np.min(item_list_arr),\n np.max(item_list_arr),\n np.round(np.average(item_list_arr), 2),\n np.median(item_list_arr),\n instance.launch_time,\n instance.subnet_id,\n instance.vpc_id\n ])\n elif resource == 'rds':\n # write data rows\n csvwriter.writerow([\n instance['DBInstanceIdentifier'],\n instance['DBInstanceClass'],\n instance['Engine'],\n instance['EngineVersion'],\n instance['LicenseModel'],\n instance['MultiAZ'],\n instance['PubliclyAccessible'],\n instance['AllocatedStorage'],\n instance['StorageType'],\n instance['StorageEncrypted'],\n 'CPUUtilization',\n np.min(item_list_arr),\n np.max(item_list_arr),\n np.round(np.average(item_list_arr), 2),\n np.median(item_list_arr),\n instance['InstanceCreateTime']\n ])\n" ]
[ [ "numpy.median", "numpy.max", "numpy.min", "numpy.array", "numpy.average" ] ]
yzygitzh/tutel
[ "ca6f018bf7afae2e37a74f17deddd0f5f91ec2b2" ]
[ "tutel/examples/helloworld.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\n# Recommend to initialize NUMA status at the most program begining (before any other imports)\nfrom tutel import system_init\nsystem_init.init_affinity_at_program_beginning()\n\nimport os\nimport time\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.distributed as dist\nfrom torch import nn\nimport argparse\n\nfrom tutel import moe as tutel_moe\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--local_rank', type=int, default=-1)\nparser.add_argument('--batch_size', type=int, default=16)\nparser.add_argument('--num_tokens', type=int, default=1024)\nparser.add_argument('--model_dim', type=int, default=2048)\nparser.add_argument('--hidden_size', type=int, default=2048)\nparser.add_argument('--num_local_experts', type=int, default=2)\nparser.add_argument('--dtype', type=str, default='float32')\nparser.add_argument('--fp32_gate', default=False, action='store_true')\nparser.add_argument('--top', type=int, default=2)\nparser.add_argument('--l_aux_wt', type=float, default=0.0)\nparser.add_argument('--a2a_ffn_overlap_degree', type=int, default=1)\nparser.add_argument('--num_steps', type=int, default=100)\nparser.add_argument('--save_load_checkpoint', default=False, action='store_true')\nargs = parser.parse_args()\n\nparallel_env = system_init.init_data_model_parallel()\ndist_rank, dist_world_size, dist_print = parallel_env.global_rank, parallel_env.global_size, parallel_env.dist_print\nargs.local_rank = parallel_env.local_device.index\n\nbatch_size = args.batch_size\nnum_tokens = args.num_tokens\nmodel_dim = args.model_dim\nhidden_size = args.hidden_size\nnum_local_experts = args.num_local_experts\ntop_value = args.top\na2a_ffn_overlap_degree = args.a2a_ffn_overlap_degree\ndevice = parallel_env.local_device\n\nif args.dtype == 'float32':\n torch.set_default_dtype(torch.float32)\nelif args.dtype == 'float64':\n torch.set_default_dtype(torch.float64)\nelif args.dtype == 'float16':\n torch.set_default_dtype(torch.float16)\nelif args.dtype == 'bfloat16':\n torch.set_default_dtype(torch.bfloat16)\nelse:\n raise Exception('Unrecognized data type specified: %s' % args.dtype)\n\n\nclass ExampleModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n self._moe_layer = tutel_moe.moe_layer(\n gate_type = {'type': 'top', 'k': top_value, 'fp32_gate': args.fp32_gate},\n experts = {'type': 'ffn', 'count_per_node': num_local_experts, 'hidden_size_per_expert': hidden_size, 'activation_fn': lambda x: F.relu(x)},\n model_dim = model_dim,\n scan_expert_func = lambda name, param: setattr(param, 'skip_allreduce', True),\n seeds = (1, dist_rank + 1, 1),\n a2a_ffn_overlap_degree = a2a_ffn_overlap_degree,\n ).to(device)\n\n # Summary of different parameter types: gate, local_experts\n local_count = sum([torch.numel(param) for name, param in self._moe_layer.get_parameter_iterator(param_type='local_experts')])\n shared_count = sum([torch.numel(param) for name, param in self._moe_layer.get_parameter_iterator(param_type='gate')])\n dist_print('[Statistics] param count for MoE local_experts = %s, param count for MoE gate = %s.\\n' % (local_count, shared_count))\n\n def forward(self, input):\n result = self._moe_layer(input)\n result = F.log_softmax(torch.sum(result, dim=2), dim=1)\n return result\n\nmodel = ExampleModel()\ndist_print(model)\n\nif args.save_load_checkpoint:\n checkpoint_path = './distributed-hellworld-%d-in-%d.ckpt' % 
(parallel_env.global_rank, parallel_env.global_size)\n if os.path.exists(checkpoint_path):\n model.load_state_dict(torch.load(checkpoint_path))\n else:\n print('Checkpoint not loaded: file `%s` is not found' % checkpoint_path)\n\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-5)\n\ntorch.manual_seed(0)\nx = torch.tensor(torch.randn([batch_size, num_tokens, model_dim], dtype=torch.float32, device='cpu').detach().numpy(), dtype=torch.get_default_dtype(), requires_grad=True, device=device)\ny = torch.LongTensor(batch_size).random_(1).to(device)\n\ntuples = (dist_world_size, args.dtype, model_dim, hidden_size, batch_size * num_tokens, num_local_experts, top_value, a2a_ffn_overlap_degree, device)\ndist_print('[Benchmark] world_size = %s, dtype = %s, model_dim = %s, hidden_size = %s, samples = %s, num_local_experts = %s, topK = %s, a2a_ffn_overlap_degree = %s, device = `%s`' % tuples)\n\naverage_time, num_steps = 0, args.num_steps\n\nparams_for_all_reduce = [p for p in model.parameters() if not hasattr(p, 'skip_allreduce') and getattr(p, 'requires_grad', False) and p.grad is not None]\n\nfor i in range(num_steps):\n\n torch.cuda.synchronize()\n t_start = time.time()\n optimizer.zero_grad()\n\n output = model(x)\n loss = F.nll_loss(output, y)\n if args.l_aux_wt:\n loss += args.l_aux_wt * model._moe_layer.l_aux\n loss.backward()\n if dist_world_size > 1:\n for p in params_for_all_reduce:\n p.grad /= dist_world_size\n dist.all_reduce(p.grad)\n optimizer.step()\n\n torch.cuda.synchronize()\n t_stop = time.time()\n dist_print('STEP-%s: DONE, loss = %s, step_time = %s sec.' % (i, float(loss.data), t_stop - t_start))\n\n if i + 10 >= num_steps:\n average_time += t_stop - t_start\n\naverage_time /= 10\ndist_print('\\n[Summary] Average synchronized step_time = %s sec.' % average_time)\n\nif args.save_load_checkpoint:\n torch.save(model.state_dict(), checkpoint_path)\n" ]
[ [ "torch.sum", "torch.get_default_dtype", "torch.load", "torch.randn", "torch.nn.functional.nll_loss", "torch.manual_seed", "torch.cuda.synchronize", "torch.set_default_dtype", "torch.nn.functional.relu", "torch.distributed.all_reduce", "torch.numel", "torch.LongTensor" ] ]
ZYZMarshall/Twitter-Emotion-Radar
[ "9d6ebf2464dfa1864268a9cdf69991e6cef542a3" ]
[ "Tweet_Streamer_Using_Tweepy/tweepy_streamer.py" ]
[ "import sys\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport twitter_credentials\n\n\"\"\"Tweepy module is used to stream live tweets directly from Twitter in real-time. \nThe tweets are visualized and then the TextBlob module is used to do sentiment analysis on the tweets.\"\"\"\nfrom tweepy import API \nfrom tweepy import Cursor\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport numpy as np\nimport pandas as pd\n\n\n# # # # TWITTER CLIENT # # # #\nclass TwitterClient():\n def __init__(self, twitter_user=None):\n self.auth = TwitterAuthenticator().authenticate_twitter_app()\n self.twitter_client = API(self.auth)\n\n self.twitter_user = twitter_user\n\n def get_twitter_client_api(self):\n return self.twitter_client\n\n def get_user_timeline_tweets(self, num_tweets):\n tweets = []\n for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):\n tweets.append(tweet)\n return tweets\n\n def get_friend_list(self, num_friends):\n friend_list = []\n for friend in Cursor(self.twitter_client.friends, id=self.twitter_user).items(num_friends):\n friend_list.append(friend)\n return friend_list\n\n def get_home_timeline_tweets(self, num_tweets):\n home_timeline_tweets = []\n for tweet in Cursor(self.twitter_client.home_timeline, id=self.twitter_user).items(num_tweets):\n home_timeline_tweets.append(tweet)\n return home_timeline_tweets\n\n\n# # # # TWITTER AUTHENTICATER # # # #\nclass TwitterAuthenticator():\n\n def authenticate_twitter_app(self):\n auth = OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\n return auth\n\n# # # # TWITTER STREAMER # # # #\nclass TwitterStreamer():\n \"\"\"\n Class for streaming and processing live tweets.\n \"\"\"\n def __init__(self):\n self.twitter_autenticator = TwitterAuthenticator() \n\n def stream_tweets(self, fetched_tweets_filename, hash_tag_list):\n # This handles Twitter authetification and the connection to Twitter Streaming API\n listener = TwitterListener(fetched_tweets_filename)\n auth = self.twitter_autenticator.authenticate_twitter_app() \n stream = Stream(auth, listener)\n\n # This line filter Twitter Streams to capture data by the keywords: \n stream.filter(track=hash_tag_list)\n\n\n# # # # TWITTER STREAM LISTENER # # # #\nclass TwitterListener(StreamListener):\n \"\"\"\n This is a basic listener that just prints received tweets to stdout.\n \"\"\"\n def __init__(self, fetched_tweets_filename):\n self.fetched_tweets_filename = fetched_tweets_filename\n\n def on_data(self, data):\n try:\n print(data)\n with open(self.fetched_tweets_filename, 'a') as tf:\n tf.write(data)\n return True\n except BaseException as e:\n print(\"Error on_data %s\" % str(e))\n return True\n \n def on_error(self, status):\n if status == 420:\n # Returning False on_data method in case rate limit occurs.\n return False\n print(status)\n\n\nclass TweetAnalyzer():\n \"\"\"\n Functionality for analyzing and categorizing content from tweets.\n \"\"\"\n def tweets_to_data_frame(self, tweets):\n df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])\n\n df['id'] = np.array([tweet.id for tweet in tweets])\n df['len'] = np.array([len(tweet.text) for tweet in tweets])\n df['date'] = np.array([tweet.created_at for tweet in tweets])\n df['source'] = np.array([tweet.source for 
tweet in tweets])\n df['likes'] = np.array([tweet.favorite_count for tweet in tweets])\n df['retweets'] = np.array([tweet.retweet_count for tweet in tweets])\n\n return df\n\n \nif __name__ == '__main__':\n\n twitter_client = TwitterClient()\n tweet_analyzer = TweetAnalyzer()\n\n api = twitter_client.get_twitter_client_api()\n\n tweets = api.user_timeline(screen_name=\"realDonaldTrump\", count=20)\n\n #print(dir(tweets[0]))\n #print(tweets[0].retweet_count)\n\n df = tweet_analyzer.tweets_to_data_frame(tweets)\n \n print(df.head(10))" ]
[ [ "numpy.array", "pandas.DataFrame" ] ]
hilarioushappystar/SpiderSolitaireProject
[ "4fa04d41fbdb4d916919004966594f39ec619665" ]
[ "ai_module.py" ]
[ "###########################################\n# This is the ai_module class\n# This class handles the \"clever stuff\" such as working out best move(s) to play\n###########################################\n\n# achieves 10% win rate in 50 games (more thorough testing may be warranted)\n\nimport numpy as np\nfrom numpy import random \nfrom gamestate import Gamestate\nfrom card import Card\nimport copy\n\ndef evaluate_position(gs, cfgh): \n myeval = 100 * gs.countsuitsremoved() + gs.countsuitedbuilds() + 10*(44 - gs.counthiddencards())\n \n # do columns without face-down cards\n for foo in range(10):\n if( len( gs.columns[foo] ) == 0):\n myeval += cfgh.emptycolumnsweight\n elif( gs.columns[foo][0].isvisible):\n myeval += cfgh.emptycolumnsweight\n \n # do pollution \n for foo in range(10):\n poll = gs.compute_pollution(foo)\n if( poll == 1):\n myeval += 2 * cfgh.pollutionweight\n if( poll == 2):\n myeval += 1 * cfgh.pollutionweight\n \n # do max run length (preparing to complete suits is very important at the 4-suit level!)\n for suit in ['c','d','h','s']:\n # no scientific basis for choosing these numbers!\n tempdict = {6:1.06, 7:2.07, 8:5.08, 9:10.09, 10:20.10, 11:30.11, 12:40.12}\n runlength = gs.compute_maxrunlength(suit)\n if( runlength in tempdict):\n myeval += tempdict[runlength] * cfgh.maxrunlengthweight \n \n return myeval\n\n\n# choose the best moveblock to play \ndef choose_moveblock(gs, cfgh):\n random.seed(123456)\n threshold_param = 2\n game_result = 'STILLGOING'\n prev_eval = evaluate_position(gs,cfgh)\n \n numtries = cfgh.moveblocktries\n bestsofar_moveblock = []\n bestsofar_eval = evaluate_position(gs,cfgh)\n for mytry in range(numtries):\n \n moveblock = np.random.randint(1000,size=cfgh.moveblocklength)\n \n \n # randomly truncate \n randsize = 1 + random.randint(cfgh.moveblocklength-1)\n moveblock = moveblock[0:randsize]\n \n # now attempt both static and look-ahead evaluation \n gs2 = copy.deepcopy(gs)\n gs2.executemoveblock(moveblock,threshold_param,False)\n gs3 = copy.deepcopy(gs)\n gs3.executemoveblock(moveblock,threshold_param,True)\n avg_eval = 0.5 * (evaluate_position(gs2,cfgh) + evaluate_position(gs3,cfgh))\n \n if( avg_eval > bestsofar_eval):\n bestsofar_eval = avg_eval \n bestsofar_moveblock = moveblock\n if( avg_eval == bestsofar_eval and len(moveblock) < len(bestsofar_moveblock)):\n bestsofar_eval = avg_eval \n bestsofar_moveblock = moveblock \n movesequence = gs.executemoveblock(bestsofar_moveblock,threshold_param,True)\n \n if( evaluate_position(gs,cfgh) <= prev_eval):\n if( len( gs.stock[0]) > 0):\n gs.dealrow()\n else:\n if gs.iswon():\n game_result = 'RESULT = WIN'\n else:\n game_result = 'RESULT = LOSE'\n return (movesequence, game_result)" ]
[ [ "numpy.random.randint", "numpy.random.seed" ] ]
jwoos/python_digitrecognizer
[ "4a06cc7b7ee32aa6c66a391cd6595a2b5fbad38b" ]
[ "tests/unit/test_pool.py" ]
[ "from unittest import TestCase, mock\n\nimport layers\n\nimport numpy as np\nimport pytest\n\n\nclass TestPoolForward(TestCase):\n def test_max(self):\n pool = layers.pool.Pool(\n size=2,\n stride=2,\n operation=np.max,\n )\n\n data = np.zeros((4, 4, 3))\n data[:,:,0] = np.array([\n [1, 1, 2, 4],\n [5, 6, 7, 8],\n [3, 2, 1, 0],\n [1, 2, 3, 4],\n ])\n data[:,:,1] = np.array([\n [1, 1, 2, 4],\n [5, 6, 7, 8],\n [3, 2, 1, 0],\n [1, 2, 3, 4],\n ])\n data[:,:,2] = np.array([\n [1, 1, 2, 4],\n [5, 6, 7, 8],\n [3, 2, 1, 0],\n [1, 2, 3, 4],\n ])\n\n result = pool.forward(data)\n expected = np.zeros((2, 2, 3))\n expected[:,:,0] = np.array([\n [6, 8],\n [3, 4],\n ])\n expected[:,:,1] = np.array([\n [6, 8],\n [3, 4],\n ])\n expected[:,:,2] = np.array([\n [6, 8],\n [3, 4],\n ])\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
Originofamonia/differential-privacy-library
[ "a889ba0f8d19c77e2b0369451ebc392969fac685" ]
[ "diffprivlib/models/naive_bayes.py" ]
[ "# MIT License\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nGaussian Naive Bayes classifier satisfying differential privacy\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport sklearn.naive_bayes as sk_nb\nfrom sklearn.utils import check_X_y\nfrom sklearn.utils.multiclass import _check_partial_fit_first_call\n\nfrom diffprivlib.accountant import BudgetAccountant\nfrom diffprivlib.mechanisms import LaplaceBoundedDomain, GeometricTruncated, LaplaceTruncated\nfrom diffprivlib.utils import PrivacyLeakWarning, warn_unused_args\nfrom diffprivlib.validation import check_bounds, clip_to_bounds\n\n\nclass GaussianNB(sk_nb.GaussianNB):\n r\"\"\"Gaussian Naive Bayes (GaussianNB) with differential privacy\n\n Inherits the :class:`sklearn.naive_bayes.GaussianNB` class from Scikit Learn and adds noise to satisfy differential\n privacy to the learned means and variances. Adapted from the work presented in [VSB13]_.\n\n Parameters\n ----------\n epsilon : float, default: 1.0\n Privacy parameter :math:`\\epsilon` for the model.\n\n bounds: tuple, optional\n Bounds of the data, provided as a tuple of the form (min, max). `min` and `max` can either be scalars, covering\n the min/max of the entire data, or vectors with one entry per feature. If not provided, the bounds are computed\n on the data when ``.fit()`` is first called, resulting in a :class:`.PrivacyLeakWarning`.\n\n priors : array-like, shape (n_classes,)\n Prior probabilities of the classes. If specified the priors are not adjusted according to the data.\n\n var_smoothing : float, default: 1e-9\n Portion of the largest variance of all features that is added to variances for calculation stability.\n\n accountant : BudgetAccountant, optional\n Accountant to keep track of privacy budget.\n\n Attributes\n ----------\n class_prior_ : array, shape (n_classes,)\n probability of each class.\n\n class_count_ : array, shape (n_classes,)\n number of training samples observed in each class.\n\n theta_ : array, shape (n_classes, n_features)\n mean of each feature per class\n\n sigma_ : array, shape (n_classes, n_features)\n variance of each feature per class\n\n epsilon_ : float\n absolute additive value to variances (unrelated to ``epsilon`` parameter for differential privacy)\n\n References\n ----------\n .. [VSB13] Vaidya, Jaideep, Basit Shafiq, Anirban Basu, and Yuan Hong. 
\"Differentially private naive bayes\n classification.\" In 2013 IEEE/WIC/ACM International Joint Conferences on Web Intelligence (WI) and Intelligent\n Agent Technologies (IAT), vol. 1, pp. 571-576. IEEE, 2013.\n\n \"\"\"\n\n def __init__(self, epsilon=1.0, bounds=None, priors=None, var_smoothing=1e-9, accountant=None):\n super().__init__(priors=priors, var_smoothing=var_smoothing)\n\n self.epsilon = epsilon\n self.bounds = bounds\n self.accountant = BudgetAccountant.load_default(accountant)\n\n def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None):\n self.accountant.check(self.epsilon, 0)\n\n if sample_weight is not None:\n warn_unused_args(\"sample_weight\")\n\n X, y = check_X_y(X, y)\n\n if self.bounds is None:\n warnings.warn(\"Bounds have not been specified and will be calculated on the data provided. This will \"\n \"result in additional privacy leakage. To ensure differential privacy and no additional \"\n \"privacy leakage, specify bounds for each dimension.\", PrivacyLeakWarning)\n self.bounds = (np.min(X, axis=0), np.max(X, axis=0))\n\n self.bounds = check_bounds(self.bounds, shape=X.shape[1])\n X = clip_to_bounds(X, self.bounds)\n\n self.epsilon_ = self.var_smoothing\n\n if _refit:\n self.classes_ = None\n\n if _check_partial_fit_first_call(self, classes):\n n_features = X.shape[1]\n n_classes = len(self.classes_)\n self.theta_ = np.zeros((n_classes, n_features))\n self.sigma_ = np.zeros((n_classes, n_features))\n\n self.class_count_ = np.zeros(n_classes, dtype=np.float64)\n\n if self.priors is not None:\n priors = np.asarray(self.priors)\n\n if len(priors) != n_classes:\n raise ValueError(\"Number of priors must match number of classes.\")\n if not np.isclose(priors.sum(), 1.0):\n raise ValueError(\"The sum of the priors should be 1.\")\n if (priors < 0).any():\n raise ValueError(\"Priors must be non-negative.\")\n self.class_prior_ = priors\n else:\n # Initialize the priors to zeros for each class\n self.class_prior_ = np.zeros(len(self.classes_), dtype=np.float64)\n else:\n if X.shape[1] != self.theta_.shape[1]:\n raise ValueError(\"Number of features %d does not match previous data %d.\" %\n (X.shape[1], self.theta_.shape[1]))\n # Put epsilon back in each time\n self.sigma_[:, :] -= self.epsilon_\n\n classes = self.classes_\n\n unique_y = np.unique(y)\n unique_y_in_classes = np.in1d(unique_y, classes)\n\n if not np.all(unique_y_in_classes):\n raise ValueError(\"The target label(s) %s in y do not exist in the initial classes %s\" %\n (unique_y[~unique_y_in_classes], classes))\n\n noisy_class_counts = self._noisy_class_counts(y)\n\n for _i, y_i in enumerate(unique_y):\n i = classes.searchsorted(y_i)\n X_i = X[y == y_i, :]\n\n n_i = noisy_class_counts[_i]\n\n new_theta, new_sigma = self._update_mean_variance(self.class_count_[i], self.theta_[i, :],\n self.sigma_[i, :], X_i, n_noisy=n_i)\n\n self.theta_[i, :] = new_theta\n self.sigma_[i, :] = new_sigma\n self.class_count_[i] += n_i\n\n self.sigma_[:, :] += self.epsilon_\n\n # Update if only no priors is provided\n if self.priors is None:\n # Empirical prior, with sample_weight taken into account\n self.class_prior_ = self.class_count_ / self.class_count_.sum()\n\n self.accountant.spend(self.epsilon, 0)\n\n return self\n\n def _update_mean_variance(self, n_past, mu, var, X, sample_weight=None, n_noisy=None):\n \"\"\"Compute online update of Gaussian mean and variance.\n\n Given starting sample count, mean, and variance, a new set of points X return the updated mean and variance.\n (NB - each dimension 
(column) in X is treated as independent -- you get variance, not covariance).\n\n Can take scalar mean and variance, or vector mean and variance to simultaneously update a number of\n independent Gaussians.\n\n See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:\n\n http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf\n\n Parameters\n ----------\n n_past : int\n Number of samples represented in old mean and variance. If sample weights were given, this should contain\n the sum of sample weights represented in old mean and variance.\n\n mu : array-like, shape (number of Gaussians,)\n Means for Gaussians in original set.\n\n var : array-like, shape (number of Gaussians,)\n Variances for Gaussians in original set.\n\n sample_weight : ignored\n Ignored in diffprivlib.\n\n n_noisy : int, optional\n Noisy count of the given class, satisfying differential privacy.\n\n Returns\n -------\n total_mu : array-like, shape (number of Gaussians,)\n Updated mean for each Gaussian over the combined set.\n\n total_var : array-like, shape (number of Gaussians,)\n Updated variance for each Gaussian over the combined set.\n \"\"\"\n if n_noisy is None:\n warnings.warn(\"Noisy class count has not been specified and will be read from the data. To use this \"\n \"method correctly, make sure it is run by the parent GaussianNB class.\", PrivacyLeakWarning)\n n_noisy = X.shape[0]\n\n if not n_noisy:\n return mu, var\n\n if sample_weight is not None:\n warn_unused_args(\"sample_weight\")\n\n # Split epsilon between each feature, using 1/3 of total budget for each of mean and variance\n n_features = X.shape[1]\n local_epsilon = self.epsilon / 3 / n_features\n\n new_mu = np.zeros((n_features,))\n new_var = np.zeros((n_features,))\n\n for feature in range(n_features):\n _X = X[:, feature]\n lower, upper = self.bounds[0][feature], self.bounds[1][feature]\n local_diameter = upper - lower\n\n mech_mu = LaplaceTruncated(epsilon=local_epsilon, delta=0, sensitivity=local_diameter,\n lower=lower * n_noisy, upper=upper * n_noisy)\n _mu = mech_mu.randomise(_X.sum()) / n_noisy\n\n local_sq_sens = max(_mu - lower, upper - _mu) ** 2\n mech_var = LaplaceBoundedDomain(epsilon=local_epsilon, delta=0, sensitivity=local_sq_sens, lower=0,\n upper=local_sq_sens * n_noisy)\n _var = mech_var.randomise(((_X - _mu) ** 2).sum()) / n_noisy\n\n new_mu[feature] = _mu\n new_var[feature] = _var\n\n if n_past == 0:\n return new_mu, new_var\n\n n_total = float(n_past + n_noisy)\n\n # Combine mean of old and new data, taking into consideration\n # (weighted) number of observations\n total_mu = (n_noisy * new_mu + n_past * mu) / n_total\n\n # Combine variance of old and new data, taking into consideration\n # (weighted) number of observations. 
This is achieved by combining\n # the sum-of-squared-differences (ssd)\n old_ssd = n_past * var\n new_ssd = n_noisy * new_var\n total_ssd = old_ssd + new_ssd + (n_past / float(n_noisy * n_total)) * (n_noisy * mu - n_noisy * new_mu) ** 2\n total_var = total_ssd / n_total\n\n return total_mu, total_var\n\n def _noisy_class_counts(self, y):\n unique_y = np.unique(y)\n n_total = y.shape[0]\n\n # Use 1/3 of total epsilon budget for getting noisy class counts\n mech = GeometricTruncated(epsilon=self.epsilon / 3, sensitivity=1, lower=1, upper=n_total)\n noisy_counts = np.array([mech.randomise((y == y_i).sum()) for y_i in unique_y])\n\n argsort = np.argsort(noisy_counts)\n i = 0 if noisy_counts.sum() > n_total else len(unique_y) - 1\n\n while np.sum(noisy_counts) != n_total:\n _i = argsort[i]\n sgn = np.sign(n_total - noisy_counts.sum())\n noisy_counts[_i] = np.clip(noisy_counts[_i] + sgn, 1, n_total)\n\n i = (i - sgn) % len(unique_y)\n\n return noisy_counts\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.in1d", "numpy.argsort", "numpy.asarray", "numpy.all", "numpy.clip", "numpy.min", "sklearn.utils.multiclass._check_partial_fit_first_call", "numpy.max", "sklearn.utils.check_X_y", "numpy.unique" ] ]
hx89/FBGEMM
[ "03a04eb7ecea8ee0afea42eaae7b2c2119a38886" ]
[ "fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport itertools\nimport logging\nimport math\nimport random\nimport statistics\nimport time\nfrom typing import Callable, List, Optional, Tuple\n\nimport click\nimport numpy as np\nimport torch\n\nhaveAIBench = False\ntry:\n from aibench_observer.utils.observer import emitMetric\n\n haveAIBench = True\nexcept Exception:\n haveAIBench = False\n\nfrom fbgemm_gpu.split_table_batched_embeddings_ops import (\n BoundsCheckMode,\n CacheAlgorithm,\n ComputeDevice,\n DenseTableBatchedEmbeddingBagsCodegen,\n EmbeddingLocation,\n OptimType,\n SparseType,\n SplitTableBatchedEmbeddingBagsCodegen,\n IntNBitTableBatchedEmbeddingBagsCodegen,\n PoolingMode,\n)\nfrom numpy.random import default_rng\nfrom torch import Tensor\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef round_up(a: int, b: int) -> int:\n return int((a + b - 1) // b) * b\n\n\ndef get_device() -> torch.device:\n return (\n torch.cuda.current_device()\n if torch.cuda.is_available()\n else torch.device(\"cpu\")\n )\n\n\n# Merged indices with shape (T, B, L) -> (flattened indices with shape\n# (T * B * L), offsets with shape (T * B + 1))\ndef get_table_batched_offsets_from_dense(\n merged_indices: Tensor,\n) -> Tuple[Tensor, Tensor]:\n (T, B, L) = merged_indices.size()\n lengths = np.ones((T, B)) * L\n flat_lengths = lengths.flatten()\n return (\n merged_indices.long().contiguous().view(-1).to(get_device()),\n torch.tensor(([0] + np.cumsum(flat_lengths).tolist())).long().to(get_device()),\n )\n\n\ndef get_offsets_from_dense(indices: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n (B, L) = indices.size()\n return (\n indices.contiguous().view(-1),\n torch.tensor(\n np.cumsum(np.asarray([0] + [L for _ in range(B)])[:-1]).astype(np.int64)\n ),\n )\n\n\ndef b_indices(\n b: Callable[..., torch.Tensor],\n x: torch.Tensor,\n per_sample_weights: Optional[torch.Tensor] = None,\n use_cpu: bool = False,\n do_pooling: bool = True,\n) -> torch.Tensor:\n (indices, offsets) = get_offsets_from_dense(x)\n if do_pooling:\n return b(\n indices.cuda(),\n offsets.cuda(),\n per_sample_weights=per_sample_weights,\n )\n else:\n return b(indices.cuda())\n\n\ndef generate_requests(\n iters: int,\n B: int,\n T: int,\n L: int,\n E: int,\n # inter-batch indices reuse rate\n reuse: float = 0.0,\n # alpha <= 1.0: use uniform distribution\n # alpha > 1.0: use zipf distribution\n alpha: float = 1.0,\n weights_precision: SparseType = SparseType.FP32,\n weighted: bool = False,\n requests_data_file: Optional[str] = None,\n # Comma-separated list of table numbers\n tables: Optional[str] = None,\n) -> List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]]:\n if requests_data_file is not None:\n indices_tensor, offsets_tensor, lengths_tensor = torch.load(requests_data_file)\n\n average_L = 0\n if tables is not None:\n emb_tables = tuple(int(x) for x in tables.split(\",\"))\n indices = torch.zeros(0, dtype=indices_tensor.dtype)\n offsets = torch.zeros(1, dtype=offsets_tensor.dtype)\n total_L = 0\n for t in emb_tables:\n t_offsets = offsets_tensor[B * t : B * (t + 1) + 1]\n total_L += t_offsets[-1] - t_offsets[0]\n indices = torch.cat(\n (indices, indices_tensor[t_offsets[0] : t_offsets[-1]])\n )\n offsets = torch.cat(\n (\n offsets,\n t_offsets[1:] - t_offsets[0] + offsets[-1],\n )\n )\n indices_tensor = indices\n 
offsets_tensor = offsets\n average_L = int(total_L / B)\n\n assert np.prod(offsets_tensor.size()) - 1 == np.prod((T, B)), (\n f\"Requested tables: {emb_tables} \"\n f\"does not conform to inputs (T, B) = ({T}, {B}).\"\n )\n logging.warning(\n f\"Using (indices = {indices_tensor.size()}, offsets = {offsets_tensor.size()}) based \"\n f\"on tables: {emb_tables}\"\n )\n else:\n average_L = int((offsets_tensor[-1] - offsets_tensor[0]) / B)\n assert (np.prod(offsets_tensor.size()) - 1) == np.prod((T, B)), (\n f\"Data file (indices = {indices_tensor.size()}, \"\n f\"offsets = {offsets_tensor.size()}, lengths = {lengths_tensor.size()}) \"\n f\"does not conform to inputs (T, B) = ({T}, {B}).\"\n )\n\n assert (\n L == average_L\n ), f\"Requested L does not align with provided data file ({L} vs. {average_L})\"\n assert E > max(indices_tensor), (\n f\"Number of embeddings is not enough to support maximum index \"\n f\"provided by data file {E} vs. {max(indices_tensor)}\"\n )\n\n weights_tensor = (\n None\n if not weighted\n else torch.randn(indices_tensor.size(), device=get_device())\n )\n rs = []\n for _ in range(iters):\n rs.append(\n (\n indices_tensor.to(get_device()),\n offsets_tensor.to(get_device()),\n weights_tensor,\n )\n )\n return rs\n\n if alpha <= 1.0:\n all_indices = torch.randint(\n low=0,\n high=E,\n size=(iters, T, B, L),\n device=get_device(),\n dtype=torch.int32,\n )\n # each bag is usually sorted\n (all_indices, _) = torch.sort(all_indices)\n all_indices = all_indices.reshape(iters, T, B * L)\n else:\n assert E >= L, \"num-embeddings must be greater than equal to bag-size\"\n # oversample and then remove duplicates to obtain sampling without\n # replacement\n all_indices = (np.random.zipf(a=alpha, size=(iters, T, B, 3 * L)) - 1) % E\n for index_tuple in itertools.product(range(iters), range(T), range(B)):\n # sample without replacement from\n # https://stats.stackexchange.com/questions/20590/how-do-i-sample-without-replacement-using-a-sampling-with-replacement-function\n r = set()\n for x in all_indices[index_tuple]:\n if x not in r:\n r.add(x)\n if len(r) == L:\n break\n assert (len(r)) == L, \"too skewed distribution (alpha too big)\"\n all_indices[index_tuple][:L] = list(r)\n # shuffle indices so we don't have unintended spatial locality\n all_indices = torch.as_tensor(all_indices[:, :, :, :L])\n rng = default_rng()\n permutation = torch.as_tensor(\n rng.choice(E, size=all_indices.max().item() + 1, replace=False)\n )\n all_indices = permutation.gather(0, all_indices.flatten())\n all_indices = all_indices.to(get_device()).int().reshape(iters, T, B * L)\n for it in range(iters - 1):\n for t in range(T):\n reused_indices = torch.randperm(B * L, device=get_device())[\n : int(B * L * reuse)\n ]\n all_indices[it + 1, t, reused_indices] = all_indices[it, t, reused_indices]\n\n rs = []\n for it in range(iters):\n weights_tensor = (\n None if not weighted else torch.randn(T * B * L, device=get_device())\n )\n rs.append(\n get_table_batched_offsets_from_dense(all_indices[it].view(T, B, L))\n + (weights_tensor,)\n )\n return rs\n\n\ndef benchmark_requests(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]],\n func: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],\n flush_gpu_cache_size_mb: int = 0,\n check_median: bool = False,\n) -> float:\n times = []\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n for (indices, offsets, weights) in 
requests:\n start_time = time.time()\n if torch.cuda.is_available():\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n )\n torch.cuda.synchronize()\n start_event.record()\n func(indices, offsets, weights)\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n it_time = start_event.elapsed_time(end_event) * 1.0e-3\n times.append(it_time)\n else:\n it_time = time.time() - start_time\n times.append(it_time)\n avg_time = sum(times) / len(requests)\n median_time = statistics.median(times)\n return median_time if check_median else avg_time\n\n\ndef benchmark_requests_refer(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]],\n T: int,\n B: int,\n L: int,\n E: int,\n D: int,\n pooling_mode: str,\n weighted: bool,\n flush_gpu_cache_size_mb: int = 0,\n check_median: bool = False,\n) -> float:\n do_pooling = pooling_mode in [\"sum\", \"mean\"]\n if do_pooling:\n nn_embedding_list = [\n torch.nn.EmbeddingBag(E, D, mode=pooling_mode, sparse=True).cuda()\n ] * T\n else:\n nn_embedding_list = [torch.nn.Embedding(E, D, sparse=True).cuda()] * T\n\n times = []\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n for (indices, _, weights) in requests:\n indices_list = indices.view(T, B, L).split(1)\n\n if weighted:\n assert weights is not None\n weights_list = weights.view(T, B, L).split(1)\n\n start_time = time.time()\n if torch.cuda.is_available():\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n )\n torch.cuda.synchronize()\n start_event.record()\n\n nn_embedding_output = (\n [\n b_indices(nn_embedding, x, use_cpu=False, do_pooling=do_pooling)\n for (nn_embedding, x) in zip(nn_embedding_list, indices_list)\n ]\n if not weighted\n else [\n b_indices(\n nn_embedding,\n x,\n per_sample_weights=xw.view(-1),\n use_cpu=False,\n do_pooling=do_pooling,\n )\n for (nn_embedding, x, xw) in zip(\n nn_embedding_list,\n indices_list,\n # pyre-fixme[61]: `weights_list` is undefined, or not always\n # defined.\n weights_list,\n )\n ]\n )\n if do_pooling:\n final_output = torch.cat(\n [f.view(B, -1) for f in nn_embedding_output], dim=1\n )\n else:\n final_output = torch.cat(nn_embedding_output, dim=0).view(-1, D)\n\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n it_time = start_event.elapsed_time(end_event) * 1.0e-3\n times.append(it_time)\n else:\n it_time = time.time() - start_time\n times.append(it_time)\n avg_time = sum(times) / len(requests)\n median_time = statistics.median(times)\n return median_time if check_median else avg_time\n\n\ndef benchmark_pipelined_requests(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]],\n func1: Callable[[Tensor, Tensor, Optional[Tensor]], None],\n func2: Callable[[Tensor, Tensor, Optional[Tensor]], None],\n flush_gpu_cache_size_mb: int = 0,\n) -> Tuple[float, float]:\n torch.cuda.synchronize()\n start_events = [\n (torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))\n for _ in requests\n ]\n end_events = [\n (torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))\n for _ in requests\n ]\n for ((indices, offsets, indices_weights), start_event, end_event) in zip(\n requests, start_events, end_events\n ):\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 
4, dtype=torch.float\n )\n torch.cuda.synchronize()\n start_event[0].record()\n func1(indices, offsets, indices_weights)\n end_event[0].record()\n start_event[1].record()\n func2(indices, offsets, indices_weights)\n end_event[1].record()\n torch.cuda.synchronize()\n return (\n sum(\n start_event[0].elapsed_time(end_event[0]) * 1.0e-3\n for start_event, end_event in zip(start_events, end_events)\n )\n / len(requests),\n sum(\n start_event[1].elapsed_time(end_event[1]) * 1.0e-3\n for start_event, end_event in zip(start_events, end_events)\n )\n / len(requests),\n )\n\n\[email protected]()\ndef cli() -> None:\n pass\n\n\[email protected]()\n# recommended value: alpha=1.15 for training and alpha=1.09 for inference\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.FP32)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--iters\", default=100)\[email protected](\"--managed\", default=\"device\")\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.0)\[email protected](\"--row-wise/--no-row-wise\", default=True)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--weighted-num-requires-grad\", type=int, default=None)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--dense\", is_flag=True, default=False)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP32)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef device( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n embedding_dim: int,\n weights_precision: SparseType,\n stoc: bool,\n iters: int,\n managed: str,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n row_wise: bool,\n weighted: bool,\n weighted_num_requires_grad: Optional[int],\n flush_gpu_cache_size_mb: int,\n dense: bool,\n output_dtype: SparseType,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n if weighted_num_requires_grad:\n assert weighted_num_requires_grad <= T\n weighted_requires_grad_tables = np.random.choice(\n T, replace=False, size=(weighted_num_requires_grad,)\n ).tolist()\n feature_requires_grad = (\n torch.tensor(\n [1 if t in weighted_requires_grad_tables else 0 for t in range(T)]\n )\n .to(get_device())\n .int()\n )\n else:\n feature_requires_grad = None\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n optimizer = OptimType.EXACT_ROWWISE_ADAGRAD if row_wise else OptimType.EXACT_ADAGRAD\n\n if managed == \"device\":\n managed_option = (\n EmbeddingLocation.DEVICE\n if torch.cuda.is_available()\n else EmbeddingLocation.HOST\n )\n else:\n managed_option = EmbeddingLocation.MANAGED\n\n if dense:\n emb = DenseTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n )\n for d in Ds\n ],\n use_cpu=not torch.cuda.is_available(),\n )\n else:\n emb = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n 
managed_option,\n ComputeDevice.CUDA\n if torch.cuda.is_available()\n else ComputeDevice.CPU,\n )\n for d in Ds\n ],\n optimizer=optimizer,\n learning_rate=0.1,\n eps=0.1,\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n output_dtype=output_dtype,\n )\n emb = emb.to(get_device())\n\n if weights_precision == SparseType.INT8:\n emb.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n nparams = sum(w.numel() for w in emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n logging.info(\n f\"Embedding parameters: {nparams / 1.0e9: .2f} GParam, \"\n f\"{nparams * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n logging.info(\n f\"Accessed weights per batch: {B * sum(Ds) * L * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n\n # forward\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb.forward(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n feature_requires_grad=feature_requires_grad,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {param_size_multiplier * B * sum(Ds) * L / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n if output_dtype == SparseType.INT8:\n # backward bench not representative\n return\n\n grad_output = torch.randn(B, sum(Ds)).to(get_device())\n # backward\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n feature_requires_grad=feature_requires_grad,\n ).backward(grad_output),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"ForwardBackward, B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"BW: {3 * param_size_multiplier * B * sum(Ds) * L / time_per_iter / 1.0e9: .2f} GB/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.FP32)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--iters\", default=100)\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.1)\[email protected](\"--uvm-tables\", default=1)\[email protected](\"--uvm-bag-size\", default=1)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP32)\[email protected](\"--use-cache\", is_flag=True, default=False)\[email protected](\"--cache-algorithm\", default=\"lru\")\[email protected](\"--cache-load-factor\", default=0.2)\[email protected](\"--enforce-hbm\", is_flag=True, default=False)\ndef uvm(\n alpha: bool,\n bag_size: int,\n batch_size: int,\n embedding_dim: 
int,\n weights_precision: SparseType,\n stoc: bool,\n iters: int,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n uvm_tables: int,\n uvm_bag_size: int,\n weighted: bool,\n flush_gpu_cache_size_mb: int,\n requests_data_file: Optional[str],\n tables: Optional[str],\n output_dtype: SparseType,\n use_cache: bool,\n cache_algorithm: str,\n cache_load_factor: float,\n enforce_hbm: bool,\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n T_uvm = uvm_tables\n assert T_uvm <= T\n assert (\n T_uvm > 0\n ), f\"T_uvm specified {T_uvm} <= 0. If not testing UVM, please use device benchmark.\"\n T_gpu = T - T_uvm\n L_uvm = uvm_bag_size\n\n cache_alg = CacheAlgorithm.LRU if cache_algorithm == \"lru\" else CacheAlgorithm.LFU\n managed_type = (\n EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED\n )\n\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n emb_uvm = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n managed_type,\n ComputeDevice.CUDA,\n )\n for d in Ds[:T_uvm]\n ],\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n output_dtype=output_dtype,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n enforce_hbm=enforce_hbm,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb_uvm.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n if T_gpu > 0:\n emb_gpu = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n EmbeddingLocation.DEVICE,\n ComputeDevice.CUDA,\n )\n for d in Ds[T_uvm:]\n ],\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb_gpu.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n emb_mixed = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n managed_option,\n ComputeDevice.CUDA,\n )\n for (d, managed_option) in zip(\n Ds,\n [managed_type] * T_uvm + [EmbeddingLocation.DEVICE] * T_gpu,\n )\n ],\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n output_dtype=output_dtype,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n enforce_hbm=enforce_hbm,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb_mixed.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n requests_uvm = generate_requests(\n iters,\n B,\n T_uvm,\n L_uvm,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n\n requests_gpu = None\n if T_gpu > 0:\n requests_gpu = generate_requests(\n iters,\n B,\n T_gpu,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=False,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n read_write_bytes_uvm = (\n output_size_multiplier * B * sum(Ds[:T_uvm])\n + param_size_multiplier * B * sum(Ds[:T_uvm]) * L_uvm\n )\n\n time_per_iter = benchmark_requests(\n requests_uvm,\n lambda indices, offsets, per_sample_weights: emb_uvm.forward(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"UVM Forward, B: {B}, \"\n f\"E: {E}, T: {T_uvm}, D: {D}, L: {L_uvm}, W: {weighted}, \"\n f\"BW: {read_write_bytes_uvm / 
time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n if T_gpu > 0:\n requests = []\n assert requests_gpu is not None\n for rs_uvm, rs_gpu in zip(requests_uvm, requests_gpu):\n indices = torch.cat([rs_uvm[0], rs_gpu[0]])\n lengths = [L_uvm] * (T_uvm * B) + [L] * (T_gpu * B)\n offsets = torch.tensor(([0] + np.cumsum(lengths).tolist())).int().cuda()\n per_sample_weights = None\n if weighted:\n assert (this_rs_uvm_weights := rs_uvm[2]) is not None\n assert (this_rs_gpu_weights := rs_gpu[2]) is not None\n per_sample_weights = torch.cat(\n [this_rs_uvm_weights, this_rs_gpu_weights]\n )\n requests.append((indices, offsets, per_sample_weights))\n\n # forward\n time_per_iter = benchmark_requests(\n requests_gpu,\n lambda indices, offsets, per_sample_weights: emb_gpu.forward(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n read_write_bytes_hbm = (\n output_size_multiplier * B * sum(Ds[T_uvm:])\n + param_size_multiplier * B * sum(Ds[T_uvm:]) * L\n )\n logging.info(\n f\"GPU Forward, B: {B}, \"\n f\"E: {E}, T: {T_gpu}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes_hbm / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb_mixed.forward(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n read_write_bytes_total = read_write_bytes_uvm + read_write_bytes_hbm\n logging.info(\n f\"Mixed Forward, B: {B}, \"\n f\"E: {E}, T_GPU: {T_gpu}, T_UVM: {T_uvm}, D: {D}, L_GPU: {L}, L_UVM: {L_uvm}, W: {weighted}, \"\n f\"BW: {read_write_bytes_total / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--cache-algorithm\", default=\"lru\")\[email protected](\"--cache-load-factor\", default=0.2)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.FP32)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--long-index\", is_flag=True, default=False)\[email protected](\"--iters\", default=100)\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.1)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef cache( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n cache_algorithm: str,\n cache_load_factor: float,\n embedding_dim: int,\n weights_precision: SparseType,\n stoc: bool,\n iters: int,\n long_index: bool,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n weighted: bool,\n flush_gpu_cache_size_mb: int,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n optimizer = OptimType.EXACT_ROWWISE_ADAGRAD\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n cache_alg = 
CacheAlgorithm.LRU if cache_algorithm == \"lru\" else CacheAlgorithm.LFU\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n\n emb_nc = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n EmbeddingLocation.MANAGED,\n ComputeDevice.CUDA,\n )\n for d in Ds\n ],\n optimizer=optimizer,\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb_nc.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n emb = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n EmbeddingLocation.MANAGED_CACHING,\n ComputeDevice.CUDA,\n )\n for d in Ds\n ],\n optimizer=optimizer,\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n nparams = sum(w.numel() for w in emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n logging.info(\n f\"Embedding tables: {E * T} rows, {nparams / 1.0e9: .2f} GParam, \"\n f\"{nparams * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n logging.info(\n f\"Accessed weights per batch: {B * T * L} rows, \"\n f\"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n requests = generate_requests(\n 2 * iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n warmup_requests, requests = requests[:iters], requests[iters:]\n grad_output = torch.randn(B, sum(Ds)).cuda()\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb_nc(\n indices.long(), offsets.long(), per_sample_weights\n ).backward(grad_output),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"ForwardBackward (UVM), B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"BW: {3 * param_size_multiplier * B * sum(Ds) * L / time_per_iter / 1.0e9: .2f} GB/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n # warm up\n for indices, offsets, _ in warmup_requests:\n emb.forward(indices.long(), offsets.long())\n # get cache miss rate (forward and backward) and exchanged cache lines (prefetch)\n cache_misses = []\n exchanged_cache_lines = []\n NOT_FOUND = -1\n for indices, offsets, _ in requests:\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.clone)[[Named(self,\n # Variable[torch._TTensor (bound to Tensor)])], Variable[torch._TTensor (bound\n # to Tensor)]], Tensor], Tensor, torch.nn.Module]` is not a function.\n old_lxu_cache_state = emb.lxu_cache_state.clone()\n emb.prefetch(indices.long(), offsets.long())\n exchanged_cache_lines.append(\n # pyre-fixme[16]: `bool` has no attribute `sum`.\n (emb.lxu_cache_state != old_lxu_cache_state)\n .sum()\n .item()\n )\n cache_misses.append((emb.lxu_cache_locations_list[0] == NOT_FOUND).sum().item())\n emb.forward(indices.long(), offsets.long())\n logging.info(\n f\"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, \"\n f\"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}\"\n )\n logging.info(\n f\"Cache miss -- mean: {sum(cache_misses)/len(requests)}, \"\n f\"max: {max(cache_misses)}, min: {min(cache_misses)}\"\n )\n\n # benchmark prefetch\n emb.reset_cache_states()\n for indices, offsets, _ in warmup_requests:\n emb.forward(indices, offsets)\n prefetch_time, 
forward_backward_time = benchmark_pipelined_requests(\n requests,\n lambda indices, offsets, indices_weights: emb.prefetch(indices, offsets),\n lambda indices, offsets, indices_weights: emb.forward(\n indices, offsets, indices_weights\n ).backward(grad_output),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n e2e_time = prefetch_time + forward_backward_time\n\n logging.info(\n f\"ForwardBackward (LXU), reuse: {reuse}, alpha: {alpha}, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"BW: {3 * param_size_multiplier * B * sum(Ds) * L / e2e_time / 1.0e9: .2f} GB/s, \"\n f\"Tprefetch: {prefetch_time * 1.0e6:.0f}us, \"\n f\"{2 * sum(exchanged_cache_lines) * param_size_multiplier * D / prefetch_time / len(requests) / 1.0e9: .2f} GB/s, \"\n f\"Tfwdbwd: {forward_backward_time * 1.0e6:.0f}us, \"\n f\"{3 * param_size_multiplier * B * sum(Ds) * L / forward_backward_time / 1.0e9: .2f} GB/s, \"\n f\"Te2e: {e2e_time * 1.0e6:.0f}us, \"\n )\n\n\ndef benchmark_cpu_requests(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],\n func: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],\n) -> float:\n import time\n\n start_time = time.perf_counter()\n for (indices, offsets, weights) in requests:\n func(indices, offsets, weights)\n end_time = time.perf_counter()\n return (end_time - start_time) / len(requests)\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.INT4)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--iters\", default=100)\[email protected](\"--managed\", default=\"device\")\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.0)\[email protected](\"--row-wise/--no-row-wise\", default=True)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--index-remapping\", is_flag=True, default=False)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP16)\ndef nbit_cpu( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n embedding_dim: int,\n weights_precision: SparseType,\n stoc: bool,\n iters: int,\n managed: str,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n row_wise: bool,\n weighted: bool,\n index_remapping: bool,\n requests_data_file: Optional[str],\n tables: Optional[str],\n output_dtype: SparseType,\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n if mixed:\n Ds = [\n # int4 table batched emb op can only handle mixed D where D is multiple of 8\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 8)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n\n emb = IntNBitTableBatchedEmbeddingBagsCodegen(\n [(\"\", E, d, weights_precision, EmbeddingLocation.HOST) for d in Ds],\n device=\"cpu\",\n index_remapping=[torch.arange(E) for _ in Ds] if index_remapping else None,\n output_dtype=output_dtype,\n ).cpu()\n emb.fill_random_weights()\n\n nparams_byte = sum(w.numel() for (w, _) in 
emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n read_write_bytes = (\n output_size_multiplier * B * T * D + param_size_multiplier * B * T * L * D\n )\n logging.info(\n f\"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, \"\n f\"{nparams_byte / 1.0e9: .2f} GB\" # IntN TBE use byte for storage\n )\n logging.info(\n f\"Accessed weights per batch: {B * T * L} rows, \"\n f\"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n requests = [\n (a.cpu().int(), b.cpu().int(), c.cpu() if c else None) for (a, b, c) in requests\n ]\n\n time_per_iter = benchmark_cpu_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb.forward(\n indices,\n offsets,\n per_sample_weights,\n ),\n )\n\n logging.info(\n f\"{weights_precision} Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.INT4)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--managed\", default=\"device\")\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.0)\[email protected](\"--row-wise/--no-row-wise\", default=True)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--pooling\", type=str, default=\"sum\")\[email protected](\"--weighted-num-requires-grad\", type=int, default=None)\[email protected](\"--bounds-check-mode\", type=int, default=BoundsCheckMode.WARNING.value)\[email protected](\"--pruning-ratio\", type=float, default=None)\[email protected](\"--load-factor\", default=0.75)\[email protected](\"--use-array-for-index-remapping\", is_flag=True, default=True)\[email protected](\"--check-median\", is_flag=True, default=True)\[email protected](\"--iters\", default=100)\[email protected](\"--runs-of-iters\", default=5)\[email protected](\"--warmup-runs\", default=2)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP16)\[email protected](\"--report-aibench\", is_flag=True)\[email protected](\"--run-reference\", is_flag=True, default=False)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef nbit_device( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n embedding_dim: int,\n weights_precision: SparseType,\n stoc: bool,\n managed: str,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n row_wise: bool,\n weighted: bool,\n pooling: str,\n weighted_num_requires_grad: Optional[int],\n bounds_check_mode: int,\n pruning_ratio: Optional[float],\n load_factor: float,\n use_array_for_index_remapping: bool,\n check_median: bool,\n iters: int,\n runs_of_iters: int,\n 
warmup_runs: int,\n output_dtype: SparseType,\n report_aibench: bool,\n run_reference: bool,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n original_E = E\n T = num_tables\n index_remapping = None\n if mixed:\n # int4 table batched emb op can only handle mixed D where D is multiple of 8\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 8)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n\n mem_for_pruning = 0\n if pruning_ratio:\n assert pruning_ratio < 1 and pruning_ratio >= 0\n E = math.ceil(E * (1.0 - pruning_ratio))\n index_remapping = []\n for _ in range(T):\n mapping = torch.tensor([-1] * original_E, dtype=torch.int32)\n selected_indices = random.sample(range(original_E), E)\n for i, idx in enumerate(selected_indices):\n mapping[idx] = i\n index_remapping.append(mapping)\n if use_array_for_index_remapping:\n mem_for_pruning += mapping.numel() * 4\n else:\n mem_for_pruning += E / load_factor * 2 * 4\n\n if managed == \"device\":\n managed_option = EmbeddingLocation.DEVICE\n else:\n managed_option = EmbeddingLocation.MANAGED\n\n if pooling is None or pooling == \"sum\":\n pooling = \"sum\"\n pooling_mode = PoolingMode.SUM\n do_pooling = True\n elif pooling == \"mean\":\n pooling_mode = PoolingMode.MEAN\n do_pooling = True\n else: # \"none\"\n pooling_mode = PoolingMode.NONE\n do_pooling = False\n\n emb = IntNBitTableBatchedEmbeddingBagsCodegen(\n [(\"\", E, d, weights_precision, managed_option) for d in Ds],\n bounds_check_mode=BoundsCheckMode(bounds_check_mode),\n index_remapping=index_remapping,\n load_factor=load_factor,\n use_array_for_index_remapping=use_array_for_index_remapping,\n output_dtype=output_dtype,\n pooling_mode=pooling_mode,\n ).cuda()\n emb.fill_random_weights()\n\n nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n if do_pooling:\n read_write_bytes = (\n output_size_multiplier * B * T * D + param_size_multiplier * B * T * L * D\n )\n else:\n read_write_bytes = (\n output_size_multiplier * B * T * L * D\n + param_size_multiplier * B * T * L * D\n )\n logging.info(\n f\"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, \"\n f\"{nparams_byte / 1.0e9: .2f} GB\" # IntN TBE use byte for storage\n )\n logging.info(\n f\"Accessed weights per batch: {B * T * L} rows, \"\n f\"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n times = []\n for i in range(runs_of_iters):\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]\n\n # forward\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb.forward(\n indices.int(),\n offsets.int(),\n per_sample_weights,\n ),\n check_median=check_median,\n )\n\n # free up GPU memory\n del requests\n\n logging.info(\n f\"Iteration {i}: \"\n f\"{weights_precision} Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us, \"\n 
f\"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB\"\n )\n\n if i >= warmup_runs:\n times.append(time_per_iter)\n\n time_per_iter = statistics.mean(times)\n bandwidth = read_write_bytes / time_per_iter / 1.0e9\n\n logging.info(\n f\"Average of all iterations: \"\n f\"{weights_precision} Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {bandwidth: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us, \"\n f\"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB\"\n )\n\n if report_aibench and haveAIBench:\n print(\n emitMetric(\n type=\"NET\",\n metric=f\"bandwidth_{weights_precision}\",\n unit=\"scalar\",\n value=str(bandwidth),\n )\n )\n print(\n emitMetric(\n type=\"NET\",\n metric=f\"time_per_iter_{weights_precision}\",\n unit=\"scalar\",\n value=str(time_per_iter * 1.0e6),\n )\n )\n\n if run_reference:\n times = []\n for i in range(runs_of_iters):\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]\n\n # forward\n time_per_iter_refer = benchmark_requests_refer(\n requests,\n T,\n B,\n L,\n E,\n D,\n pooling,\n weighted,\n check_median=check_median,\n )\n\n # free up GPU memory\n del requests\n\n logging.info(\n f\"Reference (nn.Embedding(Bag)) Iteration {i}: \"\n f\"Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes / time_per_iter_refer / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter_refer * 1.0e6:.0f}us \"\n )\n\n if i >= warmup_runs:\n times.append(time_per_iter_refer)\n\n time_per_iter_refer = statistics.mean(times)\n bandwidth = read_write_bytes / time_per_iter_refer / 1.0e9\n\n logging.info(\n f\"Average of all iterations: \"\n f\"Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"Effective BW: {bandwidth: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter_refer * 1.0e6:.0f}us \"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.INT4)\[email protected](\"--iters\", default=100)\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.1)\[email protected](\"--uvm-num-embeddings\", default=int(1e5))\[email protected](\"--uvm-tables\", default=1)\[email protected](\"--uvm-bag-size\", default=1)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP16)\[email protected](\"--use-cache\", is_flag=True, default=False)\[email protected](\"--cache-algorithm\", default=\"lru\")\[email protected](\"--cache-load-factor\", default=0.2)\[email protected](\"--enforce-hbm\", is_flag=True, default=False)\ndef nbit_uvm(\n alpha: bool,\n bag_size: int,\n batch_size: int,\n embedding_dim: int,\n weights_precision: SparseType,\n iters: int,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n uvm_num_embeddings: int,\n uvm_tables: int,\n uvm_bag_size: 
int,\n weighted: bool,\n flush_gpu_cache_size_mb: int,\n output_dtype: SparseType,\n use_cache: bool,\n cache_algorithm: str,\n cache_load_factor: float,\n enforce_hbm: bool,\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n E_uvm = uvm_num_embeddings\n T = num_tables\n T_uvm = uvm_tables\n assert T_uvm <= T\n assert (\n T_uvm > 0\n ), f\"T_uvm specified {T_uvm} <= 0. If not testing UVM, please use device benchmark.\"\n T_gpu = T - T_uvm\n L_uvm = uvm_bag_size\n cache_alg = CacheAlgorithm.LRU if cache_algorithm == \"lru\" else CacheAlgorithm.LFU\n managed_type = (\n EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED\n )\n\n logging.info(f\"T: {T}, T_uvm: {T_uvm}, T_gpu: {T_gpu}\")\n\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n emb_uvm = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n E_uvm,\n d,\n weights_precision,\n managed_type,\n )\n for d in Ds[:T_uvm]\n ],\n output_dtype=output_dtype,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n enforce_hbm=enforce_hbm,\n ).cuda()\n emb_uvm.fill_random_weights()\n\n if T_gpu > 0:\n emb_gpu = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n E,\n d,\n weights_precision,\n EmbeddingLocation.DEVICE,\n )\n for d in Ds[T_uvm:]\n ],\n output_dtype=output_dtype,\n ).cuda()\n emb_gpu.fill_random_weights()\n\n emb_mixed = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n e,\n d,\n weights_precision,\n managed_option,\n )\n for (e, d, managed_option) in zip(\n [E_uvm] * T_uvm + [E] * T_gpu,\n Ds,\n [managed_type] * T_uvm + [EmbeddingLocation.DEVICE] * T_gpu,\n )\n ],\n output_dtype=output_dtype,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n enforce_hbm=enforce_hbm,\n ).cuda()\n emb_mixed.fill_random_weights()\n\n requests_uvm = generate_requests(\n iters,\n B,\n T_uvm,\n L_uvm,\n E_uvm,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n )\n requests_uvm = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests_uvm]\n\n requests_gpu = None\n if T_gpu > 0:\n requests_gpu = generate_requests(\n iters,\n B,\n T_gpu,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=False,\n )\n requests_gpu = [\n (a.int(), b.int(), c if c else None) for (a, b, c) in requests_gpu\n ]\n\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n read_write_bytes_uvm = (\n output_size_multiplier * B * sum(Ds[:T_uvm])\n + param_size_multiplier * B * sum(Ds[:T_uvm]) * L_uvm\n )\n\n if T_gpu > 0:\n nparams_byte = sum(w.numel() for (w, _) in emb_mixed.split_embedding_weights())\n logging.info(\n f\"{weights_precision} Embedding tables: {E * T + E_uvm * T_uvm} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, \"\n f\"{nparams_byte / 1.0e9: .2f} GB\" # IntN TBE use byte for storage\n )\n logging.info(\n f\"Accessed weights per batch: {B * (T * L + T_uvm * L_uvm)} rows, \"\n f\"{B * (T * L * sum(Ds[T_uvm:]) + T_uvm * L_uvm * sum(Ds[:T_uvm])) * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n time_per_iter = benchmark_requests(\n requests_uvm,\n lambda indices, offsets, per_sample_weights: emb_uvm.forward(\n indices.int(),\n offsets.int(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n 
logging.info(\n f\"UVM NBit Forward, {weights_precision}, B: {B}, \"\n f\"E_uvm: {E_uvm}, T: {T_uvm}, D: {D}, L: {L_uvm}, W: {weighted}, \"\n f\"BW: {read_write_bytes_uvm / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n if T_gpu > 0:\n requests = []\n assert requests_gpu is not None\n for rs_uvm, rs_gpu in zip(requests_uvm, requests_gpu):\n indices = torch.cat([rs_uvm[0], rs_gpu[0]])\n lengths = [L_uvm] * (T_uvm * B) + [L] * (T_gpu * B)\n offsets = torch.tensor(([0] + np.cumsum(lengths).tolist())).int().cuda()\n per_sample_weights = None\n if weighted:\n assert (this_rs_uvm_weights := rs_uvm[2]) is not None\n assert (this_rs_gpu_weights := rs_gpu[2]) is not None\n per_sample_weights = torch.cat(\n [this_rs_uvm_weights, this_rs_gpu_weights]\n )\n requests.append((indices, offsets, per_sample_weights))\n\n # forward\n time_per_iter = benchmark_requests(\n requests_gpu,\n lambda indices, offsets, per_sample_weights: emb_gpu.forward(\n indices.int(),\n offsets.int(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n\n read_write_bytes_hbm = (\n output_size_multiplier * B * sum(Ds[T_uvm:])\n + param_size_multiplier * B * sum(Ds[T_uvm:]) * L\n )\n logging.info(\n f\"GPU NBit Forward, {weights_precision}, B: {B}, \"\n f\"E: {E}, T: {T_gpu}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes_hbm / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb_mixed.forward(\n indices.int(),\n offsets.int(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n read_write_bytes_total = read_write_bytes_uvm + read_write_bytes_hbm\n logging.info(\n f\"Mixed NBit Forward, {weights_precision}, B: {B}, \"\n f\"E_GPU: {E}, E_UVM: {E_uvm}, T_GPU: {T_gpu}, T_UVM: {T_uvm}, D: {D}, L_GPU: {L}, L_UVM: {L_uvm}, W: {weighted}, \"\n f\"BW: {read_write_bytes_total / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n # benchmark prefetch\n emb_mixed.reset_cache_states()\n for indices, offsets, _ in requests:\n emb_mixed.forward(indices, offsets)\n prefetch_time, forward_time = benchmark_pipelined_requests(\n requests,\n lambda indices, offsets, indices_weights: emb_mixed.prefetch(\n indices,\n offsets,\n ),\n # pyre-fixme[6]: Expected `(Tensor, Tensor, Optional[Tensor]) -> None` for\n # 3rd param but got `(indices: Any, offsets: Any, indices_weights: Any) ->\n # Tensor`.\n lambda indices, offsets, indices_weights: emb_mixed.forward(\n indices,\n offsets,\n indices_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n e2e_time = prefetch_time + forward_time\n\n logging.info(\n f\"Forward(LXU) {weights_precision}, reuse: {reuse}, alpha: {alpha}, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"Te2e: {e2e_time * 1.0e6:.0f}us, \"\n f\"e2e BW: {read_write_bytes_total / e2e_time / 1.0e9: .2f} GB/s, \"\n f\"Tprefetch: {prefetch_time * 1.0e6:.0f}us, \"\n f\"TfwdTime: {forward_time * 1.0e6:.0f}us, \"\n f\"{read_write_bytes_total / forward_time / 1.0e9: .2f} GB/s\"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--cache-algorithm\", default=\"lru\")\[email protected](\"--cache-load-factor\", default=0.2)\[email protected](\"--embedding-dim\", 
default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.INT4)\[email protected](\"--iters\", default=100)\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.1)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP16)\[email protected](\"--enforce-hbm\", is_flag=True, default=False)\ndef nbit_cache( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n cache_algorithm: str,\n cache_load_factor: float,\n embedding_dim: int,\n weights_precision: SparseType,\n iters: int,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n weighted: bool,\n flush_gpu_cache_size_mb: int,\n output_dtype: SparseType,\n enforce_hbm: bool,\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n cache_alg = CacheAlgorithm.LRU if cache_algorithm == \"lru\" else CacheAlgorithm.LFU\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n\n emb_nc = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n E,\n d,\n weights_precision,\n EmbeddingLocation.MANAGED,\n )\n for d in Ds\n ],\n output_dtype=output_dtype,\n enforce_hbm=enforce_hbm,\n ).cuda()\n emb_nc.fill_random_weights()\n\n emb = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n E,\n d,\n weights_precision,\n EmbeddingLocation.MANAGED_CACHING,\n )\n for d in Ds\n ],\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n output_dtype=output_dtype,\n enforce_hbm=enforce_hbm,\n ).cuda()\n emb.fill_random_weights()\n\n nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n read_write_bytes = (\n output_size_multiplier * B * sum(Ds) + param_size_multiplier * B * sum(Ds) * L\n )\n logging.info(\n f\"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, \"\n f\"{nparams_byte / 1.0e9: .2f} GB\" # IntN TBE use byte for storage\n )\n logging.info(\n f\"Accessed weights per batch: {B * T * L} rows, \"\n f\"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n requests = generate_requests(\n 2 * iters, B, T, L, E, reuse=reuse, alpha=alpha, weighted=weighted\n )\n requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]\n warmup_requests, requests = requests[:iters], requests[iters:]\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb_nc(\n indices.int(), offsets.int(), per_sample_weights\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"Forward (UVM) {weights_precision}, B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n # exchanged_cache_lines = [100]\n # warm up\n for indices, offsets, _ in warmup_requests:\n emb.forward(indices.int(), offsets.int())\n # get cache miss rate (forward only) and exchanged cache lines (prefetch)\n cache_misses = []\n 
exchanged_cache_lines = []\n NOT_FOUND = -1\n for indices, offsets, _ in requests:\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.clone)[[Named(self,\n # Variable[torch._TTensor (bound to Tensor)])], Variable[torch._TTensor (bound\n # to Tensor)]], Tensor], Tensor, torch.nn.Module]` is not a function.\n old_lxu_cache_state = emb.lxu_cache_state.clone()\n emb.prefetch(indices, offsets)\n exchanged_cache_lines.append(\n # pyre-fixme[16]: `bool` has no attribute `sum`.\n (emb.lxu_cache_state != old_lxu_cache_state)\n .sum()\n .item()\n )\n cache_misses.append(\n (emb.lxu_cache_locations_list.top() == NOT_FOUND).sum().item()\n )\n emb.forward(indices, offsets)\n logging.info(\n f\"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, \"\n f\"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}\"\n )\n logging.info(\n f\"Cache miss -- mean: {sum(cache_misses)/len(requests)}, \"\n f\"max: {max(cache_misses)}, min: {min(cache_misses)}\"\n )\n\n # benchmark prefetch\n emb.reset_cache_states()\n for indices, offsets, _ in warmup_requests:\n emb.forward(indices, offsets)\n prefetch_time, forward_time = benchmark_pipelined_requests(\n requests,\n lambda indices, offsets, indices_weights: emb.prefetch(\n indices,\n offsets,\n ),\n # pyre-fixme[6]: Expected `(Tensor, Tensor, Optional[Tensor]) -> None` for\n # 3rd param but got `(indices: Any, offsets: Any, indices_weights: Any) ->\n # Tensor`.\n lambda indices, offsets, indices_weights: emb.forward(\n indices,\n offsets,\n indices_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n e2e_time = prefetch_time + forward_time\n\n logging.info(\n f\"Forward(LXU) {weights_precision}, reuse: {reuse}, alpha: {alpha}, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"Te2e: {e2e_time * 1.0e6:.0f}us, \"\n f\"e2e BW: {read_write_bytes / e2e_time / 1.0e9: .2f} GB/s, \"\n f\"Tprefetch: {prefetch_time * 1.0e6:.0f}us, \"\n f\"{2 * sum(exchanged_cache_lines) * param_size_multiplier * D / prefetch_time / len(requests) / 1.0e9: .2f} GB/s, \"\n f\"TfwdTime: {forward_time * 1.0e6:.0f}us, \"\n f\"{read_write_bytes / forward_time / 1.0e9: .2f} GB/s\"\n )\n\n\[email protected]()\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=2048)\[email protected](\"--iters\", default=10)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=100)\[email protected](\"--load-factor\", default=0.75)\[email protected](\"--hit-rate\", default=0.9)\[email protected](\"--use-cpu\", is_flag=True, default=False)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef hashtable( # noqa C901\n bag_size: int,\n batch_size: int,\n iters: int,\n num_embeddings: int,\n num_tables: int,\n load_factor: float,\n hit_rate: float,\n use_cpu: bool,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n B = batch_size\n T = num_tables\n L = bag_size\n E = num_embeddings\n np.random.seed(42)\n torch.manual_seed(42)\n if hit_rate == 1.0:\n chosen_indices = torch.cat([torch.arange(E) for _ in range(T)], dim=0).int()\n else:\n chosen_indices = (\n torch.randint(low=0, high=int(E * 1.0 / hit_rate), size=(E * T,))\n .view(-1)\n .int()\n )\n dense_indices = torch.cat([torch.arange(E) for _ in range(T)], dim=0).int()\n offsets = torch.tensor([E * t for t in range(T + 1)]).int()\n assert offsets[-1] == chosen_indices.numel()\n assert offsets.numel() == T + 
1\n assert (offsets.numel() - 1) // T == 1\n\n capacities = [round_up(int(E / load_factor), 32) for _ in range(T)]\n\n hash_table = torch.zeros(\n (sum(capacities), 2),\n dtype=torch.int32,\n )\n hash_table_offsets = torch.tensor([0] + np.cumsum(capacities).tolist()).long()\n\n assert hash_table.numel() * 4 < 2 ** 32\n # initialize\n hash_table[:, :] = -1\n torch.ops.fbgemm.pruned_hashmap_insert(\n chosen_indices, dense_indices, offsets, hash_table, hash_table_offsets\n )\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n\n if not use_cpu:\n hash_table = hash_table.cuda()\n hash_table_offsets = hash_table_offsets.cuda()\n requests = [(a.cuda().int(), b.cuda().int(), c) for (a, b, c) in requests]\n else:\n requests = [(a.int().cpu(), b.int().cpu(), c) for (a, b, c) in requests]\n\n empirical_hit_rate = np.mean(\n [\n torch.ops.fbgemm.pruned_hashmap_lookup(\n indices, offsets, hash_table, hash_table_offsets\n )\n .ne(-1)\n .sum()\n .item()\n / indices.numel()\n for indices, offsets, _ in requests\n ]\n )\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, _: torch.ops.fbgemm.pruned_hashmap_lookup(\n indices, offsets, hash_table, hash_table_offsets\n ),\n )\n\n logging.info(\n f\"LinearTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us, load factor: {E * T / hash_table.shape[0] * 100:.1f}%, hit rate: {empirical_hit_rate * 100:.2f}%, Table size: {hash_table.numel() * 4 / 1.0e9:.0f} GB\"\n )\n\n if use_cpu:\n ht = torch.classes.fb.PrunedMapCPU()\n ht.insert(chosen_indices, dense_indices, offsets, T)\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, _: ht.lookup(indices, offsets),\n )\n\n logging.info(\n f\"HashTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us, load factor: {E * T / hash_table.shape[0] * 100:.1f}%, hit rate: {empirical_hit_rate * 100:.2f}%, Table size: {hash_table.numel() * 4 / 1.0e9:.0f} GB\"\n )\n\n\[email protected]()\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=2048)\[email protected](\"--iters\", default=100)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=100)\[email protected](\"--pruning-ratio\", default=0.9)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef pruned_array( # noqa C901\n bag_size: int,\n batch_size: int,\n iters: int,\n num_embeddings: int,\n num_tables: int,\n pruning_ratio: float,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n B = batch_size\n T = num_tables\n L = bag_size\n E = num_embeddings\n np.random.seed(42)\n torch.manual_seed(42)\n assert pruning_ratio > 0 and pruning_ratio <= 1\n original_E = int(E / (1.0 - pruning_ratio))\n index_remappings = torch.tensor(\n [-1] * original_E * T, dtype=torch.int32, device=\"cuda\"\n )\n index_remappings_offsets = torch.empty(T + 1, dtype=torch.int32, device=\"cuda\")\n index_remappings_offsets[0] = 0\n dense_indicies = torch.tensor(range(E), dtype=torch.int32, device=\"cuda\")\n for t in range(T):\n selected_indices = torch.add(\n torch.randperm(original_E, device=\"cuda\"), t * original_E\n )[:E]\n index_remappings[selected_indices] = dense_indicies\n index_remappings_offsets[t + 1] = index_remappings_offsets[t] + 
original_E\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n requests = [(a.cuda().int(), b.cuda().int(), c) for (a, b, c) in requests]\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, _: torch.ops.fbgemm.pruned_array_lookup(\n indices,\n offsets,\n index_remappings,\n index_remappings_offsets,\n ),\n )\n\n logging.info(\n f\"LinearTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us, Pruning Ratio: {pruning_ratio * 100:.2f}%, Table size: {original_E * T * 4 / 1.0e9:.0f} GB\"\n )\n\n\[email protected]()\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--iters\", default=100)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--bounds-check-mode\", type=int, default=BoundsCheckMode.WARNING.value)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef bounds_check_indices( # noqa C901\n bag_size: int,\n batch_size: int,\n iters: int,\n num_embeddings: int,\n num_tables: int,\n bounds_check_mode: int,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n L = bag_size\n E = num_embeddings\n T = num_tables\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n # requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]\n\n warning = torch.tensor([0]).long().to(get_device())\n rows_per_table = torch.tensor([E for _ in range(T)]).long().to(get_device())\n # forward\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, _: torch.ops.fbgemm.bounds_check_indices(\n rows_per_table,\n indices,\n offsets,\n BoundsCheckMode(bounds_check_mode),\n warning,\n ),\n )\n\n logging.info(\n f\"Bounds Check Indices: B: {B}, \"\n f\"E: {E}, T: {T}, L: {L}, \"\n f\"BW: {(8 * B * T * L + 8 * (B * T + 1)) / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n\nif __name__ == \"__main__\":\n cli()\n" ]
[ [ "torch.empty", "numpy.ones", "torch.as_tensor", "torch.rand", "numpy.random.seed", "torch.cuda.synchronize", "torch.classes.fb.PrunedMapCPU", "torch.cuda.Event", "torch.cuda.is_available", "torch.cat", "torch.ops.fbgemm.pruned_array_lookup", "torch.nn.EmbeddingBag", "numpy.random.choice", "torch.arange", "torch.ops.fbgemm.pruned_hashmap_lookup", "numpy.average", "torch.device", "torch.sort", "torch.ops.fbgemm.pruned_hashmap_insert", "torch.load", "torch.manual_seed", "torch.tensor", "torch.cuda.current_device", "numpy.prod", "numpy.cumsum", "numpy.random.default_rng", "torch.nn.Embedding", "numpy.random.zipf", "torch.randperm", "torch.zeros" ] ]
Moon-sung-woo/Tacotron2_korean
[ "cb503e212b6bcae7c7b732b50887b56d11cfd543" ]
[ "train.py" ]
[ "# *****************************************************************************\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# *****************************************************************************\n\nimport os\nimport time\nimport argparse\nimport numpy as np\nfrom contextlib import contextmanager\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\n\nimport torch.distributed as dist\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom apex.parallel import DistributedDataParallel as DDP\n\nimport models\nimport loss_functions\nimport data_functions\n\nimport dllogger as DLLogger\nfrom dllogger import StdOutBackend, JSONStreamBackend, Verbosity\n\nfrom scipy.io.wavfile import write as write_wav\n\nfrom apex import amp\namp.lists.functional_overrides.FP32_FUNCS.remove('softmax')\namp.lists.functional_overrides.FP16_FUNCS.append('softmax')\n\n\ndef parse_args(parser):\n \"\"\"\n Parse commandline arguments.\n \"\"\"\n\n parser.add_argument('-o', '--output', type=str, required=True,\n help='Directory to save checkpoints')\n parser.add_argument('-d', '--dataset-path', type=str,\n default='./', help='Path to dataset')\n parser.add_argument('-m', '--model-name', type=str, default='', required=True,\n help='Model to train')\n parser.add_argument('--log-file', type=str, default='nvlog.json',\n help='Filename for logging')\n parser.add_argument('--anneal-steps', nargs='*',\n help='Epochs after which decrease learning rate')\n parser.add_argument('--anneal-factor', type=float, choices=[0.1, 0.3], default=0.1,\n help='Factor for annealing learning rate')\n\n # training\n training = parser.add_argument_group('training setup')\n training.add_argument('--epochs', type=int, required=True,\n help='Number of total epochs to run')\n training.add_argument('--epochs-per-checkpoint', type=int, default=50,\n help='Number of epochs per checkpoint')\n training.add_argument('--checkpoint-path', type=str, default='',\n help='Checkpoint path 
to resume training')\n training.add_argument('--resume-from-last', action='store_true',\n help='Resumes training from the last checkpoint; uses the directory provided with \\'--output\\' option to search for the checkpoint \\\"checkpoint_<model_name>_last.pt\\\"')\n training.add_argument('--dynamic-loss-scaling', type=bool, default=True,\n help='Enable dynamic loss scaling')\n training.add_argument('--amp', action='store_true',\n help='Enable AMP')\n training.add_argument('--cudnn-enabled', action='store_true',\n help='Enable cudnn')\n training.add_argument('--cudnn-benchmark', action='store_true',\n help='Run cudnn benchmark')\n training.add_argument('--disable-uniform-initialize-bn-weight', action='store_true',\n help='disable uniform initialization of batchnorm layer weight')\n\n optimization = parser.add_argument_group('optimization setup')\n optimization.add_argument(\n '--use-saved-learning-rate', default=False, type=bool)\n optimization.add_argument('-lr', '--learning-rate', type=float, required=True,\n help='Learing rate')\n optimization.add_argument('--weight-decay', default=1e-6, type=float,\n help='Weight decay')\n optimization.add_argument('--grad-clip-thresh', default=1.0, type=float,\n help='Clip threshold for gradients')\n optimization.add_argument('-bs', '--batch-size', type=int, required=True,\n help='Batch size per GPU')\n optimization.add_argument('--grad-clip', default=5.0, type=float,\n help='Enables gradient clipping and sets maximum gradient norm value')\n\n # dataset parameters\n dataset = parser.add_argument_group('dataset parameters')\n dataset.add_argument('--load-mel-from-disk', action='store_true',\n help='Loads mel spectrograms from disk instead of computing them on the fly')\n dataset.add_argument('--training-files',\n default='filelists/kss_train.txt',\n type=str, help='Path to training filelist')\n dataset.add_argument('--validation-files',\n default='filelists/kss_val.txt',\n type=str, help='Path to validation filelist')\n dataset.add_argument('--text-cleaners', nargs='*',\n default=['english_cleaners'], type=str,\n help='Type of text cleaners for input text')\n\n # audio parameters\n audio = parser.add_argument_group('audio parameters')\n audio.add_argument('--max-wav-value', default=32768.0, type=float,\n help='Maximum audiowave value')\n audio.add_argument('--sampling-rate', default=22050, type=int,\n help='Sampling rate')\n audio.add_argument('--filter-length', default=1024, type=int,\n help='Filter length')\n audio.add_argument('--hop-length', default=256, type=int,\n help='Hop (stride) length')\n audio.add_argument('--win-length', default=1024, type=int,\n help='Window length')\n audio.add_argument('--mel-fmin', default=0.0, type=float,\n help='Minimum mel frequency')\n audio.add_argument('--mel-fmax', default=8000.0, type=float,\n help='Maximum mel frequency')\n\n distributed = parser.add_argument_group('distributed setup')\n # distributed.add_argument('--distributed-run', default=True, type=bool,\n # help='enable distributed run')\n distributed.add_argument('--rank', default=0, type=int,\n help='Rank of the process, do not set! Done by multiproc module')\n distributed.add_argument('--world-size', default=1, type=int,\n help='Number of processes, do not set! 
Done by multiproc module')\n distributed.add_argument('--dist-url', type=str, default='tcp://localhost:23456',\n help='Url used to set up distributed training')\n distributed.add_argument('--group-name', type=str, default='group_name',\n required=False, help='Distributed group name')\n distributed.add_argument('--dist-backend', default='nccl', type=str, choices={'nccl'},\n help='Distributed run backend')\n\n benchmark = parser.add_argument_group('benchmark')\n benchmark.add_argument('--bench-class', type=str, default='')\n\n return parser\n\n\ndef reduce_tensor(tensor, num_gpus):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.reduce_op.SUM)\n rt /= num_gpus\n return rt\n\n\ndef init_distributed(args, world_size, rank, group_name):\n assert torch.cuda.is_available(), \"Distributed mode requires CUDA.\"\n print(\"Initializing Distributed\")\n\n # Set cuda device so everything is done on the right GPU.\n torch.cuda.set_device(rank % torch.cuda.device_count())\n\n # Initialize distributed communication\n dist.init_process_group(\n backend=args.dist_backend, init_method=args.dist_url,\n world_size=world_size, rank=rank, group_name=group_name)\n\n print(\"Done initializing distributed\")\n\n\ndef save_checkpoint(model, optimizer, epoch, config, amp_run, output_dir, model_name,\n local_rank, world_size):\n\n random_rng_state = torch.random.get_rng_state().cuda()\n cuda_rng_state = torch.cuda.get_rng_state(local_rank).cuda()\n\n random_rng_states_all = [torch.empty_like(random_rng_state) for _ in range(world_size)]\n cuda_rng_states_all = [torch.empty_like(cuda_rng_state) for _ in range(world_size)]\n\n if world_size > 1:\n dist.all_gather(random_rng_states_all, random_rng_state)\n dist.all_gather(cuda_rng_states_all, cuda_rng_state)\n else:\n random_rng_states_all = [random_rng_state]\n cuda_rng_states_all = [cuda_rng_state]\n\n random_rng_states_all = torch.stack(random_rng_states_all).cpu()\n cuda_rng_states_all = torch.stack(cuda_rng_states_all).cpu()\n\n if local_rank == 0:\n checkpoint = {'epoch': epoch,\n 'cuda_rng_state_all': cuda_rng_states_all,\n 'random_rng_states_all': random_rng_states_all,\n 'config': config,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()}\n if amp_run:\n checkpoint['amp'] = amp.state_dict()\n\n checkpoint_filename = \"checkpoint_{}_{}.pt\".format(model_name, epoch)\n checkpoint_path = os.path.join(\n output_dir, checkpoint_filename)\n print(\"Saving model and optimizer state at epoch {} to {}\".format(\n epoch, checkpoint_path))\n torch.save(checkpoint, checkpoint_path)\n\n symlink_src = checkpoint_filename\n symlink_dst = os.path.join(\n output_dir, \"checkpoint_{}_last.pt\".format(model_name))\n if os.path.exists(symlink_dst) and os.path.islink(symlink_dst):\n print(\"|||| Updating symlink\", symlink_dst, \"to point to\", symlink_src)\n os.remove(symlink_dst)\n\n os.symlink(symlink_src, symlink_dst)\n\n\ndef get_last_checkpoint_filename(output_dir, model_name):\n symlink = os.path.join(output_dir, \"checkpoint_{}_last.pt\".format(model_name))\n if os.path.exists(symlink):\n print(\"|||| Loading checkpoint from symlink\", symlink)\n return os.path.join(output_dir, os.readlink(symlink))\n else:\n print(\"|||| No last checkpoint available - starting from epoch 0 \")\n return \"\"\n\n\ndef load_checkpoint(model, optimizer, epoch, config, amp_run, filepath, local_rank):\n\n checkpoint = torch.load(filepath, map_location='cpu')\n\n epoch[0] = checkpoint['epoch']+1\n device_id = local_rank % torch.cuda.device_count()\n 
torch.cuda.set_rng_state(checkpoint['cuda_rng_state_all'][device_id])\n torch.random.set_rng_state(checkpoint['random_rng_states_all'][device_id])\n config = checkpoint['config']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n if amp_run:\n amp.load_state_dict(checkpoint['amp'])\n\n\n# adapted from: https://discuss.pytorch.org/t/opinion-eval-should-be-a-context-manager/18998/3\n# Following snippet is licensed under MIT license\n\n@contextmanager\ndef evaluating(model):\n '''Temporarily switch to evaluation mode.'''\n istrain = model.training\n try:\n model.eval()\n yield model\n finally:\n if istrain:\n model.train()\n\n\ndef validate(model, criterion, valset, epoch, batch_iter, batch_size,\n world_size, collate_fn, distributed_run, rank, batch_to_gpu):\n \"\"\"Handles all the validation scoring and printing\"\"\"\n with evaluating(model), torch.no_grad():\n val_sampler = DistributedSampler(valset) if distributed_run else None\n val_loader = DataLoader(valset, num_workers=1, shuffle=False,\n sampler=val_sampler,\n batch_size=batch_size, pin_memory=False,\n collate_fn=collate_fn)\n\n val_loss = 0.0\n num_iters = 0\n val_items_per_sec = 0.0\n for i, batch in enumerate(val_loader):\n torch.cuda.synchronize()\n iter_start_time = time.perf_counter()\n\n x, y, num_items = batch_to_gpu(batch)\n y_pred = model(x)\n loss = criterion(y_pred, y)\n if distributed_run:\n reduced_val_loss = reduce_tensor(loss.data, world_size).item()\n reduced_num_items = reduce_tensor(num_items.data, 1).item()\n else: #\n reduced_val_loss = loss.item()\n reduced_num_items = num_items.item()\n val_loss += reduced_val_loss\n\n torch.cuda.synchronize()\n iter_stop_time = time.perf_counter()\n iter_time = iter_stop_time - iter_start_time\n\n items_per_sec = reduced_num_items/iter_time\n DLLogger.log(step=(epoch, batch_iter, i), data={'val_items_per_sec': items_per_sec})\n val_items_per_sec += items_per_sec\n num_iters += 1\n\n val_loss = val_loss/(i + 1)\n\n DLLogger.log(step=(epoch,), data={'val_loss': val_loss})\n DLLogger.log(step=(epoch,), data={'val_items_per_sec':\n (val_items_per_sec/num_iters if num_iters > 0 else 0.0)})\n\n return val_loss\n\ndef adjust_learning_rate(iteration, epoch, optimizer, learning_rate,\n anneal_steps, anneal_factor, rank):\n\n p = 0\n if anneal_steps is not None:\n for i, a_step in enumerate(anneal_steps):\n if epoch >= int(a_step):\n p = p+1\n\n if anneal_factor == 0.3:\n lr = learning_rate*((0.1 ** (p//2))*(1.0 if p % 2 == 0 else 0.3))\n else:\n lr = learning_rate*(anneal_factor ** p)\n\n if optimizer.param_groups[0]['lr'] != lr:\n DLLogger.log(step=(epoch, iteration), data={'learning_rate changed': str(optimizer.param_groups[0]['lr'])+\" -> \"+str(lr)})\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Training')\n parser = parse_args(parser)\n args, _ = parser.parse_known_args()\n\n if 'LOCAL_RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n local_rank = int(os.environ['LOCAL_RANK'])\n world_size = int(os.environ['WORLD_SIZE'])\n else:\n local_rank = args.rank\n world_size = args.world_size\n\n distributed_run = world_size > 1\n\n if local_rank == 0:\n DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT,\n args.output+'/'+args.log_file),\n StdOutBackend(Verbosity.VERBOSE)])\n else:\n DLLogger.init(backends=[])\n\n for k,v in vars(args).items():\n DLLogger.log(step=\"PARAMETER\", data={k:v})\n 
DLLogger.log(step=\"PARAMETER\", data={'model_name':'Tacotron2_PyT'})\n\n model_name = args.model_name\n parser = models.parse_model_args(model_name, parser)\n args, _ = parser.parse_known_args()\n\n torch.backends.cudnn.enabled = args.cudnn_enabled\n torch.backends.cudnn.benchmark = args.cudnn_benchmark\n\n if distributed_run:\n init_distributed(args, world_size, local_rank, args.group_name)\n\n torch.cuda.synchronize()\n run_start_time = time.perf_counter()\n\n model_config = models.get_model_config(model_name, args)\n model = models.get_model(model_name, model_config,\n cpu_run=False,\n uniform_initialize_bn_weight=not args.disable_uniform_initialize_bn_weight)\n\n if not args.amp and distributed_run:\n model = DDP(model)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,\n weight_decay=args.weight_decay)\n\n if args.amp:\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n if distributed_run:\n model = DDP(model)\n\n try:\n sigma = args.sigma\n except AttributeError:\n sigma = None\n\n start_epoch = [0]\n\n if args.resume_from_last:\n args.checkpoint_path = get_last_checkpoint_filename(args.output, model_name)\n\n if args.checkpoint_path is not \"\":\n load_checkpoint(model, optimizer, start_epoch, model_config,\n args.amp, args.checkpoint_path, local_rank)\n\n start_epoch = start_epoch[0]\n\n criterion = loss_functions.get_loss_function(model_name, sigma)\n\n try:\n n_frames_per_step = args.n_frames_per_step\n except AttributeError:\n n_frames_per_step = None\n\n collate_fn = data_functions.get_collate_function(\n model_name, n_frames_per_step)\n trainset = data_functions.get_data_loader(\n model_name, args.dataset_path, args.training_files, args)\n if distributed_run:\n train_sampler = DistributedSampler(trainset)\n shuffle = False\n else:\n train_sampler = None\n shuffle = True\n\n train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,\n sampler=train_sampler,\n batch_size=args.batch_size, pin_memory=False,\n drop_last=True, collate_fn=collate_fn)\n\n valset = data_functions.get_data_loader(\n model_name, args.dataset_path, args.validation_files, args)\n\n batch_to_gpu = data_functions.get_batch_to_gpu(model_name)\n\n iteration = 0\n train_epoch_items_per_sec = 0.0\n val_loss = 0.0\n num_iters = 0\n\n model.train()\n\n for epoch in range(start_epoch, args.epochs):\n torch.cuda.synchronize()\n epoch_start_time = time.perf_counter()\n # used to calculate avg items/sec over epoch\n reduced_num_items_epoch = 0\n\n train_epoch_items_per_sec = 0.0\n\n num_iters = 0\n reduced_loss = 0\n\n # if overflow at the last iteration then do not save checkpoint\n overflow = False\n\n if distributed_run:\n train_loader.sampler.set_epoch(epoch)\n\n for i, batch in enumerate(train_loader):\n torch.cuda.synchronize()\n iter_start_time = time.perf_counter()\n DLLogger.log(step=(epoch, i),\n data={'glob_iter/iters_per_epoch': str(iteration)+\"/\"+str(len(train_loader))})\n\n adjust_learning_rate(iteration, epoch, optimizer, args.learning_rate,\n args.anneal_steps, args.anneal_factor, local_rank)\n\n model.zero_grad()\n x, y, num_items = batch_to_gpu(batch)\n\n y_pred = model(x)\n loss = criterion(y_pred, y)\n\n if distributed_run:\n reduced_loss = reduce_tensor(loss.data, world_size).item()\n reduced_num_items = reduce_tensor(num_items.data, 1).item()\n else:\n reduced_loss = loss.item()\n reduced_num_items = num_items.item()\n if np.isnan(reduced_loss):\n raise Exception(\"loss is NaN\")\n\n DLLogger.log(step=(epoch,i), data={'train_loss': 
reduced_loss})\n\n num_iters += 1\n\n # accumulate number of items processed in this epoch\n reduced_num_items_epoch += reduced_num_items\n\n if args.amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm_(\n amp.master_params(optimizer), args.grad_clip_thresh)\n else:\n loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm_(\n model.parameters(), args.grad_clip_thresh)\n\n optimizer.step()\n\n torch.cuda.synchronize()\n iter_stop_time = time.perf_counter()\n iter_time = iter_stop_time - iter_start_time\n items_per_sec = reduced_num_items/iter_time\n train_epoch_items_per_sec += items_per_sec\n\n DLLogger.log(step=(epoch, i), data={'train_items_per_sec': items_per_sec})\n DLLogger.log(step=(epoch, i), data={'train_iter_time': iter_time})\n iteration += 1\n\n torch.cuda.synchronize()\n epoch_stop_time = time.perf_counter()\n epoch_time = epoch_stop_time - epoch_start_time\n\n DLLogger.log(step=(epoch,), data={'train_items_per_sec':\n (train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})\n DLLogger.log(step=(epoch,), data={'train_loss': reduced_loss})\n DLLogger.log(step=(epoch,), data={'train_epoch_time': epoch_time})\n\n val_loss = validate(model, criterion, valset, epoch, iteration,\n args.batch_size, world_size, collate_fn,\n distributed_run, local_rank, batch_to_gpu)\n\n if (epoch % args.epochs_per_checkpoint == 0) and args.bench_class == \"\":\n save_checkpoint(model, optimizer, epoch, model_config,\n args.amp, args.output, args.model_name,\n local_rank, world_size)\n if local_rank == 0:\n DLLogger.flush()\n\n torch.cuda.synchronize()\n run_stop_time = time.perf_counter()\n run_time = run_stop_time - run_start_time\n DLLogger.log(step=tuple(), data={'run_time': run_time})\n DLLogger.log(step=tuple(), data={'val_loss': val_loss})\n DLLogger.log(step=tuple(), data={'train_items_per_sec':\n (train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})\n\n if local_rank == 0:\n DLLogger.flush()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.distributed.all_gather", "torch.stack", "torch.empty_like", "torch.load", "torch.cuda.set_rng_state", "torch.utils.data.distributed.DistributedSampler", "torch.random.get_rng_state", "torch.cuda.get_rng_state", "torch.distributed.init_process_group", "torch.save", "torch.random.set_rng_state", "torch.cuda.synchronize", "torch.cuda.device_count", "torch.no_grad", "torch.cuda.is_available", "torch.distributed.all_reduce", "numpy.isnan" ] ]
palminde/P9Project
[ "5df03d18b74585ce1d9feefce8c183225dd27f68" ]
[ "Code/Nets.py" ]
[ "import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\n\nlayers = tf.keras.layers\n\n\n# Clip model weights to a given hypercube\nclass ClipConstraint(tf.keras.constraints.Constraint):\n # set clip value when initialized\n def __init__(self, clip_value):\n self.clip_value = clip_value\n\n # clip model weights to hypercube\n def __call__(self, weights):\n return tf.keras.backend.clip(weights, -self.clip_value, self.clip_value)\n\n # get the config\n def get_config(self):\n return {'clip_value': self.clip_value}\n\n\nprelu_init = tf.keras.initializers.Constant(0.25)\n\n\ndef encoder(args):\n input_dim = args.dataset_dim[1]\n channels = args.dataset_dim[3]\n model = keras.Sequential()\n\n # normal\n model.add(layers.Conv2D(64, (3, 3), padding='same', input_shape=[input_dim, input_dim, channels]))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # classifier\n model.add(layers.Flatten())\n model.add(layers.Dropout(0.4))\n model.add(layers.Dense(args.noise_dim))\n # compile model\n return model\n\n\n\n# 32x32\ndef cifargan_gen(args):\n g_dim = args.g_dim\n z_dim = args.noise_dim\n img_dim = args.dataset_dim[1]\n channels = args.dataset_dim[3]\n img_resize = img_dim//(2*2*2)\n\n model = keras.Sequential()\n # foundation for 4x4 image\n model.add(layers.Dense(g_dim * img_resize * img_resize, input_dim=z_dim, kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(layers.Reshape((img_resize, img_resize, g_dim)))\n # upsample to 8x8\n model.add(layers.Conv2DTranspose(128, (4, 4), strides=(1, 1), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU(alpha=0.2))\n # upsample to 16x16\n model.add(layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU(alpha=0.2))\n # upsample to 32x32\n model.add(layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2DTranspose(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU(alpha=0.2))\n # output layer\n model.add(layers.Conv2D(channels, (6, 6), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n return model\n\n\ndef cifargan_disc(args):\n d_dim = args.d_dim\n input_dim = args.dataset_dim[1]\n channels = args.dataset_dim[3]\n model = keras.Sequential()\n\n # normal\n model.add(layers.Conv2D(64, (3, 3), padding='same', input_shape=[input_dim, input_dim, channels], kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, 
kernel_regularizer=args.wd))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(layers.LeakyReLU(alpha=0.2))\n # classifier\n model.add(layers.Flatten())\n model.add(layers.Dropout(0.4))\n model.add(layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n # compile model\n return model\n\n\n# 64x64\ndef gan64_gen(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*16*16)(noise)\n model = tf.keras.layers.Reshape((16, 16, 1024))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (3,3), strides=(2, 2), padding='same'))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.LeakyReLU(alpha=0.2))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (3,3), strides=(2, 2), padding='same'))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.LeakyReLU(alpha=0.2))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same'))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.LeakyReLU(alpha=0.2))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same'))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.LeakyReLU(alpha=0.2))(model)\n\n # Generator 1\n img1 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same')(model)\n\n return keras.Model(noise, img1)\n\n\ndef gan64_disc(args):\n d_dim = args.d_dim\n input_dim = args.dataset_dim[1]\n channels = args.dataset_dim[3]\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n model = keras.Sequential()\n # normal\n model.add(layers.Conv2D(64, (3, 3), padding='same', input_shape=img_shape))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # classifier\n model.add(layers.Flatten())\n model.add(layers.Dropout(0.4))\n model.add(layers.Dense(1))\n # compile model\n return model\n\n\ndef gan128_gen(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (4,4), 
strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(64, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n # Generator 1\n img1 = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n img1 = (tf.keras.layers.BatchNormalization())(img1)\n img1 = (tf.keras.layers.PReLU(prelu_init))(img1)\n\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n\n return keras.Model(noise, img1)\n\n\ndef gan128_disc(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n img1 = tf.keras.layers.Input(shape=img_shape)\n\n x1 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n x1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.1))\n\n model.add(tf.keras.layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n 
model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n\n output1 = model(x1)\n\n return keras.Model(img1, output1)\n\n\ndef gan256_gen(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(2048*4*4, kernel_regularizer=args.wd)(noise)\n model = tf.keras.layers.Reshape((4, 4, 2048))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(1024, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(64, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n img1 = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n img1 = (tf.keras.layers.BatchNormalization(momentum=0.8))(img1)\n img1 = (tf.keras.layers.PReLU(prelu_init))(img1)\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_regularizer=args.wd)(img1)\n\n return keras.Model(noise, img1)\n\n\ndef gan256_disc(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd)(img1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n x1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd)(x1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.1))\n\n model.add(tf.keras.layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n 
model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(512, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(1024, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Conv2D(2048, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(4096, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Dense(1, kernel_regularizer=args.wd))\n\n output1 = model(x1)\n\n return keras.Model(img1, output1)\n\n\n# Toy\ndef toy_gen(n_dim):\n inputs = keras.Input(shape=(n_dim,), name='digits')\n x = layers.Dense(128, activation='tanh', name='dense1')(inputs)\n x = layers.Dense(128, activation='tanh', name='dense2')(x)\n x = layers.Dense(128, activation='tanh', name='dense3')(x)\n outputs = layers.Dense(2, activation='linear', name='preds')(x)\n model = keras.Model(inputs=inputs, outputs=outputs)\n return model\n\n\ndef toy_disc(args):\n inputs = keras.Input(shape=(args.batch_size, 2), name='digits')\n x = layers.Dense(128, activation='tanh', name='dense1')(inputs)\n x = layers.Dense(128, activation='tanh', name='dense2')(x)\n x = layers.Dense(128, activation='tanh', name='dense3')(x)\n outputs = layers.Dense(1, name='preds')(x)\n model = keras.Model(inputs=inputs, outputs=outputs)\n return model\n\n\n# Mnist negative + edge\ndef cogan_generators_digit(args):\n channels = args.dataset_dim[3]\n\n output1 = []\n output2 = []\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n features_4x4 = (tf.keras.layers.PReLU())(model)\n output1.append(features_4x4)\n output2.append(features_4x4)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n features_8x8 = (tf.keras.layers.PReLU())(model)\n output1.append(features_8x8)\n output2.append(features_8x8)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU())(model)\n 
output1.append(model)\n output2.append(model)\n\n # Generator 1\n img1 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model)\n\n # Generator 2\n img2 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model)\n\n output1.append(img1)\n output2.append(img2)\n return keras.Model(noise, output1), keras.Model(noise, output2)\n\n\ndef cogan_discriminators_digit(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(20, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.MaxPool2D()(x1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(20, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.MaxPool2D()(x2)\n\n # Shared discriminator layers\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(50, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.MaxPool2D())\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(500, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n model.add(tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n output1 = model(x1, training=True)\n output2 = model(x2, training=True)\n\n return keras.Model(img1, output1), keras.Model(img2, output2)\n\n\ndef cogan_generators_digit_noshare(args):\n channels = args.dataset_dim[3]\n\n\n output1 = []\n output2 = []\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = tf.keras.layers.BatchNormalization()(model)\n model = tf.keras.layers.PReLU(prelu_init)(model)\n\n # Generator 1\n if args.use_firstlayer:\n model1 = (tf.keras.layers.Conv2DTranspose(1024, (4,4), strides=(1, 1), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n features1_4x4 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(features1_4x4)\n\n model1 = (tf.keras.layers.Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features1_4x4)\n else:\n model1 = (tf.keras.layers.Conv2DTranspose(512, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n features1_8x8 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(features1_8x8)\n\n model1 = (tf.keras.layers.Conv2DTranspose(256, (3,3), strides=(2, 2), 
padding='same',kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features1_8x8)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n features1_16x16 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(features1_16x16)\n\n model1 = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features1_16x16)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n model1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n img1 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n\n output1.append(img1)\n\n # Generator 2\n if args.use_firstlayer:\n model2 = (tf.keras.layers.Conv2DTranspose(1024, (4,4), strides=(1, 1), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n features2_4x4 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(features2_4x4)\n model2 = (tf.keras.layers.Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,kernel_regularizer=args.wd, bias_initializer=args.bi))(features2_4x4)\n else:\n model2 = (tf.keras.layers.Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n features2_8x8 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(features2_8x8)\n\n model2 = (tf.keras.layers.Conv2DTranspose(256, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features2_8x8)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n features2_16x16 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(features2_16x16)\n\n model2 = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same',kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features2_16x16)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n model2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n img2 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same',kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n output2.append(img2)\n\n return keras.Model(noise, output1), keras.Model(noise, output2)\n\n\ndef cogan_discriminators_digit_noshare(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(20, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.MaxPool2D()(x1)\n\n model1 = tf.keras.layers.Conv2D(50, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n model1 = tf.keras.layers.MaxPool2D()(model1)\n model1 = tf.keras.layers.Flatten()(model1)\n model1 = tf.keras.layers.Dense(500, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n model1 = tf.keras.layers.PReLU(prelu_init)(model1)\n model1 = tf.keras.layers.Dropout(0.5)(model1)\n model1 = 
tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(20, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.MaxPool2D()(x2)\n\n model2 = tf.keras.layers.Conv2D(50, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x2)\n model2 = tf.keras.layers.MaxPool2D()(model2)\n model2 = tf.keras.layers.Flatten()(model2)\n model2 = tf.keras.layers.Dense(500, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n model2 = tf.keras.layers.PReLU(prelu_init)(model2)\n model2 = tf.keras.layers.Dropout(0.5)(model2)\n model2 = tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n\n return keras.Model(img1, model1), keras.Model(img2, model2)\n\n\n# Mnist rotate\ndef cogan_generators_rotate(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n # Shared weights between generators\n model = keras.Sequential()\n model.add(tf.keras.layers.Dense(1024, input_dim=args.noise_dim, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dense(1024, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dense(1024, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dense(1024, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.BatchNormalization())\n\n feature_repr = model(noise)\n\n # Generator 1\n g1 = tf.keras.layers.Dense(np.prod(img_shape), activation='sigmoid', kernel_regularizer=args.wd, bias_initializer=args.bi)(feature_repr)\n img1 = tf.keras.layers.Reshape(img_shape)(g1)\n\n # Generator 2\n g2 = tf.keras.layers.Dense(np.prod(img_shape), activation='sigmoid', kernel_regularizer=args.wd, bias_initializer=args.bi)(feature_repr)\n img2 = tf.keras.layers.Reshape(img_shape)(g2)\n\n return keras.Model(noise, img1), keras.Model(noise, img2)\n\n\ndef cogan_discriminators_rotate(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n model1 = tf.keras.layers.Conv2D(20, (5,5), padding='same', kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n model1 = tf.keras.layers.MaxPool2D()(model1)\n model1 = tf.keras.layers.Conv2D(50, (5,5), padding='same', kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n model1 = tf.keras.layers.MaxPool2D()(model1)\n model1 = tf.keras.layers.Dense(500, kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n model1 = tf.keras.layers.LeakyReLU()(model1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n model2 = tf.keras.layers.Conv2D(20, (5,5), padding='same', kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n model2 = tf.keras.layers.MaxPool2D()(model2)\n model2 = 
tf.keras.layers.Conv2D(50, (5,5), padding='same', kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n model2 = tf.keras.layers.MaxPool2D()(model2)\n model2 = tf.keras.layers.Dense(500, kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n model2 = tf.keras.layers.LeakyReLU()(model2)\n\n # Shared discriminator layers\n model = keras.Sequential()\n model.add(tf.keras.layers.Flatten(input_shape=(8,8,500)))\n model.add(tf.keras.layers.Dense(1, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n validity1 = model(model1)\n validity2 = model(model2)\n\n return keras.Model(img1, validity1), keras.Model(img2, validity2)\n\n# Faces No share\ndef cogan_generators_faces_noshare(args):\n channels = args.dataset_dim[3]\n\n output1 = []\n output2 = []\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024 * 4 * 4, kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model1 = (tf.keras.layers.Conv2DTranspose(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n feature1_1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(feature1_1)\n\n model1 = (tf.keras.layers.Conv2DTranspose(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(feature1_1)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n feature2_1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(feature2_1)\n\n model1 = (tf.keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init,kernel_regularizer=args.wd, bias_initializer=args.bi))(feature2_1)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n feature3_1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(feature3_1)\n\n model1 = (tf.keras.layers.Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(feature3_1)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n feature4_1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(feature4_1)\n\n # Generator 1\n img1 = (tf.keras.layers.Conv2DTranspose(32, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(feature4_1)\n img1 = (tf.keras.layers.BatchNormalization())(img1)\n img1 = (tf.keras.layers.PReLU(prelu_init))(img1)\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3, 3), strides=(1, 1), activation='tanh', padding='same',\n kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi)(img1)\n output1.append(img1)\n\n model2 = (tf.keras.layers.Conv2DTranspose(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n feature1_2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(feature1_2)\n\n model2 = (tf.keras.layers.Conv2DTranspose(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, 
bias_initializer=args.bi))(feature1_2)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n feature2_2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(feature2_2)\n\n model2 = (tf.keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))(feature2_2)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n feature3_2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(feature3_2)\n\n model2 = (\n tf.keras.layers.Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))(feature3_2)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n feature4_2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(feature4_2)\n\n\n # Generator 2\n img2 = (tf.keras.layers.Conv2DTranspose(32, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))(feature4_2)\n img2 = (tf.keras.layers.BatchNormalization())(img2)\n img2 = (tf.keras.layers.PReLU(prelu_init))(img2)\n img2 = tf.keras.layers.Conv2DTranspose(channels, (3, 3), strides=(1, 1), activation='tanh', padding='same',\n kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi)(img2)\n output2.append(img2)\n\n return keras.Model(noise, output1), keras.Model(noise, output2)\n\ndef cogan_discriminators_faces_noshare(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.LayerNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n x1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n x1 = tf.keras.layers.LayerNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.LayerNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n x2 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi)(x2)\n x2 = tf.keras.layers.LayerNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n\n # Shared discriminator layers\n model1 = keras.Sequential()\n model1.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.1))\n\n model1.add(tf.keras.layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.3))\n\n model1.add(tf.keras.layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same', 
kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.3))\n\n model1.add(tf.keras.layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.5))\n\n model1.add(tf.keras.layers.Flatten())\n model1.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.5))\n\n model1.add(\n tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n model2 = keras.Sequential()\n model2.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.1))\n\n model2.add(tf.keras.layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.3))\n\n model2.add(tf.keras.layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.3))\n\n model2.add(tf.keras.layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.5))\n\n model2.add(tf.keras.layers.Flatten())\n model2.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.5))\n\n model2.add(\n tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n output1 = model1(x1)\n output2 = model2(x2)\n\n return keras.Model(img1, output1), keras.Model(img2, output2)\n\n\n# Faces\ndef cogan_generators_faces(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n 
model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(64, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n # Generator 1\n img1 = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n img1 = (tf.keras.layers.BatchNormalization())(img1)\n img1 = (tf.keras.layers.PReLU(prelu_init))(img1)\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n\n # Generator 2\n img2 = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n img2 = (tf.keras.layers.BatchNormalization())(img2)\n img2 = (tf.keras.layers.PReLU(prelu_init))(img2)\n img2 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n\n return keras.Model(noise, img1), keras.Model(noise, img2)\n\n\ndef cogan_discriminators_faces(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n x1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.BatchNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n x2 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x2)\n x2 = tf.keras.layers.BatchNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n\n # Shared discriminator layers\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n 
model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.1))\n\n model.add(tf.keras.layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n output1 = model(x1, training=True)\n output2 = model(x2, training=True)\n\n return keras.Model(img1, output1), keras.Model(img2, output2)\n\n\n# 256x256 CoGANs\ndef cogan_generators_256(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(2048*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 2048))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(1024, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.LeakyReLU())(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.LeakyReLU())(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(64, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = 
(tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n # Generator 1\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model)\n\n # Generator 2\n img2 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model)\n\n return keras.Model(noise, img1), keras.Model(noise, img2)\n\n\ndef cogan_discriminators_256(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(32, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n x1 = tf.keras.layers.Conv2D(64, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(32, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.BatchNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n x2 = tf.keras.layers.Conv2D(64, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x2)\n x2 = tf.keras.layers.BatchNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n\n # Shared discriminator layers\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.1))\n\n model.add(tf.keras.layers.Conv2D(256, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(512, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(1024, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, 
kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n output1 = model(x1)\n output2 = model(x2)\n\n return keras.Model(img1, output1), keras.Model(img2, output2)\n\n\ndef mnist_classifier(args, num_classes):\n img_shape = (32, 32, 3)\n input = tf.keras.layers.Input(shape=img_shape)\n model = tf.keras.layers.Conv2D(32, (3,3))(input)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.Conv2D(64, (3,3))(model)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.MaxPooling2D((2,2))(model)\n model = tf.keras.layers.Dropout(0.25)(model)\n model = tf.keras.layers.Flatten()(model)\n model = tf.keras.layers.Dense(128)(model)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.Dropout(0.5)(model)\n output = tf.keras.layers.Dense(num_classes, activation='softmax')(model)\n\n return tf.keras.Model(input, output)\n\n\ndef celeba_classifier(args, num_classes):\n img_shape = (128,128,3)\n input = tf.keras.layers.Input(shape=img_shape)\n\n model = tf.keras.layers.Conv2D(32, (3,3))(input)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.Conv2D(64, (3,3))(model)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.MaxPooling2D((2,2))(model)\n model = tf.keras.layers.Dropout(0.25)(model)\n model = tf.keras.layers.Flatten()(model)\n model = tf.keras.layers.Dense(128)(model)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.Dropout(0.5)(model)\n\n output = tf.keras.layers.Dense(num_classes, activation='sigmoid')(model)\n return tf.keras.Model(input, output)\n\n\n\n\n\n" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.initializers.Constant", "tensorflow.keras.layers.Reshape", "tensorflow.keras.Sequential", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dense", "tensorflow.keras.Input", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.LeakyReLU", "numpy.prod", "tensorflow.keras.layers.LayerNormalization", "tensorflow.keras.backend.clip", "tensorflow.keras.Model", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.layers.PReLU", "tensorflow.keras.layers.Input" ] ]
oke-aditya/fashion_intel
[ "8add9a94c6a7f30cc1c70a99c2e83860b2204f11" ]
[ "fashion_intel/pytorch_cnn_trainer/utils.py" ]
[ "import numpy as np\nimport torch\nimport random\nimport matplotlib.pyplot as plt\nimport os\n\n__all__ = [\n \"seed_everything\",\n \"AverageMeter\",\n \"accuracy\",\n \"EarlyStopping\",\n \"matplotlib_imshow\",\n \"print_size_of_model\",\n]\n\n\ndef seed_everything(seed):\n \"\"\"\n Makes code deterministic using a given seed. Internally sets all seeds of torch, numpy and random.\n \"\"\"\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n return [correct[:k].view(-1).float().sum(0) * 100.0 / batch_size for k in topk]\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print(\"\\t\".join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = \"{:\" + str(num_digits) + \"d}\"\n return \"[\" + fmt + \"/\" + fmt.format(num_batches) + \"]\"\n\n\nclass EarlyStopping:\n \"\"\"Early stops the training if validation loss doesn't improve after a given patience.\"\"\"\n\n def __init__(self, patience=7, verbose=False, delta=0.0001, path=\"checkpoint.pt\"):\n \"\"\"\n Args:\n patience (int): How long to wait after last time validation loss improved.\n Default: 7\n verbose (bool): If True, prints a message for each validation loss improvement. \n Default: False\n delta (float): Minimum change in the monitored quantity to qualify as an improvement.\n Default: 0\n path (str): Path for the checkpoint to be saved to.\n Default: 'checkpoint.pt'\n \"\"\"\n self.patience = patience\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop = False\n self.val_loss_min = np.Inf\n self.delta = delta\n self.path = path\n\n def __call__(self, val_loss, model):\n\n score = -val_loss\n\n if self.best_score is None:\n self.best_score = score\n self.save_checkpoint(val_loss, model)\n elif score < self.best_score + self.delta:\n self.counter += 1\n print(f\"EarlyStopping counter: {self.counter} out of {self.patience}\")\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n self.best_score = score\n self.save_checkpoint(val_loss, model)\n self.counter = 0\n\n def save_checkpoint(self, val_loss, model):\n \"\"\"Saves model when validation loss decrease.\"\"\"\n if self.verbose:\n print(\n f\"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). 
Saving model ...\"\n )\n torch.save(model.state_dict(), self.path)\n self.val_loss_min = val_loss\n\n\ndef print_size_of_model(model):\n torch.save(model.state_dict(), \"temp.p\")\n print(\"Size (MB):\", os.path.getsize(\"temp.p\") / 1e6)\n os.remove(\"temp.p\")\n\n\ndef matplotlib_imshow(img, one_channel=False):\n if one_channel:\n img = img.mean(dim=0)\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n if one_channel:\n plt.imshow(npimg, cmap=\"Greys\")\n else:\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n" ]
[ [ "numpy.transpose", "torch.cuda.manual_seed", "torch.manual_seed", "numpy.random.seed", "matplotlib.pyplot.imshow" ] ]
zzing0907/Tensorflow
[ "f0d66b2674fecc0f2be1423cf696a7e6e7c7a39d" ]
[ "tensorflow/python/ops/structured/structured_tensor_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for StructuredTensor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.structured import structured_tensor\nfrom tensorflow.python.platform import googletest\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass StructuredTensorTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def assertAllEqual(self, a, b, msg=None):\n if not (isinstance(a, structured_tensor.StructuredTensor) or\n isinstance(b, structured_tensor.StructuredTensor)):\n return super(StructuredTensorTest, self).assertAllEqual(a, b, msg)\n if not (isinstance(a, structured_tensor.StructuredTensor) and\n isinstance(b, structured_tensor.StructuredTensor)):\n # TODO(edloper) Add support for this once structured_factory_ops is added.\n raise ValueError(\"Not supported yet\")\n\n self.assertEqual(repr(a.shape), repr(b.shape))\n self.assertEqual(set(a.field_names()), set(b.field_names()))\n for field in a.field_names():\n self.assertAllEqual(a.field_value(field), b.field_value(field))\n\n @parameterized.parameters([\n {\n \"shape\": [],\n \"fields\": {},\n },\n {\n \"shape\": [None],\n \"fields\": {},\n },\n {\n \"shape\": [1, 5, 3],\n \"fields\": {},\n },\n {\n \"shape\": [],\n \"fields\": {\"Foo\": 5, \"Bar\": [1, 2, 3]},\n },\n {\n \"shape\": [2],\n \"fields\": {\"x\": [1, 2], \"y\": [[1, 2], [3, 4]]},\n },\n {\n \"shape\": [None],\n \"fields\": {\"x\": [1, 2], \"y\": [[1, 2], [3, 4]]},\n \"expected_shape\": [2], # inferred from field values.\n },\n {\n \"shape\": [],\n \"fields\": {\n \"r\": ragged_factory_ops.constant_value([[1, 2], [3]]),\n },\n },\n {\n \"shape\": [2],\n \"fields\": {\n \"r\": ragged_factory_ops.constant_value([[1, 2], [3]]),\n },\n },\n {\n \"shape\": [2, None],\n \"fields\": {\n \"r\": ragged_factory_ops.constant_value(\n [[[1, 2], [3]], [[4, 5, 6], [7], [8, 9]]]),\n },\n },\n {\n # Note: fields must have identical row_splits.\n \"shape\": [2, None],\n \"fields\": {\n \"a\": ragged_factory_ops.constant_value([[1, 2], [3]]),\n \"b\": ragged_factory_ops.constant_value([[4, 5], [6]]),\n },\n },\n {\n # Note: fields must have identical outer row_splits.\n \"shape\": [2, None],\n \"fields\": {\n \"a\": ragged_factory_ops.constant_value(\n [[[1, 2], [3]], [[4, 5, 6], [7], [8, 9]]]),\n \"b\": ragged_factory_ops.constant_value(\n [[[1], []], [[2, 3], [4, 5, 6], [7, 8]]]),\n },\n },\n ]) # pyformat: disable\n def 
testFromFields(self, shape, fields, expected_shape=None):\n struct = structured_tensor.StructuredTensor.from_fields(shape, fields)\n if expected_shape is None:\n expected_shape = shape\n self.assertEqual(struct.shape.as_list(), expected_shape)\n self.assertLen(expected_shape, struct.rank)\n self.assertEqual(struct.field_names(), tuple(fields.keys()))\n for field, value in fields.items():\n self.assertIsInstance(\n struct.field_value(field),\n (ops.Tensor, structured_tensor.StructuredTensor,\n ragged_tensor.RaggedTensor))\n self.assertAllEqual(struct.field_value(field), value)\n\n def testNestedStructConstruction(self):\n rt = ragged_factory_ops.constant([[1, 2], [3]])\n struct1 = structured_tensor.StructuredTensor.from_fields([], {\"x\": [1, 2]})\n struct2 = structured_tensor.StructuredTensor.from_fields([2], {\"x\": [1, 2]})\n struct3 = structured_tensor.StructuredTensor.from_fields([], {\n \"r\": rt,\n \"s\": struct1\n })\n struct4 = structured_tensor.StructuredTensor.from_fields([2], {\n \"r\": rt,\n \"s\": struct2\n })\n\n self.assertEqual(struct3.shape.as_list(), [])\n self.assertEqual(struct3.rank, 0)\n self.assertEqual(set(struct3.field_names()), set([\"r\", \"s\"]))\n self.assertAllEqual(struct3.field_value(\"r\"), rt)\n self.assertAllEqual(struct3.field_value(\"s\"), struct1)\n\n self.assertEqual(struct4.shape.as_list(), [2])\n self.assertEqual(struct4.rank, 1)\n self.assertEqual(set(struct4.field_names()), set([\"r\", \"s\"]))\n self.assertAllEqual(struct4.field_value(\"r\"), rt)\n self.assertAllEqual(struct4.field_value(\"s\"), struct2)\n\n @parameterized.parameters([\n (object(), {}, TypeError),\n ([], object(), TypeError, \"fields must be a dictionary\"),\n ([], {1: 2}, TypeError, \"Unexpected type for key\"),\n ([], {\"x\": object()}, TypeError, \"Unexpected type for value\"),\n (None, {}, ValueError, \"StructuredTensor's shape must have known rank\"),\n ([5], {\"f\": 5}, ValueError, r\"Shapes \\(5,\\) and \\(\\) are not compatible\"),\n ([None], {\"x\": [1], \"y\": []}, ValueError,\n r\"Shapes \\([01],\\) and \\([01],\\) are not compatible\"),\n ([], {\"\": 5}, ValueError, \"Field name '' is not currently allowed.\"),\n ([], {\"_\": 5}, ValueError, \"Field name '_' is not currently allowed.\"),\n {\n # Note: fields must have identical outer row_splits.\n \"shape\": [2, None],\n \"fields\": {\n \"r1\": ragged_factory_ops.constant_value(\n [[1, 2], [3]]),\n \"r2\": ragged_factory_ops.constant_value(\n [[1, 2, 3], [4]]),\n },\n \"err\": errors.InvalidArgumentError,\n \"msg\": r\"`fields` are not consistent in the outer 2 dimension\"\n },\n ]) # pyformat: disable\n def testFromFieldsErrors(self, shape, fields, err, msg=None):\n with self.assertRaisesRegexp(err, msg):\n struct = structured_tensor.StructuredTensor.from_fields(shape, fields)\n self.evaluate(struct.field_value(struct.field_names()[0]))\n\n @parameterized.parameters([\n {\n \"shape\": [3],\n \"fields\": {\"x\": [1, 2, 3], \"y\": [[1, 2], [3, 4], [5, 6]]},\n \"row_splits\": [0, 2, 3],\n },\n ]) # pyformat: disable\n def testFromRowSplits(self, shape, fields, row_splits, expected_shape=None):\n values = structured_tensor.StructuredTensor.from_fields(shape, fields)\n struct = structured_tensor.StructuredTensor.from_row_splits(\n values, row_splits)\n if expected_shape is None:\n expected_shape = tensor_shape.TensorShape([None,\n None]).concatenate(shape[1:])\n struct.shape.assert_is_compatible_with(expected_shape)\n else:\n self.assertEqual(struct.shape.as_list(), expected_shape)\n self.assertEqual(struct.shape.rank, 
struct.rank)\n self.assertEqual(struct.field_names(), tuple(fields.keys()))\n for field, value in fields.items():\n self.assertIsInstance(\n struct.field_value(field),\n (ops.Tensor, structured_tensor.StructuredTensor,\n ragged_tensor.RaggedTensor))\n self.assertAllEqual(\n struct.field_value(field),\n ragged_tensor.RaggedTensor.from_row_splits(value, row_splits))\n\n @parameterized.parameters([\n ([], {}, [\"x\"], ValueError,\n r\"Shape \\(\\) must have rank at least 1\"),\n ([0], {}, [\"x\"], ValueError,\n r\"Row-partitioning tensors must have dtype int32 or int64\"),\n ([0], {}, [[0]], ValueError,\n r\"Shape \\(1, 1\\) must have rank 1\"),\n ([0], {}, np.array([], np.int32), ValueError,\n r\"row_splits may not be empty\"),\n ]) # pyformat: disable\n def testFromRowSplitsErrors(self, shape, fields, row_splits, err, msg=None):\n with self.assertRaisesRegexp(err, msg):\n values = structured_tensor.StructuredTensor.from_fields(shape, fields)\n structured_tensor.StructuredTensor.from_row_splits(values, row_splits)\n\n def testFromRowSplitsBadValueType(self):\n with self.assertRaisesRegexp(TypeError,\n \"values must be a StructuredTensor\"):\n structured_tensor.StructuredTensor.from_row_splits([1, 2], [0, 2])\n\n\nif __name__ == \"__main__\":\n googletest.main()\n" ]
[ [ "tensorflow.python.ops.ragged.ragged_factory_ops.constant", "tensorflow.python.ops.ragged.ragged_factory_ops.constant_value", "tensorflow.python.platform.googletest.main", "tensorflow.python.ops.structured.structured_tensor.StructuredTensor.from_fields", "tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits", "tensorflow.python.ops.structured.structured_tensor.StructuredTensor.from_row_splits", "tensorflow.python.framework.tensor_shape.TensorShape", "numpy.array" ] ]
krsmith/DS-Unit-3-Sprint-2-SQL-and-Databases
[ "9617528ad5fd23354623926b819f98f9a063d252" ]
[ "demo_data.py" ]
[ "import sqlite3\nimport pandas as pd\n\nconn = sqlite3.connect('demo_data.sqlite3')\ncurs = conn.cursor()\n\ncreate_demo_table = \"\"\"\nCREATE TABLE demo (\n s varchar(5),\n x int,\n y int\n );\"\"\"\n\ncurs.execute(create_demo_table)\nconn.commit()\n\ncurs.execute(\"\"\"INSERT INTO demo (\n s, x, y) VALUES\"\"\" + str(('g', 3, 9)))\nconn.commit()\n\ncurs.execute(\"\"\"INSERT INTO demo (\n s, x, y) VALUES\"\"\" + str(('v', 5, 7)))\nconn.commit()\n\ncurs.execute(\"\"\"INSERT INTO demo (\n s, x, y) VALUES\"\"\" + str(('f', 8, 7)))\nconn.commit()\n\n# Queries for SC questions\n\n\n# Count how many rows you have - it should be 3!\ndef row_count():\n print(pd.read_sql_query(\"\"\"SELECT COUNT(*) as row_count\n FROM demo;\"\"\", conn))\n# row_count\n# 0 3\n\n\n# How many rows are there where both x and y are at least 5?\ndef row_xy5():\n print(pd.read_sql_query(\"\"\"SELECT COUNT(*) as row_count\n FROM demo\n WHERE x >= 5\n AND y >= 5;\"\"\", conn))\n# row_count\n# 0 2\n\n\n# How many unique values of y are there (hint - COUNT() can accept\n# a keyword DISTINCT)?\ndef y_values():\n print(pd.read_sql_query(\"\"\"SELECT COUNT(distinct y) as y_values\n FROM demo;\"\"\", conn))\n# y_values\n# 0 2\n" ]
[ [ "pandas.read_sql_query" ] ]
fpmosley/advent-of-code
[ "507bd89795ff6a0824284c3c8d2123cf19a932a3" ]
[ "2021/day09/part01/smoke_basin.py" ]
[ "#!/usr/bin/env python\n\n'''\nAdvent of Code 2021 - Day 9: Smoke Basin (Part 1)\nhttps://adventofcode.com/2021/day/9\n'''\n\nimport numpy as np\n\n\nclass HeightMap():\n def __init__(self) -> None:\n self._grid = np.array([])\n\n def add_row(self, row):\n np_row = np.array(row)\n if self._grid.size != 0:\n self._grid = np.vstack([self._grid, np_row])\n else:\n self._grid = np_row\n\n def find_low_points(self, radius=1):\n low_points = []\n for index, point in np.ndenumerate(self._grid):\n neighbor_points = self._neighbors(radius, coordinates=index)\n\n if point < min(neighbor_points):\n low_points.append(point)\n\n return low_points\n\n def _neighbors(self, radius, coordinates=(0, 0)):\n neighbors = []\n row = coordinates[0]\n column = coordinates[1]\n\n # Get UP neighbor value\n if row >= 1:\n neighbors.append(self._grid[row - radius, column])\n\n # Get LEFT neighbor value\n if column >= 1:\n neighbors.append(self._grid[row, column - radius])\n\n # Get RIGHT neighbor value\n if column < len(self._grid[0]) - radius:\n neighbors.append(self._grid[row, column + radius])\n\n # Get DOWN neighbor value\n if row < len(self._grid) - radius:\n neighbors.append(self._grid[row + radius, column])\n\n return neighbors\n\n def __str__(self) -> str:\n output = \"\"\n for row in self._grid:\n for elem in row:\n output = output + f\"{elem:>3}\"\n output = output + \"\\n\"\n return output\n\n\ndef calculate_risk(heights):\n # Risk is 1 plus the height\n return sum([height + 1 for height in heights])\n\n\ndef main():\n\n filename = input(\"What is the input file name? \")\n\n try:\n with open(filename, \"r\") as file:\n\n # Create a new board\n area = HeightMap()\n\n # Read the rows and setup the HeightMap\n for line in file:\n line = line.strip()\n\n input_row = [int(x) for x in str(line)]\n area.add_row(input_row)\n\n print(\"The input grid: \")\n print(area)\n low_points = area.find_low_points()\n sum_risk_levels = calculate_risk(\n low_points) if low_points else None\n\n if sum_risk_levels:\n low_points_str = [str(point) for point in low_points]\n print(f\"Number of low points: {len(low_points)}\")\n print(f\"Low points: {', '.join(low_points_str)}\")\n print(\n f\"\\nThe sum of the risk levels of all low points is: {sum_risk_levels}\\n\")\n else:\n print(\"The sum of the risk levels of all low points not found.\\n\")\n\n except FileNotFoundError:\n print(f\"No such file or directory: '{filename}'\")\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "numpy.ndenumerate", "numpy.vstack" ] ]
Mohammed-Abbass/DeepEI
[ "6466556e529afd9ef747105c21cba51cbac890fe", "6466556e529afd9ef747105c21cba51cbac890fe" ]
[ "Retention/multi_cnn.py", "Discussion/Reply_Comments/NEIMS_A_B_comparison.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 9 09:22:42 2020\n\n@author: hcji\n\"\"\"\n\n\nimport numpy as np\nimport tensorflow.keras.backend as K\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\nfrom tensorflow.keras import optimizers\nfrom sklearn.metrics import mean_absolute_error, r2_score\nfrom smiles_to_onehot.encoding import get_dict, one_hot_coding\n\nclass multi_CNN:\n def __init__(self, X, Y):\n self.X = X\n self.Y = Y\n self.X_tr, self.X_ts, self.Y_tr, self.Y_ts = train_test_split(X, Y, test_size=0.1)\n \n inp = Input(shape=(X.shape[1:3]))\n n = X.shape[1]\n\n hidv1 = Conv1D(n, kernel_size=2, activation='relu')(inp)\n # hidv1 = MaxPooling1D(pool_size=2)(hidv1)\n hidv1 = Conv1D(n, kernel_size=2, activation='relu')(hidv1)\n # hidv1 = MaxPooling1D(pool_size=2)(hidv1)\n hidv1 = Flatten()(hidv1)\n \n hidv2 = Conv1D(n, kernel_size=3, activation='relu')(inp)\n # hidv2 = MaxPooling1D(pool_size=3)(hidv2)\n hidv2 = Conv1D(n, kernel_size=3, activation='relu')(hidv2)\n # hidv2 = MaxPooling1D(pool_size=3)(hidv2)\n hidv2 = Flatten()(hidv2)\n \n hidv3 = Conv1D(n, kernel_size=4, activation='relu')(inp)\n # hidv3 = MaxPooling1D(pool_size=4)(hidv3)\n hidv3 = Conv1D(n, kernel_size=4, activation='relu')(hidv3)\n # hidv3 = MaxPooling1D(pool_size=4)(hidv3)\n hidv3 = Flatten()(hidv3)\n\n hid = concatenate([hidv1, hidv2, hidv3], axis=-1)\n hid = Dense(32, activation=\"relu\")(hid)\n hid = Dense(32, activation=\"relu\")(hid)\n \n prd = Dense(1, activation=\"linear\")(hid)\n opt = optimizers.Adam(lr=0.001)\n model = Model(inp, prd)\n model.compile(optimizer=opt, loss='mse', metrics=['mae'])\n self.model = model\n \n def train(self, epochs=20):\n history = self.model.fit(self.X_tr, self.Y_tr, epochs=epochs, validation_split = 0.1)\n plt.cla()\n plt.plot(history.history['val_loss'], alpha= 0.8)\n plt.plot(history.history['val_mean_absolute_error'], alpha= 0.8)\n plt.legend(['loss', 'accuracy'], loc=\"lower left\")\n plt.xlabel('epoch')\n return history\n \n def test(self):\n Y_test = self.Y_ts\n Y_pred = np.round(self.model.predict(self.X_ts))\n r2 = round(r2_score(Y_pred, Y_test), 4)\n mae = round(mean_absolute_error(Y_pred, Y_test), 4)\n\n plt.cla()\n plt.plot(Y_test, Y_pred, '.', color = 'blue')\n plt.plot([0,4500], [0,4500], color ='red')\n plt.ylabel('Predicted RI')\n plt.xlabel('Experimental RI') \n plt.text(0, 4000, 'R2='+str(r2), fontsize=12)\n plt.text(0, 3600, 'MAE='+str(mae), fontsize=12)\n plt.show()\n return r2, mae\n \n def save(self, path):\n self.model.save(path)\n K.clear_session()\n \n\nif __name__ == '__main__':\n \n import json\n \n with open('DeepEI/data/split.json', 'r') as js:\n keep = np.array(json.load(js)['keep'])\n \n smiles = np.array(json.load(open('DeepEI/data/all_smiles.json')))[keep]\n rindex = np.load('DeepEI/data/retention.npy')[keep,:]\n \n words = get_dict(smiles, save_path='DeepEI/data/words.json')\n smiles = [one_hot_coding(smi, words, max_len=100).todense() for smi in smiles]\n smiles = np.array(smiles)\n \n # simipolar\n i = np.where(~ np.isnan(rindex[:,0]))[0]\n mod = multi_CNN(smiles[i], rindex[i,0])\n mod.train()\n mod.test()\n mod.save('Retention/models/SimiStdNP_CNN_multi_model.h5')\n \n # nonpolar\n i = np.where(~ np.isnan(rindex[:,1]))[0]\n mod = multi_CNN(smiles[i], rindex[i,1])\n mod.train()\n mod.test()\n mod.save('Retention/models/StdNP_CNN_multi_model.h5')\n\n # polar\n i 
= np.where(~ np.isnan(rindex[:,2]))[0]\n mod = multi_CNN(smiles[i], rindex[i,2])\n mod.train()\n mod.test()\n mod.save('Retention/models/StdPolar_CNN_multi_model.h5')\n ", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 7 09:04:52 2020\n\n@author: hcji\n\"\"\"\n\n\nimport json\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom scipy.sparse import load_npz\nfrom DeepEI.utils import get_score\n\nwith open('DeepEI/data/split.json', 'r') as js:\n split = json.load(js)\nkeep = np.array(split['keep'])\n\nnist_smiles = np.array(json.load(open('DeepEI/data/all_smiles.json')))[keep]\nnist_masses = np.load('DeepEI/data/molwt.npy')[keep]\nnist_spec = load_npz('DeepEI/data/peakvec.npz').todense()[keep,:]\nneims_nist_spec = load_npz('DeepEI/data/neims_spec_nist.npz').todense()[keep,:]\n\nneims_msbk_smiles = np.array(json.load(open('DeepEI/data/neims_msbk_smiles.json')))\nneims_msbk_masses = np.load('DeepEI/data/neims_msbk_masses.npy')\nneims_msbk_spec = load_npz('DeepEI/data/neims_spec_msbk.npz').todense()\n\nmsbk_smiles = np.array(json.load(open('DeepEI/data/msbk_smiles.json')))\nmsbk_masses = np.load('DeepEI/data/msbk_masses.npy')\nmsbk_spec = load_npz('DeepEI/data/msbk_spec.npz').todense()\n\ndb_smiles = np.array(list(nist_smiles) + list(neims_msbk_smiles))\ndb_masses = np.append(nist_masses, neims_msbk_masses)\ndb_spec_a = np.append(nist_spec, neims_msbk_spec, axis=0)\ndb_spec_b = np.append(neims_nist_spec, neims_msbk_spec, axis=0)\n\ni = 70\nsmi = msbk_smiles[i]\nspecr = msbk_spec[i]\nmass = msbk_masses[i]\ncandidate = np.where(np.abs(db_masses - mass) < 5)[0]\ncand_smi = db_smiles[candidate]\n\nscores_a = get_score(specr, db_spec_a[candidate,:], m='wdp')\nscores_b = get_score(specr, db_spec_b[candidate,:], m='wdp')\n\nwh_true = np.where(cand_smi == smi)[0][0]\ntrue_score_a = scores_a[wh_true]\ntrue_score_b = scores_b[wh_true]\nrank_a = len(np.where(scores_a > true_score_a)[0]) + 1\nrank_b = len(np.where(scores_b > true_score_b)[0]) + 1\n\ntrue = candidate[wh_true]\nj = candidate[435]\n\ndecoy_smi = db_smiles[j]\n\nplt.figure(figsize=(6, 6))\nplt.vlines(np.arange(0, 2000), np.zeros(2000), db_spec_a[j], 'red', label='NIST_decoy')\nplt.vlines(np.arange(0, 2000), np.zeros(2000), -db_spec_b[j], 'blue', label='NEIMS_decoy')\nplt.axhline(0, color='black')\nplt.xlim(0, 500)\nplt.legend()\n\nplt.figure(figsize=(6, 6))\nplt.vlines(np.arange(0, 2000), np.zeros(2000), specr, 'green', label='MassBank_true')\nplt.axhline(0, color='black')\nplt.vlines(np.arange(0, 2000), np.zeros(2000), -db_spec_b[j], 'blue', label='NEIMS_decoy')\nplt.xlim(0, 500)\nplt.legend()\n\nplt.figure(figsize=(6, 6))\nplt.vlines(np.arange(0, 2000), np.zeros(2000), specr, 'green', label='MassBank_true')\nplt.axhline(0, color='black')\nplt.vlines(np.arange(0, 2000), np.zeros(2000), -db_spec_a[j], 'red', label='NIST_decoy')\nplt.xlim(0, 500)\nplt.legend()\n\nplt.figure(figsize=(6, 6))\nplt.vlines(np.arange(0, 2000), np.zeros(2000), specr, 'green', label='MassBank_true')\nplt.axhline(0, color='black')\nplt.vlines(np.arange(0, 2000), np.zeros(2000), -db_spec_a[true], 'purple', label='NEIMS_true')\nplt.xlim(0, 500)\nplt.legend()\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Flatten", "matplotlib.pyplot.cla", "tensorflow.keras.models.Model", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.layers.Dense", "numpy.isnan", "numpy.load", "sklearn.metrics.mean_absolute_error", "tensorflow.keras.layers.concatenate", "sklearn.metrics.r2_score", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.legend", "tensorflow.keras.backend.clear_session", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.xlabel", "tensorflow.keras.layers.Input" ], [ "numpy.load", "matplotlib.pyplot.legend", "numpy.append", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.axhline", "numpy.abs", "scipy.sparse.load_npz", "matplotlib.pyplot.xlim", "numpy.arange", "numpy.array", "numpy.where" ] ]
matthewclso/ivy
[ "b297fd89812ec96212ef6996a82c65fe3aab9d3c" ]
[ "ivy/functional/backends/mxnet/old/general.py" ]
[ "\"\"\"\nCollection of MXNet general functions, wrapped to fit Ivy syntax and signature.\n\"\"\"\n\n# global\nimport ivy\n_round = round\nimport logging\nimport mxnet as _mx\nimport numpy as _np\nimport math as _math\nfrom numbers import Number\nfrom operator import mul as _mul\nfrom functools import reduce as _reduce\nimport multiprocessing as _multiprocessing\n\n# local\nfrom ivy.functional.ivy.old import default_device, default_dtype\nfrom ivy.functional.backends.mxnet.old.device import _callable_dev\nfrom ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context,\\\n _scalar_or_flat_array_to_scalar, _handle_flat_arrays_in\n\n\nDTYPE_TO_STR = {_np.dtype('int8'): 'int8',\n _np.dtype('int16'): 'int16',\n _np.dtype('int32'): 'int32',\n _np.dtype('int64'): 'int64',\n _np.dtype('uint8'): 'uint8',\n _np.dtype('uint16'): 'uint16',\n _np.dtype('uint32'): 'uint32',\n _np.dtype('uint64'): 'uint64',\n 'bfloat16': 'bfloat16',\n _np.dtype('float16'): 'float16',\n _np.dtype('float32'): 'float32',\n _np.dtype('float64'): 'float64',\n _np.dtype('bool'): 'bool',\n\n _np.int8: 'int8',\n _np.int16: 'int16',\n _np.int32: 'int32',\n _np.int64: 'int64',\n _np.uint8: 'uint8',\n _np.uint16: 'uint16',\n _np.uint32: 'uint32',\n _np.uint64: 'uint64',\n _np.float16: 'float16',\n _np.float32: 'float32',\n _np.float64: 'float64',\n _np.bool_: 'bool'}\n\nDTYPE_FROM_STR = {'int8': _np.int8,\n 'int16': _np.int16,\n 'int32': _np.int32,\n 'int64': _np.int64,\n 'uint8': _np.uint8,\n 'uint16': _np.uint16,\n 'uint32': _np.uint32,\n 'uint64': _np.uint64,\n 'bfloat16': 'bfloat16',\n 'float16': _np.float16,\n 'float32': _np.float32,\n 'float64': _np.float64,\n 'bool': _np.bool_}\n\n\n# API #\n# ----#\n\ndef array(object_in, dtype=None, dev=None):\n cont = _mxnet_init_context(default_device(dev))\n return _mx.nd.array(object_in, cont, dtype=default_dtype(dtype, object_in))\n\n\nasarray = array\n\n\ndef is_array(x, exclusive=False):\n if isinstance(x, _mx.ndarray.ndarray.NDArray):\n if exclusive and x.grad is not None:\n return False\n return True\n return False\n\n\ncopy_array = lambda x: x.copy()\n\n\n@_handle_flat_arrays_in_out\ndef array_equal(x0, x1):\n if ivy.dtype(x0, as_str=True) == 'bool':\n x0 = x0.astype('int32')\n if ivy.dtype(x1, as_str=True) == 'bool':\n x1 = x1.astype('int32')\n return _mx.nd.min(_mx.nd.broadcast_equal(x0, x1)) == 1\n\n\ndef dtype_bits(dtype_in):\n dtype_str = dtype_to_str(dtype_in)\n if 'bool' in dtype_str:\n return 1\n return int(dtype_str.replace(\"<class 'numpy.\", '').replace(\"'>\", '').replace('uint', '').replace(\n 'int', '').replace('bfloat', '').replace('float', ''))\n\n\nequal = lambda x1, x2: x1 == x2\nequal.__name__ = 'equal'\nto_numpy = lambda x: x if isinstance(x, _np.ndarray) else (_np.array(x) if isinstance(x, (int, float)) else x.asnumpy())\nto_numpy.__name__ = 'to_numpy'\nto_scalar = lambda x: x if isinstance(x, Number) else x.asscalar().item()\nto_scalar.__name__ = 'to_scalar'\nto_list = lambda x: to_numpy(x).tolist()\nto_list.__name__ = 'to_list'\nshape = lambda x, as_tensor=False: _mx.nd.shape_array(x) if as_tensor else x.shape\nshape.__name__ = 'shape'\nget_num_dims = lambda x, as_tensor=False:\\\n _mx.nd.shape_array(_mx.nd.shape_array(x)).reshape([]) if as_tensor else len(x.shape)\nminimum = lambda x, y: _mx.nd.array(_mx.nd.minimum(_scalar_or_flat_array_to_scalar(x), _scalar_or_flat_array_to_scalar(y)))\nmaximum = lambda x, y: _mx.nd.array(_mx.nd.maximum(_scalar_or_flat_array_to_scalar(x), 
_scalar_or_flat_array_to_scalar(y)))\n\n\n@_handle_flat_arrays_in_out\ndef clip(x, x_min, x_max):\n return _mx.nd.clip(_mx.nd.array(x), x_min, x_max)\n\n\n@_handle_flat_arrays_in_out\ndef round(x):\n return _mx.nd.round(x)\n\n\n@_handle_flat_arrays_in_out\ndef floormod(x, y):\n return x % y\n\n\n@_handle_flat_arrays_in_out\ndef floor(x):\n return _mx.nd.floor(x)\n\n\n# noinspection PyShadowingBuiltins\n@_handle_flat_arrays_in_out\ndef abs(x):\n return _mx.nd.abs(x)\n\nargmin = lambda x, axis=0: _mx.nd.argmin(x, axis)\n\n\n@_handle_flat_arrays_in_out\ndef cast(x, dtype):\n return x.astype(dtype)\n\n\nastype = cast\n\n\n# noinspection PyUnresolvedReferences\ndef arange(stop, start=0, step=1, dtype=None, dev=None):\n cont = _mxnet_init_context(default_device(dev))\n stop = stop if isinstance(stop, Number) else stop.asscalar()\n start = start if isinstance(start, Number) else start.asscalar()\n step = step if isinstance(step, Number) else step.asscalar()\n return _mx.nd.arange(start, stop, ctx=cont, step=step, dtype=dtype)\n\n\ndef _linspace(start, stop, num, cont):\n if num == 1:\n return start\n start = _mx.nd.array(start).reshape((1,)).astype('float32')\n stop = _mx.nd.array(stop).reshape((1,)).astype('float32')\n n_m_1 = _mx.nd.array(num - 1).reshape((1,)).astype('float32')\n increment = (stop - start)/n_m_1\n increment_tiled = _mx.nd.tile(increment, num - 1)\n increments = increment_tiled * _mx.nd.array(_mx.nd.np.linspace(1, num - 1, num - 1).tolist(), ctx=cont)\n ret = _mx.nd.concat(start, start + increments, dim=0)\n return ret\n\n\ndef linspace(start, stop, num, axis=None, dev=None):\n cont = _mxnet_init_context(default_device(dev))\n num = num.asnumpy()[0] if isinstance(num, _mx.nd.NDArray) else num\n start_is_array = isinstance(start, _mx.nd.NDArray)\n stop_is_array = isinstance(stop, _mx.nd.NDArray)\n start_shape = []\n if start_is_array:\n start_shape = list(start.shape)\n start = start.reshape((-1,))\n if stop_is_array:\n start_shape = list(stop.shape)\n stop = stop.reshape((-1,))\n if start_is_array and stop_is_array:\n res = [_linspace(strt, stp, num, cont) for strt, stp in zip(start, stop)]\n elif start_is_array and not stop_is_array:\n res = [_linspace(strt, stop, num, cont) for strt in start]\n elif not start_is_array and stop_is_array:\n res = [_linspace(start, stp, num, cont) for stp in stop]\n else:\n return _linspace(start, stop, num, cont)\n new_shape = start_shape + [num]\n res = _mx.nd.concat(*res, dim=-1).reshape(new_shape)\n if axis is not None:\n res = _mx.nd.swapaxes(res, axis, -1)\n return res\n\n\ndef logspace(start, stop, num, base=10., axis=None, dev=None):\n power_seq = linspace(start, stop, num, axis, default_device(dev))\n return base ** power_seq\n\n\n@_handle_flat_arrays_in_out\ndef concatenate(xs, axis=-1):\n return _mx.nd.concat(*xs, dim=axis)\n\n\ndef stack(xs, axis=0):\n if xs[0].shape == ():\n return _mx.nd.reshape(_mx.nd.stack(*[_flat_array_to_1_dim_array(x) for x in xs], axis=axis), -1)\n return _mx.nd.stack(*xs, axis=axis)\n\n\ndef unstack(x, axis, keepdims=False):\n if x.shape == ():\n return [x]\n num_outputs = x.shape[axis]\n ret = _mx.nd.split(x, num_outputs, axis, squeeze_axis=not keepdims)\n return ret if isinstance(ret, list) else [ret]\n\n\ndef split(x, num_or_size_splits=None, axis=0, with_remainder=False):\n if x.shape == ():\n if num_or_size_splits is not None and num_or_size_splits != 1:\n raise Exception('input array had no shape, but num_sections specified was {}'.format(num_or_size_splits))\n return [x]\n if num_or_size_splits == 
1:\n return [x]\n elif with_remainder and isinstance(num_or_size_splits, int):\n num_or_size_splits = x.shape[axis] if not num_or_size_splits else num_or_size_splits\n num_chunks = x.shape[axis] / num_or_size_splits\n num_chunks_int = _math.floor(num_chunks)\n remainder_size = int((num_chunks - num_chunks_int) * num_or_size_splits)\n num_or_size_splits = [num_or_size_splits]*num_chunks_int + [remainder_size]\n if isinstance(num_or_size_splits, (list, tuple)):\n csum = [0] + _np.cumsum(num_or_size_splits).tolist()\n starts = csum[:-1]\n ends = csum[1:]\n if axis < 0:\n slices = [tuple([Ellipsis, slice(s, e, 1)] + [slice(None, None, None)]*int(abs(axis)-1))\n for s, e in zip(starts, ends)]\n else:\n slices = [tuple([slice(None, None, None)]*axis + [slice(s, e, 1)])\n for s, e in zip(starts, ends)]\n return [x[so] for so in slices]\n return _mx.nd.split(x, x.shape[axis] if not num_or_size_splits else num_or_size_splits, axis)\n\n\n@_handle_flat_arrays_in_out\ndef repeat(x, repeats, axis=None):\n return _mx.nd.repeat(x, repeats, axis)\n\n\ndef tile(x, reps):\n if isinstance(reps, _mx.nd.ndarray.NDArray):\n reps = reps.asnumpy().tolist()\n return _mx.nd.tile(_flat_array_to_1_dim_array(x), reps)\n\n\n@_handle_flat_arrays_in\ndef constant_pad(x, pad_width, value=0):\n if isinstance(pad_width, _mx.ndarray.ndarray.NDArray):\n pad_width = pad_width.asnumpy().tolist()\n x_shape = list(x.shape)\n num_dims = len(x_shape)\n if num_dims > 3:\n raise Exception('Invalid inputs. Pad for mxnet only supports inputs with 3 dimensions or smaller.')\n num_dims_to_add = 4 - num_dims\n new_shape = tuple([1] * num_dims_to_add + x_shape)\n mat_expanded_dims = _mx.nd.reshape(x, new_shape)\n pad_width_flat = [0]*num_dims_to_add*2 + [item for sublist in pad_width for item in sublist]\n pad_expanded_dims = _mx.nd.pad(mat_expanded_dims, mode=\"constant\", pad_width=tuple(pad_width_flat),\n constant_value=value)\n new_shape = [orig_dim + pad_width_item[0] + pad_width_item[1] for orig_dim, pad_width_item in zip(x_shape, pad_width)]\n res = _mx.nd.reshape(pad_expanded_dims, tuple(new_shape))\n return res\n\n\ndef zero_pad(x, pad_width):\n return constant_pad(x, pad_width, 0)\n\n\nswapaxes = _mx.nd.swapaxes\n\n\ndef transpose(x, axes=None):\n if axes is None:\n num_dims = len(x.shape)\n axes = list(range(num_dims))\n axes.reverse()\n return _mx.nd.transpose(x, axes)\n\n\ndef expand_dims(x, axis):\n if x.shape == ():\n return _flat_array_to_1_dim_array(x)\n return _mx.nd.expand_dims(x, axis)\n\n\n@_handle_flat_arrays_in_out\ndef where(condition, x1, x2):\n x_shape = list(x1.shape)\n condition_shape = list(condition.shape)\n if x_shape == condition_shape:\n res = _mx.nd.where(condition, x1, x2)\n return res\n tile_reps = [int(x / c) for x, c in zip(x_shape, condition_shape)]\n tiled_condition = _mx.nd.tile(condition, tile_reps)\n return _mx.nd.where(tiled_condition, x1, x2)\n\n\ndef indices_where(x):\n x_shape = x.shape\n x_flat = x.reshape((1, -1,))\n flat_indices = x_flat.astype('int32').tostype('csr').indices\n if flat_indices.shape == (0,):\n res = flat_indices.reshape((0, len(x_shape)))\n return res\n res = _mx.nd.swapaxes(_mx.nd.unravel_index(flat_indices, x_shape), 0, 1)\n return res\n\n\n@_handle_flat_arrays_in_out\ndef isinf(x):\n return _mx.nd.contrib.isinf(x).astype('bool')\n\n\nreshape = lambda x, new_shape: x.reshape(new_shape)\n\n\ndef broadcast_to(x, new_shape):\n x_shape = list(x.shape)\n num_x_dims = len(x_shape)\n num_shape_dims = len(new_shape)\n diff = num_shape_dims - num_x_dims\n if diff == 0:\n return 
_mx.nd.broadcast_to(x, new_shape)\n x = _mx.nd.reshape(x, [1]*diff + x_shape)\n return _mx.nd.broadcast_to(x, new_shape)\n\n\ndef squeeze(x, axis=None):\n if x.shape == ():\n if axis is None or axis == 0 or axis == -1:\n return x\n raise Exception('tried to squeeze a zero-dimensional input by axis {}'.format(axis))\n res = _mx.nd.squeeze(x, axis)\n if axis is None:\n return _1_dim_array_to_flat_array(res)\n return res\n\n\n# noinspection PyShadowingNames\n\n\n\ndef zeros_like(x, dtype=None, dev=None):\n if x.shape == ():\n return _mx.nd.array(0., ctx=_mxnet_init_context(default_device(dev)))\n mx_zeros = _mx.nd.zeros_like(x, ctx=_mxnet_init_context(default_device(dev)))\n return mx_zeros if not dtype else mx_zeros.astype(dtype)\n\n\ndef full(shape, fill_value, dtype=None, device=None):\n shape = ivy.shape_to_tuple(shape)\n cont = _mxnet_init_context(default_device(device))\n if len(shape) == 0 or 0 in shape:\n return _1_dim_array_to_flat_array(\n _mx.nd.full((1,), fill_value, cont, dtype_from_str(default_dtype(dtype, fill_value))))\n return _mx.nd.full(shape, fill_value, cont, dtype_from_str(default_dtype(dtype, fill_value)))\n\n# noinspection PyUnusedLocal\none_hot = lambda indices, depth, dev=None: _mx.nd.one_hot(indices, depth)\n\n\ndef cross(x1, x2):\n a1 = x1[..., 0:1]\n a2 = x1[..., 1:2]\n a3 = x1[..., 2:3]\n b1 = x2[..., 0:1]\n b2 = x2[..., 1:2]\n b3 = x2[..., 2:3]\n res1 = a2*b3 - a3*b2\n res2 = a3*b1 - a1*b3\n res3 = a1*b2 - a2*b1\n res = _mx.nd.concat(res1, res2, res3, dim=-1)\n return res\n\n\ndef matmul(x1, x2):\n expanded = False\n x1_shape = list(x1.shape)\n x2_shape = list(x2.shape)\n if len(x1_shape) != 3:\n num_x1_dims = len(x1_shape)\n x1 = _mx.nd.reshape(x1, [1]*max(2-num_x1_dims, 0) + [-1] + x1_shape[-min(num_x1_dims, 2):])\n expanded = True\n if len(x2_shape) != 3:\n num_x2_dims = len(x2_shape)\n x2 = _mx.nd.reshape(x2, [1]*max(2-num_x2_dims, 0) + [-1] + x2_shape[-min(num_x2_dims, 2):])\n expanded = True\n x1_batch_size = x1.shape[0]\n x2_batch_size = x2.shape[0]\n if x1_batch_size > x2_batch_size:\n x2 = _mx.nd.tile(x2, (int(x1_batch_size/x2_batch_size), 1, 1))\n elif x2_batch_size > x1_batch_size:\n x1 = _mx.nd.tile(x1, (int(x2_batch_size / x1_batch_size), 1, 1))\n res = _mx.nd.batch_dot(x1, x2)\n if expanded:\n return _mx.nd.reshape(res, list(x1_shape[:-1]) + [res.shape[-1]])\n return res\n\n\ncumsum = lambda x, axis=0: _mx.nd.cumsum(x, axis if axis >= 0 else axis % len(x.shape))\n\n\ndef cumprod(x, axis=0, exclusive=False):\n array_stack = [_mx.nd.expand_dims(chunk, axis) for chunk in unstack(x, axis)]\n if exclusive:\n array_stack = [_mx.nd.ones_like(array_stack[0])] + array_stack[:-1]\n new_array_list = [array_stack[0]]\n for array_chunk in array_stack[1:]:\n new_array_list.append(new_array_list[-1] * array_chunk)\n return _mx.nd.concat(*new_array_list, dim=axis)\n\n\ndef identity(n, dtype='float32', batch_shape=None, dev=None):\n mat = _mx.nd.eye(n, dtype=dtype).copyto(_mxnet_init_context(default_device(dev)))\n if batch_shape is None:\n return mat\n else:\n reshape_dims = [1]*len(batch_shape) + [n, n]\n tile_dims = list(batch_shape) + [1, 1]\n res = _mx.nd.tile(_mx.nd.reshape(mat, reshape_dims), tile_dims)\n return res\n\n\ndef meshgrid(*xs, indexing='ij'):\n # ToDo: implement this without reliance on NumPy backend\n xs_np = [x.as_np_ndarray() for x in xs]\n return tuple([item.as_nd_ndarray() for item in _mx.np.meshgrid(*xs_np, indexing=indexing)])\n\n\n# noinspection PyShadowingNames\ndef scatter_flat(indices, updates, size=None, tensor=None, reduction='sum', 
dev=None):\n if ivy.exists(tensor):\n raise Exception('MXNet scatter_flat does not support scattering into an pre-existing tensor.')\n if reduction == 'replace':\n return _mx.nd.scatter_nd(updates, _mx.nd.expand_dims(indices, 0), [size]).copyto(_mxnet_init_context(default_device(dev)))\n else:\n raise Exception('MXNet scatter_flat currently only supports reduction mode \"replace\", but {} selected.'.\n format(reduction))\n\n\n# noinspection PyShadowingNames\ndef scatter_nd(indices, updates, shape=None, tensor=None, reduction='sum', dev=None):\n if ivy.exists(tensor):\n raise Exception('MXNet scatter_flat does not support scattering into an pre-existing tensor.')\n if dev is None:\n dev = _callable_dev(indices)\n shape = list(shape)\n num_idx_dims = len(indices.shape)\n transpose_order = [num_idx_dims-1] + list(range(num_idx_dims-1))\n indices = _mx.nd.transpose(indices, transpose_order)\n shape = shape if type(shape) is list else shape.asnumpy().astype(_np.int32).tolist()\n if reduction == 'replace':\n return _mx.nd.scatter_nd(updates, indices, shape).copyto(_mxnet_init_context(dev))\n else:\n raise Exception('MXNet scatter_nd currently only supports reduction mode \"replace\", but {} selected.'.\n format(reduction))\n\n\ndef gather(params, indices, axis=-1, dev=None):\n if dev is None:\n dev = _callable_dev(params)\n index_slices = unstack(indices, -1)\n res = _mx.nd.concat(\n *[_mx.nd.expand_dims(_mx.nd.pick(params, idx_slice, axis), -1) for idx_slice in index_slices], dim=-1)\n res = _mx.nd.reshape(res, indices.shape)\n return res.copyto(_mxnet_init_context(dev))\n\n\ndef gather_nd(params, indices, dev=None):\n if dev is None:\n dev = _callable_dev(params)\n indices_shape = indices.shape\n num_idx_dims = len(indices_shape)\n transpose_order = [num_idx_dims-1] + list(range(num_idx_dims-1))\n indices = _mx.nd.transpose(indices, transpose_order)\n return _mx.nd.gather_nd(params, indices).copyto(_mxnet_init_context(dev))\n\n\ndef linear_resample(x, num_samples, axis=-1):\n x_shape = list(x.shape)\n num_x_dims = len(x_shape)\n axis = axis % num_x_dims\n x_pre_shape = x_shape[0:axis]\n x_pre_size = _reduce(_mul, x_pre_shape) if x_pre_shape else 1\n num_pre_dims = len(x_pre_shape)\n num_vals = x.shape[axis]\n x_post_shape = x_shape[axis+1:]\n x_post_size = _reduce(_mul, x_post_shape) if x_post_shape else 1\n num_post_dims = len(x_post_shape)\n xp = _mx.nd.reshape(_mx.nd.arange(num_vals*x_pre_size*x_post_size), x_shape)\n x_coords = _mx.nd.arange(num_samples) * ((num_vals-1)/(num_samples-1)) * x_post_size\n x_coords = _mx.nd.reshape(x_coords, [1]*num_pre_dims + [num_samples] + [1]*num_post_dims)\n x_coords = _mx.nd.broadcast_to(x_coords, x_pre_shape + [num_samples] + x_post_shape)\n slc = [slice(None)] * num_x_dims\n slc[axis] = slice(0, 1, 1)\n x_coords = x_coords + xp[tuple(slc)]\n x = _mx.nd.reshape(x, (-1,))\n xp = _mx.nd.reshape(xp, (-1,))\n x_coords = _mx.nd.reshape(x_coords, (-1,))\n ret = _mx.nd.array(_mx.np.interp(x_coords.asnumpy(), xp.asnumpy(), x.asnumpy()))\n return _mx.nd.reshape(ret, x_pre_shape + [num_samples] + x_post_shape)\n\n\ndef dtype(x, as_str=False):\n dt = x.dtype\n if as_str:\n return dtype_to_str(dt)\n return x.dtype\n\n\ndef dtype_to_str(dtype_in):\n if isinstance(dtype_in, str):\n return dtype_in\n return DTYPE_TO_STR[dtype_in]\n\n\ndef dtype_from_str(dtype_in):\n if not isinstance(dtype_in, str):\n return dtype_in\n return DTYPE_FROM_STR[dtype_in]\n\n\n# noinspection PyUnusedLocal\ndef compile(func, dynamic=True, example_inputs=None, static_argnums=None, 
static_argnames=None):\n logging.warning('MXnet does not support compiling arbitrary functions, '\n 'consider writing a function using MXNet Symbolic backend instead for compiling.\\n'\n 'Now returning the unmodified function.')\n return func\n\n\ncurrent_framework_str = lambda: 'mxnet'\ncurrent_framework_str.__name__ = 'current_framework_str'\nmultiprocessing = lambda context=None: _multiprocessing if context is None else _multiprocessing.get_context(context)\ncontainer_types = lambda: []\n\n\ndef inplace_update(x, val):\n if x.shape == ():\n raise Exception('MXNet does not support inplace updates of 0-dimensional arrays')\n x[:] = val\n return x\n\n\ndef inplace_decrement(x, val):\n if x.shape == ():\n raise Exception('MXNet does not support inplace updates of 0-dimensional arrays')\n x -= val\n return x\n\n\ndef inplace_increment(x, val):\n if x.shape == ():\n raise Exception('MXNet does not support inplace updates of 0-dimensional arrays')\n x += val\n return x\n\ninplace_arrays_supported = lambda: True\ninplace_variables_supported = lambda: True\n" ]
[ [ "numpy.array", "numpy.dtype", "numpy.cumsum" ] ]
datacrisis/BOBBY2
[ "0c88d7906acb9d46929a6f220b857d358518edf0" ]
[ "src/data_utils.py" ]
[ "import torch, torchvision\nimport os, PIL, random, csv\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom PIL import Image, ImageDraw\nfrom matplotlib import pyplot as plt\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom pathlib import Path\n\n\n\ndef compile_imgs(root_dir):\n \"\"\"\n Deprecated. Used previously when custom Dataset compiles sample paths on instatiation.\n Current custom dataset instead accepts a pre-cooked path list to pos/neg samples.\n \n Use compile_data.\n \"\"\"\n\n _ = [root_dir/i for i in os.listdir(root_dir)]\n heap_main = [root_dir/j/i for j in _ for i in os.listdir(j)] #These are folders for 3862 train seqs\n heap_main.sort()\n heap = [i/j for i in heap_main for j in os.listdir(i)]\n heap.sort()\n \n return heap\n\n\ndef compile_annots(root_dir):\n \"\"\"\n Deprecated. Used previously when custom Dataset compiles sample paths on instatiation.\n Current custom dataset instead accepts a pre-cooked path list to pos/neg samples.\n \n Use compile_data.\n \"\"\"\n \n _ = [root_dir/i for i in os.listdir(root_dir)]\n heap_main = [root_dir/j/i for j in _ for i in os.listdir(j)] #These are folders for 3862 train seqs\n heap_main.sort()\n heap = [i/j for i in heap_main for j in os.listdir(i)]\n heap.sort()\n \n return heap\n\n\ndef compile_data(img_root,ant_root,posneg_ls,pos_ls,neg_ls,neg_ls1,seed=5):\n \"\"\"\n Function that returns a dataset (hardcoded list) of pos and neg samples.\n Returns 2 lists: img_ls and annot_ls.\n \n Pos Sample: Translate and map idx from posneg.csv to \n \"\"\"\n \n ant_heap = []\n img_heap = []\n \n #Read csv\n posneg = parse_csv(posneg_ls)\n vanilla_pos = parse_csv(pos_ls)\n vanilla_neg = parse_csv(neg_ls)\n gen_neg = parse_csv(neg_ls1)\n \n #Random shuffle custom to be generated negative samples for representation.\n random.seed(seed)\n random.shuffle(gen_neg)\n \n #Idx for counting \n vp,vn,gn = 0,0,0\n \n #Parse main list\n for i in posneg:\n \n #If it's neg\n if i == 0 and vn <= len(vanilla_neg)-1:\n _ = [0,Path(vanilla_neg[vn])]\n vn += 1\n \n #If it's neg exceeding vanilla neg\n if i == 0 and vn > len(vanilla_neg)-1:\n _ = [0,Path(gen_neg[gn])]\n gn += 1\n \n #If it's pos\n if i == 1:\n _ = [1,Path(vanilla_pos[vp])]\n vp += 1\n \n ant_heap.append(_)\n \n \n #Compute equal for imgs list\n ant_base = Path(_[1])\n ant_parts = ant_base.parts[-4:-1]\n name = ant_base.stem + '.JPEG'\n img = img_root/Path(*ant_parts)/Path(name)\n \n img_heap.append([i,img])\n \n \n return img_heap,ant_heap\n \n \ndef parse_csv(file):\n \"\"\"\n Helper function that takes a csv path and return lists with csv content.\n \"\"\"\n heap = []\n \n with open(file) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n line_count += 1\n \n try:\n heap.append(int(float(*row)))\n except:\n heap.append(*row)\n print(f'Processed {file} lines.')\n \n return heap\n \n\n\ndef check_trans(img):\n \"\"\"\n Process and displays a batch of transformed scene and exems.\n \"\"\"\n simg = img.permute(1,2,0).cpu() \n\n #Plotting\n plt.imshow(simg)\n plt.show()\n\n\n\ndef rszasneed(full_img,annot):\n \"\"\"\n Helper function that takes a fullscene and the annotations to resize the scene randomly for augmentation and\n returns the proper annotations.\n\n Also accounted for the cases where exem (defined by annot) is larger than the current scene patch dimension.\n \"\"\"\n\n #Set patch size\n patch_sz = (360,360)\n \n #Size it\n img_w, img_h = full_img.size\n ex_tw, ex_th, ex_bw, ex_bh = 
annot[0], annot[1], annot[0] + annot[2], annot[1] + annot[3]\n \n #Setup resize range\n ratio_w = annot[2]/patch_sz[0]\n ratio_h = annot[3]/patch_sz[1]\n sz_max = max(ratio_w,ratio_h) #See which dim is the largest, that'll be the max resize up.\n \n if ratio_w <1 and ratio_h < 1: #If the exem is by default smaller than patch\n #Random resize that zooms and shrinks\n sz_fc = random.uniform(sz_max+0.5,1.5) #Make sure exem won't be larger than patch. +0.1 buffer\n new_w = img_w / sz_fc\n new_h = img_h / sz_fc\n\n elif ratio_w >= 1 or ratio_h >= 1: #If exem is larger than patch in any dim at all\n #Resize so sz of exem < sz of patch\n sz_fc = random.uniform(sz_max+0.1,sz_max+0.5) #Shrink more (max 3) since exem is large\n new_w = img_w / sz_fc\n new_h = img_h / sz_fc\n \n #Resize img and annot\n img = full_img.resize((round(new_w),round(new_h)),resample=PIL.Image.BICUBIC)\n ex_tw = ex_tw / sz_fc\n ex_th = ex_th / sz_fc\n ex_bw = ex_bw / sz_fc\n ex_bh = ex_bh / sz_fc\n annot = (ex_tw,ex_th,ex_bw,ex_bh)\n\n #Checks\n w = ex_bw - ex_tw\n h = ex_bh - ex_th\n \n assert w < patch_sz[0], \"Error! The exem w is larger than patch_w | w: {}, patch_w: {}\".format(w,patch_sz[0])\n assert h < patch_sz[1], \"Error! The exem h is larger than patch_h | h: {}, patch_h: {}\".format(h,patch_sz[1])\n\n return img, annot\n\n\ndef scene_crop_neg(full_scene,annot,scene_dim=360):\n \"\"\"\n Helper function used in gen_dt to extracte a negative 360x360 patch from full scene.\n Uses the to_square_neg function since it'll work; for both vanilla and custom negatives.\n \"\"\"\n #Crop square. Scene_dim dictates the shape of scene and the GAP on each size of a scene needed.\n scene,ant = to_square_neg(annot,full_scene,scene_dim)\n \n #Resize scene (360x360) crop to 224x224 as needed by net.\n scene = scene.resize((224,224),resample=PIL.Image.BICUBIC)\n \n #No need to compensate ant since negative smp has (0,0,0,0) ants.\n # \n return scene,ant\n\n\n\ndef scene_crop(full_scene,annot,scene_dim=360):\n \"\"\"\n Helper function used in gen_dt to extracte a positive 360x360 patch from full scene.\n \"\"\"\n\n #Normalize dim and exem location in scene. Determine the gap on each side before crop. \n full_scene,annot = rszasneed(full_scene,annot)\n img_w, img_h = full_scene.size\n ex_tw, ex_th, ex_bw, ex_bh = annot[0], annot[1], annot[2], annot[3] #Already added up in rszasneed\n nex_tw, nex_th, nex_bw, nex_bh = ex_tw/img_w, ex_th/img_h, ex_bw/img_w, ex_bh/img_h #normalized exem \n \n ###Required scene patch\n req = (scene_dim/img_w, scene_dim/img_h)\n \n #Only do compute_cc padding if needed patch sz fits in the full scene\n if req[0] <= 1 and req[1] <= 1:\n tw_n,th_n,bw_n,bh_n = compute_cc(nex_tw,nex_th,nex_bw,nex_bh,req)\n \n #Compensate \n tw = tw_n * img_w\n th = th_n * img_h\n bw = bw_n * img_w\n bh = bh_n * img_h\n\n #Crop. Needs to be PIL image.\n cropped = full_scene.crop((tw,th,bw,bh))\n rsz_fc1 = cropped.size[0]/224 #Need to return a 224 img anyhow\n rsz_fc2 = cropped.size[1]/224\n cropped = cropped.resize((224,224),resample=PIL.Image.BICUBIC) #Resize\n \n #Compensate annotations. Clip.\n ant_tw = annot[0] - tw\n ant_th = annot[1] - th\n ant_bw = annot[2] - tw\n ant_bh = annot[3] - th\n\n #Compensate annotations. 
Resize val.\n ant_tw,ant_bw = ant_tw/rsz_fc1, ant_bw/rsz_fc1\n ant_th,ant_bh = ant_th/rsz_fc2, ant_bh/rsz_fc2 \n ant_ = [i if i <= 224 else 224 for i in [ant_tw,ant_th,ant_bw,ant_bh]] \n\n else:\n #Otherwise use backup pseudo-optimal strat of max square cut with min scretching.\n cropped, ant_ = to_square_scene(to_visfmt(annot),full_scene)\n \n return cropped, ant_\n\n\n\ndef compute_cc(nex_tw,nex_th,nex_bw,nex_bh,req):\n \"\"\"\n Computes the spacing on each side of an exemplar for cropping. \n Returns normalized coordinates to crop with.\n\n If overflows happens in two sides of a same dimension (e.g. scene size req is larger than entire full scene)\n the function will return the largest square image possible covering the exemplar. Make sure to have a resize\n catching such cases on the return of this function.\n \"\"\"\n\n scene_w, scene_h = req[0], req[1]\n\n #Compute exem dim\n exem_w = nex_bw - nex_tw\n exem_h = nex_bh - nex_th\n \n #Catch problematic inputs\n assert scene_w > exem_w, \"Error! The scene patch asked for is smaller than the exemplar. scene_w:{},exem_w:{}\".format(scene_w,exem_w)\n assert scene_h > exem_h, \"Error! The scene patch asked for is smaller than the exemplar. scene_h:{},exem_h:{}\".format(scene_h,exem_h)\n assert req[0] <= 1, \"Error! Patch size asked for is bigger than the actual pic. req[0]: {}\".format(req[0])\n assert req[1] <= 1, \"Error! Patch size asked for is bigger than the actual pic. req[1]: {}\".format(req[1])\n\n #Size the gap needed\n req_w = scene_w - exem_w\n req_h = scene_h - exem_h\n\n #Randomize translation\n spf1 = random.uniform(0,1) #Split factor\n req_w1 = req_w * spf1\n req_w2 = req_w - req_w1\n\n spf2 = random.uniform(0,1)\n req_h1 = req_h * spf2\n req_h2 = req_h - req_h1\n\n #Check which side overflows\n ov_left = True if nex_tw < req_w1 else False \n ov_right = True if (nex_bw + req_w2) > 1 else False\n ov_top = True if nex_th < req_h1 else False\n ov_bottom = True if (nex_bh + req_h2) > 1 else False\n\n ov_FLAGS = [ov_left,ov_top,ov_right,ov_bottom]\n ov = [req_w1-nex_tw, req_h1-nex_th, (nex_bw + req_w2)-1,(nex_bh + req_h2)-1] #How much spill over\n\n need_comp = True if any(ov_FLAGS) else False\n \n #Default cropping with no spillage\n new_th = nex_th - (req_h1)\n new_bh = nex_bh + (req_h2)\n new_tw = nex_tw - (req_w1)\n new_bw = nex_bw + (req_w2)\n output = [new_tw,new_th,new_bw,new_bh]\n\n #Comp needed\n if need_comp:\n ncomp = ov_FLAGS.count(True) #How many sides\n \n #If overflow on single side only\n if ncomp == 1:\n comp_dim = ov_FLAGS.index(True)\n comp_dim_ = (comp_dim-2) if comp_dim > 1 else (comp_dim+2) #Find the opposing dim to add gap to\n comp = abs(ov[comp_dim])\n\n output[comp_dim] = 1 if comp_dim in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim_] = (output[comp_dim_]-comp) if comp_dim in (2,3) else (output[comp_dim_]+comp)\n\n return output\n\n #If overflow on more than one side.\n if ncomp > 1:\n\n #Check which sides spills\n comp_dims = []\n for i,j in enumerate(ov_FLAGS):\n if j is True:\n comp_dims.append(i)\n \n #If spill over both side of a single dimension\n if (0 in comp_dims and 2 in comp_dims) or (1 in comp_dims and 3 in comp_dims):\n raise Exception(\"Not implemented since this does not happen for the VisDrone2018-SOT dataset.\")\n \n #If spill over in sides of different dim\n else:\n comp_dim1 = comp_dims[0]\n comp_dim2 = comp_dims[1]\n comp_dim1_ = (comp_dim1-2) if comp_dim1 > 1 else (comp_dim1+2) #Find the opposing dim to add gap to\n comp_dim2_ = (comp_dim2-2) if comp_dim2 > 
1 else (comp_dim2+2) #Find the opposing dim to add gap to\n comp1 = abs(ov[comp_dim1])\n comp2 = abs(ov[comp_dim2])\n\n output[comp_dim1] = 1 if comp_dim1 in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim1_] = (output[comp_dim1_]-comp1) if comp_dim1 in (2,3) else (output[comp_dim1_]+comp1)\n output[comp_dim2] = 1 if comp_dim2 in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim2_] = (output[comp_dim2_]-comp2) if comp_dim2 in (2,3) else (output[comp_dim2_]+comp2) \n\n return output\n\n\n else: #If no need comp\n return output\n\n\ndef compute_excc(nex_tw,nex_th,nex_bw,nex_bh,req):\n \"\"\"\n Computes the spacing on each side of an exemplar for cropping. \n Returns normalized coordinates to crop with.\n\n If overflows happens in two sides of a same dimension (e.g. scene size req is larger than entire full scene)\n the function will return the largest square image possible covering the exemplar. Make sure to have a resize\n catching such cases on the return of this function.\n \"\"\"\n\n scene_w, scene_h = req[0], req[1]\n\n #Compute exem dim\n exem_w = nex_bw - nex_tw\n exem_h = nex_bh - nex_th\n \n #Catch problematic inputs\n assert req[0] <= 1, \"Error! Patch size asked for is bigger than the actual pic. req[0]: {}\".format(req[0])\n assert req[1] <= 1, \"Error! Patch size asked for is bigger than the actual pic. req[1]: {}\".format(req[1])\n\n #Size the gap needed\n req_w = scene_w - exem_w\n req_h = scene_h - exem_h\n\n #Randomize translation\n spf1 = random.uniform(0,1) #Split factor\n req_w1 = req_w * spf1\n req_w2 = req_w - req_w1\n\n spf2 = random.uniform(0,1)\n req_h1 = req_h * spf2\n req_h2 = req_h - req_h1\n\n #Check which side overflows\n ov_left = True if nex_tw < req_w1 else False \n ov_right = True if (nex_bw + req_w2) > 1 else False\n ov_top = True if nex_th < req_h1 else False\n ov_bottom = True if (nex_bh + req_h2) > 1 else False\n\n ov_FLAGS = [ov_left,ov_top,ov_right,ov_bottom]\n ov = [req_w1-nex_tw, req_h1-nex_th, (nex_bw + req_w2)-1,(nex_bh + req_h2)-1] #How much spill over\n\n need_comp = True if any(ov_FLAGS) else False\n \n #Default cropping with no spillage\n new_th = nex_th - (req_h1)\n new_bh = nex_bh + (req_h2)\n new_tw = nex_tw - (req_w1)\n new_bw = nex_bw + (req_w2)\n output = [new_tw,new_th,new_bw,new_bh]\n\n #Comp needed\n if need_comp:\n ncomp = ov_FLAGS.count(True) #How many sides\n \n #If overflow on single side only\n if ncomp == 1:\n comp_dim = ov_FLAGS.index(True)\n comp_dim_ = (comp_dim-2) if comp_dim > 1 else (comp_dim+2) #Find the opposing dim to add gap to\n comp = abs(ov[comp_dim])\n\n output[comp_dim] = 1 if comp_dim in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim_] = (output[comp_dim_]-comp) if comp_dim in (2,3) else (output[comp_dim_]+comp)\n\n return output\n\n #If overflow on more than one side.\n if ncomp > 1:\n\n #Check which sides spills\n comp_dims = []\n for i,j in enumerate(ov_FLAGS):\n if j is True:\n comp_dims.append(i)\n \n #If spill over both side of a single dimension\n if (0 in comp_dims and 2 in comp_dims) or (1 in comp_dims and 3 in comp_dims):\n raise Exception(\"Not implemented since this does not happen for the VisDrone2018-SOT dataset.\")\n \n #If spill over in sides of different dim\n else:\n comp_dim1 = comp_dims[0]\n comp_dim2 = comp_dims[1]\n comp_dim1_ = (comp_dim1-2) if comp_dim1 > 1 else (comp_dim1+2) #Find the opposing dim to add gap to\n comp_dim2_ = (comp_dim2-2) if comp_dim2 > 1 else (comp_dim2+2) #Find the opposing dim to add gap to\n 
comp1 = abs(ov[comp_dim1])\n comp2 = abs(ov[comp_dim2])\n\n output[comp_dim1] = 1 if comp_dim1 in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim1_] = (output[comp_dim1_]-comp1) if comp_dim1 in (2,3) else (output[comp_dim1_]+comp1)\n output[comp_dim2] = 1 if comp_dim2 in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim2_] = (output[comp_dim2_]-comp2) if comp_dim2 in (2,3) else (output[comp_dim2_]+comp2) \n\n return output\n\n\n else: #If no need comp\n return output\n\n \n \n \n\ndef fetch_exem(img_dir,full_imgs,ex_int,annotls,percent_neg=.5):\n \"\"\"\n Generates exem dynamically. Grab 4 exem frames preceeding the current frame at a given interval.\n Exems taken with a square crop and resized to 224x224.\n \n Capable of returning a given percentage of postive/negative exem samples wrt percent_neg.\n\n [Need optimization and cleaning].\n \"\"\"\n exem_dim = 224\n \n #Parse dir\n name,suffix = img_dir.stem, img_dir.suffix\n \n buffer_size = 4 \n curr_idx = parse_idx(name) + 1 #Compensate for parse_idx which is used to find annotations for an image and hence is 1-indexed.\n to_fetch = []\n imgs_buffer = []\n \n #Check posneg\n neg_req = round(percent_neg*buffer_size)\n pos_req = buffer_size - neg_req\n posneg_FLAG = [] #Need to ensure percent negatives are enforced\n \n assert neg_req + pos_req == buffer_size, \"Error in fetch_exem posneg buffer computation!\"\n \n #Gen exem idx to fetch\n while len(to_fetch) < buffer_size:\n n_name = namify(curr_idx)\n fname = n_name + suffix\n exem_dir = Path(str(img_dir.parent) + '/' + fname)\n \n #Validate if imgs is present in dataset.\n try:\n _ = PIL.Image.open(exem_dir)\n \n except:\n \n #If it fails. Wiggle curr_idx and continue.\n if curr_idx == 0:\n curr_idx += 5 #Just picked 5 so as to not overlap with -1.\n \n else:\n curr_idx -= 1\n continue\n \n #If it passes, append and update curr_idx.\n to_fetch.append(curr_idx)\n curr_idx = max(0,curr_idx - ex_int)\n \n\n for n in to_fetch:\n n_name = namify(n)\n fname = n_name + suffix\n exem_dir = Path(str(img_dir.parent) + '/' + fname)\n \n #Annotation to crop exem from. Hardcode ilsvr ant dir format\n annot_base = Path(*annotls.parts[:-4]) \n annot_dir = annot_base/Path(*exem_dir.parts[-4:-1])/Path(n_name+'.xml')\n annot = to_visfmt(parse_xml(annot_dir)[0][0])\n posneg = 1 if any(annot) == True else 0 #For vanilla neg sample, annots are all 0\n img_ = PIL.Image.open(exem_dir)\n \n \n if neg_req > 0 and posneg == 0:\n\n #Open image and adjust bbox. Then crop, resize and cache.\n img_,comp_annot = to_square_neg(annot,img_,exem_dim)\n\n imgs_buffer.append(img_)\n neg_req -= 1\n posneg_FLAG.append(0)\n \n \n elif pos_req > 0 and posneg == 1:\n #Open image and adjust bbox. Then crop, resize and cache.\n img_,comp_annot = to_square(annot,img_)\n\n imgs_buffer.append(img_)\n pos_req -= 1 \n posneg_FLAG.append(1)\n \n \n elif neg_req > 0 and pos_req == 0 and posneg == 1:\n #Open image and adjust bbox. 
Then crop, resize and cache.\n img_,comp_annot = to_square_neg(annot,img_,exem_dim)\n\n imgs_buffer.append(img_)\n neg_req -= 1\n posneg_FLAG.append(0)\n\n \n elif pos_req > 0 and neg_req == 0 and posneg == 0:\n \n #If it encounters a vanilla neg sample (no target in scene) just replicate from previous exem\n if len(imgs_buffer) > 0:\n \n #May have no pos in FLAG thus exception may be raised in .index(1) below.\n try:\n idx_to_comp = posneg_FLAG.index(1)\n imgs_buffer.append(imgs_buffer[idx_to_comp])\n pos_req -= 1\n posneg_FLAG.append(1)\n \n except:\n pass\n \n elif len(imgs_buffer) == 0:\n pass\n #No pos_req is deducted here so it'll just loop and find one later.\n \n else:\n raise Exception('Encountered an unforseen and unimplemented check for posneg in fetch_exem.')\n \n \n #Check if we're short on exems for rare case where first exem is vanilla negative.\n if len(imgs_buffer) !=4:\n \n #Check if it's all empty or pos_req > 0 and no pos in posneg_FLAG\n if not posneg_FLAG or (pos_req > 0 and 1 not in posneg_FLAG):\n \n #Manually seek pos sample in sequence and call fetch_exem again.\n manual_img_dir = greedy_posseek(img_dir,full_imgs)\n imgs_buffer = fetch_exem(manual_img_dir,full_imgs,ex_int,annotls,percent_neg)\n \n \n else:\n #If there's sufficient sample in buffer\n while len(imgs_buffer) < 4:\n posneg_ = 1 if pos_req != 0 else 0\n posneg_ = 0 if neg_req != 0 else 1\n idx_to_comp = posneg_FLAG.index(posneg_)\n imgs_buffer.append(imgs_buffer[idx_to_comp])\n\n #Compensate indices\n if posneg_ ==1:\n pos_req -= 1\n elif posneg_ ==0:\n neg_req -= 1\n\n \n return imgs_buffer\n\n\n\ndef greedy_posseek(neg_img,img_ls):\n \"\"\"\n Helper function that takes an negative sample and try to find a positive one in the same sequence, \n obtained by greedily searching through the list of images.\n \"\"\"\n \n seq = neg_img.parts[-2] #Seq dir\n \n #Search\n for i in img_ls:\n stat = i[0] #pos/neg\n img_dir = str(i[1])\n \n if stat == 1 and seq in img_dir:\n target = i[1]\n break\n \n \n return target\n\n\n\n\ndef namify(idx):\n \"\"\"\n Helper function that pads a given file number and return it as per the dataset image name format.\n \"\"\"\n len_data = 6 #Ilsvr images are in the form of 000000.JPEG\n len_ = len(str(idx))\n need = len_data - len_\n\n assert len_data >= len_, \"Error! Image idx being fetched is incorrect. 
Invalid value.\"\n\n pad = '0'*need\n\n return pad+str(idx) \n\n\n\ndef parse_idx(img_name):\n \"\"\"\n Simple helper function that takes an image name and return the index position of the image.\n \"\"\"\n bk = 0\n\n #Find where the significant digit appears\n prefix = img_name.split('.')[0][3:]\n\n for idx,alpha in enumerate(prefix):\n if int(alpha) == 0:\n continue\n else:\n bk = idx\n break\n\n num = int(prefix[bk:]) - 1 #Since image names start from 1\n\n return num\n\n\ndef parse_ant(ant):\n \"\"\"\n Helper function used to parse the labels returned by dataloader (stringified).\n Returns a list of float.\n \"\"\"\n parsed = []\n \n for a in ant:\n i = a.strip('()').split(',')\n i = [float(j) for j in i]\n parsed.append(i)\n \n return torch.tensor(parsed)\n\n\n\ndef parse_xml(path,args=None):\n orig_shape = None\n new_shape = None\n\n if args is not None:\n orig_shape = args[0]\n new_shape = args[1]\n\n bboxes = []\n track_id = 0\n occ = 0\n w,h = 0,0\n \n tree = ET.parse(path)\n root = tree.getroot()\n\n if root.findall('object'):\n for obj in root.findall('object'):\n #Read w-h\n track_id = float(obj.find('trackid').text)\n occ = float(obj.find('occluded').text)\n w = float(root.find('size').find('width').text)\n h = float(root.find('size').find('height').text)\n\n # Read the bbox\n bbox = obj.find('bndbox')\n x_left = float(bbox.find('xmin').text)\n y_top = float(bbox.find('ymin').text)\n x_right = float(bbox.find('xmax').text)\n y_bottom = float(bbox.find('ymax').text)\n\n if orig_shape is not None and new_shape is not None:\n x_left = x_left*new_shape[1]/orig_shape[1]\n y_top = y_top*new_shape[0]/orig_shape[0]\n x_right = x_right*new_shape[1]/orig_shape[1]\n y_bottom = y_bottom*new_shape[0]/orig_shape[0]\n\n bbox = [int(x_left),int(y_top),int(x_right),int(y_bottom)]\n bboxes.append(bbox)\n \n else:\n bboxes = [[0]]\n\n\n return(bboxes,track_id,occ,w,h)\n \n \n\ndef to_visfmt(annot):\n \"\"\"\n Helper function that changes (tw,th,bw,bh) -> (tw,th,w,h).\n Used to convert annotations from ilsvr to visdrone's since most scripts are already\n written in visdrone's format.\n \"\"\"\n \n #Check if it's an empty bbox (neg scene)\n if len(annot) > 1:\n load = [annot[0], annot[1], annot[2]-annot[0], annot[3]-annot[1]]\n else:\n load = [0,0,0,0]\n \n return load\n\n\n\ndef to_square(annot,img):\n \"\"\"\n Helper function that takes in a target's bbox and convert it to a larger bbox that is square.\n Compensated annot is not needed for exemplars, but implemented as an extra.\n Used for exems.\n \"\"\"\n \n #Check needed dim\n need = max(annot[2],annot[3]) #See if it's taller or wider\n img_sz = min(img.size)\n\n #Compute center\n cw = annot[0] + (annot[2]/2)\n ch = annot[1] + (annot[3]/2)\n \n #Normalize annot\n img_w, img_h = img.size[0], img.size[1] \n annot = [annot[0],annot[1],annot[2]+annot[0],annot[3]+annot[1]] #Format needed by compute_cc\n annot_norm = [annot[0]/img_w,annot[1]/img_h,annot[2]/img_w,annot[3]/img_h]\n \n #If the req bbox to be square is > than img_sz\n if need > img_sz: \n #Squash it as little as possible by making it as square as possible\n req = (img_sz/img_w, img_sz/img_h)\n tw_n,th_n,bw_n,bh_n = compute_excc(*annot_norm,req) #Key step; compute comp \n \n #If it's within the image\n else:\n #Compute compensation when needed \n req = (need/img_w, need/img_h)\n tw_n,th_n,bw_n,bh_n = compute_excc(*annot_norm,req) #Key step; compute comp\n \n \n #Unnormalize\n tw = tw_n * img_w\n th = th_n * img_h\n bw = bw_n * img_w\n bh = bh_n * img_h\n\n #Crop and resize\n cropped = 
img.crop((annot[0],annot[1],annot[2],annot[3])) #Stretch\n # cropped = img.crop((tw,th,bw,bh)) #No stretch\n rsz_fc1 = cropped.size[0]/224\n rsz_fc2 = cropped.size[1]/224\n cropped = cropped.resize((224,224),resample=PIL.Image.BICUBIC) #Resize\n \n #Compensate annotations. Clip vals.\n ant_tw = max(0,annot[0] - tw)\n ant_th = max(0,annot[1] - th)\n ant_bw = max(0,(bw-tw)-(bw - annot[2])) #basically, scene_w(or h) - gap_scene_exem = exem_anot\n ant_bh = max(0,(bh-th)-(bh - annot[3])) \n \n #Compensate annotations. Resize val.\n ant_tw,ant_bw = ant_tw/rsz_fc1, ant_bw/rsz_fc1\n ant_th,ant_bh = ant_th/rsz_fc2, ant_bh/rsz_fc2 \n compensated_ant = [i if i <= 224 else 224 for i in [ant_tw,ant_th,ant_bw,ant_bh]] #Only clip max after scaling\n \n \n return cropped, compensated_ant\n \n\n \ndef to_square_neg(annot,img,size=224):\n \"\"\"\n Helper function that takes in a target's bbox and convert it to a larger bbox that is square.\n Compensated annot is not needed for exemplars, but implemented as an extra.\n Used for negative exems and scenes.\n \"\"\"\n \n #Conv annot from (tw,th,w,h) -> (tw,th,bw,bh)\n annot_ = [annot[0],annot[1],annot[2]+annot[0],annot[3]+annot[1]]\n \n #Check for empty spaces surrounding annot\n w,h = img.size[0],img.size[1]\n rw = w - annot_[2]\n bh = h - annot_[3]\n lw = annot_[0] - 0\n th = annot_[1] - 0\n \n #Det which space to take\n spaces = [lw,th,rw,bh]\n gap1 = max(spaces)\n gap1_idx = spaces.index(gap1)\n \n #Check counter dim\n if gap1_idx in [0,2]:\n gap2 = h #It's w, check h.\n \n elif gap1_idx in [1,3]:\n gap2 = w #It's h, check w.\n \n #Compute neg exem bbox\n gap_min = min(gap1,gap2)\n gap_min_idx = [gap1,gap2].index(gap_min)\n \n #If it fits\n if gap_min > size:\n \n gap_gap = gap_min - size\n begin = random.randint(0,gap_gap) #Pick random point to start exem square\n end = begin + size \n \n #If it doesn't fit in \n else:\n gap_gap = 0\n begin = random.randint(0,gap_gap) #Pick random point to start exem square\n end = begin + gap_min\n \n \n #Compensate absolute bbox val\n if gap1_idx == 0 or gap1_idx == 1:\n\n #Gap at left of bbox\n crop_ant = [begin,begin,end,end]\n\n elif gap1_idx == 2:\n\n #Gap at right of bbox\n crop_ant = [annot_[2]+begin,begin,annot_[2]+end,end] \n\n elif gap1_idx == 3:\n\n #Gap at bottom of bbox\n crop_ant = [begin,annot_[3]+begin,end,annot_[3]+end]\n \n \n #Crop img and resize if needed\n cropped = img.crop(crop_ant)\n \n if cropped.size[0] != size or cropped.size[1] != size:\n cropped = cropped.resize((size,size),resample=PIL.Image.BICUBIC) #Resize\n \n compensated_ant = [0,0,0,0] #No target in negative sample\n \n \n return cropped, compensated_ant\n\n \n \ndef to_square_scene(annot,img):\n \"\"\"\n Helper function that takes in a target's bbox and convert it to a larger bbox that is square. 
\n Used for scene.\n \"\"\"\n \n #Check needed dim\n need = 360 #Patch size is fixed at 360x360\n img_sz = min(img.size)\n\n #Normalize annot\n img_w, img_h = img.size[0], img.size[1] \n annot = [annot[0],annot[1],annot[2]+annot[0],annot[3]+annot[1]] #Format needed by compute_cc\n annot_norm = [annot[0]/img_w,annot[1]/img_h,annot[2]/img_w,annot[3]/img_h]\n \n #If the req bbox to be square is > than img_sz\n if need > img_sz:\n #Squash it as little as possible by making it as square as possible\n req = (img_sz/img_w, img_sz/img_h)\n tw_n,th_n,bw_n,bh_n = compute_excc(*annot_norm,req) #Key step; compute comp \n \n #If it's within the image\n else:\n #Compute compensation when needed \n req = (need/img_w, need/img_h)\n tw_n,th_n,bw_n,bh_n = compute_excc(*annot_norm,req) #Key step; compute comp\n \n #Unnormalize\n tw = tw_n * img_w\n th = th_n * img_h\n bw = bw_n * img_w\n bh = bh_n * img_h\n\n #Crop and resize\n cropped = img.crop((tw,th,bw,bh))\n rsz_fc1 = cropped.size[0]/224\n rsz_fc2 = cropped.size[1]/224\n cropped = cropped.resize((224,224),resample=PIL.Image.BICUBIC) #Resize\n\n #Compensate annotations. Clip vals.\n ant_tw = max(0,annot[0] - tw)\n ant_th = max(0,annot[1] - th)\n ant_bw = max(0,(bw-tw)-(bw - annot[2])) #basically, scene_w(or h) - gap_scene_exem = exem_anot\n ant_bh = max(0,(bh-th)-(bh - annot[3])) \n \n #Compensate annotations. Resize val.\n ant_tw,ant_bw = ant_tw/rsz_fc1, ant_bw/rsz_fc1\n ant_th,ant_bh = ant_th/rsz_fc2, ant_bh/rsz_fc2 \n compensated_ant = [i if i <= 224 else 224 for i in [ant_tw,ant_th,ant_bw,ant_bh]] #Only clip max after scaling\n \n return cropped, compensated_ant\n \n\n\ndef dt_trans(trans,scene,exems,buffer_size):\n \"\"\"\n Function to enclose the transformation sequence used in dataset.\n \"\"\"\n\n norm_trans = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n #If transforms\n if trans:\n #Make sure trans applied on exem and scene are similar\n vals = [random.uniform(0.85,1.15) for i in range(4)]\n val_hue = random.uniform(-0.1,0.1)\n scene = transforms.functional.adjust_brightness(scene,vals[0])\n scene = transforms.functional.adjust_contrast(scene,vals[1])\n scene = transforms.functional.adjust_gamma(scene,vals[2])\n scene = transforms.functional.adjust_saturation(scene,vals[3])\n scene = transforms.functional.adjust_hue(scene,val_hue)\n scene = norm_trans(scene)\n\n for i,exem_ in enumerate(exems):\n exem_ = transforms.functional.adjust_brightness(exem_,vals[0])\n exem_ = transforms.functional.adjust_contrast(exem_,vals[1])\n exem_ = transforms.functional.adjust_gamma(exem_,vals[2])\n exem_ = transforms.functional.adjust_saturation(exem_,vals[3])\n exem_ = transforms.functional.adjust_hue(exem_,val_hue)\n exem_ = norm_trans(exem_)\n exems[i] = exem_\n exems = torch.stack(exems)\n\n else:\n scene = norm_trans(scene)\n exems = [norm_trans(exem_) for exem_ in exems]\n exems = torch.stack(exems)\n\n return scene,exems\n\n\n\n\nclass gen_dt(Dataset):\n\n def __init__(self, img_root, ant_root, posneg_ls, pos_ls, neg_ls, cusneg_ls, \n percent_neg = 0,ex_int = 16, transform = True):\n \"\"\"\n General buffer dataset class.\n\n Rather than taking in img_dir, feed in instead a csv that can be decoded to return\n the paths for the negative and positive samples respectively.Instantiate different \n dataset for train and valid.The annotations for the entire train/valid set will be \n loaded on instantiation to prevent read/write at every sample.\n\n Input:\n img_root: Root to 
ImageNet-VID.\n ant_root: Root to annotations of corresponding img_root.\n posneg_ls: Path to posneg.csv. Determines the positivity-negativity of sample.\n pos_ls: Path to ilsvrc_vanilla_pos.csv. Used to fetch positive samples as dictated by posneg_ls.\n neg_ls: Path to ilsvrc_vanilla_neg.csv. Used to fetch negative samples as dictated by posneg_ls.\n cusneg_ls: Path to neg_below08.csv. Used to generate custom negative samples.\n percent_neg: Valid values are [0,0.25,0.5,0.75]. Used to control percentage of distractor exemplars in buffer.\n ex_int: Time interval (frames) between exemplars in buffer.\n transform: Should always be TRUE in usage. Used to transform images for PyTorch.\n\n Return:\n Img/scene: A portion of the scene with/without a target. Size of [3x224x224]. \n Img/exems: Collection of exemplars taken from the same sequence as the scene. Size of [buffer_sizex3x224x224].\n Img/pth_full: Full path for the scene.\n Img/seq_name: Name of sequence for the scene.\n Img/img_name: Name of image for the scene.\n Annot/bbox: Ground truth bounding box coordinates. Quick-fixed by stringify to prevent jumbling.\n Annot/obj: Ground truth objectness score of scene. Quick-fixed by stringify to prevent jumbling\n \"\"\"\n\n #Setup\n self.ex_int = ex_int\n self.transform = transform\n self.posneg_ls = posneg_ls\n self.pos_ls = pos_ls\n self.neg_ls = neg_ls\n self.cusneg_ls = cusneg_ls\n self.img_root = img_root\n self.ant_root = ant_root\n self.percent_neg = percent_neg \n self.transform = transform\n self.data, self.annot = compile_data(self.img_root,self.ant_root,self.posneg_ls,\n self.pos_ls,self.neg_ls,self.cusneg_ls)\n \n assert len(self.annot) == len(self.data), \"Error! The len(annot) != len(imgs)\"\n\n self.data, self.annot = np.array(self.data),np.array(self.annot)\n\n \n def __len__(self):\n return len(self.data)\n\n\n def __getitem__(self, idx):\n \n ###Parse path\n img_full = self.data[idx][1] #self.data is a list in form [[pos/neg,path],...]\n img_name = img_full.parts[-1]\n seq_name = img_full.parts[-2]\n \n ###Open Image\n full_scene = PIL.Image.open(img_full)\n \n ###Fetch label. \n #Parse_xml returns extra info. (w,h,occ,extra bboxes if >1 target)\n #self.annot is a list in form [[pos/neg,path],...] \n annot = parse_xml(self.annot[idx][1])[0][0]\n \n #convert (tw,th,bw,bh) -> (tw,th,w,h) ; (ilsvr) -> (visdrone) default format\n annot = to_visfmt(annot)\n \n ###Fetch Positive or Negative sample\n stat = self.data[idx][0]\n assert stat == self.annot[idx][0], \"Error! Both img and ant should be equal in pos/neg, not diff.\"\n \n #If it's a positive sample\n if stat == 1:\n \n #Fetch, crop and transform scene with compensated annot\n scene,annot = scene_crop(full_scene,annot) \n \n #If it's a negative sample\n elif stat == 0:\n\n #Fetch, crop and transform scene with compensated annot\n scene,annot = scene_crop_neg(full_scene,annot) \n \n else:\n raise Exception(\"Error! Invalid stat value\")\n\n \n #Fetch exemplars\n exems = fetch_exem(img_full,self.data,self.ex_int,self.annot[0][1],self.percent_neg)\n \n ###Transforms\n scene,exems = dt_trans(self.transform,scene,exems,4)\n \n \n ###Package\n load = {\"Img\":{\"scene\":scene,\n \"exem\":exems,\n \"pth_full\":str(img_full),\n \"seq_name\":str(seq_name),\n \"img_name\":str(img_name)},\n \"Annot\":{\"bbox\":str(annot),\n \"obj\":str(stat)}}\n \n \n return load\n \n" ]
[ [ "torch.stack", "torch.tensor", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.array" ] ]
Jumpst3r/mine-pytorch
[ "41c68d1388664561996300a15e43e8cc4d805ded" ]
[ "mine/utils/helpers.py" ]
[ "import numpy as np\nimport torch\n\ndef batch(x, y, batch_size=1, shuffle=True):\n assert len(x) == len(\n y), \"Input and target data must contain same number of elements\"\n if isinstance(x, np.ndarray):\n x = torch.from_numpy(x).float()\n if isinstance(y, np.ndarray):\n y = torch.from_numpy(y).float()\n\n n = len(x)\n\n if shuffle:\n rand_perm = torch.randperm(n)\n x = x[rand_perm]\n y = y[rand_perm]\n\n batches = []\n for i in range(n // batch_size):\n x_b = x[i * batch_size: (i + 1) * batch_size]\n y_b = y[i * batch_size: (i + 1) * batch_size]\n\n batches.append((x_b, y_b))\n return batches\n\n" ]
[ [ "torch.from_numpy", "torch.randperm" ] ]
guci314/Chatbot_CN
[ "a0f7194252a189f8bc2b62fd16eb2abe432c0bf9" ]
[ "Chatbot_Model/Question_Pairs_Matching/train.py" ]
[ "import tensorflow as tf\nfrom Chatbot_Model.Question_Pairs_Matching import data_prepare\nfrom tensorflow.contrib import learn\nimport numpy as np\nfrom Chatbot_Model.Question_Pairs_Matching import esim_model\nimport Chatbot_Model.Question_Pairs_Matching.config as config\nfrom tqdm import tqdm\nfrom sklearn.metrics import f1_score\nfrom sklearn import metrics\nimport os\n\ncon = config.Config()\nparent_path = os.path.dirname(os.getcwd())\ndata_pre = data_prepare.Data_Prepare()\n\n\nclass TrainModel(object):\n '''\n 训练模型\n 保存模型\n '''\n def pre_processing(self):\n train_texta, train_textb, train_tag = data_pre.readfile(parent_path+'/data/train.txt')\n data = []\n data.extend(train_texta)\n data.extend(train_textb)\n data_pre.build_vocab(data, parent_path+'/save_model/esim' + '/vocab.pickle')\n # 加载词典\n self.vocab_processor = learn.preprocessing.VocabularyProcessor.restore(parent_path+'/save_model/esim' +\n '/vocab.pickle')\n train_texta_embedding = np.array(list(self.vocab_processor.transform(train_texta)))\n train_textb_embedding = np.array(list(self.vocab_processor.transform(train_textb)))\n\n dev_texta, dev_textb, dev_tag = data_pre.readfile(parent_path+'/data/dev.txt')\n dev_texta_embedding = np.array(list(self.vocab_processor.transform(dev_texta)))\n dev_textb_embedding = np.array(list(self.vocab_processor.transform(dev_textb)))\n return train_texta_embedding, train_textb_embedding, np.array(train_tag), \\\n dev_texta_embedding, dev_textb_embedding, np.array(dev_tag)\n\n def get_batches(self, texta, textb, tag):\n num_batch = int(len(texta) / con.Batch_Size)\n for i in range(num_batch):\n a = texta[i*con.Batch_Size:(i+1)*con.Batch_Size]\n b = textb[i*con.Batch_Size:(i+1)*con.Batch_Size]\n t = tag[i*con.Batch_Size:(i+1)*con.Batch_Size]\n yield a, b, t\n\n def get_length(self, trainX_batch):\n # sentence length\n lengths = []\n for sample in trainX_batch:\n count = 0\n for index in sample:\n if index != 0:\n count += 1\n else:\n break\n lengths.append(count)\n return lengths\n\n def trainModel(self):\n train_texta_embedding, train_textb_embedding, train_tag, \\\n dev_texta_embedding, dev_textb_embedding, dev_tag = self.pre_processing()\n # 定义训练用的循环神经网络模型\n with tf.variable_scope('esim_model', reuse=None):\n # esim model\n model = esim_model.ESIM(True, seq_length=len(train_texta_embedding[0]),\n class_num=len(train_tag[0]),\n vocabulary_size=len(self.vocab_processor.vocabulary_),\n embedding_size=con.embedding_size,\n hidden_num=con.hidden_num,\n l2_lambda=con.l2_lambda,\n learning_rate=con.learning_rate)\n\n # 训练模型\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver()\n best_f1 = 0.0\n for time in range(con.epoch):\n print(\"training \" + str(time + 1) + \">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n model.is_trainning = True\n loss_all = []\n accuracy_all = []\n for texta, textb, tag in tqdm(\n self.get_batches(train_texta_embedding, train_textb_embedding, train_tag)):\n feed_dict = {\n model.text_a: texta,\n model.text_b: textb,\n model.y: tag,\n model.dropout_keep_prob: con.dropout_keep_prob,\n model.a_length: np.array(self.get_length(texta)),\n model.b_length: np.array(self.get_length(textb))\n }\n _, cost, accuracy = sess.run([model.train_op, model.loss, model.accuracy], feed_dict)\n loss_all.append(cost)\n accuracy_all.append(accuracy)\n\n print(\"第\" + str((time + 1)) + \"次迭代的损失为:\" + str(np.mean(np.array(loss_all))) + \";准确率为:\" +\n str(np.mean(np.array(accuracy_all))))\n\n def dev_step():\n \"\"\"\n Evaluates model on a dev set\n \"\"\"\n 
loss_all = []\n accuracy_all = []\n predictions = []\n for texta, textb, tag in tqdm(\n self.get_batches(dev_texta_embedding, dev_textb_embedding, dev_tag)):\n feed_dict = {\n model.text_a: texta,\n model.text_b: textb,\n model.y: tag,\n model.dropout_keep_prob: 1.0,\n model.a_length: np.array(self.get_length(texta)),\n model.b_length: np.array(self.get_length(textb))\n }\n dev_cost, dev_accuracy, prediction = sess.run([model.loss, model.accuracy,\n model.prediction], feed_dict)\n loss_all.append(dev_cost)\n accuracy_all.append(dev_accuracy)\n predictions.extend(prediction)\n y_true = [np.nonzero(x)[0][0] for x in dev_tag]\n y_true = y_true[0:len(loss_all)*con.Batch_Size]\n f1 = f1_score(np.array(y_true), np.array(predictions), average='weighted')\n print('分类报告:\\n', metrics.classification_report(np.array(y_true), predictions))\n print(\"验证集:loss {:g}, acc {:g}, f1 {:g}\\n\".format(np.mean(np.array(loss_all)),\n np.mean(np.array(accuracy_all)), f1))\n return f1\n\n model.is_trainning = False\n f1 = dev_step()\n\n if f1 > best_f1:\n best_f1 = f1\n saver.save(sess, parent_path + \"/save_model/esim/model.ckpt\")\n print(\"Saved model success\\n\")\n\n\nif __name__ == '__main__':\n train = TrainModel()\n train.trainModel()" ]
[ [ "tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore", "tensorflow.global_variables_initializer", "tensorflow.variable_scope", "tensorflow.Session", "tensorflow.train.Saver", "numpy.array", "numpy.nonzero" ] ]
ValeriaTelles/Physics-Programs
[ "9fdd1b60ad5dd9c6750855bf63c2aa89383a0b1a" ]
[ "Scalar and Vector Fields/src/divCurl.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n\n# deriv computes 1D derivative, dF/dr, using central difference method\ndef deriv(F,r):\n\t# get the length of array F (assume same length for r)\n L = F.size\n\t\n\t# create empty array to store results\n result= np.empty(L)\n\t\n\t# use central diff method for all interior points (we will build in tutorial)\n for i in range(L-2):\n result[i+1] = (F[i+2] - F[i]) / (r[i+2] - r[i])\n \n result[0] = (F[1] - F[0]) / (r[1] - r[0])\n result[L-1] = (F[L-1] - F[L-2]) / (r[L-1] - r[L-2])\n\n return result\n\n# read in the files \"vFieldX.csv\" and \"vFieldY.csv\"\nvFieldX= np.loadtxt( 'vFieldX.csv', delimiter = ',' )\nvFieldY = np.loadtxt( 'vFieldY.csv', delimiter = ',' )\n\n# Create a 2D grid of x, y points using numpy's meshgrid function (see Exercise 1)\nnx, ny = 100,100\nx = np.linspace(-5,5,nx)\ny = np.linspace(-5,5,ny)\nX, Y = np.meshgrid(x,y)\n\n# Divergence \ndivX = np.empty(X.shape)\n\nfor j in range(ny):\n divX[j,:] = deriv(vFieldX[j,:],x)\n\ndivY = np.empty(Y.shape)\n\nfor i in range(nx): \n divY[:,i] = deriv(vFieldY[:,i], y)\n\ntotalDiv = divX + divY\n\n# Curl\ncurlX = np.empty(X.shape)\n\nfor j in range(ny):\n curlX[j,:] = deriv(vFieldY[j,:], x)\n\ncurlY = np.empty(Y.shape)\n\nfor i in range(nx): \n curlY[:,i] = deriv(vFieldX[:,i], y)\n\ntotalCurl = curlX - curlY\n\n# Plotting the Divergence and Curl using subplots\nlines = 10**np.linspace(10, 12, 11)\nlines = sorted(list(-lines)+list(lines))\nfig, (ax1, ax2) = plt.subplots( nrows = 1, ncols = 2, sharex = False, sharey = False )\n\nax1.contourf(X, Y, totalDiv) #levels = Lines, colors = 'k', linewidths = 1)\nCS = ax2.contour(x, y, totalCurl, ) #levels = Lines, colors = 'k', linewidths = 1)\n\nax1.set_title('Divergence of a Vector Field', fontweight = 'bold' )\nax2.set_title('Curl of a Vector Field', fontweight = 'bold' )\n\nax1.set(xlabel = \"X\", ylabel = \"Y\")\nax2.set(xlabel = \"X\", ylabel = \"Y\")\n\nax2.clabel(CS, inline = 1, fontsize = 8)\n\nfig.set_size_inches(9, 5)\n\nplt.savefig('divCurlPlot.png', dpi=300)" ]
[ [ "numpy.empty", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "numpy.meshgrid", "numpy.linspace", "numpy.loadtxt" ] ]
Ascend/pytorch
[ "39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc", "39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc", "39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc" ]
[ "test/test_npu/test_network_ops/test_norm_except_dim.py", "test/test_npu/test_network_ops/test_upsample_bicubic2d_backward.py", "test/test_npu/test_conv1d.py" ]
[ "# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport math\nimport random\nfrom torch._six import nan\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\n\n\nclass TestNormExceptDim(TestCase):\n def generate_data(self, min, max, shape, dtype):\n input1 = np.random.uniform(min, max, shape).astype(dtype)\n input2 = np.random.uniform(min, max, shape).astype(dtype)\n npu_input1 = torch.from_numpy(input1)\n npu_input2 = torch.from_numpy(input2)\n\n return npu_input1, npu_input2\n\n def generate_single_data(self, min, max, shape, dtype):\n input = np.random.uniform(min, max, shape).astype(dtype)\n npu_input = torch.from_numpy(input)\n return npu_input\n\n def generate_int_dim(self, max):\n dim = np.random.randint(0, max)\n return dim\n\n def generate_bool_keepdim(self):\n keepdim = random.choice([True, False])\n return keepdim\n\n def test_norm_except_dim_type(self, device):\n def cpu_op_exec(input1, pow):\n output = torch.norm_except_dim(input1, pow=pow, dim=0)\n output = output.numpy()\n return output\n\n def npu_op_exec(input1, pow):\n print(input1.shape)\n input1 = input1.to(\"npu\")\n output = torch.norm_except_dim(input1, pow=pow, dim=0)\n output = output.to(\"cpu\")\n output = output.numpy()\n print(output.shape)\n return output\n\n def test_norm_except_dim_exec(input_type):\n input1 = self.generate_single_data(0, 100, (5, 3), input_type)\n pow = self.generate_int_dim(10)\n cpu_output = cpu_op_exec(input1, pow)\n npu_output = npu_op_exec(input1, pow)\n return cpu_output, npu_output\n\n for dtype in [np.float32]:\n cpu_output, npu_output = test_norm_except_dim_exec(dtype)\n self.assertRtolEqual(cpu_output, npu_output)\n\n \ninstantiate_device_type_tests(TestNormExceptDim, globals(), except_for=\"cpu\")\n\nif __name__ == \"__main__\":\n run_tests()", "# Copyright (c) 2020 Huawei Technologies Co., Ltd\n# Copyright (c) 2019, Facebook CORPORATION. 
\n# All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport sys\nimport copy\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\nclass TestUpsampleBicubic2dBackward(TestCase):\n\n def cpu_op_exec(self, input1, output_size, align_corners, scale_h, scale_w):\n input1.requires_grad = True\n output = torch._C._nn.upsample_bicubic2d(input1, output_size, align_corners, scale_h, scale_w)\n output.backward(torch.ones_like(output))\n output_grad = input1.grad\n output_grad = output_grad.detach().numpy()\n return output_grad\n\n def npu_op_exec(self, input1, output_size, align_corners, scale_h, scale_w):\n input1.requires_grad = True\n output = torch._C._nn.upsample_bicubic2d(input1, output_size, align_corners, scale_h, scale_w)\n output.backward(torch.ones_like(output))\n output_grad = input1.grad\n output_grad = output_grad.to(\"cpu\").detach().numpy()\n return output_grad\n\n\n def test_upsample_bicubic2d_common_shape_format(self, device):\n shape_format = [\n [[np.float32, -1, (1, 1, 1, 1)], (1, 1), True, 0, 0, 0, 255],\n [[np.float32, -1, (2, 65535, 2, 2)], (2, 2), True, 0, 0, 0, 255],\n [[np.float32, -1, (10, 10, 786432, 8)], (786432, 8), False, 0, 0, 0, 255],\n [[np.float32, -1, (1, 1, 1, 1)], (2, 2), True, 0, 0, 0, 255],\n [[np.float32, -1, (1, 1, 2, 2)], (4, 4), True, 0, 0, 0, 255],\n [[np.float32, -1, (1, 1, 1, 1)], (2, 2), False, 0.5, 0.5, 0, 255],\n [[np.float32, -1, (1, 1, 2, 2)], (4, 4), False, 0.5, 0.5, 0, 255],\n [[np.float32, -1, (32, 32, 32, 32)], (64, 64), False, 0.5, 0.5, 0, 3402823500.0]\n ]\n for item in shape_format:\n cpu_input1, npu_input1 = create_common_tensor(item[0], item[5], item[6])\n cpu_output = self.cpu_op_exec(cpu_input1, item[1], item[2], item[3], item[4])\n npu_output = self.npu_op_exec(npu_input1, item[1], item[2], item[3], item[4])\n self.assertRtolEqual(cpu_output, npu_output)\n\n\n def test_upsample_bicubic2d_float16_shape_format(self, device):\n def cpu_op_exec_fp16(input1, output_size, align_corners, scale_h, scale_w):\n input1 = input1.to(torch.float32)\n input1.requires_grad = True\n output = torch._C._nn.upsample_bicubic2d(input1, output_size, align_corners, scale_h, scale_w)\n output.backward(torch.ones_like(output))\n output_grad = input1.grad\n output_grad = output_grad.detach().numpy()\n output_grad = output_grad.astype(np.float16)\n return output_grad\n \n shape_format = [\n [[np.float16, -1, (1, 1, 1, 1)], (1, 1), True, 0, 0, 0, 255],\n [[np.float16, -1, (2, 65535, 2, 2)], (2, 2), True, 0, 0, 0, 255],\n [[np.float16, -1, (32, 32, 32, 32)], (32, 32), False, 0, 0, 0, 6550.0],\n [[np.float16, -1, (1, 1, 1, 1)], (2, 2), True, 0, 0, 0, 255],\n [[np.float16, -1, (1, 1, 1, 1)], (2, 2), False, 0.5, 0.5, 0, 255],\n [[np.float16, -1, (1, 1, 2, 2)], (4, 4), False, 0.5, 0.5, 0, 255],\n [[np.float16, -1, (32, 32, 32, 32)], (64, 64), False, 0.5, 0.5, 0, 6550.0]\n ]\n \n for item in shape_format:\n 
cpu_input1, npu_input1 = create_common_tensor(item[0], item[5], item[6])\n cpu_output = cpu_op_exec_fp16(cpu_input1, item[1], item[2], item[3], item[4])\n npu_output = self.npu_op_exec(npu_input1, item[1], item[2], item[3], item[4])\n self.assertRtolEqual(cpu_output, npu_output)\n\ninstantiate_device_type_tests(TestUpsampleBicubic2dBackward, globals(), except_for='cpu')\nif __name__ == \"__main__\":\n run_tests()\n", "# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport copy\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\nclass TestConv1d(TestCase):\n def op_exec_cpu(self, input, weight, in_channels, out_channels, kernel_size, padding=0, stride=1, dilation=1, bias=True):\n m = torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias, groups=1)\n m.weight.data = weight\n output = m(input)\n output = output.detach().numpy()\n return output\n\n def op_exec_npu(self, input, weight, in_channels, out_channels, kernel_size, padding=0, stride=1, dilation=1, bias=True):\n m = torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias, groups=1)\n m.weight.data = weight\n m = m.to(\"npu\")\n output = m(input)\n output = output.to(\"cpu\")\n output = output.detach().numpy()\n return output\n\n def test_conv1d_shape_format(self, device):\n shape_format = [ \n [[np.float32, 3, (256, 32, 1, 1)], [np.float32, 3, (8, 32, 1, 1)], 0, (1, 1), (1, 1), (8)],\n [[np.float32, 3, [256, 32, 112, 112]], [np.float32, 0, [16, 32, 1, 1]], 0, 1, 1, None],\n [[np.float32, 0, [256, 3, 224, 224]], [np.float32, 0, [32, 3, 3, 3]], 0, [2, 2], 1, None],\n [[np.float32, 3, (2, 3, 3, 3)], [np.float32, 0, (3, 1, 3, 3)], 3, 1, 1, 1],\n [[np.float32, 3, [1024, 232, 7, 7]], [np.float32, 4, [232, 232, 1, 1]], 0, 1, 1, True],\n ]\n\n for item in shape_format:\n input_cpu, input_npu = create_common_tensor(item[0], -2, 2)\n weight_cpu, weight_npu = create_common_tensor(item[1], -2, 2)\n kernel_size = (item[1][2][2], item[1][2][3])\n cpu_output = self.op_exec_cpu(input_cpu, weight_cpu, item[0][2][1], item[1][2][0], kernel_size=kernel_size,\n padding=item[2], stride=item[3], dilation=item[4], bias=item[5])\n weight_npu = weight_npu.to(\"cpu\")\n npu_output = self.op_exec_npu(input_npu, weight_npu, item[0][2][1], item[1][2][0], kernel_size=kernel_size,\n padding=item[2], stride=item[3], dilation=item[4], bias=item[5]) \n self.assertRtolEqual(cpu_output, npu_output)\n \n def test_conv1d_shape_format_float16(self, device):\n def cpu_op_exec_fp16(input, weight, in_channels, out_channels, kernel_size, padding=0, stride=1, dilation=1, bias=True):\n input = input.to(torch.float32)\n weight = weight.to(torch.float32)\n m = torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias, groups=1)\n m.weight.data = weight\n output = 
m(input)\n output = output.detach().numpy()\n output = output.astype(np.float16)\n return output\n\n shape_format = [ \n [[np.float16, 3, (256, 32, 1, 1)], [np.float16, 3, (8, 32, 1, 1)], 0, (1, 1), (1, 1), (8)],\n [[np.float16, 3, [256, 32, 112, 112]], [np.float16, 0, [16, 32, 1, 1]], 0, 1, 1, None],\n [[np.float16, 0, [256, 3, 224, 224]], [np.float16, 0, [32, 3, 3, 3]], 0, [2, 2], 1, None],\n [[np.float16, 3, (2, 3, 3, 3)], [np.float16, 0, (3, 1, 3, 3)], 3, 1, 1, 1],\n [[np.float16, 3, [1024, 232, 7, 7]], [np.float16, 4, [232, 232, 1, 1]], 0, 1, 1, True],\n ]\n\n for item in shape_format:\n input_cpu, input_npu = create_common_tensor(item[0], -2, 2)\n weight_cpu, weight_npu = create_common_tensor(item[1], -2, 2)\n kernel_size = (item[1][2][2], item[1][2][3])\n cpu_output = cpu_op_exec_fp16(input_cpu, weight_cpu, item[0][2][1], item[1][2][0], kernel_size=kernel_size,\n padding=item[2], stride=item[3], dilation=item[4], bias=item[5])\n weight_npu = weight_npu.to(\"cpu\")\n npu_output = self.op_exec_npu(input_npu, weight_npu, item[0][2][1], item[1][2][0], kernel_size=kernel_size,\n padding=item[2], stride=item[3], dilation=item[4], bias=item[5]) \n self.assertRtolEqual(cpu_output, npu_output) \n\ninstantiate_device_type_tests(TestConv1d, globals(), except_for='cpu')\nif __name__ == \"__main__\":\n torch.npu.set_device(\"npu:6\")\n run_tests()" ]
[ [ "numpy.random.uniform", "torch.from_numpy", "torch.norm_except_dim", "numpy.random.randint" ], [ "torch.ones_like", "torch._C._nn.upsample_bicubic2d" ], [ "torch.nn.Conv1d", "torch.npu.set_device" ] ]
NeuroDataDesign/kdg
[ "510e27973779a59cc310e1eb7497bc29699f5e4e" ]
[ "benchmarks/spiral_exp.py" ]
[ "#%%\nfrom kdg.utils import generate_spirals, generate_gaussian_parity\nfrom kdg import kdf,kdn\nfrom keras import layers\nimport keras\n# %%\nnetwork = keras.Sequential()\n#network.add(layers.Dense(2, activation=\"relu\", input_shape=(2)))\nnetwork.add(layers.Dense(3, activation='relu', input_shape=(2,)))\nnetwork.add(layers.Dense(3, activation='relu'))\nnetwork.add(layers.Dense(units=2, activation = 'softmax'))\n\n#%%\nn_estimators = 200\nX, y = generate_gaussian_parity(sample, cluster_std=0.5)#generate_spirals(5000, noise=.8, n_class=2)\n\nmodel_kdf = kdn(network,fit_kwargs = {\n \"epochs\": 100,\n \"batch_size\": 32,\n \"verbose\": False\n }) #kdf(k=1/2.5, kwargs={'n_estimators':n_estimators})\nmodel_kdf.fit(X, y)\n# %%\nimport seaborn as sns\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\n\np = np.arange(-2,2,step=0.006)\nq = np.arange(-2,2,step=0.006)\nxx, yy = np.meshgrid(p,q)\ntmp = np.ones(xx.shape)\n\ngrid_samples = np.concatenate(\n (\n xx.reshape(-1,1),\n yy.reshape(-1,1)\n ),\n axis=1\n ) \n \nproba_kdf = model_kdf.predict_proba(grid_samples)\nproba_rf = model_kdf.network.predict_proba(grid_samples)\n\ndata = pd.DataFrame(data={'x':grid_samples[:,0], 'y':grid_samples[:,1], 'z':proba_kdf[:,0]})\ndata = data.pivot(index='x', columns='y', values='z')\n\ndata_rf = pd.DataFrame(data={'x':grid_samples[:,0], 'y':grid_samples[:,1], 'z':proba_rf[:,0]})\ndata_rf = data_rf.pivot(index='x', columns='y', values='z')\n#%%\nsns.set_context(\"talk\")\nfig, ax = plt.subplots(2,2, figsize=(16,16))\ncmap= sns.diverging_palette(240, 10, n=9)\nax1 = sns.heatmap(data, ax=ax[0][0], vmin=0, vmax=1,cmap=cmap)\nax1.set_xticklabels(['-2','' , '', '', '', '', '','','','','0','','','','','','','','','2'])\nax1.set_yticklabels(['-2','' , '', '', '', '', '','','','','','','0','','','','','','','','','','','','','2'])\n#ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','' , '', '', '', '', '', '','','','','', '0','','' , '', '', '', '', '','','','','','','','','','','','','1'])\nax[0][0].set_title('KDF',fontsize=24)\n#ax[0][0].invert_yaxis()\n\n\nax1 = sns.heatmap(data_rf, ax=ax[0][1], vmin=0, vmax=1,cmap=cmap)\nax1.set_xticklabels(['-2','' , '', '', '', '', '','','','','0','','','','','','','','','2'])\nax1.set_yticklabels(['-2','' , '', '', '', '', '','','','','','','0','','','','','','','','','','','','','2'])\n#ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','' , '', '', '', '', '', '','','','','', '0','','' , '', '', '', '', '','','','','','','','','','','','','1'])\nax[0][1].set_title('RF',fontsize=24)\n#ax[0][1].invert_yaxis()\n\ncolors = sns.color_palette(\"Dark2\", n_colors=2)\nclr = [colors[i] for i in y]\nax[1][0].scatter(X[:, 0], X[:, 1], c=clr, s=50)\n\nplt.savefig('plots/spiral_pdf_kdn.pdf')\nplt.show()\n# %%\n" ]
[ [ "numpy.ones", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "numpy.meshgrid" ] ]
ChangHoon-Sung/streamlit
[ "d153db37d97faada87bf88972886cda5a624f8c8" ]
[ "lib/tests/streamlit/help_test.py" ]
[ "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"st.help unit test.\"\"\"\n\nfrom tests import testutil\nimport streamlit as st\nimport numpy as np\n\n\nclass StHelpTest(testutil.DeltaGeneratorTestCase):\n \"\"\"Test st.help.\"\"\"\n\n def test_basic_func_with_doc(self):\n \"\"\"Test basic function with docstring.\"\"\"\n\n def my_func(some_param, another_param=123):\n \"\"\"This is the doc\"\"\"\n pass\n\n st.help(my_func)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"my_func\", ds.name)\n self.assertEqual(\"help_test\", ds.module)\n self.assertEqual(\"<class 'function'>\", ds.type)\n self.assertEqual(\"(some_param, another_param=123)\", ds.signature)\n self.assertEqual(\"This is the doc\", ds.doc_string)\n\n def test_basic_func_without_doc(self):\n \"\"\"Test basic function without docstring.\"\"\"\n\n def my_func(some_param, another_param=123):\n pass\n\n st.help(my_func)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"my_func\", ds.name)\n self.assertEqual(\"help_test\", ds.module)\n self.assertEqual(\"<class 'function'>\", ds.type)\n self.assertEqual(\"(some_param, another_param=123)\", ds.signature)\n self.assertEqual(\"No docs available.\", ds.doc_string)\n\n def test_deltagenerator_func(self):\n \"\"\"Test Streamlit DeltaGenerator function.\"\"\"\n\n st.help(st.audio)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"audio\", ds.name)\n self.assertEqual(\"streamlit\", ds.module)\n self.assertEqual(\"<class 'method'>\", ds.type)\n self.assertEqual(\"(data, format='audio/wav', start_time=0)\", ds.signature)\n self.assertTrue(ds.doc_string.startswith(\"Display an audio player\"))\n\n def test_unwrapped_deltagenerator_func(self):\n \"\"\"Test unwrapped Streamlit DeltaGenerator function.\"\"\"\n st.help(st.dataframe)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"dataframe\", ds.name)\n self.assertEqual(\"streamlit\", ds.module)\n self.assertEqual(\"<class 'method'>\", ds.type)\n self.assertEqual(\"(data=None, width=None, height=None)\", ds.signature)\n self.assertTrue(ds.doc_string.startswith(\"Display a dataframe\"))\n\n def test_st_cache(self):\n \"\"\"Test st.cache function (since it's from the 'caching' module).\"\"\"\n st.help(st.cache)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"cache\", ds.name)\n self.assertEqual(\"streamlit\", ds.module)\n self.assertEqual(\"<class 'function'>\", ds.type)\n self.assertEqual(\n ds.signature,\n (\n \"(func=None, \"\n \"persist=False, \"\n \"allow_output_mutation=False, \"\n \"show_spinner=True, \"\n \"suppress_st_warning=False, \"\n \"hash_funcs=None, \"\n \"max_entries=None, \"\n \"ttl=None)\"\n ),\n )\n self.assertTrue(ds.doc_string.startswith(\"Function decorator to\"))\n\n def test_st_echo(self):\n \"\"\"Test st.echo function (since it's from __init__).\"\"\"\n st.help(st.echo)\n\n ds = 
self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"echo\", ds.name)\n self.assertEqual(\"streamlit\", ds.module)\n self.assertEqual(\"<class 'function'>\", ds.type)\n self.assertEqual(\"(code_location='above')\", ds.signature)\n self.assertTrue(ds.doc_string.startswith(\"Use in a `with` block\"))\n\n def test_builtin_func(self):\n \"\"\"Test a built-in function.\"\"\"\n st.help(dir)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"dir\", ds.name)\n self.assertEqual(\"builtins\", ds.module)\n self.assertEqual(\"<class 'builtin_function_or_method'>\", ds.type)\n self.assertEqual(\"\", ds.signature)\n self.assertTrue(len(ds.doc_string) > 0)\n\n def test_builtin_obj(self):\n \"\"\"Test a built-in function.\"\"\"\n st.help(123)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"\", ds.name)\n self.assertEqual(\"\", ds.module)\n self.assertEqual(\"<class 'int'>\", ds.type)\n self.assertEqual(\"\", ds.signature)\n self.assertTrue(len(ds.doc_string) > 0)\n\n def test_doc_defined_for_type(self):\n \"\"\"When the docs are defined for the type on an object, but not\n the object, we expect the docs of the type. This is the case\n of ndarray generated as follow.\n \"\"\"\n\n array = np.arange(1)\n\n st.help(array)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"\", ds.name)\n self.assertTrue(\"ndarray\" in ds.doc_string)\n\n def test_doc_type_is_type(self):\n \"\"\"When the type of the object is type and no docs are defined,\n we expect docs are not available\"\"\"\n\n class MyClass(object):\n pass\n\n st.help(MyClass)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(type(MyClass), type)\n self.assertEqual(\"MyClass\", ds.name)\n self.assertEqual(\"help_test\", ds.module)\n self.assertEqual(\"No docs available.\", ds.doc_string)\n" ]
[ [ "numpy.arange" ] ]
RobbiePerrone320/onnx-mlir
[ "2127e9177f4cbc28b7e860b0876af936ddae80bc" ]
[ "utils/gen_onnx_mlir.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict, OrderedDict\nfrom io import StringIO\nimport io\nimport os\nimport sys\nimport datetime\nimport argparse\n\nimport numpy as np # type: ignore\n\nfrom onnx import defs, FunctionProto, helper, OperatorStatus\nfrom onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN\nfrom onnx.backend.test.case import collect_snippets\nfrom onnx.backend.sample.ops import collect_sample_implementations\nfrom typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple\n\nimport pprint\nimport onnx\n\n# change this variable only when upgrading the ONNX support within ONNX-MLIR\ncurrent_onnx_version = \"1.9.0\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dry-run-onnx-ops\",\n help=\"Output ONNXOps.td.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--dry-run-op-build-table\",\n help=\"Output OpBuildTable.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--check-operation-version\",\n help=\"check whether the imported onnx package has new operation or \"\n \" newer version of operation compared with version stored in version_dicts\",\n action=\"store_true\",\n default=False)\n\nargs = parser.parse_args()\n\ncheck_operation_version = args.check_operation_version\ncurrent_onnx_version = \"1.11.0\"\n# check the version of onnx package being used\nif (not check_operation_version) and current_onnx_version != onnx.__version__ :\n print(\"version of expected onnx is {}, \".format(current_onnx_version)+\n \"while onnx package being used is {}\".format(onnx.__version__))\n quit()\n\n# Record the version of each operation that is treated as the current version.\n# To check whether the onnx package being used has newer version operation,\n# run this script with --check-operation-version flag.\n# Update this dictionary when a newer version is implemented\n# TODO: how to keep the old version\nversion_dict = {\n 'Abs': [13],\n 'Acos': [7],\n 'Acosh': [9],\n 'Adagrad': [1],\n 'Adam': [1],\n 'Add': [13],\n 'And': [7],\n 'ArgMax': [13],\n 'ArgMin': [13],\n 'ArrayFeatureExtractor': [1],\n 'Asin': [7],\n 'Asinh': [9],\n 'Atan': [7],\n 'Atanh': [9],\n 'AveragePool': [11],\n 'BatchNormalization': [9],\n 'Binarizer': [1],\n 'BitShift': [11],\n 'Cast': [13],\n 'CastMap': [1],\n 'CategoryMapper': [1],\n 'Ceil': [13],\n 'Celu': [12],\n 'Clip': [13, 12, 11, 6],\n 'Compress': [11],\n 'Concat': [13],\n 'ConcatFromSequence': [11],\n 'Constant': [13],\n 'ConstantOfShape': [9],\n 'Conv': [11],\n 'ConvInteger': [10],\n 'ConvTranspose': [11],\n 'Cos': [7],\n 'Cosh': [9],\n 'CumSum': [11],\n 'DepthToSpace': [13],\n 'DequantizeLinear': [13],\n 'Det': [11],\n 'DictVectorizer': [1],\n 'Div': [13],\n 'Dropout': [13],\n 'DynamicQuantizeLinear': [11],\n 'Einsum': [12],\n 'Elu': [6],\n 'Equal': [13],\n 'Erf': [13],\n 'Exp': [13],\n 'Expand': [13],\n 'EyeLike': [9],\n 'FeatureVectorizer': [1],\n 'Flatten': [13],\n 'Floor': [13],\n 'GRU': [7],\n 'Gather': [13],\n 'GatherElements': [13],\n 'GatherND': [13],\n 'Gemm': [13],\n 'GlobalAveragePool': [1],\n 'GlobalLpPool': [2],\n 'GlobalMaxPool': [1],\n 'Gradient': [1],\n 'Greater': [13],\n 'GreaterOrEqual': [12],\n 'HardSigmoid': [6],\n 'Hardmax': [13],\n 'Identity': [13],\n 'If': [13],\n 'Imputer': [1],\n 'InstanceNormalization': [6],\n 'IsInf': [10],\n 'IsNaN': [13],\n 'LRN': [13],\n 
'LSTM': [7],\n 'LabelEncoder': [2],\n 'LeakyRelu': [6],\n 'Less': [13],\n 'LessOrEqual': [12],\n 'LinearClassifier': [1],\n 'LinearRegressor': [1],\n 'Log': [13],\n 'LogSoftmax': [13],\n 'Loop': [13],\n 'LpNormalization': [1],\n 'LpPool': [11],\n 'MatMul': [13],\n 'MatMulInteger': [10],\n 'Max': [13],\n 'MaxPool': [12],\n 'MaxRoiPool': [1],\n 'MaxUnpool': [11],\n 'Mean': [13],\n 'MeanVarianceNormalization': [13],\n 'Min': [13],\n 'Mod': [13],\n 'Momentum': [1],\n 'Mul': [13],\n 'Multinomial': [7],\n 'Neg': [13],\n 'NegativeLogLikelihoodLoss': [13],\n 'NonMaxSuppression': [11],\n 'NonZero': [13],\n 'Normalizer': [1],\n 'Not': [1],\n 'OneHot': [11],\n 'OneHotEncoder': [1],\n 'Or': [7],\n 'PRelu': [9],\n 'Pad': [13, 11, 2],\n 'Pow': [13],\n 'QLinearConv': [10],\n 'QLinearMatMul': [10],\n 'QuantizeLinear': [13],\n 'RNN': [7],\n 'RandomNormal': [1],\n 'RandomNormalLike': [1],\n 'RandomUniform': [1],\n 'RandomUniformLike': [1],\n 'Range': [11],\n 'Reciprocal': [13],\n 'ReduceL1': [13],\n 'ReduceL2': [13],\n 'ReduceLogSum': [13],\n 'ReduceLogSumExp': [13],\n 'ReduceMax': [13],\n 'ReduceMean': [13],\n 'ReduceMin': [13],\n 'ReduceProd': [13],\n 'ReduceSum': [13, 11],\n 'ReduceSumSquare': [13],\n 'Relu': [13],\n 'Reshape': [13],\n 'Resize': [13, 11, 10],\n 'ReverseSequence': [10],\n 'RoiAlign': [10],\n 'Round': [11],\n 'SVMClassifier': [1],\n 'SVMRegressor': [1],\n 'Scaler': [1],\n 'Scan': [11],\n 'Scatter': [11],\n 'ScatterElements': [13],\n 'ScatterND': [13],\n 'Selu': [6],\n 'SequenceAt': [11],\n 'SequenceConstruct': [11],\n 'SequenceEmpty': [11],\n 'SequenceErase': [11],\n 'SequenceInsert': [11],\n 'SequenceLength': [11],\n 'Shape': [13], # When going to 15, rewrite rules must also be changed for start/end\n 'Shrink': [9],\n 'Sigmoid': [13],\n 'Sign': [13],\n 'Sin': [7],\n 'Sinh': [9],\n 'Size': [13],\n 'Slice': [13],\n 'Softmax': [13],\n 'SoftmaxCrossEntropyLoss': [13],\n 'Softplus': [1],\n 'Softsign': [1],\n 'SpaceToDepth': [13],\n 'Split': [13, 11],\n 'SplitToSequence': [11],\n 'Sqrt': [13],\n 'Squeeze': [13, 11],\n 'StringNormalizer': [10],\n 'Sub': [13],\n 'Sum': [13],\n 'Tan': [7],\n 'Tanh': [13],\n 'TfIdfVectorizer': [9],\n 'ThresholdedRelu': [10],\n 'Tile': [13],\n 'TopK': [11],\n 'Transpose': [13],\n 'TreeEnsembleClassifier': [1],\n 'TreeEnsembleRegressor': [1],\n 'Unique': [11],\n 'Unsqueeze': [13, 11],\n 'Upsample': [10, 9, 7],\n 'Where': [9],\n 'Xor': [7],\n 'ZipMap': [1]}\n\n# Manual specification of attribute type.\nspecial_attr_types = dict([(\"Cast.to\", 'type')])\n\n# Special operation importing handlers.\nspecial_op_handler = dict([\n (\"BatchNormalization\", \"ImportNodeBatchNormalization\"),\n (\"Dropout\", \"ImportNodeDropout\"),\n (\"Cast\", \"ImportNodeCast\"),\n (\"MaxPool\", \"ImportNodeMaxPool\"),\n (\"Pad\", \"ImportNodePad\"),\n (\"Slice\", \"ImportNodeSlice\"),\n (\"Softmax\", \"ImportNodeSoftmax\"),\n #(\"Transpose\", \"ImportNodeTranspose\")\n])\n\n# Operations supporting canonicalization (alphabetical order).\nOpsWithCanonicalizer = [\n 'Add',\n 'Cast',\n 'Constant',\n 'Dropout',\n 'GlobalAveragePool',\n 'GlobalMaxPool',\n 'Identity',\n 'Reshape',\n 'Shape',\n 'Size',\n 'Squeeze',\n 'SqueezeV11',\n 'Transpose',\n 'Unsqueeze',\n 'UnsqueezeV11',\n]\n\n# Operations with custom verifiers (alphabetical order).\nOpsWithVerifier = [\n 'AveragePool',\n 'ArgMax',\n 'ArgMin',\n 'CategoryMapper', \n 'Compress',\n 'Concat',\n 'ConstantOfShape',\n 'Conv',\n 'DepthToSpace',\n 'Expand',\n 'Flatten',\n 'Hardmax',\n 'InstanceNormalization',\n 'Mod',\n 'NonMaxSuppression',\n 
'OneHot',\n \"PRelu\",\n 'OneHotEncoder',\n 'Pow',\n 'RandomNormalLike',\n 'ReverseSequence',\n \"RoiAlign\",\n \"ScatterElements\",\n 'ScatterND',\n 'SequenceEmpty',\n 'SequenceInsert',\n 'SpaceToDepth',\n 'TopK',\n]\n\nOpsWithHelpers = {\n \"Loop\": \"\"\"\n mlir::Operation::result_range v_final();\n mlir::Operation::result_range scan_outputs();\n \"\"\",\n \"Scan\": \"\"\"\n mlir::Operation::operand_range v_initial();\n mlir::Operation::result_range v_final();\n mlir::Operation::operand_range scan_inputs();\n mlir::Operation::result_range scan_outputs();\n \"\"\"\n}\n# Interface for special handling of type inference\n# The common code are put into get_type_inference_func\nOpsWithResultTypeInference = {\n \"Constant\":\n '''if (auto attr = valueAttr()) {\n resultTypes.push_back(attr.getType());\n } else if (auto attr = sparse_valueAttr()) {\n resultTypes.push_back(attr.getType());\n }''',\n \"Cast\":\n '''// ae auto builder = mlir::OpBuilder(getContext());\n resultTypes.push_back(mlir::UnrankedTensorType::get(to()));''',\n \"ConstantOfShape\":\n '''if (auto attr = valueAttr()) {\n resultTypes.push_back(mlir::UnrankedTensorType::get(\n attr.getType().cast<ShapedType>().getElementType()));\n } else {\n resultTypes.push_back(mlir::UnrankedTensorType::get(\n FloatType::getF32(getContext())));\n }'''\n}\n\n# Add an Op in this list if the Op needs result type deduction which is required\n# when writing declarative rewriting rules. Deduced type is always\n# an UnrankedTensorType whose element type is the same as the first operand's\n# element type.\n#\n# Currently, there are only two build methods generated:\n# - one with operands and attributes having a separate parameter, and\n# - one with operands and attributes having aggregated parameters.\ncustom_builder_unranked_ops_list = [\n 'Abs',\n 'Exp',\n 'Identity',\n 'Neg',\n 'Pad',\n 'ReduceLogSum',\n 'ReduceMax',\n 'ReduceSum',\n 'ReduceSumSquare',\n 'ReduceSumV11',\n 'Softmax',\n 'Split',\n 'Sqrt',\n 'SqueezeV11',\n 'UnsqueezeV11',\n]\n# Custom builder op list for operations with broadcast; we can deduce the right\n# output type, no need to leave it undef as in the above list.\n# Ops must have two operands, not one, not three... 
And there shall be two.\n# TODO: handle variadic ops omitted here: Max, Min, Min, Sum.\ncustom_builder_broadcast_to_same_type_ops_list = [\n 'Add',\n 'And',\n 'Div',\n 'Mul',\n 'Or',\n 'Pow',\n 'Sub',\n 'Xor',\n]\ncustom_builder_broadcast_to_bool_ops_list = [\n 'Equal',\n 'Greater',\n 'Less',\n]\ncustom_builder_broadcast_ops_list = custom_builder_broadcast_to_same_type_ops_list + \\\n custom_builder_broadcast_to_bool_ops_list\n# union of both\ncustom_builder_ops_list = custom_builder_unranked_ops_list + custom_builder_broadcast_ops_list\n\n#a dictionary to add any special definition for an operation\ncustom_definition_misc = dict([ ('Constant',\n ''' let builders = [\n OpBuilder<(ins \"Attribute\":$sparse_value, \"Attribute\":$value), [{\n if (value) {\n auto tensorType = value.getType();\n build($_builder, $_state, tensorType, sparse_value, value,\n FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n } else {\n auto tensorType = sparse_value.getType();\n build($_builder, $_state, tensorType, sparse_value, value,\n FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n }\n }]>\n ];'''),\n ('Cast',\n ''' let builders = [\n OpBuilder<(ins \"Value\":$input, \"TypeAttr\":$to), [{\n auto resultType = mlir::UnrankedTensorType::get(to.getValue());\n build($_builder, $_state, resultType, input, to);\n }] >\n ];'''\n )])\n\nonnx_types = (\n 'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',\n 'float', 'double', 'complex64', 'complex128', 'string'\n)\ntblgen_types = ('AnyI1', 'AnyI8', 'AnyI16', 'AnyI32', 'AnyI64',\n 'BF16', 'F16', 'F32', 'F64', 'Complex<F32>', 'Complex<F64>',\n 'StringType'\n)\n\nMAX_NUM_TYPES=20\n\ndef should_render_domain(domain): # type: (Text) -> bool\n return True\n\n\ndef display_attr_type(v): # type: (OpSchema.AttrType) -> Text\n assert isinstance(v, OpSchema.AttrType)\n s = Text(v)\n s = s[s.rfind('.') + 1:].lower()\n if s[-1] == 's':\n s = 'list of ' + s\n return s\n\n\ndef get_unique_output_name(schema, name):\n for input in schema.inputs:\n if input.name == name:\n return 'out_' + name\n return name\n\n\ndef onnx_attr_type_to_mlir_attr_type(t):\n onnx_attr_type = Text(t)\n onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()\n\n if onnx_attr_type == 'int':\n mlir_attr_type = 'SI64Attr'\n elif onnx_attr_type == 'float':\n mlir_attr_type = 'F32Attr'\n elif onnx_attr_type == 'ints':\n mlir_attr_type = 'I64ArrayAttr'\n elif onnx_attr_type == 'floats':\n mlir_attr_type = 'F32ArrayAttr'\n elif onnx_attr_type == \"string\":\n mlir_attr_type = 'StrAttr'\n elif onnx_attr_type == \"strings\":\n mlir_attr_type = 'StrArrayAttr'\n elif onnx_attr_type == 'type':\n mlir_attr_type = 'TypeAttr'\n else:\n mlir_attr_type = 'AnyAttr'\n #TODO: tensor and sparse tensor\n return mlir_attr_type\n\n\n#TODO: any better way to do this.\ndef tblgen_attr_type_to_cpp_type(t):\n if 'I64Attr' in t:\n cpp_type = 'IntegerAttr'\n elif 'F32Attr' in t:\n cpp_type = 'FloatAttr'\n elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:\n cpp_type = 'ArrayAttr'\n elif 'StrAttr' in t:\n cpp_type = 'StringAttr'\n elif 'strings' in t:\n cpp_type = 'ArrayAttr'\n else:\n cpp_type = 'Attribute'\n return cpp_type\n\n\ndef tblgen_operand_type_to_cpp_type(op_type):\n if op_type.startswith('Variadic'):\n mytype = 'ValueRange'\n else:\n mytype = 'Value'\n return mytype\n\n\ndef np_type_to_tblgen_attr_type(tstr):\n index = -1\n for i in range(len(onnx_types)):\n if onnx_types[i] in tstr:\n index = i\n break\n if index == -1:\n return 
None\n else:\n return tblgen_types[i]\n\ndef get_tblgen_type_index(type_str):\n return tblgen_types.index(type_str)\n\n#the possible data structures are tensor, map and seq(tensor())\ndef get_data_structure_element(allowed_type_str):\n structure_list = ['tensor', 'seq', 'map']\n for structure in structure_list:\n if allowed_type_str.startswith(structure) :\n element = allowed_type_str.replace(\n structure+'(', '', 1).replace(')', '', 1)\n return (structure, element)\n return (None, None)\n\ndef get_allowed_elem_types(schema, input):\n #allowed_types_str = None\n # return allowed_types_str\n # TODO: enable type constraints.\n if input.typeStr :\n tstr = input.typeStr\n structure, element = get_data_structure_element(tstr);\n # In case the type is directly specified\n if structure and element :\n t = np_type_to_tblgen_attr_type(element)\n if t == None :\n return allowed_structure, None\n else :\n return structure, [t]\n else :\n return None\n if schema.type_constraints:\n for type_constraint in schema.type_constraints:\n if type_constraint.type_param_str != tstr :\n continue\n allowed_type_list=[]\n allowedTypes = type_constraint.allowed_type_strs\n allowed_structure = None\n for allowedType in allowedTypes:\n structure, element = get_data_structure_element(allowedType);\n if structure == None or element == None:\n return None, None\n\n if allowed_structure != None and allowed_structure != structure :\n return None, None\n allowed_structure = structure\n t = np_type_to_tblgen_attr_type(element)\n if t == None :\n return allowed_structure, None\n if not t in allowed_type_list :\n allowed_tyoe_list = allowed_type_list.append(t)\n\n return allowed_structure,allowed_type_list\n\n return None, None\n\n\ndef inc_indent(indent=None):\n return \"\" if indent is None else indent + ' ' * 2\n\n\ndef dec_indent(indent):\n return indent[:-2]\n\n\ndef join_args(args):\n return \", \".join(args)\n\ndef get_operands_or_results(schema, type_str_dict, is_input):\n value_list = schema.inputs if is_input else schema.outputs\n if not value_list:\n return OrderedDict()\n\n def any_type_of(types):\n assert isinstance(types, list)\n if len(types) == 1:\n return types[0]\n else:\n return \"AnyTypeOf<[{}]>\".format(\", \".join(types))\n\n name_to_types = OrderedDict()\n for i, value in enumerate(value_list):\n types = get_onnx_mlir_types(schema, type_str_dict, value)\n\n '''\n structure, elem_types = get_allowed_elem_types(schema, type_str_dict, value)\n\n if structure == 'tensor' :\n if elem_types is None:\n types = [\"AnyMemRef\", \"AnyTensor\"]\n else:\n elem_types_str = ','.join(elem_types)\n types = [\"TensorOf<[{}]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types_str), types))\n elif structure == 'seq' :\n # Seq is not supported yet.\n # Use of TensorOf<[AnyTensor]> as a placeholder for tablegen.\n # When the Operation is used, warning/error will be generated at runtime.\n if elem_types is None:\n types = [\"AnyMemRef\", \"TensorOf<[AnyTensor]>\"]\n else:\n elem_types_str = ','.join(elem_types)\n types = [\"TensorOf<[TensorOf<[{}]>]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types_str), types))\n elif structure == 'map' :\n # Map is not supported yet.\n # Use of TupleOf as a placeholder for tablegen.\n # When the Operation is used, warning/error will be generated at runtime.\n if elem_types is None:\n types = [\"AnyMemRef\", \"TupleOf<[AnyTensor]>\"]\n else:\n elem_types_str = ','.join(elem_types)\n types = [\"TupleOf<[TensorOf<[{}]>]>\", \"MemRefOf<[{}]>\"]\n 
types = list(map(lambda x: x.format(elem_types_str), types))\n else:\n types = [\"AnyMemRef\", \"AnyTensor\"]\n '''\n\n if OpSchema.FormalParameterOption.Optional == value.option:\n types.append(\"NoneType\")\n elif OpSchema.FormalParameterOption.Variadic == value.option:\n if value.isHomogeneous:\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n else:\n #TODO handle(variadic, heterogeneous) \"\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n sys.stderr.write(\"warning: (variadic, heterogeneous) for \" + schema.name +\n ' ' + value.name + \"\\n\")\n\n # Since output name can coincide with that of an input, we explicitly\n # append a suffix \"_out\" to such names for disambiguation.\n if is_input:\n value_name = value.name\n else:\n value_name = get_unique_output_name(schema, value.name)\n\n name_to_types[value_name] = any_type_of(types)\n return name_to_types\n\n\ndef get_attrs(schema):\n def get_attr_type_optional(attr_type):\n return 'OptionalAttr<{}>'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type))\n\n def get_attr_type_with_default(attr_type, attr_default):\n if attr_type == OpSchema.AttrType.STRING:\n return 'DefaultValuedStrAttr<{}, \"{}\">'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)\n else:\n return 'DefaultValuedAttr<{}, \"{}\">'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)\n\n if not schema.attributes:\n return OrderedDict()\n\n name_to_type = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n if attr.type == OpSchema.AttrType.GRAPH:\n continue\n\n qualified_attr_name = \"{}.{}\".format(schema.name, attr.name)\n if qualified_attr_name in special_attr_types:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n special_attr_types[qualified_attr_name])\n # option holds either required or default value\n elif attr.required:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n attr.type)\n elif attr.default_value.name:\n\n def format_value(value): # type: (Any) -> Text\n if isinstance(value, float):\n formatted = str(np.round(value, 5))\n # use default formatting, unless too long.\n if (len(formatted) > 10):\n formatted = str(\"({:e})\".format(value))\n return formatted\n elif isinstance(\n value,\n (bytes, bytearray)) and sys.version_info[0] == 3:\n return str(value.decode('utf-8'))\n return str(value)\n\n default_value = helper.get_attribute_value(attr.default_value)\n if isinstance(default_value, list):\n default_value = [format_value(val) for val in default_value]\n default_value_str = '{}'.format(default_value)\n default_value_str = default_value_str.replace('[', '{', 1)\n default_value_str = default_value_str.replace(']', '}', 1)\n if Text(attr.type) == \"AttrType.STRINGS\":\n default_value_str = default_value_str.replace(\"'\", '\\\\\"')\n else:\n default_value_str = default_value_str.replace(\"'\", '')\n else:\n default_value = format_value(default_value)\n default_value_str = default_value\n\n name_to_type[attr.name] = get_attr_type_with_default(\n attr.type, default_value_str)\n else:\n name_to_type[attr.name] = get_attr_type_optional(attr.type)\n return name_to_type\n\ndef get_numberof_list(mylist):\n expected_num = len(mylist)\n for element in mylist :\n if OpSchema.FormalParameterOption.Variadic == element.option:\n expected_num = -1\n return expected_num\n\ndef get_output_type_mapping(schema):\n mapping=[]\n for output in schema.outputs :\n #if only one type is allowed, just set that\n structure, allowed_elem_types = get_allowed_elem_types(schema, output)\n if 
allowed_elem_types != None and len(allowed_elem_types) == 1 :\n mapping.append(str(get_tblgen_type_index(allowed_elem_types[0])))\n continue\n\n #map the type string\n if output.typeStr :\n tstr = output.typeStr\n found = False\n for i, input in enumerate(schema.inputs):\n if input.typeStr and input.typeStr == tstr:\n mapping.append(str(i+MAX_NUM_TYPES))\n found = True\n break\n if found:\n continue\n\n #unknown output type\n mapping.append(str(-1))\n\n return mapping\n\ndef get_numberof_inout(s, indent, schema):\n expected_num_operands = get_numberof_list(schema.inputs)\n indent = inc_indent(indent)\n s += indent + \"static int getNumberOfOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(expected_num_operands)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n expected_num_results = get_numberof_list(schema.outputs)\n s += indent + \"static int getNumberOfResults() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(expected_num_results)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n s += indent + \"static std::vector<int> getTypeMap() {\\n\"\n mapping = get_output_type_mapping(schema)\n indent = inc_indent(indent)\n s += indent + \"return {\" + \",\".join(mapping) + \"};\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n return s\n\n\ndef get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):\n cpp_name_to_idx_literal = \"{\" + \", \".join([\n \"{{\\\"{}\\\", {}}}\".format(*name_to_idx)\n for name_to_idx in const_operands_name_to_idx\n ]) + \"}\"\n\n #s += indent + \"let extraClassDeclaration = [{\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::map<std::string, size_t> promotableConstOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(cpp_name_to_idx_literal)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n #indent = dec_indent(indent)\n #s += indent + \"}];\\n\"\n\n return s\n\ndef get_type_inference_func(s, indent, type_inference_code):\n indent = inc_indent(indent)\n\n s += indent + \"std::vector<mlir::Type> resultTypeInference() {\" + \"\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::vector<mlir::Type> resultTypes;\" + \"\\n\"\n\n s += indent + type_inference_code + '\\n'\n\n s += indent + \"return resultTypes;\" + \"\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\" + \"\\n\"\n\n indent = dec_indent(indent)\n return s\n\ndef parse_type_str(allowedType):\n # AnyI may be used for uint because the onnx_mlir is not generating uint output\n # This will be fixed later and UI will be replace AnyI\n onnx_to_mlir_type_dict = { '(': '<[',\n ')': ']>',\n 'tensor' : 'TensorOf',\n 'seq' : 'SeqOf',\n 'map' : 'TupleOf',\n 'bool': 'I1',\n #'uint8' : 'AnyI8',\n #uint16' : 'AnyI16',\n #uint32' : 'AnyI32',\n #uint64' : 'AnyI64',\n 'uint8' : 'UI8',\n 'uint16' : 'UI16',\n 'uint32' : 'UI32',\n 'uint64' : 'UI64',\n 'int8' : 'I8',\n 'int16' : 'I16',\n 'int32' : 'I32',\n 'int64' : 'I64',\n 'float16' : 'F16',\n 'bfloat16' : 'BF16',\n 'float' : 'F32',\n 'double' : 'F64',\n 'unkown' : 'BF16',\n 'complex64' : 'Complex<F32>',\n 'complex128' : 'Complex<F64>',\n 'string' : 'StringType'}\n\n # Apply substitutions in decreasing order of key-length, so that float16 is replaced\n # before float, and uint16 is replaced before int16, etc.\n mapping = list(onnx_to_mlir_type_dict.items())\n mapping.sort(key=lambda pair:len(pair[0]), reverse=True)\n for key, item in mapping:\n allowedType = allowedType.replace(key, item)\n return 
allowedType\n\ndef parse_a_type_constraint(constraint):\n allowedTypes = constraint.allowed_type_strs\n mlirTypes = []\n for allowedType in allowedTypes:\n mlirType = parse_type_str(allowedType)\n mlirTypes.append(mlirType)\n # Remove redundant and sort.\n # However onnx keeps a consitently meaningful order\n # There is no redundancy as long as each onnx type is mapped uniquely\n # mlirTypes = sorted(list(set(mlirTypes)))\n\n # MemRef is always needed\n mlirTypes.append(\"AnyMemRef\")\n return mlirTypes\n\ndef parse_type_constraints(schema):\n type_str_dict = dict()\n for type_constraint in schema.type_constraints:\n type_str_dict[type_constraint.type_param_str] = parse_a_type_constraint(type_constraint)\n return type_str_dict\n\ndef get_onnx_mlir_types(schema, type_str_dict, input):\n if input.typeStr :\n if not input.typeStr in type_str_dict :\n # some arguments use type description directly\n # instead of constraint\n return [parse_type_str(input.typeStr), \"AnyMemRef\"]\n else :\n return type_str_dict[input.typeStr]\n else :\n print('No typeStr ', schema.name)\n return []\n\ndef gen_op_def(schema, with_version = False):\n indent = inc_indent()\n if with_version :\n opName = schema.name+\"V\"+str(schema.since_version)\n else :\n opName = schema.name\n s = 'def ONNX{0}Op:ONNX_Op<\"{0}\",\\n'.format(opName)\n\n regions = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n if attr.type == OpSchema.AttrType.GRAPH:\n if attr.required:\n regions[attr.name] = \"SizedRegion<1>\"\n else:\n regions[attr.name] = \"AnyRegion\"\n\n # Generate decl for op traits.\n traits = [\"NoSideEffect\"]\n # OpsWithShapeInference:\n # Now the ShapeInference traits are added to all operation\n # Dummy implementations are added to ONNXOps.cpp\n # Error will be report if these operations are encountered at runtime\n traits.append(\"DeclareOpInterfaceMethods<ShapeInferenceOpInterface>\")\n if opName in OpsWithResultTypeInference.keys():\n traits.append(\"OpInterface<\\\"ResultTypeInferenceOpInterface\\\">\")\n if len(regions):\n traits.append(\"OpInterface<\\\"HasOnnxSubgraphOpInterface\\\">\")\n s += inc_indent(indent) + '[{}]> {{\\n'.format(join_args(traits))\n\n # Generate decl for canonicalizer.\n indent = inc_indent(indent)\n if opName in OpsWithCanonicalizer:\n s += indent + 'let hasCanonicalizer = 1;\\n'\n\n # Generate decl for summary.\n s += indent + 'let summary = \"ONNX {} operation\";\\n'.format(schema.name)\n\n # Generate description.\n s += indent + 'let description = [{\\n'\n if schema.doc:\n lines = schema.doc.lstrip().splitlines()\n for line in lines:\n escaped_line = line.replace('\"', '\\\\\"')\\\n .replace('}]', '\\\\}\\\\]')\n s += indent + '\"{}\"\\n'.format(escaped_line)\n s += indent + '}];\\n'\n\n # handle the type constraint for input and output\n # parse type constraint into onnx-mlir type string list\n type_str_dict = parse_type_constraints(schema)\n\n # Generate ins (consisting of operands and attributes).\n ins = get_operands_or_results(schema, type_str_dict, is_input=True)\n ins.update(get_attrs(schema))\n\n ins_strs = [\"{1}:${0}\".format(*i) for i in ins.items()]\n s += indent + 'let arguments = (ins {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(ins_strs))\n\n # Generate outs (operation results).\n outs = get_operands_or_results(schema, type_str_dict, is_input=False)\n outs_strs = [\"{1}:${0}\".format(*i) for i in outs.items()]\n s += indent + 'let results = (outs {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(outs_strs))\n\n regions_strs = 
[\"{1}:${0}\".format(*i) for i in regions.items()]\n\n if len(regions):\n s += indent + 'let regions = (region {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(regions_strs))\n\n # custom_builder_broadcast_ops_list\n\n # add custom builders\n # use element type of the first operand to construct an UnrankedTensorType for the output.\n if opName in custom_builder_ops_list:\n if len(ins) == 0:\n raise RuntimeWarning(\n \"warning: not generate custom build methods for \" +\n schema.name + \" since it does not have operands.\")\n else:\n s += indent + 'let builders = [\\n'\n # Custom builders with operands and attributes having a separate parameter.\n # E.g. OpBuilder<(ins \"Value\":$X, \"Value\":$Y, \"Attribute\":$A), [{}]>\n indent = inc_indent(indent)\n s += indent + 'OpBuilder<(ins '\n operands_dict = get_operands_or_results(schema, type_str_dict, is_input=True)\n attrs_dict = get_attrs(schema)\n s += ', '.join('\"{}\":${}'.format(tblgen_operand_type_to_cpp_type(ty),\n name) for name, ty in operands_dict.items())\n if operands_dict and attrs_dict:\n s += ', '\n s += ', '.join('\"{}\":${}'.format(tblgen_attr_type_to_cpp_type(ty),\n name) for name, ty in attrs_dict.items())\n s += '), [{\\n'\n indent = inc_indent(indent)\n\n # Get output type from first operand's type.\n first_operand_name = list(ins.items())[0][0]\n build_type_name = ''\n bool_type = \"$_builder.getI1Type()\"\n oTy = \"nullptr\"\n if opName in custom_builder_broadcast_to_bool_ops_list:\n oTy = bool_type\n if opName in custom_builder_broadcast_ops_list:\n second_operand_name = list(ins.items())[1][0]\n s += indent + 'auto lhsTy = {}.getType();\\n'. \\\n format(first_operand_name)\n s += indent + 'auto rhsTy = {}.getType();\\n'. \\\n format(second_operand_name)\n s += indent + 'auto oTy = {};\\n'.format(oTy)\n s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy, oTy);\\n'\n s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\\n';\n s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\\n';\n if opName in custom_builder_broadcast_to_bool_ops_list:\n s += indent + indent + 'elementType = {};\\n'.format(bool_type)\n else:\n s += indent + indent + 'elementType = {}'.format(first_operand_name) + \\\n '.getType().cast<ShapedType>().getElementType();\\n';\n s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\\n'\n s += indent + '}\\n';\n build_type_name = 'elementType'\n else:\n s += indent + 'auto elementType = {}'.format(first_operand_name) + \\\n '.getType().cast<ShapedType>().getElementType();\\n'\n build_type_name = 'UnrankedTensorType::get(elementType)'\n s += indent + 'build($_builder, $_state, {}'.format(build_type_name)\n for name, _ in ins.items():\n s += ', ' + name\n s += ');\\n'\n indent = dec_indent(indent)\n s += indent + '}]>,\\n'\n\n # Custom builders with all operands and attributes having aggregate parameters.\n # E.g. 
OpBuilder<(ins \"ValueRange operands,\n # ArrayRef<NamedAttribute> attributes\", [{}]>'\n s += indent + 'OpBuilder<(ins ' + \\\n '\"ValueRange\":$operands, \"ArrayRef<NamedAttribute>\":$attributes), [{\\n'\n indent = inc_indent(indent)\n if opName in custom_builder_broadcast_ops_list:\n s += indent + 'auto lhsTy = operands[0].getType();\\n'\n s += indent + 'auto rhsTy = operands[1].getType();\\n'\n s += indent + 'auto oTy = {};\\n'.format(oTy)\n s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy, oTy);\\n'\n s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\\n';\n s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\\n';\n if opName in custom_builder_broadcast_to_bool_ops_list:\n s += indent + indent + 'elementType = {};\\n'.format(bool_type)\n else:\n s += indent + indent + 'elementType = operands[0]' + \\\n '.getType().cast<ShapedType>().getElementType();\\n';\n s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\\n'\n s += indent + '}\\n';\n else:\n s += indent + 'auto elementType = operands[0].getType().' + \\\n 'cast<ShapedType>().getElementType();\\n'\n s += indent + 'std::vector<mlir::Type> outputTypes;\\n'\n s += indent + 'outputTypes.emplace_back({});\\n'.format(build_type_name)\n s += indent + 'build($_builder, $_state, outputTypes, operands, attributes);\\n'\n indent = dec_indent(indent)\n s += indent + '}]>'\n\n s += '\\n' + indent + '];\\n'\n\n # Generate extraClassDeclaration.\n s += indent + \"let extraClassDeclaration = [{\\n\"\n #indent = inc_indent(indent)\n\n # Generate input/output number.\n s = get_numberof_inout(s, indent, schema)\n\n if opName in OpsWithResultTypeInference:\n s = get_type_inference_func(\n s, indent, OpsWithResultTypeInference[opName])\n\n if opName in OpsWithHelpers:\n s += OpsWithHelpers[opName]\n\n if len(regions):\n s += indent + \"int64_t getSubgraphRegionIdx(const std::string& name) {\\n\"\n indent = inc_indent(indent)\n for idx, region_name in enumerate(regions.keys()):\n s += indent + \"if (name == \\\"{}\\\") return {};\\n\".format(region_name, idx)\n s += indent + \"llvm_unreachable(\\\"region with the specified name does not exist\\\");\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n s += indent + '}];\\n'\n\n if ( opName in custom_definition_misc) :\n s += custom_definition_misc[opName] + '\\n'\n\n # Generate decl for verifier.\n if opName in OpsWithVerifier:\n s += indent + 'let hasVerifier = 1;\\n'\n\n s += '}\\n\\n'\n return s\n\n\ndef gen_op_versions(file) :\n indent = inc_indent()\n s = \"\"\n for key, item in version_dict.items() :\n s += indent + 'op_dialect_version_map_[\"' + key +'\"] = '\n s += \"{\" + \"{}\".format(\", \".join(str(x) for x in item)) + \"};\\n\"\n file.write(s)\n\n\"\"\"\nspecial cases:\n* Split: attr split default value: sizeof(output1) namely 1\n* Conv: attr dilations default value is {num_dim of first input - 2, 1}\n* Conv: attr kernel_shape type is ints\n* Transpose: attr perm default value is {} empty int list\n\"\"\"\n\n\ndef gen_op_importer(schema, file, with_version=False):\n indent = inc_indent()\n if with_version :\n opName = schema.name + \"V\"+str(schema.since_version)\n else :\n opName = schema.name\n s = indent + 'import_handler_map_[\"' + opName +'\"] = \\n '\n\n expected_num_operands = len(schema.inputs)\n expected_num_results = len(schema.outputs)\n for input in schema.inputs:\n if OpSchema.FormalParameterOption.Variadic == input.option:\n expected_num_operands = -1\n for output in 
schema.outputs:\n if OpSchema.FormalParameterOption.Variadic == output.option:\n expected_num_results = -1\n\n # Only support special op handler for the op without version.\n if with_version:\n handler_func = \"buildOperation<mlir::ONNX{}Op>\".format(opName)\n else:\n handler_func = special_op_handler.get(\n schema.name, \"buildOperation<mlir::ONNX{}Op>\".format(opName))\n\n # Special handlers currently require expected num operands/results to be specified.\n # TODO: remove special handlers.\n args = [\"node\"]\n \"\"\"\n if expected_num_operands != -1 or expected_num_results != -1 or \"buildOperation\" not in handler_func:\n args.append(\n \"/* expected_num_operands = */ {}\".format(expected_num_operands))\n args.append(\n '/* expected_num_results = */ {}'.format(expected_num_results))\n \"\"\"\n s += inc_indent(indent) + '&onnx_mlir::detail::FrontendGenImpl::'\n s += handler_func+';\\n'\n\n file.write(s)\n\n\ndef build_operator_schemas():\n # domain -> support level -> name -> [schema]\n index = defaultdict(lambda: defaultdict(lambda: defaultdict(\n list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]\n for schema in defs.get_all_schemas_with_history():\n index[schema.domain][int(\n schema.support_level)][schema.name].append(schema)\n\n # Preprocess the Operator Schemas\n # [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]\n operator_schemas = list(\n ) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]\n exsting_ops = set() # type: Set[Text]\n for domain, _supportmap in sorted(index.items()):\n if not should_render_domain(domain):\n continue\n processed_supportmap = list()\n for _support, _namemap in sorted(_supportmap.items()):\n processed_namemap = list()\n for n, unsorted_versions in sorted(_namemap.items()):\n versions = sorted(unsorted_versions,\n key=lambda s: s.since_version)\n schema = versions[-1]\n if schema.name in exsting_ops:\n continue\n\n if check_operation_version :\n # Generate operation of the latest version of your onnx.\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n\n # Add checks against version_dict\n if schema.name not in version_dict :\n print(\"Check-operation-version: Operation {} is new with version {}\"\n .format(schema.name, schema.since_version))\n elif schema.since_version > version_dict[schema.name][0]:\n print(\"Check-operation-version: Operation {}\"\n .format(schema.name)+\n \" has a newer version {} over old version {}\"\n .format(schema.since_version, version_dict[schema.name][0]))\n else:\n # Generate operation according to the version in version_dict.\n if schema.name not in version_dict :\n continue\n found = False\n vcounter = 0\n for schema in reversed(versions):\n # Check the version number against the version_dict\n specified_version = version_dict[schema.name][vcounter]\n if schema.since_version == specified_version:\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n found = True\n vcounter += 1\n if len(version_dict[schema.name]) == vcounter :\n break\n if not found:\n print(\"Your onnx installation may be too old. 
\"\n \"The desired version for operation {} is not found.\".format(\n schema.name))\n sys.exit()\n processed_supportmap.append((_support, processed_namemap))\n operator_schemas.append((domain, processed_supportmap))\n return operator_schemas\n\n\ndef main(args): # type: (Type[Args]) -> None\n curr_utc_time = datetime.datetime.now(\n datetime.timezone.utc).strftime(\"%m/%d/%Y, %H:%M:%S\")\n autogen_warning = (\n '//********************************************************\\n'\n '// Do not modify this file directly.\\n'\n '// This file is automatically generated via script.\\n'\n '// Details can be found in docs/ImportONNXDefs.md .\\n'\n '//********************************************************\\n\\n')\n autogen_warning = autogen_warning.format(curr_utc_time)\n\n op_def = args.op_def\n op_def.write(autogen_warning)\n\n op_importer = args.op_importer\n op_importer.write(autogen_warning)\n gen_op_versions(op_importer)\n\n new_version_dict = dict()\n for domain, supportmap in build_operator_schemas():\n for _, namemap in supportmap:\n # Generate Op with version number if not the latest version\n previous_name = \"\"\n for op_type, schema, versions in namemap:\n if check_operation_version:\n new_version_dict[schema.name] = [schema.since_version]\n else:\n with_version = previous_name == schema.name\n gen_op_importer(schema, op_importer, with_version)\n r = gen_op_def(schema, with_version)\n op_def.write(r)\n previous_name = schema.name\n if check_operation_version :\n for key in version_dict :\n if not key in new_version_dict :\n print(\"op {} is not in the version\".format(key))\n # Assume the top version will be upgreaded to the latest version\n # The existing extra version (from index 1) will be kept\n for x in version_dict[key][1:] :\n new_version_dict[key].append(x)\n pprint.pprint(new_version_dict)\n\nif __name__ == '__main__':\n curr_dir = os.path.dirname(os.path.realpath(__file__))\n\n class Args(object):\n dry_run = args.dry_run_onnx_ops or args.dry_run_op_build_table\n\n # If either dry_run_onnx_ops or dry_run_op_build_table is true, then treat both of them\n # as true. Otherwise, one of them runs as a dry-run and one of them runs as a real run\n # creating unnecessary artifacts in the wrong locations in the build tree.\n if dry_run:\n op_def = StringIO()\n op_importer = StringIO()\n else:\n op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')\n op_def = io.open(op_def_file_path, 'w', newline='')\n op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')\n op_importer = io.open(op_importer_file_path, 'w', newline='')\n main(Args)\n\n # This is based on diff.py from llvm-project (llvm\\utils\\lit\\lit\\builtin_commands\\diff.py).\n # On Windows, by default, stdout uses \\r\\n for newlines, however, all the files we compare against\n # use \\n. This piece of code forces the windows stdout to use \\n for newlines.\n if sys.platform == \"win32\":\n if hasattr(sys.stdout, 'buffer'):\n # python 3\n sys.stdout = io.TextIOWrapper(sys.stdout.buffer, newline='\\n')\n else:\n # python 2.7\n import msvcrt\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n\n # Only output the generated values for the specifically requested dry run.\n if args.dry_run_onnx_ops:\n sys.stdout.write(Args.op_def.getvalue())\n if args.dry_run_op_build_table:\n sys.stdout.write(Args.op_importer.getvalue())\n" ]
[ [ "numpy.round" ] ]
YLFF/2004P_Pytorch-Networks
[ "2d84fe1d904b17d0c55aa2a7a7dba82dea3dae05" ]
[ "3DCNN.py" ]
[ "# --------------------------------------------------------------------------- #\n# ResNet, CVPR2016 bestpaper, https://arxiv.org/abs/1512.03385\n# pytorch implementation by Haiyang Liu ([email protected])\n# --------------------------------------------------------------------------- #\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom config import cfg\nfrom utils import load_cfg,model_complexity\n\n\n__all__ = ['ResNet18','ResNet34','ResNet50','ResNet101','ResNet152']\n\n\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n\n def forward(self, x):\n return self.lambd(x)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self,in_dim,out_dim,stride=1,op=\"A\"):\n super(BasicBlock,self).__init__()\n self.subconv_1 = nn.Sequential(\n nn.Conv2d(in_dim,out_dim,3,stride,1,bias=False),\n nn.BatchNorm2d(out_dim),\n nn.ReLU(inplace=True),)\n self.subconv_2 = nn.Sequential(\n nn.Conv2d(out_dim,out_dim,3,1,1,bias=False),\n nn.BatchNorm2d(out_dim))\n if in_dim == out_dim and stride == 1:\n self.downsample = nn.Sequential()\n elif op == 'A':\n self.downsample =LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, out_dim//4, out_dim//4), \"constant\", 0))\n else:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_dim,out_dim,1,stride,0,bias=False),\n nn.BatchNorm2d(out_dim),\n )\n \n def forward(self,input_):\n x_0 = self.subconv_1(input_)\n x_1 = self.subconv_2(x_0)\n x_input = self.downsample(input_) \n x_final = F.relu(x_input + x_1,inplace=True)\n return x_final\n\n\nclass BottleNeck(nn.Module):\n expansion = 4\n def __init__(self,in_dim,out_dim,stride=1):\n super(BottleNeck,self).__init__()\n self.subconv_1 = nn.Sequential(\n nn.Conv2d(in_dim,int(out_dim/self.expansion),1,stride,0,bias=False),\n nn.BatchNorm2d(int(out_dim/self.expansion)),\n nn.ReLU(inplace=True),)\n self.subconv_2 = nn.Sequential(\n nn.Conv2d(int(out_dim/self.expansion),\n int(out_dim/self.expansion),3,1,1,bias=False),\n nn.BatchNorm2d(int(out_dim/self.expansion)),\n nn.ReLU(inplace=True),)\n self.subconv_3 = nn.Sequential(\n nn.Conv2d(int(out_dim/self.expansion),out_dim,1,1,0,bias=False),\n nn.BatchNorm2d(out_dim),)\n if in_dim == out_dim and stride == 1:\n self.downsample = None\n else:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_dim,out_dim,1,stride,0,bias=False),\n nn.BatchNorm2d(out_dim),\n )\n\n def forward(self,input_):\n x_input = input_\n x_0 = self.subconv_1(input_)\n x_1 = self.subconv_2(x_0)\n x_2 = self.subconv_3(x_1)\n if self.downsample is not None:\n x_input = self.downsample(input_)\n print(x_input.shape)\n x_final = F.relu(x_input+x_2,inplace=True)\n return x_final\n \n\nclass ResNet(nn.Module):\n def __init__(self, cfg, logger):\n '''\n block, BLOCK_LIST, in_dim, \n class_num, BASE=64, use_fc=True, CONV1=(7,2,3),\n MAX_POOL=True, pretrained=False\n '''\n super(ResNet,self).__init__()\n self.head_conv = nn.Sequential(\n nn.Conv2d(cfg.IN_DIM,cfg.BASE,cfg.CONV1[0],cfg.CONV1[1],cfg.CONV1[2],bias=False),\n nn.BatchNorm2d(cfg.BASE),\n nn.ReLU(inplace=True),)\n if cfg.MAX_POOL:\n self.maxpool_1 = nn.MaxPool2d(3,2,1)\n else:\n self.maxpool_1 = nn.Sequential()\n block = BottleNeck if cfg.BLOCK == 'bottleneck' else BasicBlock\n b_ = block.expansion\n self.layer_1 = self._make_layer(block,cfg.BASE,cfg.BASE*b_,cfg.BLOCK_LIST[0],1)\n self.layer_2 = self._make_layer(block,cfg.BASE*b_,cfg.BASE*2*b_,cfg.BLOCK_LIST[1],2)\n self.layer_3 = 
self._make_layer(block,cfg.BASE*2*b_,cfg.BASE*4*b_,cfg.BLOCK_LIST[2],2)\n self.layer_4 = self._make_layer(block,cfg.BASE*4*b_,cfg.BASE*8*b_,cfg.BLOCK_LIST[3],2)\n\n final_feature = cfg.BASE*4*b_ if cfg.BLOCK_LIST[3] == 0 else cfg.BASE*8*b_\n if cfg.USE_FC:\n self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))\n self.fc_1 = nn.Sequential(\n nn.Flatten(),\n nn.Linear(final_feature,cfg.CLASS_NUM),)\n else:\n self.avgpool_1 = nn.Sequential()\n self.fc_1 = nn.Sequential()\n self.logger = logger\n self.pretrained = cfg.PRETRAINED\n self._initialization()\n \n def _initialization(self):\n if self.pretrained is not False:\n self.modules.load_state_dict(model_zoo.load_url(model_urls[self.pretrained]))\n #TODO(liu):check it correct or not.\n else:\n for name, sub_module in self.named_modules():\n if isinstance(sub_module, nn.Conv2d) or isinstance(sub_module, nn.ConvTranspose2d) or \\\n isinstance(sub_module, nn.Linear):\n nn.init.kaiming_normal_(sub_module.weight)\n # nn.init.kaiming_normal_(sub_module.weight,mode='fan_out'\n # ,nonlinearity='relu')\n if self.logger is not None:\n self.logger.info('init {}.weight as kaiming_normal_'.format(name))\n if sub_module.bias is not None:\n nn.init.constant_(sub_module.bias, 0.0)\n if self.logger is not None:\n self.logger.info('init {}.bias as 0'.format(name))\n # elif isinstance(sub_module, nn.BatchNorm2d):\n # nn.init.constant_(sub_module.weight,1)\n # nn.init.constant_(sub_module.bias,0)\n # if self.logger is not None:\n # self.logger.info('init {}.weight as constant_ 1'.format(name))\n # self.logger.info('init {}.bias as constant_ 0'.format(name))\n \n def _make_layer(self,block,in_dim,out_dim,layer_num,stride):\n net_layers = []\n if layer_num == 0:\n return nn.Sequential()\n else: \n for layer in range(layer_num):\n if layer == 0:\n net_layers.append(block(in_dim,out_dim,stride))\n else:\n net_layers.append(block(out_dim,out_dim,1))\n return nn.Sequential(*net_layers)\n \n def forward(self,input_):\n x = self.head_conv(input_)\n x = self.maxpool_1(x)\n \n x = self.layer_1(x)\n \n x = self.layer_2(x)\n \n x = self.layer_3(x)\n \n x = self.layer_4(x)\n x = self.avgpool_1(x)\n x = self.fc_1(x)\n \n return x \n\n\nclass ThreeDCNN(nn.Module):\n def __init__(self,cfg,logger):\n super(ThreeDCNN,self).__init__()\n self.res1 = ResNet(cfg,logger)\n self.res2 = ResNet(cfg,logger)\n self.res3 = ResNet(cfg,logger)\n self.getheatmap_1 = nn.Conv2d(128,19,1,1,0)\n self.getheatmap_2 = nn.Conv2d(128,19,1,1,0)\n self.getheatmap_3 = nn.Conv2d(128,19,1,1,0)\n\n self.getdepth_1 = nn.Conv2d(128,1,1,1,0)\n self.getdepth_2 = nn.Conv2d(128,1,1,1,0)\n self.getdepth_3 = nn.Conv2d(128,1,1,1,0)\n\n self.tdcnn1 = nn.Conv3d(19,128,3,1,1)#b,in,d,h,w,\n self.tdcnn2 = nn.Conv3d(128,128,3,1,1)\n self.maxpool3d_1 = nn.MaxPool3d(3,1,0)\n self.tdcnn3 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn331 = nn.Conv3d(128,128,3,1,1)\n\n\n self.tdcnn332 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn333 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn334 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn335 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn336 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn337= nn.Conv3d(128,128,3,1,1)\n self.tdcnn338 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn339 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn3310 = nn.Conv3d(128,128,3,1,1)\n \n\n\n self.tdcnn4 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn5 = nn.Conv3d(128,19,3,1,1)\n\n\n self.tdcnn6 = nn.Conv3d(1,128,3,1,1)#b,in,d,h,w,\n self.tdcnn7 = nn.Conv3d(128,128,3,1,1)\n self.maxpool3d_2 = nn.MaxPool3d(3,1,0)\n self.tdcnn8 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn88 = 
nn.Conv3d(128,128,3,1,1)\n self.tdcnn9 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn10 = nn.Conv3d(128,1,3,1,1)\n\n def forward(self,x):\n x1 = x[:,0,:,:,:]\n x2 = x[:,1,:,:,:]\n x3 = x[:,2,:,:,:]\n\n output1 = self.res1(x1)\n output2 = self.res2(x2)\n output3 = self.res3(x3)\n \n #print(output1.shape)\n de_output1 = self.getdepth_1(output1)\n de_output2 = self.getdepth_2(output2)\n de_output3 = self.getdepth_3(output3)\n\n he_output1 = self.getheatmap_1(output1)#(b,19,h,w)\n he_output2 = self.getheatmap_2(output2)\n he_output3 = self.getheatmap_3(output3)\n \n he_3d = torch.cat((he_output1.unsqueeze(2),\n he_output2.unsqueeze(2),\n he_output3.unsqueeze(2)),dim=2)#(b,19,3,h,w)\n de_3d = torch.cat((de_output1.unsqueeze(2),\n de_output2.unsqueeze(2),\n de_output3.unsqueeze(2)),dim=2)\n \n he_3d = self.tdcnn1(he_3d)\n he_3d = self.tdcnn2(he_3d)\n he_3d = self.maxpool3d_1(he_3d)\n he_3d = self.tdcnn3(he_3d)\n he_3d = self.tdcnn331(he_3d)\n\n he_3d = self.tdcnn332(he_3d)\n he_3d = self.tdcnn333(he_3d)\n he_3d = self.tdcnn334(he_3d)\n he_3d = self.tdcnn335(he_3d)\n he_3d = self.tdcnn336(he_3d)\n he_3d = self.tdcnn337(he_3d)\n he_3d = self.tdcnn338(he_3d)\n he_3d = self.tdcnn339(he_3d)\n he_3d = self.tdcnn3310(he_3d)\n\n he_3d = self.tdcnn4(he_3d)\n he_3d = self.tdcnn5(he_3d)\n\n de_3d = self.tdcnn6(de_3d)\n de_3d = self.tdcnn7(de_3d)\n de_3d = self.maxpool3d_2(de_3d)\n de_3d = self.tdcnn8(de_3d)\n de_3d = self.tdcnn88(de_3d)\n de_3d = self.tdcnn9(de_3d)\n de_3d = self.tdcnn10(de_3d)\n \n return de_3d, he_3d\n\n \nif __name__ == \"__main__\":\n logger = load_cfg(cfg)\n model = ThreeDCNN(cfg.MODEL,logger).cuda()\n from ptflops import get_model_complexity_info\n flops, params = get_model_complexity_info(model, (3,3,368,368), \n as_strings=True, print_per_layer_stat=True)\n logger.info('{:<30} {:<8}'.format('Computational complexity: ', flops))\n logger.info('{:<30} {:<8}'.format('Number of parameters: ', params))\n \n fakeinput = torch.ones((8,3,3,368,368)).cuda()\n output = model(fakeinput)\n mem = torch.cuda.memory_cached() / 1E9\n print(mem)\n\n \n\n\n\n\n\n\n\n\n\n\n# ------------------------------- mistakes ---------------------------------- #\n# downsample also need add batchnorm\n# add first, then relu\n# add input, not first conv output.\n# no bias for all conv layers\n# when using /, need add int()\n# usually we use fin_in for LeCun and he init, here we use fan_out\n# ---------------------------------- end ------------------------------------ #\n\n\n# ---------------------------------- notes ---------------------------------- #\n# main idea: short cut connection\n# parameters: 2.5M Res50, 6M Res152, 1.1M Res20, BN+ReLU\n# sgd+momentum 1e-1 0.9 divide 10 * 3 \n# batch size 256\n# weight decay 1e-4\n# input: resize and crop samll side to 256×256 then augment to 224\n# output: linear 1000 + softmax\n# TODO: Check details in training,testing. 
bn-relu-conv?\n# TODO: Training check: False\n# ---------------------------------- end ------------------------------------ #\n\n\n# ------------------------- resnet18 model summary -------------------------- #\n# Layer (type) Output Shape Param #\n# ================================================================\n# Conv2d-1 [-1, 64, 112, 112] 9,408\n# BatchNorm2d-2 [-1, 64, 112, 112] 128\n# ReLU-3 [-1, 64, 112, 112] 0\n# MaxPool2d-4 [-1, 64, 56, 56] 0\n# Conv2d-5 [-1, 64, 56, 56] 36,864\n# BatchNorm2d-6 [-1, 64, 56, 56] 128\n# ReLU-7 [-1, 64, 56, 56] 0\n# Conv2d-8 [-1, 64, 56, 56] 36,864\n# ...\n# BatchNorm2d-54 [-1, 512, 7, 7] 1,024\n# ReLU-55 [-1, 512, 7, 7] 0\n# Conv2d-56 [-1, 512, 7, 7] 2,359,296\n# BatchNorm2d-57 [-1, 512, 7, 7] 1,024\n# BasicBlock-58 [-1, 512, 7, 7] 0\n# AdaptiveAvgPool2d-59 [-1, 512, 1, 1] 0\n# Flatten-60 [-1, 512] 0\n# Linear-61 [-1, 1000] 513,000\n# Softmax-62 [-1, 1000] 0\n# ================================================================\n# Total params: 11,689,512\n# Trainable params: 11,689,512\n# Non-trainable params: 0\n# ----------------------------------------------------------------\n# Input size (MB): 0.57\n# Forward/backward pass size (MB): 57.06\n# Params size (MB): 44.59\n# Estimated Total Size (MB): 102.23\n# ---------------------------------- end ------------------------------------ #" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.ones", "torch.nn.MaxPool3d", "torch.nn.Linear", "torch.nn.Flatten", "torch.cuda.memory_cached", "torch.nn.init.kaiming_normal_", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.pad", "torch.nn.init.constant_", "torch.nn.functional.relu", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.Conv3d" ] ]
ravinsinghd/opencv-basic
[ "b3b59b8808c739bc403b2ef7b499b03225bfab5f" ]
[ "draw.py" ]
[ "import numpy as np\nimport cv2\nimg=np.zeros((512,512,3),np.uint8)\ncv2.line(img,(0,0),(511,511),(255,0,0),5)\ncv2.rectangle(img,(384,0),(510,218),(0,255,0),3)\ncv2.circle(img,(447,63),63,(0,0,255),9)\ncv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)\npts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)\npts = pts.reshape((-1,1,2))\ncv2.polylines(img,[pts],False,(0,255,255))\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)\ncv2.imshow('image',img)\nk=cv2.waitKey(0)" ]
[ [ "numpy.array", "numpy.zeros" ] ]
w-kq/hhhh777
[ "1be3a333128edf7ab50ef2dc3b281d6f561d9fc0" ]
[ "SLME.py" ]
[ "from numpy import *\r\nimport matplotlib.pyplot as plt\r\nfrom io import BytesIO\r\n\r\ndata_am15 = open(\"AM15G.dat\",'r', encoding='utf-8') # solar spectrum\r\ndata_alpha = open(\"absorption.dat\", 'r',encoding='utf-8') #光吸收系数\r\nEg = 1.40 #带隙,单位eV\r\nL = 3 #输入厚度,单位为μm\r\nf = 1 #f,直接带隙为1,间接带隙需要修改\r\nL_max = 3 #厚度最大值,单位微米\r\nT = 300 #温度,单位K\r\ne = ev = 1.60217648740E-19\r\nh = 6.626068E-34\r\nc = 299792458\r\nh_ev = h / ev\r\nc_nm = c * 1E9\r\nPi = 3.1415926\r\nk = 1.3806505E-23 #单位J/K\r\nk_ev = k / ev\r\nPin = 1000 #太阳光输入效率\r\n\r\n\r\n#将太阳光谱数据转换成二维列表\r\nam15 = []\r\nfor line in data_am15:\r\n s = line.strip().split('\\t')\r\n s1 = ' '.join(s) + '\\n'\r\n s2 = s1.split()\r\n if s2 != []:\r\n am15.append([s2[0],s2[1]])\r\ndata_am15.close()\r\n\r\n#将光吸收系数变为二维列表\r\nalpha = []\r\nfor line in data_alpha:\r\n s = line.strip().split('\\t')\r\n s1 = ' '.join(s) + '\\n'\r\n s2 = s1.split()\r\n if s2 != []:\r\n alpha.append([float(s2[0]), float(s2[1])])\r\n\r\n\r\n# preparing the data for calculating slme\r\n# 差值过程,思路就是将光吸收与SLME的横坐标对标\r\ndata_in = []\r\n\r\nfor l in range(1, len(am15)) : #am15为太阳光谱\r\n# x = am15[l].split()\r\n hv = float(am15[l][0]) #波长,nm\r\n nhv = float(am15[l][1]) #入射能量\r\n for ll in range(len(alpha)-1) :\r\n if alpha[ll][0] <= hv and alpha[ll+1][0] >= hv :\r\n fact = (hv - alpha[ll][0])/(alpha[ll+1][0] - alpha[ll][0])\r\n tmp1 = alpha[ll][1]*(1-fact) + fact*alpha[ll+1][1]\r\n data_in.append([hv, nhv, tmp1])\r\n #数据内容分别为波长,太阳光入射能量,tmp1为光吸收系数\r\n break\r\n\r\ndat = open('data_in_1.dat','w',encoding='utf-8')\r\nfor i in range(len(data_in)):\r\n string = str(data_in[i][0]) + '\\t' + str(data_in[i][1]) + '\\t' + str(data_in[i][2]) +'\\n'\r\n# print(string)\r\n dat.write(string)\r\ndat.close()\r\n\r\ndef get_I(l,f=1,data_in=data_in):\r\n#产生短路电流和暗电流的函数,需要修改的参数有:l,厚度,单位微米;f,直接带隙为1,间接带隙需要修改\r\n\r\n Isc = 0.0\r\n I0 = 0.0\r\n L = l * 1E-4 # 厚度,单位微米\r\n\r\n for l in range(len(data_in) - 1):\r\n hv0 = data_in[l][0] # 积分单元矩阵左横坐标\r\n hv1 = data_in[l + 1][0] # 积分单元矩阵右横坐标\r\n #\r\n des1 = hv1 - hv0\r\n #\r\n aE0 = 1.0 - exp(-2.0 * L * data_in[l][2])\r\n aE1 = 1.0 - exp(-2.0 * L * data_in[l + 1][2])\r\n\r\n is0 = data_in[l][1] * (hv0 / h / c_nm) * aE0\r\n is1 = data_in[l + 1][1] * (hv1 / h / c_nm) * aE1\r\n\r\n Isc = Isc + e * (is0 + is1) * des1 / 2.0\r\n\r\n hv_0 = 1240 / hv0\r\n hv_1 = 1240 / hv1\r\n des2 = hv_0 - hv_1\r\n\r\n irb0 = 2 * Pi * hv_0 ** 2 / h_ev ** 3 / c ** 2 * (exp(-1 * hv_0 / k_ev / T)) * aE0\r\n irb1 = 2 * Pi * hv_1 ** 2 / h_ev ** 3 / c ** 2 * (exp(-1 * hv_1 / k_ev / T)) * aE1\r\n\r\n I0 = I0 + e * Pi / f * (irb0 + irb1) * des2 / 2.0\r\n\r\n return Isc, I0\r\n\r\ndef get_JVcurve(Isc, I0, Eg):\r\n#产生JV曲线的函数,需要用到get_I输出的参数,Eg为带隙,单位为eV\r\n I = []\r\n V = []\r\n npts = int(Eg / 0.001)\r\n for ll in range(npts):\r\n Vap = ll * 0.001\r\n i = Isc - I0 * (exp(Vap / k_ev / T) - 1)\r\n # print(I)\r\n I.append(i)\r\n V.append(Vap)\r\n if i <= 0:\r\n break\r\n\r\n plt.plot(V,I,'r', label='J-V curve')\r\n plt.ylim(0,Isc+50) # xlim、ylim:分别设置X、Y轴的显示范围\r\n plt.xlim(0,Vap+0.05)\r\n plt.title(\"JV curve\") # title:设置子图的标题\r\n plt.savefig('JV-curve.png')\r\n plt.show()\r\n\r\n dat = open('JV-curve.dat', 'w', encoding='utf-8')\r\n for i in range(len(I)):\r\n string = str(V[i]) + '\\t' + str(I[i]) + '\\t' + str(I[i]*V[i]) +'\\n'\r\n # print(string)\r\n dat.write(string)\r\n dat.close()\r\n\r\n print('JV-curve中的信息:')\r\n print('开路电压 = ', Vap)\r\n print('短路电流 = ', Isc)\r\n print('SLME =' + str(get_slme(Isc,I0)) + '\\t' + '厚度 = ' + str(L) + 'μm')\r\n\r\n return 0\r\n\r\ndef 
get_slme(Isc,I0):\r\n#计算SLME的函数,会同时打印出短路电流,开路电压和SLME数据,需要用到get_I的输出参数\r\n npts = int(Eg / 0.001)\r\n maxIV = 0\r\n IVtmp = 0\r\n for ll in range(npts):\r\n Vap = ll * 0.001\r\n I = Isc - I0 * (exp(Vap / k_ev / T) - 1)\r\n IVtmp = Vap * I\r\n # print(I)\r\n if IVtmp >= maxIV:\r\n maxIV = IVtmp\r\n elif I <= 0:\r\n break\r\n# print(\"短路电流 = \", Isc, \"A/m2\")\r\n# print(\"开路电压 = \", Vap, \"V\")\r\n slme = maxIV / Pin\r\n# print(\"SLME = \", slme)\r\n return slme\r\n\r\n\r\n#主函数部分\r\n#第一部分是画给定厚度的JV曲线,同时给出开路电压,短路电流和SLME\r\n\r\n\r\nIsc,I0 = get_I(l=L, f=f)\r\n#print(I0)\r\nget_JVcurve(Isc, I0, Eg)\r\nget_slme(Isc,I0)\r\n\r\n\r\n#第二部分是画SLME随厚度变化曲线,需要输入曲线中厚度最大值和曲线撒点数\r\n\r\nn = 100 #曲线撒的点\r\n\r\nnpts = int(L_max*n)\r\nY = []\r\nX = []\r\ndat = open('SLME-curve.dat', 'w', encoding='utf-8')\r\nslme = 0\r\nslme_max = 0\r\nfor i in range(npts+1):\r\n l = i / n\r\n Isc, I0 = get_I(l=l)\r\n# print(\"厚度 =\", l,\"μm\")\r\n slme = get_slme(Isc, I0)\r\n Y.append(slme)\r\n X.append(l)\r\n dat.write(str(l) + '\\t' + str(slme) + '\\n')\r\n if slme >= slme_max:\r\n slme_max = slme\r\n l_max = l\r\ndat.close()\r\nprint('SLME-curve内信息:')\r\nprint('SLME_max = ' + str(slme_max) + '\\t' + '厚度 = ' + str(l_max) + 'μm')\r\n\r\nplt.plot(X,Y)\r\nplt.ylim(0,Y[-1]+0.025) # xlim、ylim:分别设置X、Y轴的显示范围\r\nplt.xlim(0,L_max)\r\nplt.title(\"SLME curve\") # title:设置子图的标题\r\nplt.savefig('SLME-curve.png')\r\nplt.show()\r\n\r\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot" ] ]
WeiCheng302/image-segmentation-keras
[ "b34aef73f1a350cc0b4034eeb2dedd61642b9ccb" ]
[ "keras_segmentation/pretrained.py" ]
[ "import keras\nimport tensorflow as tf\nfrom .models.all_models import model_from_name\n\n\ndef model_from_checkpoint_path(model_config, latest_weights):\n\n model = model_from_name[model_config['model_class']](\n model_config['n_classes'], input_height=model_config['input_height'],\n input_width=model_config['input_width'])\n model.load_weights(latest_weights)\n return model\n\n\ndef resnet_pspnet_VOC12_v0_1():\n\n model_config = {\n \"output_height\": 96,\n \"input_height\": 384,\n \"input_width\": 576,\n \"n_classes\": 151,\n \"model_class\": \"resnet50_pspnet\",\n \"output_width\": 144\n }\n\n REPO_URL = \"https://github.com/divamgupta/image-segmentation-keras\"\n MODEL_PATH = \"pretrained_model_1/r2_voc12_resnetpspnet_384x576.24\"\n model_url = \"{0}/releases/download/{1}\".format(REPO_URL, MODEL_PATH)\n latest_weights = tf.keras.utils.get_file(model_url.split(\"/\")[-1], model_url)\n\n return model_from_checkpoint_path(model_config, latest_weights)\n\n\n# pretrained model converted from caffe by Vladkryvoruchko ... thanks !\ndef pspnet_50_ADE_20K():\n\n model_config = {\n \"input_height\": 473,\n \"input_width\": 473,\n \"n_classes\": 150,\n \"model_class\": \"pspnet_50\",\n }\n\n model_url = \"https://www.dropbox.com/s/\" \\\n \"0uxn14y26jcui4v/pspnet50_ade20k.h5?dl=1\"\n latest_weights = tf.keras.utils.get_file(\"pspnet50_ade20k.h5\", model_url)\n\n return model_from_checkpoint_path(model_config, latest_weights)\n\n\ndef pspnet_101_cityscapes():\n\n model_config = {\n \"input_height\": 713,\n \"input_width\": 713,\n \"n_classes\": 19,\n \"model_class\": \"pspnet_101\",\n }\n\n model_url = \"https://www.dropbox.com/s/\" \\\n \"c17g94n946tpalb/pspnet101_cityscapes.h5?dl=1\"\n latest_weights = tf.keras.utils.get_file(\"pspnet101_cityscapes.h5\", model_url)\n\n return model_from_checkpoint_path(model_config, latest_weights)\n\n\ndef pspnet_101_voc12():\n\n model_config = {\n \"input_height\": 473,\n \"input_width\": 473,\n \"n_classes\": 21,\n \"model_class\": \"pspnet_101\",\n }\n\n model_url = \"https://www.dropbox.com/s/\" \\\n \"uvqj2cjo4b9c5wg/pspnet101_voc2012.h5?dl=1\"\n latest_weights = tf.keras.utils.get_file(\"pspnet101_voc2012.h5\", model_url)\n\n return model_from_checkpoint_path(model_config, latest_weights)\n" ]
[ [ "tensorflow.keras.utils.get_file" ] ]