repo_name (string, 6–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
mscheltienne/pyprep | [
"82920228a1bfe46a8f8c04443547cc2726d3d189",
"82920228a1bfe46a8f8c04443547cc2726d3d189"
] | [
"pyprep/removeTrend.py",
"examples/run_ransac.py"
] | [
"\"\"\"High-pass filter and locally detrend the EEG signal.\"\"\"\nimport logging\n\nimport mne\nimport numpy as np\n\nfrom pyprep.utils import _eeglab_create_highpass, _eeglab_fir_filter\n\n\ndef removeTrend(\n EEG,\n sample_rate,\n detrendType=\"high pass\",\n detrendCutoff=1.0,\n detrendChannels=None,\n matlab_strict=False,\n):\n \"\"\"Remove trends (i.e., slow drifts in baseline) from an array of EEG data.\n\n Parameters\n ----------\n EEG : np.ndarray\n A 2-D array of EEG data to detrend.\n sample_rate : float\n The sample rate (in Hz) of the input EEG data.\n detrendType : str, optional\n Type of detrending to be performed: must be one of 'high pass',\n 'high pass sinc, or 'local detrend'. Defaults to 'high pass'.\n detrendCutoff : float, optional\n The high-pass cutoff frequency (in Hz) to use for detrending. Defaults\n to 1.0 Hz.\n detrendChannels : {list, None}, optional\n List of the indices of all channels that require detrending/filtering.\n If ``None``, all channels are used (default).\n matlab_strict : bool, optional\n Whether or not detrending should strictly follow MATLAB PREP's internal\n math, ignoring any improvements made in PyPREP over the original code\n (see :ref:`matlab-diffs` for more details). Defaults to ``False``.\n\n Returns\n -------\n EEG : np.ndarray\n A 2-D array containing the filtered/detrended EEG data.\n\n Notes\n -----\n High-pass filtering is implemented using the MNE filter function\n :func:``mne.filter.filter_data`` unless `matlab_strict` is ``True``, in\n which case it is performed using a minimal re-implementation of EEGLAB's\n ``pop_eegfiltnew``. Local detrending is performed using a Python\n re-implementation of the ``runline`` function from the Chronux package for\n MATLAB [1]_.\n\n References\n ----------\n .. 
[1] http://chronux.org/\n\n \"\"\"\n if len(EEG.shape) == 1:\n EEG = np.reshape(EEG, (1, EEG.shape[0]))\n\n if detrendType.lower() == \"high pass\":\n if matlab_strict:\n picks = detrendChannels if detrendChannels else range(EEG.shape[0])\n filt = _eeglab_create_highpass(detrendCutoff, sample_rate)\n EEG[picks, :] = _eeglab_fir_filter(EEG[picks, :], filt)\n else:\n EEG = mne.filter.filter_data(\n EEG,\n sfreq=sample_rate,\n l_freq=detrendCutoff,\n h_freq=None,\n picks=detrendChannels,\n )\n\n elif detrendType.lower() == \"high pass sinc\":\n fOrder = np.round(14080 * sample_rate / 512)\n fOrder = int(fOrder + fOrder % 2)\n EEG = mne.filter.filter_data(\n data=EEG,\n sfreq=sample_rate,\n l_freq=1,\n h_freq=None,\n picks=detrendChannels,\n filter_length=fOrder,\n fir_window=\"blackman\",\n )\n\n elif detrendType.lower() == \"local detrend\":\n if detrendChannels is None:\n detrendChannels = np.arange(0, EEG.shape[0])\n windowSize = 1.5 / detrendCutoff\n windowSize = np.minimum(windowSize, EEG.shape[1])\n stepSize = 0.02\n EEG = np.transpose(EEG)\n n = np.round(sample_rate * windowSize)\n dn = np.round(sample_rate * stepSize)\n\n if dn > n or dn < 1:\n logging.error(\n \"Step size should be less than the window size and \"\n \"contain at least 1 sample\"\n )\n if n == EEG.shape[0]:\n # data = scipy.signal.detrend(EEG, axis=0)\n pass\n else:\n for ch in detrendChannels:\n EEG[:, ch] = runline(EEG[:, ch], int(n), int(dn))\n EEG = np.transpose(EEG)\n\n else:\n logging.warning(\n \"No filtering/detreding performed since the detrend type did not match\"\n )\n\n return EEG\n\n\ndef runline(y, n, dn):\n \"\"\"Perform local linear regression on a channel of EEG data.\n\n A re-implementation of the ``runline`` function from the Chronux package\n for MATLAB [1]_.\n\n Parameters\n ----------\n y : np.ndarray\n A 1-D array of data from a single EEG channel.\n n : int\n Length of the detrending window.\n dn : int\n Length of the window step size.\n\n Returns\n -------\n y: np.ndarray\n The detrended signal for the given EEG channel.\n\n References\n ----------\n .. [1] http://chronux.org/\n\n \"\"\"\n nt = y.shape[0]\n y_line = np.zeros((nt, 1))\n norm = np.zeros((nt, 1))\n nwin = int(np.ceil((nt - n) / dn))\n yfit = np.zeros((nwin, n))\n xwt = (np.arange(1, n + 1) - n / 2) / (n / 2)\n wt = np.power(1 - np.power(np.absolute(xwt), 3), 3)\n for j in range(0, nwin):\n tseg = y[dn * j : dn * j + n]\n y1 = np.mean(tseg)\n y2 = np.mean(np.multiply(np.arange(1, n + 1), tseg)) * (2 / (n + 1))\n a = np.multiply(np.subtract(y2, y1), 6 / (n - 1))\n b = np.subtract(y1, a * (n + 1) / 2)\n yfit[j, :] = np.multiply(np.arange(1, n + 1), a) + b\n y_line[j * dn : j * dn + n] = y_line[j * dn : j * dn + n] + np.reshape(\n np.multiply(yfit[j, :], wt), (n, 1)\n )\n norm[j * dn : j * dn + n] = norm[j * dn : j * dn + n] + np.reshape(wt, (n, 1))\n\n for i in range(0, len(norm)):\n if norm[i] > 0:\n y_line[i] = y_line[i] / norm[i]\n indx = (nwin - 1) * dn + n - 1\n npts = len(y) - indx + 1\n y_line[indx - 1 :] = np.reshape(\n (np.multiply(np.arange(n + 1, n + npts + 1), a) + b), (npts, 1)\n )\n for i in range(0, len(y_line)):\n y[i] = y[i] - y_line[i]\n return y\n",
"\"\"\"\n=================\nRun RANSAC\n=================\n\n\nIn this example we show how to run the RANSAC of ``pyprep``.\n\n.. currentmodule:: pyprep\n\"\"\" # noqa: D205 D400\n\n# Authors: Yorguin Mantilla <[email protected]>\n#\n# License: MIT\n# Based On: use_noisy_module.py\n\n###############################################################################\n# First we import what we need for this example.\nimport numpy as np\nimport mne\nfrom scipy import signal as signal\nfrom time import perf_counter\n\nfrom pyprep.find_noisy_channels import NoisyChannels\n\n###############################################################################\n# Now let's make some arbitrary MNE raw object for demonstration purposes.\n# We will think of good channels as sine waves and bad channels correlated with\n# each other as sawtooths. The RANSAC will be biased towards sines in its\n# prediction (they are the majority) so it will identify the sawtooths as bad.\n# We will need to set a montage because the RANSAC needs to interpolate.\n\nsfreq = 1000.0\n\n# We need a montage, because RANSAC uses spherical splines for interpolation\nmontage = mne.channels.make_standard_montage(\"standard_1020\")\n\nch_names = montage.ch_names\n\nn_chans = len(ch_names)\n\ninfo = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=[\"eeg\"] * n_chans)\n\ntime = np.arange(0, 30, 1.0 / sfreq) # 30 seconds of recording\nn_bad_chans = 3\n\nrng = np.random.default_rng(42)\nbad_channels = rng.choice(np.arange(n_chans), n_bad_chans, replace=False)\nbad_channels = [int(i) for i in bad_channels]\nbad_ch_names = [ch_names[i] for i in bad_channels]\n\n# The frequency components to use in the signal for good and bad channels\nfreq_good = 20\nfreq_bad = 20\n\n# Generate the data\nX = [\n signal.sawtooth(2 * np.pi * freq_bad * time)\n if i in bad_channels\n else np.sin(2 * np.pi * freq_good * time)\n for i in range(n_chans)\n]\n# Scale the signal amplitude and add noise.\nX = 2e-5 * np.array(X) + 1e-5 * rng.random((n_chans, time.shape[0]))\n\nraw = mne.io.RawArray(X, info)\n\nraw.set_montage(montage, verbose=False)\n\n\n###############################################################################\n# Assign the mne object to the :class:`NoisyChannels` class. The resulting object\n# will be the place where all following methods are performed.\n\nnd = NoisyChannels(raw, random_state=1337)\nnd2 = NoisyChannels(raw, random_state=1337)\n\n###############################################################################\n# Find all bad channels using channel-wise RANSAC and print a summary\nstart_time = perf_counter()\nnd.find_bad_by_ransac(channel_wise=True)\nprint(\"--- %s seconds ---\" % (perf_counter() - start_time))\n\n# Repeat channel-wise RANSAC using a single channel at a time. This is slower\n# but needs less memory.\nstart_time = perf_counter()\nnd2.find_bad_by_ransac(channel_wise=True, max_chunk_size=1)\nprint(\"--- %s seconds ---\" % (perf_counter() - start_time))\n\n###############################################################################\n# Now the bad channels are saved in `bads` and we can continue processing our\n# `raw` object. For more information, we can access attributes of the ``nd``\n# instance:\n\n# Check channels that go bad together by correlation (RANSAC)\nprint(nd.bad_by_ransac)\nassert set(bad_ch_names) == set(nd.bad_by_ransac)\n\n# Check that the channel wise RANSAC yields identical results\nprint(nd2.bad_by_ransac)\nassert set(bad_ch_names) == set(nd2.bad_by_ransac)\n"
] | [
[
"numpy.absolute",
"numpy.minimum",
"numpy.multiply",
"numpy.reshape",
"numpy.arange",
"numpy.subtract",
"numpy.round",
"numpy.ceil",
"numpy.mean",
"numpy.transpose",
"numpy.zeros"
],
[
"numpy.arange",
"numpy.sin",
"scipy.signal.sawtooth",
"numpy.array",
"numpy.random.default_rng"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
FlorianBury/talos | [
"30f7af4d1f628364f8e8a2e983f72b2631cee6d9",
"30f7af4d1f628364f8e8a2e983f72b2631cee6d9"
] | [
"talos/parameters/ParamGrid.py",
"talos/commands/reporting.py"
] | [
"from numpy import arange, unique, array, column_stack, concatenate\nfrom itertools import product\n\nfrom ..reducers.sample_reducer import sample_reducer\n\n\nclass ParamGrid:\n\n '''Suite for handling parameters internally within Talos\n\n Takes as input the parameter dictionary from the user, and\n returns a class object which can then be used to pick parameters\n for each round together with other parameter related operations.\n\n '''\n\n def __init__(self, main_self):\n\n self.main_self = main_self\n\n # convert the input to useful format\n self._p = self._param_input_conversion()\n\n # build the parameter permutation grid\n self.param_grid = self._param_grid()\n \n # reduce according to downsample\n if self.main_self.grid_downsample is not None:\n self.param_grid = sample_reducer(self)\n\n # create a index for logging purpose\n self.param_log = list(range(len(self.param_grid)))\n\n # add the log index to param grid\n self.param_grid = column_stack((self.param_grid, self.param_log))\n\n # Repeat model a certain amount of times #\n for i in range(1,self.main_self.repetition):\n self.param_grid = concatenate((self.param_grid,self.param_grid),axis=0)\n\n\n\n\n def _param_grid(self):\n\n '''CREATE THE PARAMETER PERMUTATIONS\n\n This is done once before starting the experiment.\n Takes in the parameter dictionary, and returns\n every possible permutation in an array.\n '''\n\n ls = [list(self._p[key]) for key in self._p.keys()]\n _param_grid_out = array(list(product(*ls)), dtype='object')\n\n return _param_grid_out\n\n def _param_input_conversion(self):\n\n '''DETECT PARAM FORMAT\n\n Checks of the hyperparameter input format is list\n or tupple in the params dictionary and expands accordingly.\n\n '''\n\n out = {}\n\n for param in self.main_self.params.keys():\n\n # for range/step style input\n if isinstance(self.main_self.params[param], tuple):\n out[param] = self._param_range(self.main_self.params[param][0],\n self.main_self.params[param][1],\n self.main_self.params[param][2])\n # all other input styles\n else:\n out[param] = self.main_self.params[param]\n\n return out\n\n def _param_range(self, start, end, n):\n\n '''PARAMETER RANGE\n\n Deals with the format where a start, end\n and steps values are given for a parameter\n in a tuple format.\n\n This is called internally from param_format()\n '''\n\n try:\n out = arange(start, end, (end - start) / n, dtype=float)\n # this is for python2\n except ZeroDivisionError:\n out = arange(start, end, (end - start) / float(n), dtype=float)\n\n if type(start) == int and type(end) == int:\n out = out.astype(int)\n out = unique(out)\n\n return out\n",
"from pandas import read_csv\nfrom ..utils.connection_check import is_connected\n\nif is_connected() is True:\n from astetik import line, hist, corr, regs, bargrid, kde, box\n\nfrom ..metrics.names import metric_names\n\n\nclass Reporting:\n\n '''A suite of commands that are useful for analyzing the results\n of a completed scan, or during a scan.\n\n filename :: the name of the experiment log from Scan()'''\n\n def __init__(self, source=None):\n\n '''Takes as input a filename to the experiment\n log or the Scan object'''\n\n if isinstance(source, str):\n self.data = read_csv(source)\n else:\n self.data = source.data\n\n def high(self, metric='val_acc'):\n\n '''Returns the highest value for a given metric'''\n\n return max(self.data[metric])\n\n def rounds(self):\n\n '''Returns the number of rounds in the experiment'''\n\n return len(self.data)\n\n def rounds2high(self, metric='val_acc'):\n\n '''Returns the number of rounds it took to get to the\n highest value for a given metric.'''\n\n return self.data[self.data[metric] == self.data[metric].max()].index[0]\n\n def low(self, metric='val_acc'):\n\n '''Returns the minimum value for a given metric'''\n\n return min(self.data[metric])\n\n def correlate(self, metric='val_acc'):\n\n '''Returns a correlation table against a given metric. Drops\n all other metrics and correlates against hyperparameters only.'''\n\n columns = [c for c in self.data.columns if c not in metric_names()]\n out = self.data[columns]\n out.insert(0, metric, self.data[metric])\n out = out.corr()[metric]\n\n return out[out != 1]\n\n def plot_line(self, metric='val_acc'):\n\n '''A line plot for a given metric where rounds is on x-axis\n\n NOTE: remember to invoke %matplotlib inline if in notebook\n\n metric :: the metric to correlate against\n\n '''\n\n return line(self.data, metric)\n\n def plot_hist(self, metric='val_acc', bins=10):\n\n '''A histogram for a given metric\n\n NOTE: remember to invoke %matplotlib inline if in notebook\n\n metric :: the metric to correlate against\n bins :: number of bins to use in histogram\n\n '''\n\n return hist(self.data, metric, bins=bins)\n\n def plot_corr(self, metric='val_acc', color_grades=5):\n\n '''A heatmap with a single metric and hyperparameters.\n\n NOTE: remember to invoke %matplotlib inline if in notebook\n\n metric :: the metric to correlate against\n color_grades :: number of colors to use in heatmap'''\n\n cols = self._cols(metric)\n\n return corr(self.data[cols], color_grades=color_grades)\n\n def plot_regs(self, x='val_acc', y='val_loss'):\n\n '''A regression plot with data on two axis\n\n x = data for the x axis\n y = data for the y axis\n '''\n\n return regs(self.data, x, y)\n\n def plot_box(self, x, y='val_acc', hue=None):\n\n '''A box plot with data on two axis\n\n x = data for the x axis\n y = data for the y axis\n hue = data for the hue separation\n '''\n\n return box(self.data, x, y, hue)\n\n def plot_bars(self, x, y, hue, col):\n\n '''A comparison plot with 4 axis'''\n\n return bargrid(self.data,\n x=x,\n y=y,\n hue=hue,\n col=col,\n col_wrap=4)\n\n def plot_kde(self, x, y=None):\n\n '''Kernel Destiny Estimation type histogram with\n support for 1 or 2 axis of data'''\n\n return kde(self.data, x, y)\n\n def table(self, metric='val_acc', sort_by=None, ascending=False):\n\n '''Shows a table with hyperparameters and a given metric\n\n EXAMPLE USE:\n\n ra1 = Reporting('diabetes_1.csv')\n ra1.table(sort_by='fmeasure_acc', ascending=False)\n\n PARAMS:\n\n metric :: accepts single column name as string or multiple in 
list\n sort_by :: the colunm name sorting should be based on\n ascending :: if sorting is ascending or not\n\n '''\n\n cols = self._cols(metric)\n\n if sort_by is None:\n sort_by = metric\n\n out = self.data[cols].sort_values(sort_by, ascending=ascending)\n\n return out\n\n def best_params(self, metric='val_acc', n=10, ascending=False):\n\n '''Get the best parameters of the experiment based on a metric.\n Returns a numpy array with the values in a format that can be used\n with the talos backend in Scan(). Adds an index as the last column.'''\n\n cols = self._cols(metric)\n out = self.data[cols].sort_values(metric, ascending=ascending)\n out = out.drop(metric, axis=1).head(n)\n out.insert(out.shape[1], 'index_num', range(len(out)))\n\n return out.values\n\n def _cols(self, metric):\n\n '''Helper to remove other than desired metric from data table'''\n\n cols = [col for col in self.data.columns if col not in metric_names()]\n\n if isinstance(metric, list) is False:\n metric = [metric]\n for i, metric in enumerate(metric):\n cols.insert(i, metric)\n\n # make sure only unique values in col list\n cols = list(set(cols))\n\n return cols\n"
] | [
[
"numpy.concatenate",
"numpy.arange",
"numpy.unique",
"numpy.column_stack"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
quasiben/bokeh | [
"738343bd18c851dfd1fdc82cf35fe3eb4cdfd475",
"738343bd18c851dfd1fdc82cf35fe3eb4cdfd475"
] | [
"bokeh/charts/stats.py",
"tests/glyphs/Ellipse.py"
] | [
"\"\"\" Statistical methods used to define or modify position of glyphs.\n\nReferences:\n Wilkinson L. The Grammer of Graphics, sections 7, 7.1\n\nMethod Types:\n - Bin: Partitions a space before statistical calculation\n - Summary: Produces a single value comprising a statistical summary\n - Region: Produces two values bounding an interval.\n - Smooth: Produces values representing smoothed versions of the input data.\n - Link: Produces edges from pairs of nodes in a graph.\n\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport pandas as pd\n\nfrom bokeh.models.sources import ColumnDataSource\nfrom bokeh.core.properties import (HasProps, Float, Either, String, Date, Datetime, Int,\n Bool, List, Instance)\nfrom .properties import Column, EitherColumn, ColumnLabel\n\n\nclass Stat(HasProps):\n \"\"\"Represents a statistical operation to summarize a column of data.\n\n Can be computed from either a ColumnLabel with a ColumnDataSource, *or*, a\n discrete column of data.\n \"\"\"\n\n # inputs\n column = ColumnLabel(help=\"\"\"A column to use for the stat calculation. Required\n when providing a ColumnDataSource as input.\"\"\")\n source = Instance(ColumnDataSource, help=\"\"\"One option for providing the data\n source for stat calculation.\"\"\")\n values = EitherColumn(Column(Float), Column(Int), Column(String),\n Column(Date), Column(Datetime), Column(Bool), default=None, help=\"\"\"\n Second option for providing values for stat calculation is by\n passing the actual column of data.\"\"\")\n\n # output\n value = Float(help=\"\"\"The value calculated for the stat. Some stats could use\n multiple properties to provide the calculation if required.\"\"\")\n\n def __init__(self, **properties):\n\n source = properties.pop('source', None)\n if source is not None:\n if isinstance(source, pd.DataFrame):\n source = ColumnDataSource(source)\n properties['source'] = source\n\n super(Stat, self).__init__(**properties)\n self._refresh()\n\n def _refresh(self):\n \"\"\"Lazy update of properties, used for initial transform init.\"\"\"\n if self.get_data() is not None:\n self.update()\n self.calculate()\n\n def set_data(self, data, column=None):\n \"\"\"Set data properties and update all dependent properties.\"\"\"\n if isinstance(data, pd.DataFrame):\n data = ColumnDataSource(data)\n\n if isinstance(data, ColumnDataSource):\n self.source = data\n if column is not None:\n self.column = column\n else:\n self.values = data\n\n self.update()\n self.calculate()\n\n def get_data(self, column=None):\n \"\"\"Returns the available columnlabel/source values or column values.\"\"\"\n if self.source is not None and (self.column is not None or column is not None):\n if column is not None:\n col = column\n else:\n col = self.column\n\n return pd.Series(self.source.data[col])\n elif self.values is None and self.source is not None:\n return pd.Series(self.source.to_df().index)\n elif self.values is not None:\n return self.values\n else:\n return None\n\n def calculate(self):\n \"\"\"Return transformed value from column label/source or column-like data.\"\"\"\n raise NotImplementedError('You must implement the calculate method '\n 'for each stat type.')\n\n def update(self):\n \"\"\"Perform any initial work before the actual calculation is performed.\"\"\"\n pass\n\n\nclass Sum(Stat):\n def calculate(self):\n self.value = self.get_data().sum()\n\n\nclass Mean(Stat):\n def calculate(self):\n self.value = self.get_data().mean()\n\n\nclass Count(Stat):\n def calculate(self):\n self.value = 
self.get_data().count()\n\n\nclass CountDistinct(Stat):\n def calculate(self):\n self.value = self.get_data().nunique()\n\n\nclass Median(Stat):\n def calculate(self):\n self.value = self.get_data().median()\n\n\nclass StdDeviation(Stat):\n def calculate(self):\n self.value = self.get_data().std()\n\n\nclass Min(Stat):\n def calculate(self):\n self.value = self.get_data().min()\n\n\nclass Max(Stat):\n def calculate(self):\n self.value = self.get_data().max()\n\n\nclass Quantile(Stat):\n \"\"\"Produces the cutpoint that divides the input data by the interval.\n\n Quartiles are a special case of quartiles that divide a dataset into four\n equal-size groups. (https://en.wikipedia.org/wiki/Quantile)\n \"\"\"\n interval = Float(default=0.5)\n\n def calculate(self):\n self.value = self.get_data().quantile(self.interval)\n\n\nclass Bin(Stat):\n \"\"\"Represents a single bin of data values and attributes of the bin.\"\"\"\n label = Either(String, List(String))\n start = Either(Float, List(Float))\n stop = Either(Float, List(Float))\n\n start_label = String()\n stop_label = String()\n\n center = Either(Float, List(Float))\n\n stat = Instance(Stat, default=Count())\n width = Float()\n\n def __init__(self, bin_label, values=None, source=None, **properties):\n if isinstance(bin_label, tuple):\n bin_label = list(bin_label)\n else:\n bin_label = [bin_label]\n properties['label'] = bin_label\n\n bounds = self.process_bounds(bin_label)\n\n starts, stops = zip(*bounds)\n centers = [(start + stop)/2.0 for start, stop in zip(starts, stops)]\n if len(starts) == 1:\n starts = starts[0]\n stops = stops[0]\n centers = centers[0]\n else:\n starts = list(starts)\n stops = list(stops)\n centers = list(centers)\n\n properties['start'] = starts\n properties['stop'] = stops\n properties['center'] = centers\n properties['values'] = values\n super(Bin, self).__init__(**properties)\n\n @staticmethod\n def binstr_to_list(bins):\n \"\"\"Produce a consistent display of a bin of data.\"\"\"\n value_chunks = bins.split(',')\n value_chunks = [val.replace('[', '').replace(']', '').replace('(', '').replace(')', '') for val in value_chunks]\n bin_values = [float(value) for value in value_chunks]\n\n return bin_values[0], bin_values[1]\n\n def process_bounds(self, bin_label):\n if isinstance(bin_label, list):\n return [self.binstr_to_list(dim) for dim in bin_label]\n else:\n return [self.binstr_to_list(bin_label)]\n\n def update(self):\n self.stat.set_data(self.values)\n\n def calculate(self):\n self.value = self.stat.value\n\n\nclass BinStats(Stat):\n \"\"\"A set of statistical calculations for binning values.\n\n Bin counts using: https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule\n \"\"\"\n bins = Either(Int, Float, List(Float), default=None, help=\"\"\"\n If bins is an int, it defines the number of equal-width bins in the\n given range. 
If bins is a sequence, it defines the\n bin edges, including the rightmost edge, allowing for non-uniform\n bin widths.\n\n (default: None, use Freedman-Diaconis rule)\n \"\"\")\n bin_width = Float(default=None, help='Use Freedman-Diaconis rule if None.')\n q1 = Quantile(interval=0.25)\n q3 = Quantile(interval=0.75)\n labels = List(String)\n\n def __init__(self, values=None, column=None, **properties):\n properties['values'] = values\n properties['column'] = column or 'values'\n\n super(BinStats, self).__init__(**properties)\n\n def update(self):\n values = self.get_data()\n self.q1.set_data(values)\n self.q3.set_data(values)\n if self.bins is None:\n self.calc_num_bins(values)\n\n def calc_num_bins(self, values):\n \"\"\"Calculate optimal number of bins using IQR.\n\n From: http://stats.stackexchange.com/questions/114490/optimal-bin-width-for-two-dimensional-histogram\n\n \"\"\"\n iqr = self.q3.value - self.q1.value\n\n if iqr == 0:\n self.bin_width = np.sqrt(values.size)\n else:\n self.bin_width = 2 * iqr * (len(values) ** -(1. / 3.))\n\n self.bins = int(np.ceil((values.max() - values.min()) / self.bin_width))\n\n if self.bins <= 1:\n self.bins = 3\n\n def calculate(self):\n pass\n\n\nclass BinnedStat(Stat):\n \"\"\" Base class for shared functionality accross bins and aggregates\n dimensions for plotting.\n\n \"\"\"\n bin_stat = Instance(BinStats, help=\"\"\"\n A mapping between each dimension and associated binning calculations.\n \"\"\")\n\n bins = List(Instance(Bin), help=\"\"\"\n A list of the `Bin` instances that were produced as result of the inputs.\n Iterating over `Bins` will iterate over this list. Each `Bin` can be inspected\n for metadata about the bin and the values associated with it.\n \"\"\")\n\n stat = Instance(Stat, default=Count(), help=\"\"\"\n The statistical operation to be used on the values in each bin.\n \"\"\")\n\n bin_column = String()\n centers_column = String()\n\n aggregate = Bool(default=True)\n\n bin_values = Bool(default=False)\n\n bin_width = Float()\n\n def __init__(self, values=None, column=None, bins=None,\n stat='count', source=None, **properties):\n\n if isinstance(stat, str):\n stat = stats[stat]()\n\n properties['column'] = column or 'vals'\n properties['stat'] = stat\n properties['values'] = values\n properties['source'] = source\n self._bins = bins\n super(BinnedStat, self).__init__(**properties)\n\n\n def _get_stat(self):\n stat_kwargs = {}\n\n if self.source is not None:\n stat_kwargs['source'] = self.source\n stat_kwargs['column'] = self.column\n\n elif self.values is not None:\n stat_kwargs['values'] = self.values\n\n stat_kwargs['bins'] = self._bins\n\n return BinStats(**stat_kwargs)\n\n def update(self):\n self.bin_stat = self._get_stat()\n self.bin_stat.update()\n\n\nclass Bins(BinnedStat):\n \"\"\"Bins and aggregates dimensions for plotting.\n\n Takes the inputs and produces a list of bins that can be iterated over and\n inspected for their metadata. 
The bins provide easy access to consistent labeling,\n bounds, and values.\n \"\"\"\n\n def calculate(self):\n\n bin_str = '_bin'\n self.bin_column = self.column + bin_str\n bin_models = []\n\n data = self.bin_stat.get_data()\n bins = self.bin_stat.bins\n\n # Choose bin bounds when data range is ill-defined; pd.cut()\n # does not handle this well for values that are <= 0\n if data.size < 2:\n raise ValueError('Histogram data must have at least two elements.')\n if data.ndim == 1 and data.std() == 0:\n margin = 0.01 * abs(float(data[0])) or 0.01\n bins = np.linspace(data[0] - margin, data[0] + margin, bins+1)\n\n binned, bin_bounds = pd.cut(data, bins,\n retbins=True, include_lowest=True, precision=0)\n\n self.bin_width = np.round(bin_bounds[2] - bin_bounds[1], 1)\n\n if self.source is not None:\n # add bin column to data source\n self.source.add(binned.tolist(), name=self.bin_column)\n df = self.source.to_df()\n else:\n df = pd.DataFrame({self.column: self.values, self.bin_column: binned})\n\n for name, group in df.groupby(self.bin_column):\n bin_models.append(Bin(bin_label=name, values=group[self.column],\n stat=self.stat))\n\n self.bins = bin_models\n\n centers = binned.copy()\n centers = centers.astype(str)\n for bin in self.bins:\n centers[binned == bin.label] = bin.center\n\n self.centers_column = self.column + '_center'\n if self.source is not None:\n self.source.add(centers.tolist(), name=self.centers_column)\n else:\n df[self.centers_column] = centers\n\n def __getitem__(self, item):\n return self.bins[item]\n\n def apply(self, data):\n self.set_data(data.source)\n return self.source.to_df()\n\n def sort(self, ascending=True):\n if self.bins is not None:\n self.bins = list(sorted(self.bins, key=lambda x: x.center,\n reverse=~ascending))\n\n\nclass Histogram(BinnedStat):\n \"\"\"Bins and aggregates dimensions for plotting.\n\n Takes the inputs and produces a list of bins that can be iterated over and\n inspected for their metadata. The bins provide easy access to consistent labeling,\n bounds, and values.\n \"\"\"\n\n density = Bool(False, help=\"\"\"\n Whether to normalize the histogram.\n\n If True, the result is the value of the probability *density* function\n at the bin, normalized such that the *integral* over the range is 1. 
If\n False, the result will contain the number of samples in each bin.\n\n For more info check ``numpy.histogram`` function documentation.\n\n (default: False)\n \"\"\")\n\n def calculate(self):\n bin_str = '_bin'\n self.bin_column = self.column + bin_str\n\n data = self.bin_stat.get_data()\n bins = self.bin_stat.bins\n\n binned, bin_bounds = np.histogram(\n np.array(data), density=self.density, bins=bins\n )\n\n self.bin_width = np.round(bin_bounds[2] - bin_bounds[1], 1)\n self.bins = []\n\n for i, b in enumerate(binned):\n width = bin_bounds[i+1] - bin_bounds[i]\n if i == 0:\n lbl = \"[%.1f, %.1f]\" % (bin_bounds[i], bin_bounds[i+1])\n else:\n lbl = \"(%.1f, %.1f]\" % (bin_bounds[i], bin_bounds[i+1])\n self.bins.append(Bin(bin_label=lbl, values=[binned[i]], stat=Max(),\n width=width))\n\n\ndef bins(data, values=None, column=None, bins=None, labels=None,\n **kwargs):\n \"\"\"Specify binning or bins to be used for column or values.\"\"\"\n\n if isinstance(data, str):\n column = data\n values = None\n else:\n column = None\n\n return Bins(values=values, column=column, bins=bins, **kwargs)\n\n\nstats = {\n 'sum': Sum,\n 'mean': Mean,\n 'count': Count,\n 'nunique': CountDistinct,\n 'median': Median,\n 'stddev': StdDeviation,\n 'min': Min,\n 'max': Max,\n 'quantile': Quantile\n}\n",
"import numpy as np\n\nfrom bokeh.document import Document\nfrom bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid\nfrom bokeh.models.glyphs import Ellipse\nfrom bokeh.plotting import show\n\nN = 9\nx = np.linspace(-2, 2, N)\ny = x**2\nw = x/15.0 + 0.3\nh = y/20.0 + 0.3\n\nsource = ColumnDataSource(dict(x=x, y=y, w=w, h=h))\n\nxdr = DataRange1d()\nydr = DataRange1d()\n\nplot = Plot(\n title=None, x_range=xdr, y_range=ydr, plot_width=300, plot_height=300,\n h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)\n\nglyph = Ellipse(x=\"x\", y=\"y\", width=\"w\", height=\"h\", angle=-0.7, fill_color=\"#CAB2D6\")\nplot.add_glyph(source, glyph)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis, 'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ndoc = Document()\ndoc.add_root(plot)\n\nshow(doc)\n"
] | [
[
"numpy.sqrt",
"pandas.Series",
"numpy.linspace",
"pandas.DataFrame",
"numpy.round",
"pandas.cut",
"numpy.array"
],
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
floft/squeezeDet | [
"e7c0860eb1d141729cf02a2ec9cafc0cfb4a21aa"
] | [
"src/dataset/kitti.py"
] | [
"# Author: Bichen Wu ([email protected]) 08/25/2016\n\n\"\"\"Image data base class for kitti\"\"\"\n\nimport cv2\nimport os \nimport numpy as np\nimport subprocess\n\nfrom dataset.imdb import imdb\nfrom utils.util import bbox_transform_inv, batch_iou\n\nclass kitti(imdb):\n def __init__(self, image_set, data_path, mc):\n imdb.__init__(self, 'kitti_'+image_set, mc)\n self._image_set = image_set\n self._data_root_path = data_path\n self._image_path = os.path.join(self._data_root_path, 'training', 'image_2')\n self._label_path = os.path.join(self._data_root_path, 'training', 'label_2')\n self._classes = self.mc.CLASS_NAMES\n self._class_to_idx = dict(zip(self.classes, xrange(self.num_classes)))\n\n # a list of string indices of images in the directory\n self._image_idx = self._load_image_set_idx() \n # a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by\n # the image width and height\n self._rois = self._load_kitti_annotation()\n\n ## batch reader ##\n self._perm_idx = None\n self._cur_idx = 0\n # TODO(bichen): add a random seed as parameter\n self._shuffle_image_idx()\n\n self._eval_tool = './src/dataset/kitti-eval/cpp/evaluate_object'\n\n def _load_image_set_idx(self):\n image_set_file = os.path.join(\n self._data_root_path, 'ImageSets', self._image_set+'.txt')\n assert os.path.exists(image_set_file), \\\n 'File does not exist: {}'.format(image_set_file)\n\n with open(image_set_file) as f:\n image_idx = [x.strip() for x in f.readlines()]\n return image_idx\n\n def _image_path_at(self, idx):\n image_path = os.path.join(self._image_path, idx+'.png')\n assert os.path.exists(image_path), \\\n 'Image does not exist: {}'.format(image_path)\n return image_path\n\n def _load_kitti_annotation(self):\n def _get_obj_level(obj):\n height = float(obj[7]) - float(obj[5]) + 1\n truncation = float(obj[1])\n occlusion = float(obj[2])\n if height >= 40 and truncation <= 0.15 and occlusion <= 0:\n return 1\n elif height >= 25 and truncation <= 0.3 and occlusion <= 1:\n return 2\n elif height >= 25 and truncation <= 0.5 and occlusion <= 2:\n return 3\n else:\n return 4\n\n idx2annotation = {}\n for index in self._image_idx:\n filename = os.path.join(self._label_path, index+'.txt')\n with open(filename, 'r') as f:\n lines = f.readlines()\n f.close()\n bboxes = []\n for line in lines:\n obj = line.strip().split(' ')\n try:\n cls = self._class_to_idx[obj[0].lower().strip()]\n except:\n continue\n\n if self.mc.EXCLUDE_HARD_EXAMPLES and _get_obj_level(obj) > 3:\n continue\n xmin = float(obj[4])\n ymin = float(obj[5])\n xmax = float(obj[6])\n ymax = float(obj[7])\n assert xmin >= 0.0 and xmin <= xmax, \\\n 'Invalid bounding box x-coord xmin {} or xmax {} at {}.txt' \\\n .format(xmin, xmax, index)\n assert ymin >= 0.0 and ymin <= ymax, \\\n 'Invalid bounding box y-coord ymin {} or ymax {} at {}.txt' \\\n .format(ymin, ymax, index)\n x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])\n bboxes.append([x, y, w, h, cls])\n\n idx2annotation[index] = bboxes\n\n return idx2annotation\n\n def evaluate_detections(self, eval_dir, global_step, all_boxes):\n \"\"\"Evaluate detection results.\n Args:\n eval_dir: directory to write evaluation logs\n global_step: step of the checkpoint\n all_boxes: all_boxes[cls][image] = N x 5 arrays of \n [xmin, ymin, xmax, ymax, score]\n Returns:\n aps: array of average precisions.\n names: class names corresponding to each ap\n \"\"\"\n det_file_dir = os.path.join(\n eval_dir, 'detection_files_{:s}'.format(global_step), 'data')\n if not 
os.path.isdir(det_file_dir):\n os.makedirs(det_file_dir)\n\n for im_idx, index in enumerate(self._image_idx):\n filename = os.path.join(det_file_dir, index+'.txt')\n with open(filename, 'wt') as f:\n for cls_idx, cls in enumerate(self._classes):\n dets = all_boxes[cls_idx][im_idx]\n for k in xrange(len(dets)):\n f.write(\n '{:s} -1 -1 0.0 {:.2f} {:.2f} {:.2f} {:.2f} 0.0 0.0 0.0 0.0 0.0 '\n '0.0 0.0 {:.3f}\\n'.format(\n cls.lower(), dets[k][0], dets[k][1], dets[k][2], dets[k][3],\n dets[k][4])\n )\n\n cmd = self._eval_tool + ' ' \\\n + os.path.join(self._data_root_path, 'training') + ' ' \\\n + os.path.join(self._data_root_path, 'ImageSets',\n self._image_set+'.txt') + ' ' \\\n + os.path.dirname(det_file_dir) + ' ' + str(len(self._image_idx))\n\n print('Running: {}'.format(cmd))\n status = subprocess.call(cmd, shell=True)\n\n aps = []\n names = []\n for cls in self._classes:\n det_file_name = os.path.join(\n os.path.dirname(det_file_dir), 'stats_{:s}_ap.txt'.format(cls))\n if os.path.exists(det_file_name):\n with open(det_file_name, 'r') as f:\n lines = f.readlines()\n assert len(lines) == 3, \\\n 'Line number of {} should be 3'.format(det_file_name)\n\n aps.append(float(lines[0].split('=')[1].strip()))\n aps.append(float(lines[1].split('=')[1].strip()))\n aps.append(float(lines[2].split('=')[1].strip()))\n else:\n aps.extend([0.0, 0.0, 0.0])\n\n names.append(cls+'_easy')\n names.append(cls+'_medium')\n names.append(cls+'_hard')\n\n return aps, names\n\n def do_detection_analysis_in_eval(self, eval_dir, global_step):\n det_file_dir = os.path.join(\n eval_dir, 'detection_files_{:s}'.format(global_step), 'data')\n det_error_dir = os.path.join(\n eval_dir, 'detection_files_{:s}'.format(global_step),\n 'error_analysis')\n if not os.path.exists(det_error_dir):\n os.makedirs(det_error_dir)\n det_error_file = os.path.join(det_error_dir, 'det_error_file.txt')\n\n stats = self.analyze_detections(det_file_dir, det_error_file)\n ims = self.visualize_detections(\n image_dir=self._image_path,\n image_format='.png',\n det_error_file=det_error_file,\n output_image_dir=det_error_dir,\n num_det_per_type=10\n )\n\n return stats, ims\n\n def analyze_detections(self, detection_file_dir, det_error_file):\n def _save_detection(f, idx, error_type, det, score):\n f.write(\n '{:s} {:s} {:.1f} {:.1f} {:.1f} {:.1f} {:s} {:.3f}\\n'.format(\n idx, error_type,\n det[0]-det[2]/2., det[1]-det[3]/2.,\n det[0]+det[2]/2., det[1]+det[3]/2.,\n self._classes[int(det[4])], \n score\n )\n )\n\n # load detections\n self._det_rois = {}\n for idx in self._image_idx:\n det_file_name = os.path.join(detection_file_dir, idx+'.txt')\n with open(det_file_name) as f:\n lines = f.readlines()\n f.close()\n bboxes = []\n for line in lines:\n obj = line.strip().split(' ')\n cls = self._class_to_idx[obj[0].lower().strip()]\n xmin = float(obj[4])\n ymin = float(obj[5])\n xmax = float(obj[6])\n ymax = float(obj[7])\n score = float(obj[-1])\n\n x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])\n bboxes.append([x, y, w, h, cls, score])\n bboxes.sort(key=lambda x: x[-1], reverse=True)\n self._det_rois[idx] = bboxes\n\n # do error analysis\n num_objs = 0.\n num_dets = 0.\n num_correct = 0.\n num_loc_error = 0.\n num_cls_error = 0.\n num_bg_error = 0.\n num_repeated_error = 0.\n num_detected_obj = 0.\n\n with open(det_error_file, 'w') as f:\n for idx in self._image_idx:\n gt_bboxes = np.array(self._rois[idx])\n num_objs += len(gt_bboxes)\n detected = [False]*len(gt_bboxes)\n\n det_bboxes = self._det_rois[idx]\n if len(gt_bboxes) < 1:\n 
continue\n\n for i, det in enumerate(det_bboxes):\n if i < len(gt_bboxes):\n num_dets += 1\n ious = batch_iou(gt_bboxes[:, :4], det[:4])\n max_iou = np.max(ious)\n gt_idx = np.argmax(ious)\n if max_iou > 0.1:\n if gt_bboxes[gt_idx, 4] == det[4]:\n if max_iou >= 0.5:\n if i < len(gt_bboxes):\n if not detected[gt_idx]:\n num_correct += 1\n detected[gt_idx] = True\n else:\n num_repeated_error += 1\n else:\n if i < len(gt_bboxes):\n num_loc_error += 1\n _save_detection(f, idx, 'loc', det, det[5])\n else:\n if i < len(gt_bboxes):\n num_cls_error += 1\n _save_detection(f, idx, 'cls', det, det[5])\n else:\n if i < len(gt_bboxes):\n num_bg_error += 1\n _save_detection(f, idx, 'bg', det, det[5])\n\n for i, gt in enumerate(gt_bboxes):\n if not detected[i]:\n _save_detection(f, idx, 'missed', gt, -1.0)\n num_detected_obj += sum(detected)\n f.close()\n\n print ('Detection Analysis:')\n print (' Number of detections: {}'.format(num_dets))\n print (' Number of objects: {}'.format(num_objs))\n print (' Percentage of correct detections: {}'.format(\n num_correct/num_dets))\n print (' Percentage of localization error: {}'.format(\n num_loc_error/num_dets))\n print (' Percentage of classification error: {}'.format(\n num_cls_error/num_dets))\n print (' Percentage of background error: {}'.format(\n num_bg_error/num_dets))\n print (' Percentage of repeated detections: {}'.format(\n num_repeated_error/num_dets))\n print (' Recall: {}'.format(\n num_detected_obj/num_objs))\n\n out = {}\n out['num of detections'] = num_dets\n out['num of objects'] = num_objs\n out['% correct detections'] = num_correct/num_dets\n out['% localization error'] = num_loc_error/num_dets\n out['% classification error'] = num_cls_error/num_dets\n out['% background error'] = num_bg_error/num_dets\n out['% repeated error'] = num_repeated_error/num_dets\n out['% recall'] = num_detected_obj/num_objs\n\n return out\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZhangShiqiu1993/CSCI-567-machine-learning | [
"07144b299aeb9f29c304798549ef2d44fe1f4083"
] | [
"Assignment-3/decision_tree_test.py"
] | [
"import numpy as np\nfrom sklearn.metrics import accuracy_score\nimport json\n\nimport data_loader\nimport decision_tree\n\n# load data\nX_train, X_test, y_train, y_test = data_loader.discrete_2D_iris_dataset()\n\n# set classifier\ndTree = decision_tree.DecisionTree()\n\n# training\ndTree.train(X_train, y_train)\ny_est_train = dTree.predict(X_train)\ntrain_accu = accuracy_score(y_est_train, y_train)\nprint('train_accu', train_accu)\n\n# testing\ny_est_test = dTree.predict(X_test)\ntest_accu = accuracy_score(y_est_test, y_test)\nprint('test_accu', test_accu)\n\n\n\n# print\ndTree.print_tree()\n\n# save\njson.dump({'train_accu': train_accu, 'test_accu': test_accu},\n\t\t\topen('decision_tree.json', 'w'))"
] | [
[
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ashwinb/pytorch-lightning | [
"89787947304a0db3a98a1ddd0e818a91a924e43f",
"89787947304a0db3a98a1ddd0e818a91a924e43f"
] | [
"tests/models/test_gpu.py",
"pl_examples/domain_templates/reinforce_learn_Qnet.py"
] | [
"import os\n\nimport pytest\nimport torch\n\nimport tests.base.utils as tutils\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.core import memory\nfrom pytorch_lightning.trainer.distrib_parts import parse_gpu_ids, determine_root_gpu_device\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.base import EvalModelTemplate\n\nPRETEND_N_OF_GPUS = 16\n\n\[email protected]\[email protected](\"backend\", ['dp', 'ddp', 'ddp2'])\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_model(tmpdir, backend):\n \"\"\"Make sure DDP works.\"\"\"\n tutils.set_random_master_port()\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=[0, 1],\n distributed_backend=backend,\n )\n\n model = EvalModelTemplate()\n # tutils.run_model_test(trainer_options, model)\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n assert result\n\n # test memory helper functions\n memory.get_memory_profile('min_max')\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_ddp_all_dataloaders_passed_to_fit(tmpdir):\n \"\"\"Make sure DDP works with dataloaders passed to fit()\"\"\"\n tutils.set_random_master_port()\n\n trainer_options = dict(default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=[0, 1],\n distributed_backend='ddp')\n\n model = EvalModelTemplate()\n fit_options = dict(train_dataloader=model.train_dataloader(),\n val_dataloaders=model.val_dataloader())\n\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model, **fit_options)\n assert result == 1, \"DDP doesn't work with dataloaders passed to fit().\"\n\n\ndef test_cpu_slurm_save_load(tmpdir):\n \"\"\"Verify model save/load/checkpoint on CPU.\"\"\"\n hparams = EvalModelTemplate.get_default_hparams()\n model = EvalModelTemplate(hparams)\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n version = logger.version\n\n # fit model\n trainer = Trainer(\n max_epochs=1,\n logger=logger,\n checkpoint_callback=ModelCheckpoint(tmpdir)\n )\n result = trainer.fit(model)\n real_global_step = trainer.global_step\n\n # traning complete\n assert result == 1, 'cpu model failed to complete'\n\n # predict with trained model before saving\n # make a prediction\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n for batch in dataloader:\n break\n\n x, y = batch\n x = x.view(x.size(0), -1)\n\n model.eval()\n pred_before_saving = model(x)\n\n # test HPC saving\n # simulate snapshot on slurm\n saved_filepath = trainer.hpc_save(tmpdir, logger)\n assert os.path.exists(saved_filepath)\n\n # new logger file to get meta\n logger = tutils.get_default_logger(tmpdir, version=version)\n\n trainer = Trainer(\n max_epochs=1,\n logger=logger,\n checkpoint_callback=ModelCheckpoint(tmpdir),\n )\n model = EvalModelTemplate(hparams)\n\n # set the epoch start hook so we can predict before the model does the full training\n def assert_pred_same():\n assert trainer.global_step == real_global_step and trainer.global_step > 0\n\n # predict with loaded model to make sure answers are the same\n trainer.model.eval()\n new_pred = trainer.model(x)\n assert torch.all(torch.eq(pred_before_saving, 
new_pred)).item() == 1\n\n model.on_epoch_start = assert_pred_same\n\n # by calling fit again, we trigger training, loading weights from the cluster\n # and our hook to predict using current model before any more weight updates\n trainer.fit(model)\n\n\[email protected]\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_multi_gpu_none_backend(tmpdir):\n \"\"\"Make sure when using multiple GPUs the user can't use `distributed_backend = None`.\"\"\"\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n train_percent_check=0.1,\n val_percent_check=0.1,\n gpus='-1'\n )\n\n model = EvalModelTemplate()\n with pytest.warns(UserWarning):\n tutils.run_model_test(trainer_options, model)\n\n\[email protected]\ndef mocked_device_count(monkeypatch):\n def device_count():\n return PRETEND_N_OF_GPUS\n\n monkeypatch.setattr(torch.cuda, 'device_count', device_count)\n\n\[email protected]\ndef mocked_device_count_0(monkeypatch):\n def device_count():\n return 0\n\n monkeypatch.setattr(torch.cuda, 'device_count', device_count)\n\n\[email protected]_param_tests\[email protected]([\"gpus\", \"expected_num_gpus\", \"distributed_backend\"], [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(0, 0, None, id=\"Oth gpu, expect 1 gpu to use.\"),\n pytest.param(1, 1, None, id=\"1st gpu, expect 1 gpu to use.\"),\n pytest.param(-1, PRETEND_N_OF_GPUS, \"ddp\", id=\"-1 - use all gpus\"),\n pytest.param('-1', PRETEND_N_OF_GPUS, \"ddp\", id=\"'-1' - use all gpus\"),\n pytest.param(3, 3, \"ddp\", id=\"3rd gpu - 1 gpu to use (backend:ddp)\")\n])\ndef test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus\n\n\[email protected]_param_tests\[email protected]([\"gpus\", \"expected_num_gpus\", \"distributed_backend\"], [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(None, 0, \"ddp\", id=\"None - expect 0 gpu to use.\"),\n])\ndef test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).num_gpus == expected_num_gpus\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"O gpus, expect gpu root device to be None.\"),\n pytest.param(1, 0, \"ddp\", id=\"1 gpu, expect gpu root device to be 0.\"),\n pytest.param(-1, 0, \"ddp\", id=\"-1 - use all gpus, expect gpu root device to be 0.\"),\n pytest.param('-1', 0, \"ddp\", id=\"'-1' - use all gpus, expect gpu root device to be 0.\"),\n pytest.param(3, 0, \"ddp\", id=\"3 gpus, expect gpu root device to be 0.(backend:ddp)\")\n])\ndef test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distributed_backend):\n assert Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu == expected_root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(None, None, None, id=\"None is None\"),\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"None is None\"),\n])\ndef test_root_gpu_property_0_passing(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):\n assert Trainer(gpus=gpus, 
distributed_backend=distributed_backend).root_gpu == expected_root_gpu\n\n\n# Asking for a gpu when non are available will result in a MisconfigurationException\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu', \"distributed_backend\"], [\n pytest.param(1, None, \"ddp\"),\n pytest.param(3, None, \"ddp\"),\n pytest.param(3, None, \"ddp\"),\n pytest.param([1, 2], None, \"ddp\"),\n pytest.param([0, 1], None, \"ddp\"),\n pytest.param(-1, None, \"ddp\"),\n pytest.param('-1', None, \"ddp\")\n])\ndef test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):\n with pytest.raises(MisconfigurationException):\n Trainer(gpus=gpus, distributed_backend=distributed_backend).root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_root_gpu'], [\n pytest.param(None, None, id=\"No gpus, expect gpu root device to be None\"),\n pytest.param([0], 0, id=\"Oth gpu, expect gpu root device to be 0.\"),\n pytest.param([1], 1, id=\"1st gpu, expect gpu root device to be 1.\"),\n pytest.param([3], 3, id=\"3rd gpu, expect gpu root device to be 3.\"),\n pytest.param([1, 2], 1, id=\"[1, 2] gpus, expect gpu root device to be 1.\"),\n])\ndef test_determine_root_gpu_device(gpus, expected_root_gpu):\n assert determine_root_gpu_device(gpus) == expected_root_gpu\n\n\[email protected]_param_tests\[email protected](['gpus', 'expected_gpu_ids'], [\n pytest.param(None, None),\n pytest.param(0, None),\n pytest.param(1, [0]),\n pytest.param(3, [0, 1, 2]),\n pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id=\"-1 - use all gpus\"),\n pytest.param([0], [0]),\n pytest.param([1, 3], [1, 3]),\n pytest.param('0', [0]),\n pytest.param('3', [3]),\n pytest.param('1, 3', [1, 3]),\n pytest.param('2,', [2]),\n pytest.param('-1', list(range(PRETEND_N_OF_GPUS)), id=\"'-1' - use all gpus\"),\n])\ndef test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):\n assert parse_gpu_ids(gpus) == expected_gpu_ids\n\n\[email protected]_param_tests\[email protected](['gpus'], [\n pytest.param(0.1),\n pytest.param(-2),\n pytest.param(False),\n pytest.param([]),\n pytest.param([-1]),\n pytest.param([None]),\n pytest.param(['0']),\n pytest.param((0, 1)),\n])\ndef test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\[email protected](\"gpus\", [[1, 2, 19], -1, '-1'])\ndef test_parse_gpu_fail_on_non_existent_id(mocked_device_count_0, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n\n\[email protected]_param_tests\ndef test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids([1, 2, 19])\n\n\[email protected]_param_tests\[email protected](\"gpus\", [-1, '-1'])\ndef test_parse_gpu_returns_None_when_no_devices_are_available(mocked_device_count_0, gpus):\n with pytest.raises(MisconfigurationException):\n parse_gpu_ids(gpus)\n",
"\"\"\"\nDeep Reinforcement Learning: Deep Q-network (DQN)\n\nThis example is based on https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On-\nSecond-Edition/blob/master/Chapter06/02_dqn_pong.py\n\nThe template illustrates using Lightning for Reinforcement Learning. The example builds a basic DQN using the\nclassic CartPole environment.\n\nTo run the template just run:\npython reinforce_learn_Qnet.py\n\nAfter ~1500 steps, you will see the total_reward hitting the max score of 200. Open up TensorBoard to\nsee the metrics:\n\ntensorboard --logdir default\n\"\"\"\n\nimport pytorch_lightning as pl\n\nfrom typing import Tuple, List\n\nimport argparse\nfrom collections import OrderedDict, deque, namedtuple\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import IterableDataset\n\n\nclass DQN(nn.Module):\n \"\"\"\n Simple MLP network\n\n Args:\n obs_size: observation/state size of the environment\n n_actions: number of discrete actions available in the environment\n hidden_size: size of hidden layers\n \"\"\"\n\n def __init__(self, obs_size: int, n_actions: int, hidden_size: int = 128):\n super(DQN, self).__init__()\n self.net = nn.Sequential(\n nn.Linear(obs_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, n_actions)\n )\n\n def forward(self, x):\n return self.net(x.float())\n\n\n# Named tuple for storing experience steps gathered in training\nExperience = namedtuple(\n 'Experience', field_names=['state', 'action', 'reward',\n 'done', 'new_state'])\n\n\nclass ReplayBuffer:\n \"\"\"\n Replay Buffer for storing past experiences allowing the agent to learn from them\n\n Args:\n capacity: size of the buffer\n \"\"\"\n\n def __init__(self, capacity: int) -> None:\n self.buffer = deque(maxlen=capacity)\n\n def __len__(self) -> int:\n return len(self.buffer)\n\n def append(self, experience: Experience) -> None:\n \"\"\"\n Add experience to the buffer\n\n Args:\n experience: tuple (state, action, reward, done, new_state)\n \"\"\"\n self.buffer.append(experience)\n\n def sample(self, batch_size: int) -> Tuple:\n indices = np.random.choice(len(self.buffer), batch_size, replace=False)\n states, actions, rewards, dones, next_states = zip(*[self.buffer[idx] for idx in indices])\n\n return (np.array(states), np.array(actions), np.array(rewards, dtype=np.float32),\n np.array(dones, dtype=np.bool), np.array(next_states))\n\n\nclass RLDataset(IterableDataset):\n \"\"\"\n Iterable Dataset containing the ExperienceBuffer\n which will be updated with new experiences during training\n\n Args:\n buffer: replay buffer\n sample_size: number of experiences to sample at a time\n \"\"\"\n\n def __init__(self, buffer: ReplayBuffer, sample_size: int = 200) -> None:\n self.buffer = buffer\n self.sample_size = sample_size\n\n def __iter__(self) -> Tuple:\n states, actions, rewards, dones, new_states = self.buffer.sample(self.sample_size)\n for i in range(len(dones)):\n yield states[i], actions[i], rewards[i], dones[i], new_states[i]\n\n\nclass Agent:\n \"\"\"\n Base Agent class handeling the interaction with the environment\n\n Args:\n env: training environment\n replay_buffer: replay buffer storing experiences\n \"\"\"\n\n def __init__(self, env: gym.Env, replay_buffer: ReplayBuffer) -> None:\n self.env = env\n self.replay_buffer = replay_buffer\n self.reset()\n self.state = self.env.reset()\n\n def reset(self) -> None:\n \"\"\"Resets the 
environment and updates the state\"\"\"\n self.state = self.env.reset()\n\n def get_action(self, net: nn.Module, epsilon: float, device: str) -> int:\n \"\"\"\n Using the given network, decide what action to carry out\n using an epsilon-greedy policy\n\n Args:\n net: DQN network\n epsilon: value to determine likelihood of taking a random action\n device: current device\n\n Returns:\n action\n \"\"\"\n if np.random.random() < epsilon:\n action = self.env.action_space.sample()\n else:\n state = torch.tensor([self.state])\n\n if device not in ['cpu']:\n state = state.cuda(device)\n\n q_values = net(state)\n _, action = torch.max(q_values, dim=1)\n action = int(action.item())\n\n return action\n\n @torch.no_grad()\n def play_step(self, net: nn.Module, epsilon: float = 0.0, device: str = 'cpu') -> Tuple[float, bool]:\n \"\"\"\n Carries out a single interaction step between the agent and the environment\n\n Args:\n net: DQN network\n epsilon: value to determine likelihood of taking a random action\n device: current device\n\n Returns:\n reward, done\n \"\"\"\n\n action = self.get_action(net, epsilon, device)\n\n # do step in the environment\n new_state, reward, done, _ = self.env.step(action)\n\n exp = Experience(self.state, action, reward, done, new_state)\n\n self.replay_buffer.append(exp)\n\n self.state = new_state\n if done:\n self.reset()\n return reward, done\n\n\nclass DQNLightning(pl.LightningModule):\n \"\"\" Basic DQN Model \"\"\"\n\n def __init__(self, hparams: argparse.Namespace) -> None:\n super().__init__()\n self.hparams = hparams\n\n self.env = gym.make(self.hparams.env)\n obs_size = self.env.observation_space.shape[0]\n n_actions = self.env.action_space.n\n\n self.net = DQN(obs_size, n_actions)\n self.target_net = DQN(obs_size, n_actions)\n\n self.buffer = ReplayBuffer(self.hparams.replay_size)\n self.agent = Agent(self.env, self.buffer)\n self.total_reward = 0\n self.episode_reward = 0\n self.populate(self.hparams.warm_start_steps)\n\n def populate(self, steps: int = 1000) -> None:\n \"\"\"\n Carries out several random steps through the environment to initially fill\n up the replay buffer with experiences\n\n Args:\n steps: number of random steps to populate the buffer with\n \"\"\"\n for i in range(steps):\n self.agent.play_step(self.net, epsilon=1.0)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Passes in a state `x` through the network and gets the `q_values` of each action as an output\n\n Args:\n x: environment state\n\n Returns:\n q values\n \"\"\"\n output = self.net(x)\n return output\n\n def dqn_mse_loss(self, batch: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:\n \"\"\"\n Calculates the mse loss using a mini batch from the replay buffer\n\n Args:\n batch: current mini batch of replay data\n\n Returns:\n loss\n \"\"\"\n states, actions, rewards, dones, next_states = batch\n\n state_action_values = self.net(states).gather(1, actions.unsqueeze(-1)).squeeze(-1)\n\n with torch.no_grad():\n next_state_values = self.target_net(next_states).max(1)[0]\n next_state_values[dones] = 0.0\n next_state_values = next_state_values.detach()\n\n expected_state_action_values = next_state_values * self.hparams.gamma + rewards\n\n return nn.MSELoss()(state_action_values, expected_state_action_values)\n\n def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], nb_batch) -> OrderedDict:\n \"\"\"\n Carries out a single step through the environment to update the replay buffer.\n Then calculates loss based on the minibatch received\n\n Args:\n batch: current 
mini batch of replay data\n nb_batch: batch number\n\n Returns:\n Training loss and log metrics\n \"\"\"\n device = self.get_device(batch)\n epsilon = max(self.hparams.eps_end, self.hparams.eps_start -\n self.global_step + 1 / self.hparams.eps_last_frame)\n\n # step through environment with agent\n reward, done = self.agent.play_step(self.net, epsilon, device)\n self.episode_reward += reward\n\n # calculates training loss\n loss = self.dqn_mse_loss(batch)\n\n if done:\n self.total_reward = self.episode_reward\n self.episode_reward = 0\n\n # Soft update of target network\n if self.global_step % self.hparams.sync_rate == 0:\n self.target_net.load_state_dict(self.net.state_dict())\n\n log = {'total_reward': torch.tensor(self.total_reward).to(device),\n 'reward': torch.tensor(reward).to(device),\n 'steps': torch.tensor(self.global_step).to(device)}\n\n return OrderedDict({'loss': loss, 'log': log, 'progress_bar': log})\n\n def configure_optimizers(self) -> List[Optimizer]:\n \"\"\"Initialize Adam optimizer\"\"\"\n optimizer = optim.Adam(self.net.parameters(), lr=self.hparams.lr)\n return [optimizer]\n\n def __dataloader(self) -> DataLoader:\n \"\"\"Initialize the Replay Buffer dataset used for retrieving experiences\"\"\"\n dataset = RLDataset(self.buffer, self.hparams.episode_length)\n dataloader = DataLoader(dataset=dataset,\n batch_size=self.hparams.batch_size,\n sampler=None\n )\n return dataloader\n\n def train_dataloader(self) -> DataLoader:\n \"\"\"Get train loader\"\"\"\n return self.__dataloader()\n\n def get_device(self, batch) -> str:\n \"\"\"Retrieve device currently being used by minibatch\"\"\"\n return batch[0].device.index if self.on_gpu else 'cpu'\n\n\ndef main(hparams) -> None:\n model = DQNLightning(hparams)\n\n trainer = pl.Trainer(\n gpus=1,\n distributed_backend='dp',\n early_stop_callback=False,\n val_check_interval=100\n )\n\n trainer.fit(model)\n\n\nif __name__ == '__main__':\n torch.manual_seed(0)\n np.random.seed(0)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--batch_size\", type=int, default=16, help=\"size of the batches\")\n parser.add_argument(\"--lr\", type=float, default=1e-2, help=\"learning rate\")\n parser.add_argument(\"--env\", type=str, default=\"CartPole-v0\", help=\"gym environment tag\")\n parser.add_argument(\"--gamma\", type=float, default=0.99, help=\"discount factor\")\n parser.add_argument(\"--sync_rate\", type=int, default=10,\n help=\"how many frames do we update the target network\")\n parser.add_argument(\"--replay_size\", type=int, default=1000,\n help=\"capacity of the replay buffer\")\n parser.add_argument(\"--warm_start_size\", type=int, default=1000,\n help=\"how many samples do we use to fill our buffer at the start of training\")\n parser.add_argument(\"--eps_last_frame\", type=int, default=1000,\n help=\"what frame should epsilon stop decaying\")\n parser.add_argument(\"--eps_start\", type=float, default=1.0, help=\"starting value of epsilon\")\n parser.add_argument(\"--eps_end\", type=float, default=0.01, help=\"final value of epsilon\")\n parser.add_argument(\"--episode_length\", type=int, default=200, help=\"max length of an episode\")\n parser.add_argument(\"--max_episode_reward\", type=int, default=200,\n help=\"max episode reward in the environment\")\n parser.add_argument(\"--warm_start_steps\", type=int, default=1000,\n help=\"max episode reward in the environment\")\n\n args = parser.parse_args()\n\n main(args)\n"
] | [
[
"torch.eq",
"torch.cuda.device_count"
],
[
"numpy.random.random",
"torch.max",
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.ReLU",
"numpy.array",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
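Note on the DQN entry above: its training_step derives epsilon from global_step, but the expression as written reads eps_start - global_step + 1 / eps_last_frame, i.e. the parentheses around (global_step + 1) / eps_last_frame appear to be missing, so it does not yield a gradual decay. Below is a minimal sketch of a linear epsilon schedule under that assumption, using the script's default hyperparameters (eps_start=1.0, eps_end=0.01, eps_last_frame=1000); the helper name linear_epsilon is illustrative and not part of the original script.

# Hedged sketch: linear epsilon decay as the DQN script presumably intends.
# Defaults mirror the script's argparse values; the function itself is illustrative.
def linear_epsilon(global_step: int,
                   eps_start: float = 1.0,
                   eps_end: float = 0.01,
                   eps_last_frame: int = 1000) -> float:
    """Decay epsilon linearly from eps_start to eps_end over eps_last_frame steps."""
    fraction = min(1.0, (global_step + 1) / eps_last_frame)
    return max(eps_end, eps_start - fraction * (eps_start - eps_end))

With these defaults, step 0 gives roughly 1.0, step 500 roughly 0.5, and any step at or beyond eps_last_frame stays clamped at eps_end.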
VITA-Group/Peek-a-Boo | [
"9290d4e5e3aee0dff994e1a664ec91bd6ec93176"
] | [
"main_imagenet.py"
] | [
"import argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n# import torchvision.models as models\n\nimport logging\nfrom logger import set_logging_config\nimport models\nfrom bop import Bop\n\nfrom models.seed_conv import SeedConv2d\nfrom models.masked_psg_seed_conv import PredictiveSeedConv2d\nimport pruners\nfrom generator import masked_parameters\nfrom prune import prune_loop\n\n\nprint = print\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--optimizer', default='SGD', type=str,\n help='choose among [`SGD`, `BOP`, `Counter`]')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--savedir', default='results', type=str,\n help='root dir to save exp checkpoints and logs')\nparser.add_argument('--exp-name', default='SeedNet', type=str,\n help='path to location to save logs and checkpoints')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=100, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for 
initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n# SeedNet options\nparser.add_argument('--sign-grouped-dim', default=\"\", type=str,\n help='dimensions that will be grouped for sign parameters')\nparser.add_argument('--init-method', default='standard', type=str,\n help='initialization method for conv weights')\nparser.add_argument('--hidden-act', type=str, default='standard',\n help='choose among [`pruning`, `flipping`, `ternery`, `none`]')\nparser.add_argument('--scaling-input', action='store_true',\n help='whether scale the input in SeedNet models')\n# BOP options\nparser.add_argument('--ar', type=float,\n help='list of layer-wise inital adaptivity rates in BOP')\nparser.add_argument('--tau', type=float,\n help='list of layer-wise thresholds in BOP')\nparser.add_argument('--ar-decay-freq', type=int, default=100,\n help='freqency to decay the ar hyperparameter in BOP')\nparser.add_argument('--ar-decay-ratio', type=float, default=0.1,\n help='decay ratio when decay ar')\n# PSG options\nparser.add_argument('--psg-no-backward', action='store_true',\n help='Do predictive gradient calculation in backward')\nparser.add_argument('--msb-bits', type=int, default=4,\n help='MSB bits for the input')\nparser.add_argument('--msb-bits-weight', type=int, default=4,\n help='MSB bits for the weight')\nparser.add_argument('--msb-bits-grad', type=int, default=8,\n help='MSB bits for the grad')\nparser.add_argument('--psg-threshold', type=float, default=0.0,\n help='Threshold used in PSG')\nparser.add_argument('--psg-sparsify', action='store_true',\n help='Sparsify by ignoring small gradients')\nparser.add_argument('--psg-no-take-sign', action='store_true',\n help='Do not take sign for PSG')\n# Pruning options\nparser.add_argument('--pruner', type=str, default=None, choices=['Mag', 'SNIP', 'GraSP', 'SynFlow'],\n help='pruning strategy')\nparser.add_argument('--prune-epoch', type=int, default=0,\n help='epoch number to finish sparsifying by')\nparser.add_argument('--prune-ratio', type=float, default=1.0,\n help='fraction of non-zero parameters after pruning')\nparser.add_argument('--prune-iters', type=int, default=1,\n help='number of iterations for scoring (should be 1 for Mag, SNIP, and GraSP)')\nparser.add_argument('--prune-batch-size', type=int, default=256,\n help='size of sample mini-batch for pruning methods')\nparser.add_argument('--prune-schedule', type=str, default='exponential', choices=['linear', 'exponential'],\n help='scheduling method for iterative pruning (SynFlow)')\nparser.add_argument('--prune-scope', type=str, default='global', choices=['global', 'local'],\n help='masking scope')\nparser.add_argument('--prune-shots', type=int, default=1,\n help='number of shots for pruning')\n\nbest_acc1 = 0\n\n\ndef main():\n args = parser.parse_args()\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! 
'\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n args.gpu = gpu\n\n args.savedir = os.path.join(args.savedir, args.exp_name)\n if not os.path.isdir(args.savedir):\n os.makedirs(args.savedir)\n args.logger = set_logging_config(args.savedir)\n\n if args.gpu is not None:\n args.logger.info(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n # create model\n if args.pretrained:\n args.logger.info(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True)\n else:\n args.logger.info(\"=> creating model '{}'\".format(args.arch))\n if args.arch.startswith('seed_resnet'):\n pass\n if args.arch.startswith('psg'):\n model = models.__dict__[args.arch](\n init_method=args.init_method,\n predictive_backward = not args.psg_no_backward,\n msb_bits = args.msb_bits,\n msb_bits_weight = args.msb_bits_weight,\n msb_bits_grad = args.msb_bits_grad,\n threshold = args.psg_threshold,\n sparsify = args.psg_sparsify,\n sign = not args.psg_no_take_sign\n )\n temp_arch = args.arch[9:] if 'seed' in args.arch else args.arch[4:]\n model_for_pruning = models.__dict__[temp_arch](init_method=args.init_method)\n else:\n model = models.__dict__[args.arch](init_method=args.init_method)\n model_for_pruning = None\n\n if not torch.cuda.is_available():\n print('using CPU, this will be slow')\n elif args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n if model_for_pruning is not None:\n model_for_pruning.cuda(args.gpu)\n model_for_pruning = 
torch.nn.parallel.DistributedDataParallel(model_for_pruning, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n model_without_ddp = model.module\n if model_for_pruning is not None:\n model_for_pruning.cuda()\n model_for_pruning = torch.nn.parallel.DistributedDataParallel(model_for_pruning)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n\n if args.optimizer == 'SGD':\n parameters = [p for p in model_without_ddp.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(parameters, args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n bop_optimizer = None\n elif args.optimizer == 'BOP':\n bop_params, non_bop_params = model_without_ddp.get_bop_params(), model_without_ddp.get_non_bop_params()\n bop_param_masks = model_without_ddp.get_bop_param_masks()\n bop_dict = [{'params': bop_params, 'adaptivity_rate': args.ar, 'threshold': args.tau}]\n # optimizer = optim.SGD(non_bop_params, lr=args.lr, momentum=0.9, weight_decay=5e-4)\n optimizer = torch.optim.SGD(non_bop_params, args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n # bop_optimizer = Bop(bop_params, None, ar=args.ar, threshold=args.tau)\n bop_optimizer = Bop(bop_params, None, bop_param_masks, ar=args.ar, threshold=args.tau, device=args.gpu)\n # schedulers = (optim.lr_scheduler.MultiStepLR(non_bop_optimizer, milestones=[80, 120], gamma=0.1),)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n args.logger.info(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model_without_ddp.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n args.logger.info(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n args.logger.info(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n 
num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n sample_batch_indices = torch.randperm(len(train_dataset))[:100]\n sample_batch = torch.utils.data.Subset(train_dataset, sample_batch_indices)\n pruneloader = torch.utils.data.DataLoader(sample_batch, args.prune_batch_size, shuffle=True, num_workers=4)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion, args)\n return\n\n # Create pruner\n num_classes = 1000\n # if args.pruner:\n # pruner = pruners.__dict__[args.pruner](masked_parameters(model, False, False, False), num_classes)\n\n seed_convs = list(filter(lambda m: isinstance(m, (SeedConv2d, PredictiveSeedConv2d,)), model.modules()))\n cur_shot = 0\n prune_interval = int(args.prune_epoch / args.prune_shots)\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n if args.optimizer == 'BOP' and (epoch + 1) % args.ar_decay_freq == 0:\n bop_optimizer.decay_ar(args.ar_decay_ratio)\n\n # Enable gradients for pruning in SeedNet\n for seed_conv in seed_convs:\n seed_conv.enable_weight_grad()\n if args.pruner and epoch == (cur_shot + 1) * prune_interval and cur_shot < args.prune_shots:\n target_sparsity = 1 - (1 - args.prune_ratio) * (cur_shot + 1) / args.prune_shots\n if args.arch.lower().startswith('psg'):\n model_for_pruning.load_state_dict(model.state_dict(), strict=False)\n # pruner = pruners.__dict__[args.pruner](masked_parameters(model_for_pruning, False, False, False), num_classes)\n # prune_loop(model_for_pruning, criterion, pruner, pruneloader, num_classes, args.gpu, target_sparsity,\n # args.prune_schedule, args.prune_scope, args.prune_iters)\n prune_loop(model_for_pruning, criterion, args.pruner,\n pruneloader, num_classes, args.gpu, target_sparsity,\n args.prune_schedule, args.prune_scope, args.prune_iters,\n prune_bias=False, prune_batchnorm=False, prune_residual=False,\n weight_flips=None, score_threshold=None)\n model.load_state_dict(model_for_pruning.state_dict(), strict=False)\n else:\n # prune_loop(model, criterion, pruner, pruneloader, num_classes, args.gpu, target_sparsity,\n # args.prune_schedule, args.prune_scope, args.prune_iters)\n prune_loop(model, criterion, args.pruner,\n pruneloader, num_classes, args.gpu, target_sparsity,\n args.prune_schedule, args.prune_scope, args.prune_iters,\n prune_bias=False, prune_batchnorm=False, prune_residual=False,\n weight_flips=None, score_threshold=None)\n # Really copy the mask to the model\n # with torch.no_grad():\n # pruned_masks = [m for m, _ in pruner.masked_parameters]\n # model_masks = [m for m, _ in masked_parameters(model, False, False, False)]\n # for model_mask, pruned_mask in zip(model_masks, pruned_masks):\n # model_mask.copy_(pruned_mask.data.detach().clone())\n # Disable gradients when resuming training for SeedNet\n for seed_conv in seed_convs:\n seed_conv.disable_weight_grad()\n cur_shot += 1\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args, bop_optimizer=bop_optimizer)\n\n # evaluate on validation set\n acc1, acc5 = validate(val_loader, model, criterion, args)\n if args.gpu == 0:\n args.logger.info('epoch {} \\t Top-1 acc {} \\t Top-5 acc {}'.format(epoch + 1, acc1, 
acc5))\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n args.logger.info(f'Max accuracy: {best_acc1}')\n best_acc1_acc5 = acc5\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model_without_ddp.state_dict(),\n 'best_acc1': best_acc1,\n 'acc5': best_acc1_acc5,\n 'optimizer' : optimizer.state_dict(),\n }, is_best)\n\n args.logger.info('best Top-1 acc {} \\t corresponding Top-5 acc {}'.format(best_acc1, best_acc1_acc5))\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args, bop_optimizer=None):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n if bop_optimizer is not None:\n bop_optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if bop_optimizer is not None:\n bop_optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if args.gpu == 0 and i % args.print_freq == 0:\n progress.display(i)\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if args.gpu == 0 and i % args.print_freq == 0:\n progress.display(i)\n\n # TODO: this should also be done with the ProgressMeter\n top1.synchronize()\n top5.synchronize()\n # args.logger.info(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n # .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n 
shutil.copyfile(filename, 'model_best.pth.tar')\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def synchronize(self):\n \"\"\"\n Warning: does not synchronize `val`\n \"\"\"\n t = torch.tensor([self.sum, self.count], dtype=torch.float64, device='cuda')\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.sum = float(t[0])\n self.count = int(t[1])\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"torch.multiprocessing.spawn",
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.Subset",
"torch.save",
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.barrier",
"torch.tensor",
"torch.optim.SGD",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.nn.DataParallel",
"torch.distributed.all_reduce"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
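Note on the Peek-a-Boo entry above: when one process is spawned per GPU, main_worker divides the node-level batch size and data-loader worker count by ngpus_per_node before building each DistributedDataParallel process's DataLoader. A minimal sketch of that split follows; the function and argument names (per_process_loader_args, node_batch_size, node_workers) are illustrative, not taken from the script.

# Hedged sketch of the per-process batch-size/worker split used with
# one DistributedDataParallel process per GPU.
def per_process_loader_args(node_batch_size: int, node_workers: int, ngpus_per_node: int):
    """Divide a node-level batch size and worker count among ngpus_per_node processes."""
    batch_size = node_batch_size // ngpus_per_node
    workers = (node_workers + ngpus_per_node - 1) // ngpus_per_node  # ceiling division
    return batch_size, workers

For example, a node-level batch size of 256 on an 8-GPU node gives each process a mini-batch of 32 with 1 worker, matching the script's int(args.batch_size / ngpus_per_node) and int((args.workers + ngpus_per_node - 1) / ngpus_per_node).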
annagitel/ocs-ci | [
"284fe04aeb6e3d6cb70c99e65fec8ff1b1ea1dd5"
] | [
"ocs_ci/utility/utils.py"
] | [
"from functools import reduce\nimport io\nimport json\nimport logging\nimport os\nimport platform\nimport random\nimport re\nimport shlex\nimport smtplib\nimport string\nimport subprocess\nimport time\nimport traceback\nimport stat\nfrom copy import deepcopy\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom scipy.stats import tmean, scoreatpercentile\nfrom shutil import which, move, rmtree\n\nimport hcl2\nimport requests\nimport yaml\nimport git\nfrom bs4 import BeautifulSoup\nfrom paramiko import SSHClient, AutoAddPolicy\nfrom paramiko.auth_handler import AuthenticationException, SSHException\nfrom semantic_version import Version\nfrom tempfile import NamedTemporaryFile, mkdtemp\n\nfrom ocs_ci.framework import config\nfrom ocs_ci.ocs import constants, defaults\nfrom ocs_ci.ocs.exceptions import (\n CephHealthException,\n ClientDownloadError,\n CommandFailed,\n TagNotFoundException,\n TimeoutException,\n TimeoutExpiredError,\n UnavailableBuildException,\n UnexpectedImage,\n UnsupportedOSType,\n)\nfrom ocs_ci.utility import version as version_module\nfrom ocs_ci.utility.flexy import load_cluster_info\nfrom ocs_ci.utility.retry import retry\n\n\nlog = logging.getLogger(__name__)\n\n# variables\nmounting_dir = \"/mnt/cephfs/\"\nclients = []\nmd5sum_list1 = []\nmd5sum_list2 = []\nfuse_clients = []\nkernel_clients = []\nmon_node = \"\"\nmon_node_ip = \"\"\nmds_nodes = []\nmd5sum_file_lock = []\nactive_mdss = []\nRC = []\nfailure = {}\noutput = []\nunique_test_names = []\n\n\n# function for getting the clients\ndef get_client_info(ceph_nodes, clients):\n log.info(\"Getting Clients\")\n for node in ceph_nodes:\n if node.role == \"client\":\n clients.append(node)\n # Identifying MON node\n for node in ceph_nodes:\n if node.role == \"mon\":\n mon_node = node\n out, err = mon_node.exec_command(cmd=\"sudo hostname -I\")\n mon_node_ip = out.read().decode().rstrip(\"\\n\")\n break\n for node in ceph_nodes:\n if node.role == \"mds\":\n mds_nodes.append(node)\n for node in clients:\n node.exec_command(cmd=\"sudo yum install -y attr\")\n\n fuse_clients = clients[0:2] # seperating clients for fuse and kernel\n kernel_clients = clients[2:4]\n return (\n fuse_clients,\n kernel_clients,\n mon_node,\n mounting_dir,\n mds_nodes,\n md5sum_file_lock,\n mon_node_ip,\n )\n\n\n# function for providing authorization to the clients from MON ndoe\ndef auth_list(clients, mon_node):\n for node in clients:\n log.info(\"Giving required permissions for clients from MON node:\")\n mon_node.exec_command(\n cmd=\"sudo ceph auth get-or-create client.%s mon 'allow *' mds 'allow *, allow rw path=/' \"\n \"osd 'allow rw pool=cephfs_data' -o /etc/ceph/ceph.client.%s.keyring\"\n % (node.hostname, node.hostname)\n )\n out, err = mon_node.exec_command(\n sudo=True, cmd=\"cat /etc/ceph/ceph.client.%s.keyring\" % (node.hostname)\n )\n keyring = out.read().decode()\n key_file = node.write_file(\n sudo=True,\n file_name=\"/etc/ceph/ceph.client.%s.keyring\" % (node.hostname),\n file_mode=\"w\",\n )\n key_file.write(keyring)\n\n key_file.flush()\n\n node.exec_command(\n cmd=\"sudo chmod 644 /etc/ceph/ceph.client.%s.keyring\" % (node.hostname)\n )\n # creating mounting directory\n node.exec_command(cmd=\"sudo mkdir %s\" % (mounting_dir))\n\n\n# MOunting single FS with ceph-fuse\ndef fuse_mount(fuse_clients, mounting_dir):\n try:\n for client in fuse_clients:\n log.info(\"Creating mounting dir:\")\n log.info(\"Mounting fs with ceph-fuse on client %s:\" % (client.hostname))\n client.exec_command(\n 
cmd=\"sudo ceph-fuse -n client.%s %s\" % (client.hostname, mounting_dir)\n )\n out, err = client.exec_command(cmd=\"mount\")\n mount_output = out.read().decode()\n mount_output.split()\n log.info(\"Checking if fuse mount is is passed of failed:\")\n if \"fuse\" in mount_output:\n log.info(\"ceph-fuse mounting passed\")\n else:\n log.error(\"ceph-fuse mounting failed\")\n return md5sum_list1\n except Exception as e:\n log.error(e)\n\n\ndef kernel_mount(mounting_dir, mon_node_ip, kernel_clients):\n try:\n for client in kernel_clients:\n out, err = client.exec_command(\n cmd=\"sudo ceph auth get-key client.%s\" % (client.hostname)\n )\n secret_key = out.read().decode().rstrip(\"\\n\")\n mon_node_ip = mon_node_ip.replace(\" \", \"\")\n client.exec_command(\n cmd=\"sudo mount -t ceph %s:6789:/ %s -o name=%s,secret=%s\"\n % (mon_node_ip, mounting_dir, client.hostname, secret_key)\n )\n out, err = client.exec_command(cmd=\"mount\")\n mount_output = out.read().decode()\n mount_output.split()\n log.info(\"Checking if kernel mount is is passed of failed:\")\n if \"%s:6789:/\" % (mon_node_ip) in mount_output:\n log.info(\"kernel mount passed\")\n else:\n log.error(\"kernel mount failed\")\n return md5sum_list2\n except Exception as e:\n log.error(e)\n\n\ndef fuse_client_io(client, mounting_dir):\n try:\n rand_count = random.randint(1, 5)\n rand_bs = random.randint(100, 300)\n log.info(\"Performing IOs on fuse-clients\")\n client.exec_command(\n cmd=\"sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d\"\n % (mounting_dir, client.hostname, rand_bs, rand_count),\n long_running=True,\n )\n except Exception as e:\n log.error(e)\n\n\ndef kernel_client_io(client, mounting_dir):\n try:\n rand_count = random.randint(1, 6)\n rand_bs = random.randint(100, 500)\n log.info(\"Performing IOs on kernel-clients\")\n client.exec_command(\n cmd=\"sudo dd if=/dev/zero of=%snewfile_%s bs=%dM count=%d\"\n % (mounting_dir, client.hostname, rand_bs, rand_count),\n long_running=True,\n )\n except Exception as e:\n log.error(e)\n\n\ndef fuse_client_md5(fuse_clients, md5sum_list1):\n try:\n log.info(\"Calculating MD5 sums of files in fuse-clients:\")\n for client in fuse_clients:\n md5sum_list1.append(\n client.exec_command(\n cmd=\"sudo md5sum %s* | awk '{print $1}' \" % (mounting_dir),\n long_running=True,\n )\n )\n\n except Exception as e:\n log.error(e)\n\n\ndef kernel_client_md5(kernel_clients, md5sum_list2):\n try:\n log.info(\"Calculating MD5 sums of files in kernel-clients:\")\n for client in kernel_clients:\n md5sum_list2.append(\n client.exec_command(\n cmd=\"sudo md5sum %s* | awk '{print $1}' \" % (mounting_dir),\n long_running=True,\n )\n )\n except Exception as e:\n log.error(e)\n\n\n# checking file locking mechanism\ndef file_locking(client):\n try:\n to_lock_file = \"\"\"\nimport fcntl\nimport subprocess\nimport time\ntry:\n f = open('/mnt/cephfs/to_test_file_lock', 'w+')\n fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n print \"locking file:--------------------------------\"\n subprocess.check_output([\"sudo\",\"dd\",\"if=/dev/zero\",\"of=/mnt/cephfs/to_test_file_lock\",\"bs=1M\",\"count=2\"])\nexcept IOError as e:\n print e\nfinally:\n print \"Unlocking file:------------------------------\"\n fcntl.lockf(f,fcntl.LOCK_UN)\n \"\"\"\n to_lock_code = client.write_file(\n sudo=True, file_name=\"/home/cephuser/file_lock.py\", file_mode=\"w\"\n )\n to_lock_code.write(to_lock_file)\n to_lock_code.flush()\n out, err = client.exec_command(cmd=\"sudo python /home/cephuser/file_lock.py\")\n output = 
out.read().decode()\n output.split()\n if \"Errno 11\" in output:\n log.info(\"File locking achieved, data is not corrupted\")\n elif \"locking\" in output:\n log.info(\"File locking achieved, data is not corrupted\")\n else:\n log.error(\"Data is corrupted\")\n\n out, err = client.exec_command(\n cmd=\"sudo md5sum %sto_test_file_lock | awk '{print $1}'\" % (mounting_dir)\n )\n\n md5sum_file_lock.append(out.read().decode())\n\n except Exception as e:\n log.error(e)\n\n\ndef activate_multiple_mdss(mds_nodes):\n try:\n log.info(\"Activating Multiple MDSs\")\n for node in mds_nodes:\n out1, err = node.exec_command(\n cmd=\"sudo ceph fs set cephfs allow_multimds true --yes-i-really-mean-it\"\n )\n out2, err = node.exec_command(cmd=\"sudo ceph fs set cephfs max_mds 2\")\n break\n\n except Exception as e:\n log.error(e)\n\n\ndef mkdir_pinning(clients, range1, range2, dir_name, pin_val):\n try:\n log.info(\"Creating Directories and Pinning to MDS %s\" % (pin_val))\n for client in clients:\n for num in range(range1, range2):\n out, err = client.exec_command(\n cmd=\"sudo mkdir %s%s_%d\" % (mounting_dir, dir_name, num)\n )\n if pin_val != \"\":\n client.exec_command(\n cmd=\"sudo setfattr -n ceph.dir.pin -v %s %s%s_%d\"\n % (pin_val, mounting_dir, dir_name, num)\n )\n else:\n print(\"Pin val not given\")\n print(out.read().decode())\n print(time.time())\n break\n except Exception as e:\n log.error(e)\n\n\ndef allow_dir_fragmentation(mds_nodes):\n try:\n log.info(\"Allowing directorty fragmenation for splitting\")\n for node in mds_nodes:\n node.exec_command(cmd=\"sudo ceph fs set cephfs allow_dirfrags 1\")\n break\n except Exception as e:\n log.error(e)\n\n\ndef mds_fail_over(mds_nodes):\n try:\n rand = random.randint(0, 1)\n for node in mds_nodes:\n log.info(\"Failing MDS %d\" % (rand))\n node.exec_command(cmd=\"sudo ceph mds fail %d\" % (rand))\n break\n\n except Exception as e:\n log.error(e)\n\n\ndef pinned_dir_io(clients, mds_fail_over, num_of_files, range1, range2):\n try:\n log.info(\"Performing IOs and MDSfailovers on clients\")\n for client in clients:\n client.exec_command(cmd=\"sudo pip install crefi\")\n for num in range(range1, range2):\n if mds_fail_over != \"\":\n mds_fail_over(mds_nodes)\n out, err = client.exec_command(\n cmd=\"sudo crefi -n %d %sdir_%d\" % (num_of_files, mounting_dir, num)\n )\n rc = out.channel.recv_exit_status()\n print(out.read().decode())\n RC.append(rc)\n print(time.time())\n if rc == 0:\n log.info(\"Client IO is going on,success\")\n else:\n log.error(\"Client IO got interrupted\")\n failure.update({client: out})\n break\n break\n\n except Exception as e:\n log.error(e)\n\n\ndef custom_ceph_config(suite_config, custom_config, custom_config_file):\n \"\"\"\n Combines and returns custom configuration overrides for ceph.\n Hierarchy is as follows::\n\n custom_config > custom_config_file > suite_config\n\n Args:\n suite_config: ceph_conf_overrides that currently exist in the test suite\n custom_config: custom config args provided by the cli (these all go to the global scope)\n custom_config_file: path to custom config yaml file provided by the cli\n\n Returns\n New value to be used for ceph_conf_overrides in test config\n \"\"\"\n log.debug(\"Suite config: {}\".format(suite_config))\n log.debug(\"Custom config: {}\".format(custom_config))\n log.debug(\"Custom config file: {}\".format(custom_config_file))\n\n full_custom_config = suite_config or {}\n cli_config_dict = {}\n custom_config_dict = {}\n\n # retrieve custom config from file\n if custom_config_file:\n 
with open(custom_config_file) as f:\n custom_config_dict = yaml.safe_load(f)\n log.info(\"File contents: {}\".format(custom_config_dict))\n\n # format cli configs into dict\n if custom_config:\n cli_config_dict = dict(item.split(\"=\") for item in custom_config)\n\n # combine file and cli configs\n if cli_config_dict:\n if not custom_config_dict.get(\"global\"):\n custom_config_dict[\"global\"] = {}\n for key, value in cli_config_dict.items():\n custom_config_dict[\"global\"][key] = value\n\n # combine file and suite configs\n for key, value in custom_config_dict.items():\n subsection = {}\n if full_custom_config.get(key):\n subsection.update(full_custom_config[key])\n subsection.update(value)\n full_custom_config[key] = subsection\n\n log.info(\"Full custom config: {}\".format(full_custom_config))\n return full_custom_config\n\n\ndef mask_secrets(plaintext, secrets):\n \"\"\"\n Replace secrets in plaintext with asterisks\n\n Args:\n plaintext (str or list): The plaintext to remove the secrets from or\n list of strings to remove secrets from\n secrets (list): List of secret strings to replace in the plaintext\n\n Returns:\n str: The censored version of plaintext\n\n \"\"\"\n if secrets:\n for secret in secrets:\n if isinstance(plaintext, list):\n plaintext = [string.replace(secret, \"*\" * 5) for string in plaintext]\n else:\n plaintext = plaintext.replace(secret, \"*\" * 5)\n return plaintext\n\n\ndef run_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):\n \"\"\"\n *The deprecated form of exec_cmd.*\n Run an arbitrary command locally\n\n Args:\n cmd (str): command to run\n secrets (list): A list of secrets to be masked with asterisks\n This kwarg is popped in order to not interfere with\n subprocess.run(``**kwargs``)\n timeout (int): Timeout for the command, defaults to 600 seconds.\n ignore_error (bool): True if ignore non zero return code and do not\n raise the exception.\n\n Raises:\n CommandFailed: In case the command execution fails\n\n Returns:\n (str) Decoded stdout of command\n \"\"\"\n completed_process = exec_cmd(cmd, secrets, timeout, ignore_error, **kwargs)\n return mask_secrets(completed_process.stdout.decode(), secrets)\n\n\ndef exec_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):\n \"\"\"\n Run an arbitrary command locally\n\n Args:\n cmd (str): command to run\n secrets (list): A list of secrets to be masked with asterisks\n This kwarg is popped in order to not interfere with\n subprocess.run(``**kwargs``)\n timeout (int): Timeout for the command, defaults to 600 seconds.\n ignore_error (bool): True if ignore non zero return code and do not\n raise the exception.\n\n Raises:\n CommandFailed: In case the command execution fails\n\n Returns:\n (CompletedProcess) A CompletedProcess object of the command that was executed\n CompletedProcess attributes:\n args: The list or str args passed to run().\n returncode (str): The exit code of the process, negative for signals.\n stdout (str): The standard output (None if not captured).\n stderr (str): The standard error (None if not captured).\n\n \"\"\"\n masked_cmd = mask_secrets(cmd, secrets)\n log.info(f\"Executing command: {masked_cmd}\")\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n completed_process = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n timeout=timeout,\n **kwargs,\n )\n masked_stdout = mask_secrets(completed_process.stdout.decode(), secrets)\n if len(completed_process.stdout) > 0:\n log.debug(f\"Command stdout: 
{masked_stdout}\")\n else:\n log.debug(\"Command stdout is empty\")\n\n masked_stderr = mask_secrets(completed_process.stderr.decode(), secrets)\n if len(completed_process.stderr) > 0:\n log.warning(f\"Command stderr: {masked_stderr}\")\n else:\n log.debug(\"Command stderr is empty\")\n log.debug(f\"Command return code: {completed_process.returncode}\")\n if completed_process.returncode and not ignore_error:\n raise CommandFailed(\n f\"Error during execution of command: {masked_cmd}.\"\n f\"\\nError is {masked_stderr}\"\n )\n return completed_process\n\n\ndef download_file(url, filename, **kwargs):\n \"\"\"\n Download a file from a specified url\n\n Args:\n url (str): URL of the file to download\n filename (str): Name of the file to write the download to\n kwargs (dict): additional keyword arguments passed to requests.get(...)\n\n \"\"\"\n log.debug(f\"Download '{url}' to '{filename}'.\")\n with open(filename, \"wb\") as f:\n r = requests.get(url, **kwargs)\n assert r.ok, f\"The URL {url} is not available! Status: {r.status_code}.\"\n f.write(r.content)\n\n\ndef get_url_content(url, **kwargs):\n \"\"\"\n Return URL content\n\n Args:\n url (str): URL address to return\n kwargs (dict): additional keyword arguments passed to requests.get(...)\n Returns:\n str: Content of URL\n\n Raises:\n AssertionError: When couldn't load URL\n\n \"\"\"\n log.debug(f\"Download '{url}' content.\")\n r = requests.get(url, **kwargs)\n assert r.ok, f\"Couldn't load URL: {url} content! Status: {r.status_code}.\"\n return r.content\n\n\ndef expose_ocp_version(version):\n \"\"\"\n This helper function exposes latest nightly version or GA version of OCP.\n When the version string ends with .nightly (e.g. 4.2.0-0.nightly) it will\n expose the version to latest accepted OCP build\n (e.g. 4.2.0-0.nightly-2019-08-08-103722)\n If the version ends with -ga than it will find the latest GA OCP version\n and will expose 4.2-ga to for example 4.2.22.\n\n Args:\n version (str): Verison of OCP\n\n Returns:\n str: Version of OCP exposed to full version if latest nighly passed\n\n \"\"\"\n if version.endswith(\".nightly\"):\n latest_nightly_url = (\n f\"https://amd64.ocp.releases.ci.openshift.org/api/v1/\"\n f\"releasestream/{version}/latest\"\n )\n version_url_content = get_url_content(latest_nightly_url)\n version_json = json.loads(version_url_content)\n return version_json[\"name\"]\n if version.endswith(\"-ga\"):\n channel = config.DEPLOYMENT.get(\"ocp_channel\", \"stable\")\n ocp_version = version.rstrip(\"-ga\")\n index = config.DEPLOYMENT.get(\"ocp_version_index\", -1)\n return get_latest_ocp_version(f\"{channel}-{ocp_version}\", index)\n else:\n return version\n\n\ndef get_openshift_installer(\n version=None,\n bin_dir=None,\n force_download=False,\n):\n \"\"\"\n Download the OpenShift installer binary, if not already present.\n Update env. 
PATH and get path of the openshift installer binary.\n\n Args:\n version (str): Version of the installer to download\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force installer download even if already present\n\n Returns:\n str: Path to the installer binary\n\n \"\"\"\n version = version or config.DEPLOYMENT[\"installer_version\"]\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n installer_filename = \"openshift-install\"\n installer_binary_path = os.path.join(bin_dir, installer_filename)\n if os.path.isfile(installer_binary_path) and force_download:\n delete_file(installer_binary_path)\n if os.path.isfile(installer_binary_path):\n log.debug(f\"Installer exists ({installer_binary_path}), skipping download.\")\n # TODO: check installer version\n else:\n version = expose_ocp_version(version)\n log.info(f\"Downloading openshift installer ({version}).\")\n prepare_bin_dir()\n # record current working directory and switch to BIN_DIR\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n tarball = f\"{installer_filename}.tar.gz\"\n url = get_openshift_mirror_url(installer_filename, version)\n download_file(url, tarball)\n run_cmd(f\"tar xzvf {tarball} {installer_filename}\")\n delete_file(tarball)\n # return to the previous working directory\n os.chdir(previous_dir)\n\n installer_version = run_cmd(f\"{installer_binary_path} version\")\n log.info(f\"OpenShift Installer version: {installer_version}\")\n return installer_binary_path\n\n\ndef get_ocm_cli(\n version=None,\n bin_dir=None,\n force_download=False,\n):\n \"\"\"\n Download the OCM binary, if not already present.\n Update env. PATH and get path of the OCM binary.\n\n Args:\n version (str): Version of the OCM to download\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force OCM download even if already present\n\n Returns:\n str: Path to the OCM binary\n\n \"\"\"\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n ocm_filename = \"ocm\"\n ocm_binary_path = os.path.join(bin_dir, ocm_filename)\n if os.path.isfile(ocm_binary_path) and force_download:\n delete_file(ocm_binary_path)\n if os.path.isfile(ocm_binary_path):\n log.debug(f\"ocm exists ({ocm_binary_path}), skipping download.\")\n else:\n log.info(f\"Downloading ocm cli ({version}).\")\n prepare_bin_dir()\n # record current working directory and switch to BIN_DIR\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = f\"https://github.com/openshift-online/ocm-cli/releases/download/v{version}/ocm-linux-amd64\"\n download_file(url, ocm_filename)\n # return to the previous working directory\n os.chdir(previous_dir)\n\n current_file_permissions = os.stat(ocm_binary_path)\n os.chmod(\n ocm_binary_path,\n current_file_permissions.st_mode | stat.S_IEXEC,\n )\n ocm_version = run_cmd(f\"{ocm_binary_path} version\")\n log.info(f\"OCM version: {ocm_version}\")\n\n return ocm_binary_path\n\n\ndef get_rosa_cli(\n version=None,\n bin_dir=None,\n force_download=False,\n):\n \"\"\"\n Download the ROSA binary, if not already present.\n Update env. 
PATH and get path of the ROSA binary.\n\n Args:\n version (str): Version of the ROSA to download\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force ROSA download even if already present\n\n Returns:\n str: Path to the rosa binary\n\n \"\"\"\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n rosa_filename = \"rosa\"\n rosa_binary_path = os.path.join(bin_dir, rosa_filename)\n if os.path.isfile(rosa_binary_path) and force_download:\n delete_file(rosa_binary_path)\n if os.path.isfile(rosa_binary_path):\n log.debug(f\"rosa exists ({rosa_binary_path}), skipping download.\")\n else:\n log.info(f\"Downloading rosa cli ({version}).\")\n prepare_bin_dir()\n # record current working directory and switch to BIN_DIR\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = f\"https://github.com/openshift/rosa/releases/download/v{version}/rosa-linux-amd64\"\n download_file(url, rosa_filename)\n # return to the previous working directory\n os.chdir(previous_dir)\n\n current_file_permissions = os.stat(rosa_binary_path)\n os.chmod(\n rosa_binary_path,\n current_file_permissions.st_mode | stat.S_IEXEC,\n )\n rosa_version = run_cmd(f\"{rosa_binary_path} version\")\n log.info(f\"rosa version: {rosa_version}\")\n\n return rosa_binary_path\n\n\ndef get_openshift_client(\n version=None, bin_dir=None, force_download=False, skip_comparison=False\n):\n \"\"\"\n Download the OpenShift client binary, if not already present.\n Update env. PATH and get path of the oc binary.\n\n Args:\n version (str): Version of the client to download\n (default: config.RUN['client_version'])\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force client download even if already present\n skip_comparison (bool): Skip the comparison between the existing OCP client\n version and the configured one.\n\n Returns:\n str: Path to the client binary\n\n \"\"\"\n version = version or config.RUN[\"client_version\"]\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n client_binary_path = os.path.join(bin_dir, \"oc\")\n kubectl_binary_path = os.path.join(bin_dir, \"kubectl\")\n download_client = True\n client_version = None\n try:\n version = expose_ocp_version(version)\n except Exception:\n log.exception(\"Unable to expose OCP version, skipping client download.\")\n skip_comparison = True\n download_client = False\n force_download = False\n\n if force_download:\n log.info(\"Forcing client download.\")\n elif os.path.isfile(client_binary_path) and not skip_comparison:\n current_client_version = get_client_version(client_binary_path)\n if current_client_version != version:\n log.info(\n f\"Existing client version ({current_client_version}) does not match \"\n f\"configured version ({version}).\"\n )\n else:\n log.debug(\n f\"Client exists ({client_binary_path}) and matches configured version, \"\n f\"skipping download.\"\n )\n download_client = False\n\n if download_client:\n # Move existing client binaries to backup location\n client_binary_backup = f\"{client_binary_path}.bak\"\n kubectl_binary_backup = f\"{kubectl_binary_path}.bak\"\n\n try:\n os.rename(client_binary_path, client_binary_backup)\n os.rename(kubectl_binary_path, kubectl_binary_backup)\n except FileNotFoundError:\n pass\n\n # Download the client\n log.info(f\"Downloading openshift client ({version}).\")\n prepare_bin_dir()\n # record current working directory and switch to BIN_DIR\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = 
get_openshift_mirror_url(\"openshift-client\", version)\n tarball = \"openshift-client.tar.gz\"\n download_file(url, tarball)\n run_cmd(f\"tar xzvf {tarball} oc kubectl\")\n delete_file(tarball)\n\n try:\n client_version = run_cmd(f\"{client_binary_path} version --client\")\n except CommandFailed:\n log.error(\"Unable to get version from downloaded client.\")\n\n if client_version:\n try:\n delete_file(client_binary_backup)\n delete_file(kubectl_binary_backup)\n log.info(\"Deleted backup binaries.\")\n except FileNotFoundError:\n pass\n else:\n try:\n os.rename(client_binary_backup, client_binary_path)\n os.rename(kubectl_binary_backup, kubectl_binary_path)\n log.info(\"Restored backup binaries to their original location.\")\n except FileNotFoundError:\n raise ClientDownloadError(\n \"No backups exist and new binary was unable to be verified.\"\n )\n\n # return to the previous working directory\n os.chdir(previous_dir)\n\n log.info(f\"OpenShift Client version: {client_version}\")\n return client_binary_path\n\n\ndef get_vault_cli(bind_dir=None, force_download=False):\n \"\"\"\n Download vault based on platform\n basically for CLI purpose. Binary will be directly\n put into ocs_ci/bin/ directory\n\n Args:\n bind_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n force_download (bool): Force vault cli download even if already present\n\n \"\"\"\n res = requests.get(constants.VAULT_VERSION_INFO_URL)\n version = res.url.split(\"/\")[-1].lstrip(\"v\")\n bin_dir = os.path.expanduser(bind_dir or config.RUN[\"bin_dir\"])\n system = platform.system()\n if \"Darwin\" not in system and \"Linux\" not in system:\n raise UnsupportedOSType(\"Not a supported platform for vault\")\n\n system = system.lower()\n zip_file = f\"vault_{version}_{system}_amd64.zip\"\n vault_cli_filename = \"vault\"\n vault_binary_path = os.path.join(bin_dir, vault_cli_filename)\n if os.path.isfile(vault_binary_path) and force_download:\n delete_file(vault_binary_path)\n if os.path.isfile(vault_binary_path):\n log.debug(\n f\"Vault CLI binary already exists {vault_binary_path}, skipping download.\"\n )\n else:\n log.info(f\"Downloading vault cli {version}\")\n prepare_bin_dir()\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = f\"{constants.VAULT_DOWNLOAD_BASE_URL}/{version}/{zip_file}\"\n download_file(url, zip_file)\n run_cmd(f\"unzip {zip_file}\")\n delete_file(zip_file)\n os.chdir(previous_dir)\n vault_ver = run_cmd(f\"{vault_binary_path} version\")\n log.info(f\"Vault cli version:{vault_ver}\")\n\n\ndef ensure_nightly_build_availability(build_url):\n base_build_url = build_url.rsplit(\"/\", 1)[0]\n r = requests.get(base_build_url)\n extracting_condition = b\"Extracting\" in r.content\n if extracting_condition:\n log.info(\"Build is extracting now, may take up to a minute.\")\n return r.ok and not extracting_condition\n\n\ndef get_openshift_mirror_url(file_name, version):\n \"\"\"\n Format url to OpenShift mirror (for client and installer download).\n\n Args:\n file_name (str): Name of file\n version (str): Version of the installer or client to download\n\n Returns:\n str: Url of the desired file (installer or client)\n\n Raises:\n UnsupportedOSType: In case the OS type is not supported\n UnavailableBuildException: In case the build url is not reachable\n \"\"\"\n if platform.system() == \"Darwin\":\n os_type = \"mac\"\n elif platform.system() == \"Linux\":\n os_type = \"linux\"\n else:\n raise UnsupportedOSType\n url_template = config.DEPLOYMENT.get(\n \"ocp_url_template\",\n 
\"https://openshift-release-artifacts.apps.ci.l2s4.p1.openshiftapps.com/\"\n \"{version}/{file_name}-{os_type}-{version}.tar.gz\",\n )\n url = url_template.format(\n version=version,\n file_name=file_name,\n os_type=os_type,\n )\n sample = TimeoutSampler(\n timeout=540,\n sleep=5,\n func=ensure_nightly_build_availability,\n build_url=url,\n )\n if not sample.wait_for_func_status(result=True):\n raise UnavailableBuildException(f\"The build url {url} is not reachable\")\n return url\n\n\ndef prepare_bin_dir(bin_dir=None):\n \"\"\"\n Prepare bin directory for OpenShift client and installer\n\n Args:\n bin_dir (str): Path to bin directory (default: config.RUN['bin_dir'])\n \"\"\"\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n try:\n os.mkdir(bin_dir)\n log.info(f\"Directory '{bin_dir}' successfully created.\")\n except FileExistsError:\n log.debug(f\"Directory '{bin_dir}' already exists.\")\n\n\ndef add_path_to_env_path(path):\n \"\"\"\n Add path to the PATH environment variable (if not already there).\n\n Args:\n path (str): Path which should be added to the PATH env. variable\n\n \"\"\"\n env_path = os.environ[\"PATH\"].split(os.pathsep)\n if path not in env_path:\n os.environ[\"PATH\"] = os.pathsep.join([path] + env_path)\n log.info(f\"Path '{path}' added to the PATH environment variable.\")\n log.debug(f\"PATH: {os.environ['PATH']}\")\n\n\ndef delete_file(file_name):\n \"\"\"\n Delete file_name\n\n Args:\n file_name (str): Path to the file you want to delete\n \"\"\"\n os.remove(file_name)\n\n\ndef delete_dir(dir_name):\n \"\"\"\n Deletes the directory\n\n Args:\n dir_name (str): Directory path to delete\n\n \"\"\"\n try:\n rmtree(dir_name)\n except OSError as e:\n log.error(f\"Failed to delete the directory {dir_name}. Error: {e.strerror}\")\n\n\nclass TimeoutSampler(object):\n \"\"\"\n Samples the function output.\n\n This is a generator object that at first yields the output of function\n `func`. After the yield, it either raises instance of `timeout_exc_cls` or\n sleeps `sleep` seconds.\n\n Yielding the output allows you to handle every value as you wish.\n\n Feel free to set the instance variables.\n\n\n Args:\n timeout (int): Timeout in seconds\n sleep (int): Sleep interval in seconds\n func (function): The function to sample\n func_args: Arguments for the function\n func_kwargs: Keyword arguments for the function\n \"\"\"\n\n def __init__(self, timeout, sleep, func, *func_args, **func_kwargs):\n self.timeout = timeout\n self.sleep = sleep\n # check that given timeout and sleep values makes sense\n if self.timeout < self.sleep:\n raise ValueError(\"timeout should be larger than sleep time\")\n\n self.func = func\n self.func_args = func_args\n self.func_kwargs = func_kwargs\n\n # Timestamps of the first and most recent samples\n self.start_time = None\n self.last_sample_time = None\n # The exception to raise\n self.timeout_exc_cls = TimeoutExpiredError\n # Arguments that will be passed to the exception\n self.timeout_exc_args = [self.timeout]\n try:\n self.timeout_exc_args.append(\n f\"Timed out after {timeout}s running {self._build_call_string()}\"\n )\n except Exception:\n log.exception(\n \"Failed to assemble call string. 
Not necessarily a test failure.\"\n )\n\n def _build_call_string(self):\n def stringify(value):\n if isinstance(value, str):\n return f'\"{value}\"'\n return str(value)\n\n args = list(map(stringify, self.func_args))\n kwargs = [f\"{stringify(k)}={stringify(v)}\" for k, v in self.func_kwargs.items()]\n all_args_string = \", \".join(args + kwargs)\n return f\"{self.func.__name__}({all_args_string})\"\n\n def __iter__(self):\n if self.start_time is None:\n self.start_time = time.time()\n while True:\n self.last_sample_time = time.time()\n if self.timeout <= (self.last_sample_time - self.start_time):\n raise self.timeout_exc_cls(*self.timeout_exc_args)\n try:\n yield self.func(*self.func_args, **self.func_kwargs)\n except Exception as ex:\n msg = f\"Exception raised during iteration: {ex}\"\n log.exception(msg)\n if self.timeout <= (time.time() - self.start_time):\n raise self.timeout_exc_cls(*self.timeout_exc_args)\n log.info(\"Going to sleep for %d seconds before next iteration\", self.sleep)\n time.sleep(self.sleep)\n\n def wait_for_func_value(self, value):\n \"\"\"\n Implements common usecase of TimeoutSampler: waiting until func (given\n function) returns a given value.\n\n Args:\n value: Expected return value of func we are waiting for.\n \"\"\"\n try:\n for i_value in self:\n if i_value == value:\n break\n except self.timeout_exc_cls:\n log.error(\n \"function %s failed to return expected value %s \"\n \"after multiple retries during %d second timeout\",\n self.func.__name__,\n value,\n self.timeout,\n )\n raise\n\n def wait_for_func_status(self, result):\n \"\"\"\n Get function and run it for given time until success or timeout.\n (using __iter__ function)\n\n Args:\n result (bool): Expected result from func.\n\n Examples::\n\n sample = TimeoutSampler(\n timeout=60, sleep=1, func=some_func, func_arg1=\"1\",\n func_arg2=\"2\"\n )\n if not sample.wait_for_func_status(result=True):\n raise Exception\n\n \"\"\"\n try:\n self.wait_for_func_value(result)\n return True\n except self.timeout_exc_cls:\n return False\n\n\nclass TimeoutIterator(TimeoutSampler):\n \"\"\"\n Wrapper of TimeoutSampler which separates parameters of the class itself\n and func arguments in __init__ method. 
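# --- Illustrative usage sketch (not part of the original module) ---------------
# Shows how the TimeoutSampler defined above is typically consumed as an
# iterator. `count_ready_pods` is a hypothetical stand-in for any callable that
# returns a sampled value; nothing here queries a real cluster.
def count_ready_pods():
    return 3  # placeholder for e.g. parsing `oc get pods` output

sampler = TimeoutSampler(timeout=60, sleep=5, func=count_ready_pods)
for ready in sampler:
    # each pass calls count_ready_pods(); between samples the generator sleeps
    # 5 seconds, and TimeoutExpiredError is raised once the 60 second budget
    # is exhausted
    if ready >= 3:
        break
# -------------------------------------------------------------------------------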
Such way of passing function with\n parameters is used in python standard library.\n\n This allows more explicit usage, which improves readability, eg.::\n\n t1 = TimeoutIterator(timeout=60, sleep=5, func=foo, func_args=[bar])\n t2 = TimeoutIterator(3600, sleep=10, func=foo, func_args=[bar])\n \"\"\"\n\n def __init__(self, timeout, sleep, func, func_args=None, func_kwargs=None):\n if func_args is None:\n func_args = []\n if func_kwargs is None:\n func_kwargs = {}\n super().__init__(timeout, sleep, func, *func_args, **func_kwargs)\n\n\ndef get_random_str(size=13):\n \"\"\"\n generates the random string of given size\n\n Args:\n size (int): number of random characters to generate\n\n Returns:\n str : string of random characters of given size\n\n \"\"\"\n chars = string.ascii_lowercase + string.digits\n return \"\".join(random.choice(chars) for _ in range(size))\n\n\ndef run_async(command):\n \"\"\"\n Run command locally and return without waiting for completion\n\n Args:\n command (str): The command to run.\n\n Returns:\n An open descriptor to be used by the calling function.\n\n Example:\n command = 'oc delete pvc pvc1'\n proc = run_async(command)\n ret, out, err = proc.async_communicate()\n \"\"\"\n log.info(f\"Executing command: {command}\")\n popen_obj = subprocess.Popen(\n command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True,\n encoding=\"utf-8\",\n )\n\n def async_communicate():\n \"\"\"\n Wait for command to complete and fetch the result\n\n Returns:\n retcode, stdout, stderr of the command\n \"\"\"\n stdout, stderr = popen_obj.communicate()\n retcode = popen_obj.returncode\n return retcode, stdout, stderr\n\n popen_obj.async_communicate = async_communicate\n return popen_obj\n\n\ndef is_cluster_running(cluster_path):\n from ocs_ci.ocs.openshift_ops import OCP\n\n return config.RUN[\"cli_params\"].get(\"cluster_path\") and OCP.set_kubeconfig(\n os.path.join(cluster_path, config.RUN.get(\"kubeconfig_location\"))\n )\n\n\ndef decompose_html_attributes(soup, attributes):\n \"\"\"\n Decomposes the given html attributes\n\n Args:\n soup (obj): BeautifulSoup object\n attributes (list): attributes to decompose\n\n Returns: None\n\n \"\"\"\n for attribute in attributes:\n tg = soup.find_all(attrs={\"class\": attribute})\n for each in tg:\n each.decompose()\n\n\ndef parse_html_for_email(soup):\n \"\"\"\n Parses the html and filters out the unnecessary data/tags/attributes\n for email reporting\n\n Args:\n soup (obj): BeautifulSoup object\n\n \"\"\"\n attributes_to_decompose = [\"extra\"]\n if not config.RUN.get(\"logs_url\"):\n attributes_to_decompose.append(\"col-links\")\n decompose_html_attributes(soup, attributes_to_decompose)\n soup.find(id=\"not-found-message\").decompose()\n\n if not config.RUN.get(\"logs_url\"):\n for tr in soup.find_all(\"tr\"):\n for th in tr.find_all(\"th\"):\n if \"Links\" in th.text:\n th.decompose()\n\n for p in soup.find_all(\"p\"):\n if \"(Un)check the boxes to filter the results.\" in p.text:\n p.decompose()\n if \"pytest-html\" in p.text:\n data = p.text.split(\"by\")[0]\n p.string = data\n\n for ip in soup.find_all(\"input\"):\n if not ip.has_attr(\"disabled\"):\n ip[\"disabled\"] = \"true\"\n\n for td in soup.find_all(\"td\"):\n if \"pytest\" in td.text or \"html\" in td.text:\n data = td.text.replace(\"&apos\", \"\")\n td.string = data\n\n main_header = soup.find(\"h1\")\n main_header.string.replace_with(\"OCS-CI RESULTS\")\n\n\ndef add_squad_analysis_to_email(session, soup):\n \"\"\"\n Add squad analysis to the html test results used 
in email reporting\n\n Args:\n session (obj): Pytest session object\n soup (obj): BeautifulSoup object of HTML Report data\n\n \"\"\"\n failed = {}\n skipped = {}\n # sort out failed and skipped test cases to failed and skipped dicts\n for result in session.results.values():\n if result.failed or result.skipped:\n unassigned = True\n for squad, res in constants.SQUADS.items():\n for item in res:\n if item in result.nodeid:\n if result.failed:\n if squad not in failed:\n failed[squad] = []\n failed[squad].append(result.nodeid)\n unassigned = False\n\n if result.skipped:\n if squad not in skipped:\n skipped[squad] = []\n try:\n skipped_message = result.longrepr[2][8:]\n except TypeError:\n skipped_message = \"--unknown--\"\n skipped[squad].append((result.nodeid, skipped_message))\n unassigned = False\n if unassigned:\n if result.failed:\n if \"UNASSIGNED\" not in failed:\n failed[\"UNASSIGNED\"] = []\n failed[\"UNASSIGNED\"].append(result.nodeid)\n if result.skipped:\n if \"UNASSIGNED\" not in skipped:\n skipped[\"UNASSIGNED\"] = []\n try:\n skipped_message = result.longrepr[2][8:]\n except TypeError:\n skipped_message = \"--unknown--\"\n skipped[\"UNASSIGNED\"].append((result.nodeid, skipped_message))\n\n # no failed or skipped tests - exit the function\n if not failed and not skipped:\n return\n\n # add CSS for the Squad Analysis report\n style = soup.find(\"style\")\n # use colors for squad names from squad names\n style.string += \"\\n\".join(\n [\n f\"h4.squad-{color.lower()} {{\\n color: {color.lower()};\\n}}\"\n for color in constants.SQUADS\n ]\n )\n # few additional styles\n style.string += \"\"\"\n .squad-analysis {\n color: black;\n font-family: monospace;\n background-color: #eee;\n padding: 5px;\n margin-top: 10px;\n }\n .squad-analysis h2 {\n margin: 0px;\n }\n .squad-analysis h3 {\n margin: 0px;\n margin-top: 10px;\n }\n .squad-analysis h4 {\n margin: 0px;\n }\n .squad-analysis ul {\n margin: 0px;\n }\n .squad-analysis ul li em {\n margin-left: 1em;\n }\n .squad-unassigned {\n background-color: #FFBA88;\n }\n h4.squad-yellow {\n color: black;\n background-color: yellow;\n display: inline;\n }\n \"\"\"\n # prepare place for the Squad Analysis in the email\n squad_analysis_div = soup.new_tag(\"div\")\n squad_analysis_div[\"class\"] = \"squad-analysis\"\n main_header = soup.find(\"h1\")\n main_header.insert_after(squad_analysis_div)\n failed_h2_tag = soup.new_tag(\"h2\")\n failed_h2_tag.string = \"Squad Analysis - please analyze:\"\n squad_analysis_div.append(failed_h2_tag)\n if failed:\n # print failed testcases peer squad\n failed_div_tag = soup.new_tag(\"div\")\n squad_analysis_div.append(failed_div_tag)\n failed_h3_tag = soup.new_tag(\"h3\")\n failed_h3_tag.string = \"Failures:\"\n failed_div_tag.append(failed_h3_tag)\n for squad in failed:\n failed_h4_tag = soup.new_tag(\"h4\")\n failed_h4_tag.string = f\"{squad} squad\"\n failed_h4_tag[\"class\"] = f\"squad-{squad.lower()}\"\n failed_div_tag.append(failed_h4_tag)\n failed_ul_tag = soup.new_tag(\"ul\")\n failed_ul_tag[\"class\"] = f\"squad-{squad.lower()}\"\n failed_div_tag.append(failed_ul_tag)\n for test in failed[squad]:\n failed_li_tag = soup.new_tag(\"li\")\n failed_li_tag.string = test\n failed_ul_tag.append(failed_li_tag)\n if skipped:\n # print skipped testcases with reason peer squad\n skips_div_tag = soup.new_tag(\"div\")\n squad_analysis_div.append(skips_div_tag)\n skips_h3_tag = soup.new_tag(\"h3\")\n skips_h3_tag.string = \"Skips:\"\n skips_div_tag.append(skips_h3_tag)\n for squad in skipped:\n skips_h4_tag = 
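# --- Illustrative sketch (not part of the original module) ---------------------
# The squad-analysis section above is injected with the BeautifulSoup pattern
# new_tag() + insert_after(); this toy document shows the same pattern in
# isolation (the markup is made up, the class name mirrors the one used above).
from bs4 import BeautifulSoup

toy = BeautifulSoup("<html><body><h1>Report</h1></body></html>", "html.parser")
div = toy.new_tag("div")
div["class"] = "squad-analysis"
div.string = "Squad Analysis - please analyze:"
toy.find("h1").insert_after(div)
# toy.body now holds the <h1> followed by the new <div class="squad-analysis">
# -------------------------------------------------------------------------------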
soup.new_tag(\"h4\")\n skips_h4_tag.string = f\"{squad} squad\"\n skips_h4_tag[\"class\"] = f\"squad-{squad.lower()}\"\n skips_div_tag.append(skips_h4_tag)\n skips_ul_tag = soup.new_tag(\"ul\")\n skips_ul_tag[\"class\"] = f\"squad-{squad.lower()}\"\n skips_div_tag.append(skips_ul_tag)\n for test in skipped[squad]:\n skips_li_tag = soup.new_tag(\"li\")\n skips_test_span_tag = soup.new_tag(\"span\")\n skips_test_span_tag.string = test[0]\n skips_li_tag.append(skips_test_span_tag)\n skips_li_tag.append(soup.new_tag(\"br\"))\n skips_reason_em_tag = soup.new_tag(\"em\")\n skips_reason_em_tag.string = f\"Reason: {test[1]}\"\n skips_li_tag.append(skips_reason_em_tag)\n skips_ul_tag.append(skips_li_tag)\n\n\ndef move_summary_to_top(soup):\n \"\"\"\n Move summary to the top of the eamil report\n\n \"\"\"\n summary = []\n summary.append(soup.find(\"h2\", text=\"Summary\"))\n for tag in summary[0].next_siblings:\n if tag.name == \"h2\":\n break\n else:\n summary.append(tag)\n for tag in summary:\n tag.extract()\n main_header = soup.find(\"h1\")\n # because we are inserting the tags just after the header one by one, we\n # have to insert them in reverse order\n summary.reverse()\n for tag in summary:\n main_header.insert_after(tag)\n\n\ndef email_reports(session):\n \"\"\"\n Email results of test run\n\n \"\"\"\n # calculate percentage pass\n # reporter = session.config.pluginmanager.get_plugin(\"terminalreporter\")\n # passed = len(reporter.stats.get(\"passed\", []))\n # failed = len(reporter.stats.get(\"failed\", []))\n # error = len(reporter.stats.get(\"error\", []))\n # total = passed + failed + error\n # percentage_passed = (passed / total) * 100\n\n try:\n build_id = get_ocs_build_number()\n except Exception:\n build_id = \"\"\n log.exception(\"Getting OCS operator build number failed!\")\n build_str = f\"BUILD ID: {build_id} \" if build_id else \"\"\n mailids = config.RUN[\"cli_params\"][\"email\"]\n recipients = []\n [recipients.append(mailid) for mailid in mailids.split(\",\")]\n sender = \"[email protected]\"\n msg = MIMEMultipart(\"alternative\")\n msg[\"Subject\"] = (\n f\"ocs-ci results for {get_testrun_name()} \"\n f\"({build_str}\"\n f\"RUN ID: {config.RUN['run_id']}) \"\n # f\"Passed: {percentage_passed:.0f}%\"\n )\n msg[\"From\"] = sender\n msg[\"To\"] = \", \".join(recipients)\n\n html = config.RUN[\"cli_params\"][\"--html\"]\n with open(os.path.expanduser(html)) as fd:\n html_data = fd.read()\n soup = BeautifulSoup(html_data, \"html.parser\")\n\n parse_html_for_email(soup)\n if config.RUN[\"cli_params\"].get(\"squad_analysis\"):\n add_squad_analysis_to_email(session, soup)\n move_summary_to_top(soup)\n part1 = MIMEText(soup, \"html\")\n msg.attach(part1)\n try:\n s = smtplib.SMTP(config.REPORTING[\"email\"][\"smtp_server\"])\n s.sendmail(sender, recipients, msg.as_string())\n s.quit()\n log.info(f\"Results have been emailed to {recipients}\")\n except Exception:\n log.exception(\"Sending email with results failed!\")\n\n\ndef get_cluster_version_info():\n \"\"\"\n Gets the complete cluster version information\n\n Returns:\n dict: cluster version information\n\n \"\"\"\n # importing here to avoid circular imports\n from ocs_ci.ocs.ocp import OCP\n\n ocp = OCP(kind=\"clusterversion\")\n cluster_version_info = ocp.get(\"version\")\n return cluster_version_info\n\n\ndef get_ocs_build_number():\n \"\"\"\n Gets the build number for ocs operator\n\n Return:\n str: build number for ocs operator version\n\n \"\"\"\n # Importing here to avoid circular dependency\n from 
ocs_ci.ocs.resources.csv import get_csvs_start_with_prefix\n from ocs_ci.ocs.resources.catalog_source import CatalogSource\n from ocs_ci.ocs.resources.packagemanifest import get_selector_for_ocs_operator\n\n build_num = \"\"\n if (\n version_module.get_semantic_ocs_version_from_config()\n >= version_module.VERSION_4_9\n ):\n operator_name = defaults.ODF_OPERATOR_NAME\n else:\n operator_name = defaults.OCS_OPERATOR_NAME\n ocs_csvs = get_csvs_start_with_prefix(\n operator_name,\n defaults.ROOK_CLUSTER_NAMESPACE,\n )\n try:\n ocs_csv = ocs_csvs[0]\n csv_labels = ocs_csv[\"metadata\"][\"labels\"]\n if \"full_version\" in csv_labels:\n return csv_labels[\"full_version\"]\n build_num = ocs_csv[\"spec\"][\"version\"]\n operator_selector = get_selector_for_ocs_operator()\n # This is a temporary solution how to get the build id from the registry image.\n # Because we are now missing build ID in the CSV. If catalog source with our\n # internal label exists, we will be getting build id from the tag of the image\n # in catalog source. Boris is working on better way how to populate the internal\n # build version in the CSV.\n if operator_selector:\n catalog_source = CatalogSource(\n resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,\n namespace=constants.MARKETPLACE_NAMESPACE,\n selector=operator_selector,\n )\n cs_data = catalog_source.get()[\"items\"][0]\n cs_image = cs_data[\"spec\"][\"image\"]\n image_tag = cs_image.split(\":\")[1]\n if \"-\" in image_tag:\n build_id = image_tag.split(\"-\")[1]\n build_num += f\"-{build_id}\"\n\n except (IndexError, AttributeError, CommandFailed, KeyError):\n log.exception(\"No version info found for OCS operator\")\n return build_num\n\n\ndef get_cluster_version():\n \"\"\"\n Gets the cluster version\n\n Returns:\n str: cluster version\n\n \"\"\"\n return get_cluster_version_info()[\"status\"][\"desired\"][\"version\"]\n\n\ndef get_cluster_image():\n \"\"\"\n Gets the cluster image\n\n Returns:\n str: cluster image\n\n \"\"\"\n return get_cluster_version_info()[\"status\"][\"desired\"][\"image\"]\n\n\ndef get_ceph_version():\n \"\"\"\n Gets the ceph version\n\n Returns:\n str: ceph version\n\n \"\"\"\n # importing here to avoid circular imports\n from ocs_ci.ocs.resources import pod\n\n ct_pod = pod.get_ceph_tools_pod()\n ceph_version = ct_pod.exec_ceph_cmd(\"ceph version\")\n return re.split(r\"ceph version \", ceph_version[\"version\"])[1]\n\n\ndef get_rook_version():\n \"\"\"\n Gets the rook version\n\n Returns:\n str: rook version\n\n \"\"\"\n # importing here to avoid circular imports\n from ocs_ci.ocs.resources import pod\n\n ct_pod = pod.get_ceph_tools_pod()\n rook_versions = ct_pod.exec_ceph_cmd(\"rook version\", format=\"\")\n return rook_versions[\"rook\"]\n\n\ndef get_csi_versions():\n \"\"\"\n Gets the CSI related version information\n\n Returns:\n dict: CSI related version information\n\n \"\"\"\n csi_versions = {}\n # importing here to avoid circular imports\n from ocs_ci.ocs.ocp import OCP\n\n ocp_pod_obj = OCP(\n kind=constants.POD, namespace=config.ENV_DATA[\"cluster_namespace\"]\n )\n csi_provisioners = [\"csi-cephfsplugin-provisioner\", \"csi-rbdplugin-provisioner\"]\n for provisioner in csi_provisioners:\n csi_provisioner_pod = run_cmd(\n f\"oc -n {config.ENV_DATA['cluster_namespace']} get pod -l \"\n f\"'app={provisioner}' -o jsonpath='{{.items[0].metadata.name}}'\"\n )\n desc = ocp_pod_obj.get(csi_provisioner_pod)\n for container in desc[\"spec\"][\"containers\"]:\n name = container[\"name\"]\n version = 
container[\"image\"].split(\"/\")[-1].split(\":\")[1]\n csi_versions[name] = version\n return csi_versions\n\n\ndef get_ocp_version(seperator=None):\n \"\"\"\n Get current ocp version\n\n Args:\n seperator (str): String that would seperate major and\n minor version nubers\n\n Returns:\n string : If seperator is 'None', version string will be returned as is\n eg: '4.2', '4.3'.\n If seperator is provided then '.' in the version string would be\n replaced by seperator and resulting string will be returned.\n eg: If seperator is '_' then string returned would be '4_2'\n\n \"\"\"\n char = seperator if seperator else \".\"\n if config.ENV_DATA.get(\"skip_ocp_deployment\"):\n raw_version = json.loads(run_cmd(\"oc version -o json\"))[\"openshiftVersion\"]\n else:\n raw_version = config.DEPLOYMENT[\"installer_version\"]\n version = Version.coerce(raw_version)\n return char.join([str(version.major), str(version.minor)])\n\n\ndef get_running_ocp_version(separator=None):\n \"\"\"\n Get current running ocp version\n\n Args:\n separator (str): String that would separate major and\n minor version numbers\n\n Returns:\n string : If separator is 'None', version string will be returned as is\n eg: '4.2', '4.3'.\n If separator is provided then '.' in the version string would be\n replaced by separator and resulting string will be returned.\n eg: If separator is '_' then string returned would be '4_2'\n\n \"\"\"\n char = separator if separator else \".\"\n namespace = config.ENV_DATA[\"cluster_namespace\"]\n try:\n # if the cluster exist, this part will be run\n results = run_cmd(f\"oc get clusterversion -n {namespace} -o yaml\")\n build = yaml.safe_load(results)[\"items\"][0][\"status\"][\"desired\"][\"version\"]\n return char.join(build.split(\".\")[0:2])\n except Exception:\n # this part will return version from the config file in case\n # cluster is not exists.\n return get_ocp_version(seperator=char)\n\n\ndef get_ocp_repo():\n \"\"\"\n Get ocp repo file, name will be generated dynamically based on\n ocp version.\n\n Returns:\n string : Path to ocp repo file\n\n \"\"\"\n repo_path = os.path.join(constants.REPO_DIR, f\"ocp_{get_ocp_version('_')}.repo\")\n path = os.path.expanduser(repo_path)\n assert os.path.exists(path), f\"OCP repo file {path} doesn't exists!\"\n return path\n\n\ndef parse_pgsql_logs(data):\n \"\"\"\n Parse the pgsql benchmark data from ripsaw and return\n the data in list format\n\n Args:\n data (str): log data from pgsql bench run\n\n Returns:\n list_data (list): data digestable by scripts with below format\n e.g.:\n\n [\n {1: {'num_clients': '2','num_threads': '7','latency_avg': '7',\n 'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},\n {2: {'num_clients': '2','num_threads': '7','latency_avg': '7',\n 'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},\n {3: {'num_clients': '2','num_threads': '7','latency_avg': '7',\n 'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},\n ]\n where keys{1,2,3} are run-IDs\n\n \"\"\"\n match = data.split(\"PGBench Results\")\n list_data = []\n for i in range(2, len(match)):\n log = \"\".join(match[i].split(\"\\n\"))\n pgsql_data = dict()\n pgsql_data[i - 1] = {}\n clients = re.search(r\"scaling_factor\\':\\s+(\\d+),\", log)\n if clients and clients.group(1):\n pgsql_data[i - 1][\"scaling_factor\"] = clients.group(1)\n clients = re.search(r\"number_of_clients\\':\\s+(\\d+),\", log)\n if clients and clients.group(1):\n pgsql_data[i - 1][\"num_clients\"] = clients.group(1)\n threads = re.search(r\"number_of_threads\\':\\s+(\\d+)\", 
log)\n if threads and threads.group(1):\n pgsql_data[i - 1][\"num_threads\"] = threads.group(1)\n clients = re.search(r\"number_of_transactions_per_client\\':\\s+(\\d+),\", log)\n if clients and clients.group(1):\n pgsql_data[i - 1][\"number_of_transactions_per_client\"] = clients.group(1)\n clients = re.search(\n r\"number_of_transactions_actually_processed\\':\\s+(\\d+),\", log\n )\n if clients and clients.group(1):\n pgsql_data[i - 1][\n \"number_of_transactions_actually_processed\"\n ] = clients.group(1)\n lat_avg = re.search(r\"latency_average_ms\\':\\s+(\\d+)\", log)\n if lat_avg and lat_avg.group(1):\n pgsql_data[i - 1][\"latency_avg\"] = lat_avg.group(1)\n lat_stddev = re.search(r\"latency_stddev_ms\\':\\s+(\\d+)\", log)\n if lat_stddev and lat_stddev.group(1):\n pgsql_data[i - 1][\"lat_stddev\"] = lat_stddev.group(1)\n tps_incl = re.search(r\"tps_incl_con_est\\':\\s+(\\w+)\", log)\n if tps_incl and tps_incl.group(1):\n pgsql_data[i - 1][\"tps_incl\"] = tps_incl.group(1)\n tps_excl = re.search(r\"tps_excl_con_est\\':\\s+(\\w+)\", log)\n if tps_excl and tps_excl.group(1):\n pgsql_data[i - 1][\"tps_excl\"] = tps_excl.group(1)\n list_data.append(pgsql_data)\n\n return list_data\n\n\ndef create_directory_path(path):\n \"\"\"\n Creates directory if path doesn't exists\n \"\"\"\n path = os.path.expanduser(path)\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n log.debug(f\"{path} already exists\")\n\n\ndef ocsci_log_path():\n \"\"\"\n Construct the full path for the log directory.\n\n Returns:\n str: full path for ocs-ci log directory\n\n \"\"\"\n return os.path.expanduser(\n os.path.join(config.RUN[\"log_dir\"], f\"ocs-ci-logs-{config.RUN['run_id']}\")\n )\n\n\ndef get_testrun_name():\n \"\"\"\n Prepare testrun ID for Polarion (and other reports).\n\n Returns:\n str: String containing testrun name\n\n \"\"\"\n markers = config.RUN[\"cli_params\"].get(\"-m\", \"\").replace(\" \", \"-\")\n us_ds = config.REPORTING.get(\"us_ds\")\n if us_ds.upper() == \"US\":\n us_ds = \"Upstream\"\n elif us_ds.upper() == \"DS\":\n us_ds = \"Downstream\"\n ocp_version = \".\".join(config.DEPLOYMENT.get(\"installer_version\").split(\".\")[:-2])\n ocp_version_string = f\"OCP{ocp_version}\" if ocp_version else \"\"\n ocs_version = config.ENV_DATA.get(\"ocs_version\")\n ocs_version_string = f\"OCS{ocs_version}\" if ocs_version else \"\"\n worker_os = \"RHEL\" if config.ENV_DATA.get(\"rhel_workers\") else \"RHCOS\"\n build_user = None\n baremetal_config = None\n if config.ENV_DATA.get(\"mon_type\"):\n baremetal_config = (\n f\"MON {config.ENV_DATA.get('mon_type').upper()} \"\n f\"OSD {config.ENV_DATA.get('osd_type').upper()}\"\n )\n\n lso_deployment = \"\"\n if not baremetal_config and config.DEPLOYMENT.get(\"local_storage\"):\n lso_deployment = \"LSO \"\n\n if config.REPORTING.get(\"display_name\"):\n testrun_name = config.REPORTING.get(\"display_name\")\n else:\n build_user = config.REPORTING.get(\"build_user\")\n testrun_name = (\n f\"{config.ENV_DATA.get('platform', '').upper()} \"\n f\"{config.ENV_DATA.get('deployment_type', '').upper()} \"\n )\n if baremetal_config:\n testrun_name = f\"LSO {baremetal_config} {testrun_name}\"\n\n testrun_name = (\n f\"{testrun_name}\"\n f\"{get_az_count()}AZ \"\n f\"{worker_os} \"\n f\"{lso_deployment}\"\n f\"{config.ENV_DATA.get('master_replicas')}M \"\n f\"{config.ENV_DATA.get('worker_replicas')}W \"\n f\"{markers}\"\n )\n testrun_name = (\n f\"{ocs_version_string} {us_ds} {ocp_version_string} \" f\"{testrun_name}\"\n )\n if build_user:\n testrun_name = 
f\"{build_user} {testrun_name}\"\n # replace invalid character(s) by '-'\n testrun_name = testrun_name.translate(\n str.maketrans({key: \"-\" for key in \"\"\" \\\\/.:*\"<>|~!@#$?%^&'*(){}+`,=\\t\"\"\"})\n )\n log.info(\"testrun_name: %s\", testrun_name)\n return testrun_name\n\n\ndef get_az_count():\n \"\"\"\n Using a number of different configuration attributes, determine how many\n availability zones the cluster is configured for.\n\n Returns:\n int: number of availability zones\n\n \"\"\"\n if config.ENV_DATA.get(\"availability_zone_count\"):\n return int(config.ENV_DATA.get(\"availability_zone_count\"))\n elif config.ENV_DATA.get(\"worker_availability_zones\"):\n return len(config.ENV_DATA.get(\"worker_availability_zones\"))\n elif config.ENV_DATA.get(\"platform\") == \"vsphere\":\n return 1\n else:\n return 1\n\n\ndef ceph_health_check(namespace=None, tries=20, delay=30):\n \"\"\"\n Args:\n namespace (str): Namespace of OCS\n (default: config.ENV_DATA['cluster_namespace'])\n tries (int): Number of retries\n delay (int): Delay in seconds between retries\n\n Returns:\n bool: ceph_health_check_base return value with default retries of 20,\n delay of 30 seconds if default values are not changed via args.\n\n \"\"\"\n if config.ENV_DATA[\"platform\"].lower() == constants.IBM_POWER_PLATFORM:\n delay = 60\n return retry(\n (CephHealthException, CommandFailed, subprocess.TimeoutExpired),\n tries=tries,\n delay=delay,\n backoff=1,\n )(ceph_health_check_base)(namespace)\n\n\ndef ceph_health_check_base(namespace=None):\n \"\"\"\n Exec `ceph health` cmd on tools pod to determine health of cluster.\n\n Args:\n namespace (str): Namespace of OCS\n (default: config.ENV_DATA['cluster_namespace'])\n\n Raises:\n CephHealthException: If the ceph health returned is not HEALTH_OK\n CommandFailed: If the command to retrieve the tools pod name or the\n command to get ceph health returns a non-zero exit code\n Returns:\n boolean: True if HEALTH_OK\n\n \"\"\"\n namespace = namespace or config.ENV_DATA[\"cluster_namespace\"]\n run_cmd(\n f\"oc wait --for condition=ready pod \"\n f\"-l app=rook-ceph-tools \"\n f\"-n {namespace} \"\n f\"--timeout=120s\"\n )\n tools_pod = run_cmd(\n f\"oc -n {namespace} get pod -l 'app=rook-ceph-tools' \"\n f\"-o jsonpath='{{.items[0].metadata.name}}'\",\n timeout=60,\n )\n health = run_cmd(f\"oc -n {namespace} exec {tools_pod} -- ceph health\")\n if health.strip() == \"HEALTH_OK\":\n log.info(\"Ceph cluster health is HEALTH_OK.\")\n return True\n else:\n raise CephHealthException(f\"Ceph cluster health is not OK. 
Health: {health}\")\n\n\ndef get_rook_repo(branch=\"master\", to_checkout=None):\n \"\"\"\n Clone and checkout the rook repository to specific branch/commit.\n\n Args:\n branch (str): Branch name to checkout\n to_checkout (str): Commit id or tag to checkout\n\n \"\"\"\n cwd = constants.ROOK_REPO_DIR\n if not os.path.isdir(cwd):\n log.info(f\"Cloning rook repository into {cwd}.\")\n run_cmd(f\"git clone {constants.ROOK_REPOSITORY} {cwd}\")\n else:\n log.info(\n f\"The rook directory {cwd} already exists, ocs-ci will skip the \"\n f\"clone of rook repository.\"\n )\n log.info(\"Fetching latest changes from rook repository.\")\n run_cmd(\"git fetch --all\", cwd=cwd)\n log.info(f\"Checkout rook repository to specific branch: {branch}\")\n run_cmd(f\"git checkout {branch}\", cwd=cwd)\n log.info(f\"Reset branch: {branch} with latest changes\")\n run_cmd(f\"git reset --hard origin/{branch}\", cwd=cwd)\n if to_checkout:\n run_cmd(f\"git checkout {to_checkout}\", cwd=cwd)\n\n\ndef clone_repo(url, location, branch=\"master\", to_checkout=None):\n \"\"\"\n Clone a repository or checkout latest changes if it already exists at\n specified location.\n\n Args:\n url (str): location of the repository to clone\n location (str): path where the repository will be cloned to\n branch (str): branch name to checkout\n to_checkout (str): commit id or tag to checkout\n \"\"\"\n if not os.path.isdir(location):\n log.info(\"Cloning repository into %s\", location)\n run_cmd(f\"git clone {url} {location}\")\n else:\n log.info(\"Repository already cloned at %s, skipping clone\", location)\n log.info(\"Fetching latest changes from repository\")\n run_cmd(\"git fetch --all\", cwd=location)\n log.info(\"Checking out repository to specific branch: %s\", branch)\n run_cmd(f\"git checkout {branch}\", cwd=location)\n log.info(\"Reset branch: %s with latest changes\", branch)\n run_cmd(f\"git reset --hard origin/{branch}\", cwd=location)\n if to_checkout:\n run_cmd(f\"git checkout {to_checkout}\", cwd=location)\n\n\ndef get_latest_ds_olm_tag(upgrade=False, latest_tag=None):\n \"\"\"\n This function returns latest tag of OCS downstream registry or one before\n latest if upgrade parameter is True\n\n Args:\n upgrade (str): If True then it returns one version of the build before\n the latest.\n latest_tag (str): Tag of the latest build. 
If not specified\n config.DEPLOYMENT['default_latest_tag'] or 'latest' will be used.\n\n Returns:\n str: latest tag for downstream image from quay registry\n\n Raises:\n TagNotFoundException: In case no tag found\n\n \"\"\"\n latest_tag = latest_tag or config.DEPLOYMENT.get(\"default_latest_tag\", \"latest\")\n tags = get_ocs_olm_operator_tags()\n latest_image = None\n ocs_version = config.ENV_DATA[\"ocs_version\"]\n upgrade_ocs_version = config.UPGRADE.get(\"upgrade_ocs_version\")\n use_rc_build = config.UPGRADE.get(\"use_rc_build\")\n previous_rc_build = config.UPGRADE.get(\"previous_rc_build\")\n upgrade_version_change = upgrade_ocs_version and ocs_version != upgrade_ocs_version\n if upgrade and use_rc_build and previous_rc_build and not upgrade_version_change:\n latest_tag = previous_rc_build\n if upgrade_version_change:\n upgrade = False\n for tag in tags:\n if tag[\"name\"] == latest_tag:\n latest_image = tag[\"manifest_digest\"]\n break\n if not latest_image:\n raise TagNotFoundException(\"Couldn't find latest tag!\")\n latest_tag_found = False\n for tag in tags:\n if not upgrade:\n if (\n not any(t in tag[\"name\"] for t in constants.LATEST_TAGS)\n and tag[\"manifest_digest\"] == latest_image\n ):\n return tag[\"name\"]\n if upgrade:\n if not latest_tag_found and tag[\"name\"] == latest_tag:\n latest_tag_found = True\n continue\n if not latest_tag_found:\n continue\n if (\n not any(t in tag[\"name\"] for t in constants.LATEST_TAGS)\n and tag[\"manifest_digest\"] != latest_image\n and ocs_version in tag[\"name\"]\n ):\n if config.UPGRADE.get(\"use_rc_build\") and \"rc\" not in tag[\"name\"]:\n continue\n return tag[\"name\"]\n raise TagNotFoundException(\"Couldn't find any desired tag!\")\n\n\ndef get_next_version_available_for_upgrade(current_tag):\n \"\"\"\n This function returns the tag built after the current_version\n\n Args:\n current_tag (str): Current build tag from which to search the next one\n build tag.\n\n Returns:\n str: tag for downstream image from quay registry built after\n the current_tag.\n\n Raises:\n TagNotFoundException: In case no tag suitable for upgrade found\n\n \"\"\"\n tags = get_ocs_olm_operator_tags()\n if any(t in current_tag for t in constants.LATEST_TAGS):\n return current_tag\n current_tag_index = None\n for index, tag in enumerate(tags):\n if tag[\"name\"] == current_tag:\n if index < 2:\n raise TagNotFoundException(\"Couldn't find tag for upgrade!\")\n current_tag_index = index\n break\n sliced_reversed_tags = tags[:current_tag_index]\n sliced_reversed_tags.reverse()\n ocs_version = config.ENV_DATA[\"ocs_version\"]\n for tag in sliced_reversed_tags:\n if (\n not any(t in tag[\"name\"] for t in constants.LATEST_TAGS)\n and ocs_version in tag[\"name\"]\n ):\n if config.UPGRADE.get(\"use_rc_build\") and \"rc\" not in tag[\"name\"]:\n continue\n return tag[\"name\"]\n raise TagNotFoundException(\"Couldn't find any tag!\")\n\n\ndef load_auth_config():\n \"\"\"\n Load the authentication config YAML from /data/auth.yaml\n\n Raises:\n FileNotFoundError: if the auth config is not found\n\n Returns:\n dict: A dictionary reprensenting the YAML file\n\n \"\"\"\n log.info(\"Retrieving the authentication config dictionary\")\n auth_file = os.path.join(constants.TOP_DIR, \"data\", constants.AUTHYAML)\n try:\n with open(auth_file) as f:\n return yaml.safe_load(f)\n except FileNotFoundError:\n log.warning(\n f\"Unable to find the authentication configuration at {auth_file}, \"\n f\"please refer to the getting started guide ({constants.AUTH_CONFIG_DOCS})\"\n )\n 
return {}\n\n\ndef get_ocs_olm_operator_tags(limit=100):\n \"\"\"\n Query the OCS OLM Operator repo and retrieve a list of tags. Since we are limited\n to 100 tags per page, we end up making several API calls and combining the results\n into a single list of tags.\n\n Args:\n limit: the number of tags to limit the request to\n\n Raises:\n KeyError: if the auth config isn't setup properly\n requests.RequestException: if the response return code is not ok\n\n Returns:\n list: OCS OLM Operator tags\n\n \"\"\"\n try:\n quay_access_token = load_auth_config()[\"quay\"][\"access_token\"]\n except (KeyError, TypeError):\n log.error(\n \"Unable to retrieve the access token for quay, please refer to \"\n f\"the getting started guide ({constants.AUTH_CONFIG_DOCS}) \"\n \"to properly setup your authentication configuration\"\n )\n raise\n headers = {\"Authorization\": f\"Bearer {quay_access_token}\"}\n image = \"ocs-registry\"\n try:\n ocs_version = float(config.ENV_DATA.get(\"ocs_version\"))\n if ocs_version < 4.5:\n image = \"ocs-olm-operator\"\n except (ValueError, TypeError):\n log.warning(\"Invalid ocs_version given, defaulting to ocs-registry image\")\n pass\n all_tags = []\n page = 1\n while True:\n log.info(f\"Retrieving OCS OLM Operator tags (limit {limit}, page {page})\")\n resp = requests.get(\n constants.OPERATOR_CS_QUAY_API_QUERY.format(\n tag_limit=limit,\n image=image,\n page=page,\n ),\n headers=headers,\n )\n if not resp.ok:\n raise requests.RequestException(resp.json())\n tags = resp.json()[\"tags\"]\n if len(tags) == 0:\n log.info(\"No more tags to retrieve\")\n break\n log.debug(tags)\n all_tags.extend(tags)\n page += 1\n return all_tags\n\n\ndef check_if_executable_in_path(exec_name):\n \"\"\"\n Checks whether an executable can be found in the $PATH\n\n Args:\n exec_name: Name of executable to look for\n\n Returns:\n Boolean: Whether the executable was found\n\n \"\"\"\n return which(exec_name) is not None\n\n\ndef upload_file(server, localpath, remotepath, user=None, password=None, key_file=None):\n \"\"\"\n Upload a file to remote server\n\n Args:\n server (str): Name of the server to upload\n localpath (str): Local file to upload\n remotepath (str): Target path on the remote server. 
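# --- Simplified sketch of the paginated retrieval in get_ocs_olm_operator_tags()
# The URL below is a placeholder (the real one comes from
# constants.OPERATOR_CS_QUAY_API_QUERY) and the auth header is omitted; the point
# is the page loop above, which stops on the first empty page.
import requests

all_tags = []
page = 1
while True:
    resp = requests.get(
        "https://quay.example.com/api/v1/repository/org/image/tag/",  # placeholder
        params={"limit": 100, "page": page},
    )
    resp.raise_for_status()
    tags = resp.json().get("tags", [])
    if not tags:
        break  # empty page -> nothing left to fetch
    all_tags.extend(tags)
    page += 1
# -------------------------------------------------------------------------------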
filename should be included\n user (str): User to use for the remote connection\n\n \"\"\"\n if not user:\n user = \"root\"\n try:\n ssh = SSHClient()\n ssh.set_missing_host_key_policy(AutoAddPolicy())\n if password:\n ssh.connect(hostname=server, username=user, password=password)\n else:\n log.info(key_file)\n ssh.connect(hostname=server, username=user, key_filename=key_file)\n sftp = ssh.open_sftp()\n log.info(f\"uploading {localpath} to {user}@{server}:{remotepath}\")\n sftp.put(localpath, remotepath)\n sftp.close()\n ssh.close()\n except AuthenticationException as authException:\n log.error(f\"Authentication failed: {authException}\")\n raise authException\n except SSHException as sshException:\n log.error(f\"SSH connection failed: {sshException}\")\n raise sshException\n\n\ndef read_file_as_str(filepath):\n \"\"\"\n Reads the file content\n\n Args:\n filepath (str): File to read\n\n Returns:\n str : File contents in string\n\n \"\"\"\n with open(rf\"{filepath}\") as fd:\n content = fd.read()\n return content\n\n\ndef replace_content_in_file(file, old, new, match_and_replace_line=False):\n \"\"\"\n Replaces contents in file, if old value is not found, it adds\n new value to the file\n\n Args:\n file (str): Name of the file in which contents will be replaced\n old (str): Data to search for\n new (str): Data to replace the old value\n match_and_replace_line (bool): If True, it will match a line if\n `old` pattern is found in the line. The whole line will be replaced\n with `new` content.\n Otherwise it will replace only `old` string with `new` string but\n the rest of the line will be intact. This is the default option.\n\n \"\"\"\n # Read the file\n with open(rf\"{file}\", \"r\") as fd:\n file_data = [line.rstrip(\"\\n\") for line in fd.readlines()]\n\n if match_and_replace_line:\n # Replace the whole line with `new` string if the line contains `old`\n # string pattern.\n file_data = [new if old in line else line for line in file_data]\n else:\n # Replace the old string by new\n file_data = [\n line.replace(old, new) if old in line else line for line in file_data\n ]\n updated_data = [line for line in file_data if new in line]\n # In case the old pattern wasn't found it will be added as first line\n if not updated_data:\n file_data.insert(0, new)\n file_data = [f\"{line}\\n\" for line in file_data]\n # Write the file out again\n with open(rf\"{file}\", \"w\") as fd:\n fd.writelines(file_data)\n\n\n@retry((CommandFailed), tries=100, delay=10, backoff=1)\ndef wait_for_co(operator):\n \"\"\"\n Waits for ClusterOperator to created\n\n Args:\n operator (str): Name of the ClusterOperator\n\n \"\"\"\n from ocs_ci.ocs.ocp import OCP\n\n ocp = OCP(kind=\"ClusterOperator\")\n ocp.get(operator)\n\n\ndef censor_values(data_to_censor):\n \"\"\"\n This function censor string and numeric values in dictionary based on\n keys that match pattern defined in config_keys_patterns_to_censor in\n constants. 
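# --- Hypothetical call of upload_file() defined above ---------------------------
# Host, paths and key file are made up; with key-based auth the password argument
# is simply left out, matching the branch taken in the function body.
upload_file(
    server="helper.example.com",
    localpath="/tmp/ocp_4_10.repo",
    remotepath="/etc/yum.repos.d/ocp_4_10.repo",
    user="root",
    key_file="/home/user/.ssh/id_rsa",
)
# -------------------------------------------------------------------------------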
It is performed recursively for nested dictionaries.\n\n Args:\n data_to_censor (dict): Data to censor.\n\n Returns:\n dict: filtered data\n\n \"\"\"\n for key in data_to_censor:\n if isinstance(data_to_censor[key], dict):\n censor_values(data_to_censor[key])\n elif isinstance(data_to_censor[key], (str, int, float)):\n for pattern in constants.config_keys_patterns_to_censor:\n if pattern in key.lower():\n data_to_censor[key] = \"*\" * 5\n return data_to_censor\n\n\ndef dump_config_to_file(file_path):\n \"\"\"\n Dump the config to the yaml file with censored secret values.\n\n Args:\n file_path (str): Path to file where to write the configuration.\n\n \"\"\"\n config_copy = deepcopy(config.to_dict())\n censor_values(config_copy)\n with open(file_path, \"w+\") as fs:\n yaml.safe_dump(config_copy, fs)\n\n\ndef create_rhelpod(namespace, pod_name, timeout=300):\n \"\"\"\n Creates the RHEL pod\n\n Args:\n namespace (str): Namespace to create RHEL pod\n pod_name (str): Pod name\n timeout (int): wait time for RHEL pod to be in Running state\n\n Returns:\n pod: Pod instance for RHEL\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.helpers import helpers\n\n rhelpod_obj = helpers.create_pod(\n namespace=namespace,\n pod_name=pod_name,\n pod_dict_path=constants.RHEL_7_7_POD_YAML,\n )\n helpers.wait_for_resource_state(rhelpod_obj, constants.STATUS_RUNNING, timeout)\n return rhelpod_obj\n\n\ndef check_timeout_reached(start_time, timeout, err_msg=None):\n \"\"\"\n Check if timeout reached and if so raise the exception.\n\n Args:\n start_time (time): Star time of the operation.\n timeout (int): Timeout in seconds.\n err_msg (str): Error message for the exception.\n\n Raises:\n TimeoutException: In case the timeout reached.\n\n \"\"\"\n msg = f\"Timeout {timeout} reached!\"\n if err_msg:\n msg += \" Error: {err_msg}\"\n\n if timeout < (time.time() - start_time):\n raise TimeoutException(msg)\n\n\ndef convert_yaml2tfvars(yaml):\n \"\"\"\n Converts yaml file to tfvars. 
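# --- Hypothetical usage of censor_values() defined above ------------------------
# Assumes "password"/"secret" style keys match one of the patterns in
# constants.config_keys_patterns_to_censor (that list lives in constants and is
# not shown here).
sample = {"user": "admin", "password": "hunter2", "nested": {"secret_token": "abc"}}
censor_values(sample)
# values under matching keys are replaced in place by "*****", e.g.
# {"user": "admin", "password": "*****", "nested": {"secret_token": "*****"}}
# -------------------------------------------------------------------------------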
It creates the tfvars with the\n same filename in the required format which is used for deployment.\n\n Args:\n yaml (str): File path to yaml\n\n Returns:\n str: File path to tfvars\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.utility.templating import load_yaml\n\n data = load_yaml(yaml)\n tfvars_file = os.path.splitext(yaml)[0]\n log.debug(f\"Converting {yaml} to {tfvars_file}\")\n with open(tfvars_file, \"w+\") as fd:\n for key, val in data.items():\n if key == \"control_plane_ignition\":\n fd.write(\"control_plane_ignition = <<END_OF_MASTER_IGNITION\\n\")\n fd.write(f\"{val}\\n\")\n fd.write(\"END_OF_MASTER_IGNITION\\n\")\n continue\n\n if key == \"compute_ignition\":\n fd.write(\"compute_ignition = <<END_OF_WORKER_IGNITION\\n\")\n fd.write(f\"{val}\\n\")\n fd.write(\"END_OF_WORKER_IGNITION\\n\")\n continue\n\n if key == \"vm_dns_addresses\":\n fd.write(f'vm_dns_addresses = [\"{val}\"]\\n')\n continue\n\n fd.write(key)\n fd.write(\" = \")\n fd.write('\"')\n fd.write(f\"{val}\")\n fd.write('\"\\n')\n\n return tfvars_file\n\n\ndef remove_keys_from_tf_variable_file(tf_file, keys):\n \"\"\"\n Removes the keys from the tf files and convert to json format\n\n Args:\n tf_file (str): path to tf file\n keys (list): list of keys to remove\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.utility.templating import dump_data_to_json\n\n with open(tf_file, \"r\") as fd:\n obj = hcl2.load(fd)\n for key in keys:\n obj[\"variable\"].pop(key)\n\n dump_data_to_json(obj, f\"{tf_file}.json\")\n os.rename(tf_file, f\"{tf_file}.backup\")\n\n\ndef get_kubeadmin_password():\n filename = os.path.join(\n config.ENV_DATA[\"cluster_path\"], config.RUN[\"password_location\"]\n )\n with open(filename) as f:\n return f.read()\n\n\ndef get_infra_id(cluster_path):\n \"\"\"\n Get infraID from metadata.json in given cluster_path\n\n Args:\n cluster_path: path to cluster install directory\n\n Returns:\n str: metadata.json['infraID']\n\n \"\"\"\n metadata_file = os.path.join(cluster_path, \"metadata.json\")\n with open(metadata_file) as f:\n metadata = json.load(f)\n return metadata[\"infraID\"]\n\n\ndef get_cluster_name(cluster_path):\n \"\"\"\n Get clusterName from metadata.json in given cluster_path\n\n Args:\n cluster_path: path to cluster install directory\n\n Returns:\n str: metadata.json['clusterName']\n\n \"\"\"\n metadata_file = os.path.join(cluster_path, \"metadata.json\")\n with open(metadata_file) as f:\n metadata = json.load(f)\n return metadata[\"clusterName\"]\n\n\ndef skipif_ocp_version(expressions):\n \"\"\"\n This function evaluates the condition for test skip\n based on expression\n\n Args:\n expressions (str OR list): condition for which we need to check,\n eg: A single expression string '>=4.2' OR\n A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']\n\n Return:\n 'True' if test needs to be skipped else 'False'\n\n \"\"\"\n skip_this = True\n ocp_version = get_running_ocp_version()\n expr_list = [expressions] if isinstance(expressions, str) else expressions\n for expr in expr_list:\n comparision_str = ocp_version + expr\n skip_this = skip_this and eval(comparision_str)\n # skip_this will be either True or False after eval\n return skip_this\n\n\ndef skipif_ocs_version(expressions):\n \"\"\"\n This function evaluates the condition for test skip\n based on expression\n\n Args:\n expressions (str OR list): condition for which we need to check,\n eg: A single expression string '>=4.2' OR\n A list of expressions like ['<4.3', '>4.2'], ['<=4.3', '>=4.2']\n\n 
Return:\n 'True' if test needs to be skipped else 'False'\n \"\"\"\n expr_list = [expressions] if isinstance(expressions, str) else expressions\n return any(eval(config.ENV_DATA[\"ocs_version\"] + expr) for expr in expr_list)\n\n\ndef skipif_ui_not_support(ui_test):\n \"\"\"\n This function evaluates the condition for ui test skip\n based on ui_test expression\n\n Args:\n ui_test (str): condition for which we need to check,\n\n Return:\n 'True' if test needs to be skipped else 'False'\n\n \"\"\"\n from ocs_ci.ocs.ui.views import locators\n\n ocp_version = get_running_ocp_version()\n if (\n config.ENV_DATA[\"platform\"].lower() == constants.IBMCLOUD_PLATFORM\n or config.ENV_DATA[\"platform\"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM\n or config.ENV_DATA[\"platform\"].lower() == constants.ROSA_PLATFORM\n ):\n return True\n try:\n locators[ocp_version][ui_test]\n except KeyError:\n return True\n return False\n\n\ndef get_ocs_version_from_image(image):\n \"\"\"\n Parse major.minor version from OCS image tag.\n\n Args:\n image (str): image in format url:tag\n\n Returns\n str: Version in x.y format\n\n Raises:\n ValueError: In case of the tag which we cannot parse to version.\n\n \"\"\"\n try:\n version = image.rsplit(\":\", 1)[1].lstrip(\"latest-\").lstrip(\"stable-\")\n version = Version.coerce(version)\n return \"{major}.{minor}\".format(major=version.major, minor=version.minor)\n except ValueError:\n log.error(f\"The version: {version} couldn't be parsed!\")\n raise\n\n\ndef get_available_ocp_versions(channel):\n \"\"\"\n Find all available OCP versions for specific channel.\n\n Args:\n channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)\n\n Returns\n list: Sorted list with OCP versions for specified channel.\n\n \"\"\"\n headers = {\"Accept\": \"application/json\"}\n req = requests.get(\n constants.OPENSHIFT_UPGRADE_INFO_API.format(channel=channel), headers=headers\n )\n data = req.json()\n versions = [Version(node[\"version\"]) for node in data[\"nodes\"]]\n versions.sort()\n return versions\n\n\ndef get_latest_ocp_version(channel, index=-1):\n \"\"\"\n Find latest OCP version for specific channel.\n\n Args:\n channel (str): Channel of OCP (e.g. stable-4.2 or fast-4.2)\n index (int): Index to get from all available versions list\n e.g. default -1 is latest version (version[-1]). 
If you want to get\n previous version pass index -2 and so on.\n\n Returns\n str: Latest OCP version for specified channel.\n\n \"\"\"\n versions = get_available_ocp_versions(channel)\n return str(versions[index])\n\n\ndef load_config_file(config_file):\n \"\"\"\n Loads config file to the ocs-ci config\n\n Args:\n config_file (str): Path to yaml config file.\n\n Raises:\n FileNotFoundError: In the case the config file not found.\n\n \"\"\"\n config_file = os.path.expanduser(config_file)\n assert os.path.exists(config_file), f\"Config file {config_file} doesn't exist!\"\n with open(os.path.abspath(os.path.expanduser(config_file)), \"r\") as file_stream:\n custom_config_data = yaml.safe_load(file_stream)\n config.update(custom_config_data)\n\n\ndef destroy_cluster(installer, cluster_path, log_level=\"DEBUG\"):\n \"\"\"\n Destroy OCP cluster specific\n\n\n Args:\n installer (str): The path to the installer binary\n cluster_path (str): The path of the cluster\n log_level (str): log level openshift-installer (default: DEBUG)\n\n \"\"\"\n destroy_cmd = (\n f\"{installer} destroy cluster \"\n f\"--dir {cluster_path} \"\n f\"--log-level {log_level}\"\n )\n\n try:\n # Execute destroy cluster using OpenShift installer\n log.info(f\"Destroying cluster defined in {cluster_path}\")\n run_cmd(destroy_cmd, timeout=1200)\n except CommandFailed:\n log.error(traceback.format_exc())\n raise\n except Exception:\n log.error(traceback.format_exc())\n\n\ndef config_to_string(config):\n \"\"\"\n Convert ConfigParser object to string in INI format.\n\n Args:\n config (obj): ConfigParser object\n\n Returns:\n str: Config in one string\n\n \"\"\"\n strio = io.StringIO()\n config.write(strio, space_around_delimiters=False)\n return strio.getvalue()\n\n\nclass AZInfo(object):\n \"\"\"\n A class for getting different az numbers across calls\n \"\"\"\n\n zone_number = 0\n\n def get_zone_number(self):\n \"\"\"\n Increment current zone_number and perform modulus op\n to roll-on to next available number\n\n Returns:\n int: zone number index\n \"\"\"\n prev = AZInfo.zone_number\n AZInfo.zone_number += 1\n AZInfo.zone_number %= get_az_count()\n return prev\n\n\ndef convert_device_size(unformatted_size, units_to_covert_to):\n \"\"\"\n Convert a string representing a size to an int according to the given units\n to convert to\n\n Args:\n unformatted_size (str): The size to convert (i.e, '1Gi'/'100Mi')\n units_to_covert_to (str): The units to convert the size to (i.e, TB/GB/MB)\n\n Returns:\n int: The converted size\n\n \"\"\"\n units = unformatted_size[-2:]\n abso = int(unformatted_size[:-2])\n conversion = {\n \"TB\": {\"Ti\": abso, \"Gi\": abso / 1000, \"Mi\": abso / 1e6, \"Ki\": abso / 1e9},\n \"GB\": {\"Ti\": abso * 1000, \"Gi\": abso, \"Mi\": abso / 1000, \"Ki\": abso / 1e6},\n \"MB\": {\"Ti\": abso * 1e6, \"Gi\": abso * 1000, \"Mi\": abso, \"Ki\": abso / 1000},\n \"KB\": {\"Ti\": abso * 1e9, \"Gi\": abso * 1e6, \"Mi\": abso * 1000, \"Ki\": abso},\n \"B\": {\"Ti\": abso * 1e12, \"Gi\": abso * 1e9, \"Mi\": abso * 1e6, \"Ki\": abso * 1000},\n }\n return conversion[units_to_covert_to][units]\n\n\ndef prepare_customized_pull_secret(images=None):\n \"\"\"\n Prepare customized pull-secret containing auth section related to given\n image(s). 
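# --- Usage sketch for convert_device_size() defined above -----------------------
# The results follow directly from the conversion table inside the function.
print(convert_device_size("1Ti", "GB"))    # -> 1000
print(convert_device_size("512Mi", "GB"))  # -> 0.512
# -------------------------------------------------------------------------------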
If image(s) not defined or no related section is found, it will\n use whole content of pull-secret.\n\n Args:\n images (str, list): image (or images) to match with auth section\n\n Returns:\n NamedTemporaryFile: prepared pull-secret\n\n \"\"\"\n log.debug(f\"Prepare customized pull-secret for images: {images}\")\n if type(images) == str:\n images = [images]\n # load pull-secret file to pull_secret dict\n pull_secret_path = os.path.join(constants.TOP_DIR, \"data\", \"pull-secret\")\n with open(pull_secret_path) as pull_secret_fo:\n pull_secret = json.load(pull_secret_fo)\n\n authfile_content = {\"auths\": {}}\n # if images defined, try to find auth section related to specified images\n if images:\n for image in images:\n # find all auths which might be related to the specified image\n tmp_auths = [auth for auth in pull_secret[\"auths\"] if auth in image]\n # get the most specific auth for particular image\n tmp_auths = sorted(tmp_auths, key=len, reverse=True)\n if tmp_auths:\n # if there is match to particular auth, prepare authfile just with the\n # matching auth\n auth = tmp_auths[0]\n # as key use only server name, without namespace\n authfile_content[\"auths\"][auth.split(\"/\", 1)[0]] = pull_secret[\"auths\"][\n auth\n ]\n\n if not authfile_content[\"auths\"]:\n authfile_content = pull_secret\n\n # create temporary auth file\n authfile_fo = NamedTemporaryFile(mode=\"w\", prefix=\"authfile_\")\n json.dump(authfile_content, authfile_fo)\n # ensure the content will be saved into the file\n authfile_fo.flush()\n return authfile_fo\n\n\ndef inspect_image(image, authfile_fo):\n \"\"\"\n Inspect image\n\n Args:\n image (str): image to inspect\n authfile_fo (NamedTemporaryFile): pull-secret required for pulling the given image\n\n Returns:\n dict: json object of the inspected image\n\n \"\"\"\n # pull original image (to be able to inspect it)\n exec_cmd(f\"podman image pull {image} --authfile {authfile_fo.name}\")\n # inspect the image\n cmd_result = exec_cmd(f\"podman image inspect {image}\")\n image_inspect = json.loads(cmd_result.stdout)\n return image_inspect\n\n\ndef get_image_with_digest(image):\n \"\"\"\n Return image with sha256 digest for usage in disconnected environment\n\n Args:\n image (str): image\n\n Raises:\n UnexpectedImage: In case the image information is unexpected\n\n Returns:\n str: image with sha256 digest specification\n\n \"\"\"\n if \"@sha256:\" in image:\n return image\n with prepare_customized_pull_secret(image) as authfile_fo:\n image_inspect = inspect_image(image, authfile_fo)\n\n # we expect, that 'Digest' will match one of the images in 'RepoDigests',\n # if not, raise UnexpectedImage\n for image in image_inspect[0][\"RepoDigests\"]:\n if image_inspect[0][\"Digest\"] in image:\n return image\n else:\n raise UnexpectedImage(\n f\"Image digest ({image_inspect[0]['Digest']}) doesn't match with \"\n f\"any image from RepoDigests ({image_inspect[0]['RepoDigests']}).\"\n )\n\n\ndef login_to_mirror_registry(authfile):\n \"\"\"\n Login to mirror registry\n\n Args:\n authfile (str): authfile (pull-secret) path\n\n \"\"\"\n # load cluster info\n load_cluster_info()\n\n mirror_registry = config.DEPLOYMENT[\"mirror_registry\"]\n mirror_registry_user = config.DEPLOYMENT[\"mirror_registry_user\"]\n mirror_registry_password = config.DEPLOYMENT[\"mirror_registry_password\"]\n login_cmd = (\n f\"podman login --authfile {authfile} \"\n f\"{mirror_registry} -u {mirror_registry_user} \"\n f\"-p {mirror_registry_password} --tls-verify=false\"\n )\n exec_cmd(login_cmd, 
(mirror_registry_user, mirror_registry_password))\n\n\ndef mirror_image(image):\n \"\"\"\n Mirror image to mirror image registry.\n\n Args:\n image (str): image to be mirrored, can be defined just with name or\n with full url, with or without tag or digest\n\n Returns:\n str: the mirrored image link\n\n \"\"\"\n with prepare_customized_pull_secret(image) as authfile_fo:\n # login to mirror registry\n login_to_mirror_registry(authfile_fo.name)\n\n # if there is any tag specified, use it in the full image url,\n # otherwise use url with digest\n image_inspect = inspect_image(image, authfile_fo)\n if image_inspect[0].get(\"RepoTags\"):\n orig_image_full = image_inspect[0][\"RepoTags\"][0]\n else:\n orig_image_full = image_inspect[0][\"RepoDigests\"][0]\n # prepare mirrored image url\n mirror_registry = config.DEPLOYMENT[\"mirror_registry\"]\n mirrored_image = mirror_registry + re.sub(r\"^[^/]*\", \"\", orig_image_full)\n # mirror the image\n log.info(\n f\"Mirroring image '{image}' ('{orig_image_full}') to '{mirrored_image}'\"\n )\n exec_cmd(\n f\"oc image mirror --insecure --registry-config\"\n f\" {authfile_fo.name} {orig_image_full} {mirrored_image}\"\n )\n return mirrored_image\n\n\ndef update_container_with_mirrored_image(job_pod_dict):\n \"\"\"\n Update Job or Pod configuration dict with mirrored image (required for\n disconnected installation).\n\n Args:\n job_pod_dict (dict): dictionary with Job or Pod configuration\n\n Returns:\n dict: for disconnected installation, returns updated Job or Pod dict,\n for normal installation return unchanged job_pod_dict\n\n \"\"\"\n if config.DEPLOYMENT.get(\"disconnected\"):\n if \"containers\" in job_pod_dict[\"spec\"]:\n container = job_pod_dict[\"spec\"][\"containers\"][0]\n else:\n container = job_pod_dict[\"spec\"][\"template\"][\"spec\"][\"containers\"][0]\n container[\"image\"] = mirror_image(container[\"image\"])\n return job_pod_dict\n\n\ndef get_trim_mean(values, percentage=20):\n \"\"\"\n Get the trimmed mean of a list of values.\n Explanation: This function finds the arithmetic mean of given values,\n ignoring values outside the given limits.\n\n Args:\n values (list): The list of values\n percentage (int): The percentage to be trimmed\n\n Returns:\n float: Trimmed mean. In case trimmed mean calculation fails,\n the regular mean average is returned\n\n \"\"\"\n lower_limit = scoreatpercentile(values, percentage)\n upper_limit = scoreatpercentile(values, 100 - percentage)\n try:\n return tmean(values, limits=(lower_limit, upper_limit))\n except ValueError:\n log.warning(\n f\"Failed to calculate the trimmed mean of {values}. 
The \"\n f\"Regular mean average will be calculated instead\"\n )\n return sum(values) / len(values)\n\n\ndef set_selinux_permissions(workers=None):\n \"\"\"\n Workaround for #1777384 - enable container_use_cephfs on RHEL workers\n Ticket: RHSTOR-787, see more details in the issue: #1151\n\n Args:\n workers (list): List of worker nodes to set selinux permissions\n\n \"\"\"\n log.info(\"Running WA for ticket: RHSTOR-787\")\n from ocs_ci.ocs import ocp\n\n ocp_obj = ocp.OCP()\n cmd = [\"/usr/sbin/setsebool -P container_use_cephfs on\"]\n cmd_list = cmd.copy()\n if not workers:\n from ocs_ci.ocs.node import get_typed_worker_nodes\n\n worker_nodes = get_typed_worker_nodes(os_id=\"rhel\")\n else:\n worker_nodes = workers\n\n for worker in worker_nodes:\n node = worker.get().get(\"metadata\").get(\"name\") if not workers else worker\n log.info(f\"{node} is a RHEL based worker - applying '{cmd_list}'\")\n if config.ENV_DATA[\"platform\"] == constants.IBMCLOUD_PLATFORM:\n retry(CommandFailed, tries=10, delay=3, backoff=2)(\n ocp_obj.exec_oc_debug_cmd\n )(node=node, cmd_list=cmd_list)\n else:\n retry(CommandFailed)(ocp_obj.exec_oc_debug_cmd)(\n node=node, cmd_list=cmd_list\n )\n\n\ndef set_registry_to_managed_state():\n \"\"\"\n In order to be able to deploy from stage we need to change\n image registry config to Managed state.\n More described in BZs:\n https://bugzilla.redhat.com/show_bug.cgi?id=1806593\n https://bugzilla.redhat.com/show_bug.cgi?id=1807471#c3\n We need to change to managed state as described here:\n https://github.com/red-hat-storage/ocs-ci/issues/1436\n So this is not suppose to be deleted as WA case we really need to do\n this operation for OCS deployment as was originally done here:\n https://github.com/red-hat-storage/ocs-ci/pull/1437\n Currently it has to be moved here to enable CA certificate to be\n properly propagated for the stage deployment as mentioned in BZ.\n \"\"\"\n # In RHV platform config is already set to Managed and storage pre-configured\n on_prem_platform_to_exclude = [constants.RHV_PLATFORM]\n platform_list_to_exclude = constants.CLOUD_PLATFORMS + on_prem_platform_to_exclude\n if config.ENV_DATA[\"platform\"] not in platform_list_to_exclude:\n cluster_config = yaml.safe_load(\n exec_cmd(f\"oc get {constants.IMAGE_REGISTRY_CONFIG} -o yaml\").stdout\n )\n if \"emptyDir\" not in cluster_config[\"spec\"].get(\"storage\", {}).keys():\n run_cmd(\n f\"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p \"\n f'\\'{{\"spec\":{{\"storage\": {{\"emptyDir\":{{}}}}}}}}\\''\n )\n if cluster_config[\"spec\"].get(\"managementState\") != \"Managed\":\n run_cmd(\n f\"oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p \"\n f'\\'{{\"spec\":{{\"managementState\": \"Managed\"}}}}\\''\n )\n\n\ndef add_stage_cert():\n \"\"\"\n Deploy stage certificate to the cluster.\n \"\"\"\n log.info(\"Create configmap stage-registry-config with stage CA.\")\n run_cmd(\n f\"oc -n openshift-config create configmap stage-registry-config\"\n f\" --from-file=registry.stage.redhat.io={constants.STAGE_CA_FILE}\"\n )\n\n log.info(\"Add stage-registry-config to additionalTrustedCA.\")\n additional_trusted_ca_patch = (\n '{\"spec\":{\"additionalTrustedCA\":{\"name\":\"stage-registry-config\"}}}'\n )\n run_cmd(\n f\"oc patch image.config.openshift.io cluster --type=merge\"\n f\" -p '{additional_trusted_ca_patch}'\"\n )\n\n\ndef get_terraform(version=None, bin_dir=None):\n \"\"\"\n Downloads the terraform binary\n\n Args:\n version (str): Version of the terraform to download\n bin_dir (str): 
Path to bin directory (default: config.RUN['bin_dir'])\n\n Returns:\n str: Path to the terraform binary\n\n \"\"\"\n if platform.system() == \"Darwin\":\n os_type = \"darwin\"\n elif platform.system() == \"Linux\":\n os_type = \"linux\"\n else:\n raise UnsupportedOSType\n\n version = version or config.DEPLOYMENT[\"terraform_version\"]\n bin_dir = os.path.expanduser(bin_dir or config.RUN[\"bin_dir\"])\n terraform_zip_file = f\"terraform_{version}_{os_type}_amd64.zip\"\n terraform_filename = \"terraform\"\n terraform_binary_path = os.path.join(bin_dir, terraform_filename)\n log.info(f\"Downloading terraform version {version}\")\n previous_dir = os.getcwd()\n os.chdir(bin_dir)\n url = f\"https://releases.hashicorp.com/terraform/{version}/\" f\"{terraform_zip_file}\"\n download_file(url, terraform_zip_file)\n run_cmd(f\"unzip -o {terraform_zip_file}\")\n delete_file(terraform_zip_file)\n # return to the previous working directory\n os.chdir(previous_dir)\n\n return terraform_binary_path\n\n\ndef get_terraform_ignition_provider(terraform_dir, version=None):\n \"\"\"\n Downloads the terraform ignition provider\n\n Args:\n terraform_dir (str): Path to terraform working directory\n version (str): Version of the terraform ignition provider to download\n\n \"\"\"\n version = version or constants.TERRAFORM_IGNITION_PROVIDER_VERSION\n terraform_ignition_provider_zip_file = (\n f\"terraform-provider-ignition-{version}-linux-amd64.tar.gz\"\n )\n terraform_ignition_provider_dir = (\n f\"terraform-provider-ignition-{version}-linux-amd64\"\n )\n terraform_plugins_path = \".terraform/plugins/linux_amd64/\"\n log.info(f\"Downloading terraform ignition proivider version {version}\")\n previous_dir = os.getcwd()\n os.chdir(terraform_dir)\n url = (\n \"https://github.com/community-terraform-providers/\"\n f\"terraform-provider-ignition/releases/download/{version}/\"\n f\"{terraform_ignition_provider_zip_file}\"\n )\n\n # Download and untar\n download_file(url, terraform_ignition_provider_zip_file)\n run_cmd(f\"tar xzf {terraform_ignition_provider_zip_file}\")\n\n # move the ignition provider binary to plugins path\n create_directory_path(terraform_plugins_path)\n move(\n f\"{terraform_ignition_provider_dir}/terraform-provider-ignition\",\n terraform_plugins_path,\n )\n\n # delete the downloaded files\n delete_file(terraform_ignition_provider_zip_file)\n delete_dir(terraform_ignition_provider_dir)\n\n # return to the previous working directory\n os.chdir(previous_dir)\n\n\ndef get_module_ip(terraform_state_file, module):\n \"\"\"\n Gets the node IP from terraform.tfstate file\n\n Args:\n terraform_state_file (str): Path to terraform state file\n module (str): Module name in terraform.tfstate file\n e.g: constants.LOAD_BALANCER_MODULE\n\n Returns:\n list: IP of the node\n\n \"\"\"\n ips = []\n with open(terraform_state_file) as fd:\n obj = json.loads(fd.read())\n\n if config.ENV_DATA.get(\"folder_structure\"):\n resources = obj[\"resources\"]\n log.debug(f\"Extracting module information for {module}\")\n log.debug(f\"Resource in {terraform_state_file}: {resources}\")\n for resource in resources:\n if resource.get(\"module\") == module and resource.get(\"mode\") == \"data\":\n for each_resource in resource[\"instances\"]:\n resource_body = each_resource[\"attributes\"][\"body\"]\n ips.append(resource_body.split('\"')[3])\n else:\n modules = obj[\"modules\"]\n target_module = module.split(\"_\")[1]\n log.debug(f\"Extracting module information for {module}\")\n log.debug(f\"Modules in {terraform_state_file}: 
{modules}\")\n for each_module in modules:\n if target_module in each_module[\"path\"]:\n return each_module[\"outputs\"][\"ip_addresses\"][\"value\"]\n\n return ips\n\n\ndef set_aws_region(region=None):\n \"\"\"\n Exports environment variable AWS_REGION\n\n Args:\n region (str): AWS region to export\n\n \"\"\"\n log.debug(\"Exporting environment variable AWS_REGION\")\n region = region or config.ENV_DATA[\"region\"]\n os.environ[\"AWS_REGION\"] = region\n\n\ndef get_system_architecture():\n \"\"\"\n Get output from 'uname -m' command run on first worker node.\n\n Returns:\n str: Architecture of system\n\n \"\"\"\n from ocs_ci.ocs.node import get_nodes\n\n log.info(\"Checking architecture of system\")\n node = get_nodes(node_type=constants.WORKER_MACHINE)[0]\n return node.ocp.exec_oc_debug_cmd(node.data[\"metadata\"][\"name\"], [\"uname -m\"])\n\n\ndef wait_for_machineconfigpool_status(node_type, timeout=900):\n \"\"\"\n Check for Machineconfigpool status\n\n Args:\n node_type (str): The node type to check machineconfigpool\n status is updated.\n e.g: worker, master and all if we want to check for all nodes\n timeout (int): Time in seconds to wait\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.ocs import ocp\n\n node_types = [node_type]\n if node_type == \"all\":\n node_types = [f\"{constants.WORKER_MACHINE}\", f\"{constants.MASTER_MACHINE}\"]\n\n for role in node_types:\n log.info(f\"Checking machineconfigpool status for {role} nodes\")\n ocp_obj = ocp.OCP(kind=constants.MACHINECONFIGPOOL, resource_name=role)\n machine_count = ocp_obj.get()[\"status\"][\"machineCount\"]\n\n assert ocp_obj.wait_for_resource(\n condition=str(machine_count),\n column=\"READYMACHINECOUNT\",\n timeout=timeout,\n sleep=5,\n )\n\n\ndef configure_chrony_and_wait_for_machineconfig_status(\n node_type=constants.WORKER_MACHINE, timeout=900\n):\n \"\"\"\n Configure chrony on the nodes\n\n Args:\n node_type (str): The node type to configure chrony\n e.g: worker, master and all if we want to configure on all nodes\n timeout (int): Time in seconds to wait\n\n \"\"\"\n # importing here to avoid dependencies\n from ocs_ci.utility.templating import load_yaml\n from ocs_ci.ocs.resources.ocs import OCS\n\n chrony_data = load_yaml(constants.NTP_CHRONY_CONF)\n\n node_types = [node_type]\n if node_type == \"all\":\n node_types = [f\"{constants.WORKER_MACHINE}\", f\"{constants.MASTER_MACHINE}\"]\n\n for role in node_types:\n log.info(f\"Creating chrony for {role} nodes\")\n chrony_data[\"metadata\"][\"labels\"][\n \"machineconfiguration.openshift.io/role\"\n ] = role\n chrony_data[\"metadata\"][\"name\"] = f\"{role}-chrony-configuration\"\n chrony_obj = OCS(**chrony_data)\n chrony_obj.create()\n\n # sleep here to start update machineconfigpool status\n time.sleep(60)\n wait_for_machineconfigpool_status(role, timeout=timeout)\n\n\ndef modify_csv(csv, replace_from, replace_to):\n \"\"\"\n Modify the CSV\n\n Args:\n csv (str): The CSV name\n replace_from (str): The pattern to replace from in the CSV\n replace_to (str): The pattern to replace to in the CSV\n\n \"\"\"\n data = (\n f\"oc -n openshift-storage get csv {csv} -o yaml | sed\"\n f\" 's,{replace_from},{replace_to},g' | oc replace -f -\"\n )\n log.info(\n f\"CSV {csv} will be modified: {replace_from} will be replaced \"\n f\"with {replace_to}.\\nThe command that will be used for that is:\\n{data}\"\n )\n\n temp_file = NamedTemporaryFile(mode=\"w+\", prefix=\"csv_modification\", suffix=\".sh\")\n\n with open(temp_file.name, \"w\") as t_file:\n 
t_file.writelines(data)\n\n run_cmd(f\"chmod 777 {temp_file.name}\")\n run_cmd(f\"sh {temp_file.name}\")\n\n\ndef check_for_rhcos_images(url):\n \"\"\"\n Check for rhcos images are present in given location\n\n Args:\n url (str): rhcos_images url\n Returns:\n (bool): True if images present if not false\n\n \"\"\"\n r = requests.head(url)\n return r.status_code == requests.codes.ok\n\n\ndef download_file_from_git_repo(git_repo_url, path_to_file_in_git, filename):\n \"\"\"\n Download a file from a specified git repository\n\n Args:\n git_repo_url (str): The git repository url\n path_to_file_in_git (str): Path to the file to download\n in git repository\n filename (str): Name of the file to write the download to\n\n \"\"\"\n log.debug(\n f\"Download file '{path_to_file_in_git}' from \"\n f\"git repository {git_repo_url} to local file '{filename}'.\"\n )\n temp_dir = mkdtemp()\n git.Repo.clone_from(git_repo_url, temp_dir, branch=\"master\", depth=1)\n move(os.path.join(temp_dir, path_to_file_in_git), filename)\n rmtree(temp_dir)\n\n\ndef skipif_upgraded_from(version_list):\n \"\"\"\n This function evaluates the condition to skip a test if the cluster\n is upgraded from a particular OCS version\n\n Args:\n version_list (list): List of versions to check\n\n Return:\n (bool): True if test needs to be skipped else False\n\n \"\"\"\n try:\n from ocs_ci.ocs.resources.ocs import get_ocs_csv\n\n skip_this = False\n version_list = [version_list] if isinstance(version_list, str) else version_list\n ocs_csv = get_ocs_csv()\n csv_info = ocs_csv.get()\n prev_version = csv_info.get(\"spec\").get(\"replaces\", \"\")\n for version in version_list:\n if f\".v{version}\" in prev_version:\n skip_this = True\n break\n return skip_this\n except Exception as err:\n log.error(str(err))\n return False\n\n\ndef get_cluster_id(cluster_path):\n \"\"\"\n Get ClusterID from metadata.json in given cluster_path\n\n Args:\n cluster_path: path to cluster install directory\n\n Returns:\n str: metadata.json['clusterID']\n\n \"\"\"\n metadata_file = os.path.join(cluster_path, \"metadata.json\")\n with open(metadata_file) as f:\n metadata = json.load(f)\n return metadata[\"clusterID\"]\n\n\ndef get_running_cluster_id():\n \"\"\"\n Get cluster UUID\n Not relying on metadata.json as user sometimes want to run\n only with kubeconfig for some tests. For this function to work\n cluster has to be in running state\n\n Returns:\n str: cluster UUID\n\n \"\"\"\n cluster_id = run_cmd(\n \"oc get clusterversion version -o jsonpath='{.spec.clusterID}'\"\n )\n return cluster_id\n\n\ndef get_ocp_upgrade_history():\n \"\"\"\n Gets the OCP upgrade history for the cluster\n\n Returns:\n list: List of OCP upgrade paths. Latest version in the\n beginning of the list\n\n \"\"\"\n # importing here to avoid circular imports\n from ocs_ci.ocs.ocp import OCP\n\n ocp = OCP(kind=\"clusterversion\")\n cluster_version_info = ocp.get(\"version\")\n upgrade_history_info = cluster_version_info[\"status\"][\"history\"]\n upgrade_history = [each_upgrade[\"version\"] for each_upgrade in upgrade_history_info]\n return upgrade_history\n\n\ndef get_attr_chain(obj, attr_chain):\n \"\"\"\n Attempt to retrieve object attributes when uncertain about the existence of the attribute\n or a different attribute in a given attribute chain. If the retrieval fails, None is returned.\n The function can be used to retrieve a direct attribute, or a chain of attributes.\n i.e. 
- obj.attr_a, obj_attr_a.sub_attr\n\n Another example - trying to access \"sub_attr_b\" in object.attr.sub_attr_a.sub_attr_b -\n get_attr_chain(object, \"attr.sub_attr_a.sub_attr_b\")\n\n The function can be used to try and retrieve \"sub_attribute_b\" without an exception,\n even in cases where \"attr\" or \"sub_attr_a\" might not exist.\n In those cases, the function will return None.\n\n Args:\n obj: An object\n attr_chain (str): A string containing one attribute or several sub-attributes\n separated by dots (i.e. - \"attr.sub_attr_a.sub_attr_b\")\n\n Returns:\n The requested attribute if found, otherwise None\n \"\"\"\n return reduce(\n lambda _obj, _attr: getattr(_obj, _attr, None), attr_chain.split(\".\"), obj\n )\n\n\ndef get_default_if_keyval_empty(dictionary, key, default_val):\n \"\"\"\n if Key has an empty value OR key doesn't exist\n then return default value\n\n Args:\n dictionary (dict): Dictionary where we have to lookup\n key (str): key to lookup\n default_val (str): If key doesn't have value then return\n this default_val\n\n Returns:\n dictionary[key] if value is present else default_val\n\n \"\"\"\n if not dictionary.get(key):\n return default_val\n return dictionary.get(key)\n\n\ndef get_client_version(client_binary_path):\n \"\"\"\n Get version reported by `oc version`.\n\n Args:\n client_binary_path (str): path to `oc` binary\n\n Returns:\n str: version reported by `oc version`.\n None if the client does not exist at the provided path.\n\n \"\"\"\n if os.path.isfile(client_binary_path):\n cmd = f\"{client_binary_path} version --client -o json\"\n resp = exec_cmd(cmd)\n stdout = json.loads(resp.stdout.decode())\n return stdout[\"releaseClientVersion\"]\n\n\ndef clone_notify():\n \"\"\"\n Repository contains the source code of notify tool,\n which is a python3 based tool wrapped by a container\n used to configure Ceph Bucket Notifications\n\n Returns:\n notify_path (str): Path location of the notify code\n\n \"\"\"\n notify_dir = mkdtemp(prefix=\"notify_\")\n log.info(f\"cloning repo notify in {notify_dir}\")\n git_clone_cmd = f\"git clone {constants.RGW_KAFKA_NOTIFY}\"\n subprocess.run(git_clone_cmd, shell=True, cwd=notify_dir, check=True)\n notify_path = f\"{notify_dir}/notify/notify.py\"\n return notify_path\n\n\ndef add_chrony_to_ocp_deployment():\n \"\"\"\n Create and Add necessary chrony resources\n\n \"\"\"\n for role in [\"master\", \"worker\"]:\n log.info(f\"Creating and Adding Chrony file for {role}\")\n with open(constants.CHRONY_TEMPLATE) as file_stream:\n chrony_template_obj = yaml.safe_load(file_stream)\n chrony_template_obj[\"metadata\"][\"labels\"][\n \"machineconfiguration.openshift.io/role\"\n ] = role\n chrony_template_obj[\"metadata\"][\"name\"] = f\"99-{role}-chrony-configuration\"\n ignition_version = config.DEPLOYMENT[\"ignition_version\"]\n chrony_template_obj[\"spec\"][\"config\"][\"ignition\"][\"version\"] = ignition_version\n\n if Version.coerce(ignition_version) < Version.coerce(\"3.0\"):\n chrony_template_obj[\"spec\"][\"config\"][\"storage\"][\"files\"][0][\n \"filesystem\"\n ] = \"root\"\n\n chrony_template_str = yaml.safe_dump(chrony_template_obj)\n chrony_file = os.path.join(\n config.ENV_DATA[\"cluster_path\"],\n \"openshift\",\n f\"99-{role}-chrony-configuration.yaml\",\n )\n with open(chrony_file, \"w\") as f:\n f.write(chrony_template_str)\n\n\ndef enable_huge_pages():\n log.info(\"Enabling huge pages.\")\n exec_cmd(f\"oc apply -f {constants.HUGE_PAGES_TEMPLATE}\")\n time.sleep(10)\n log.info(\"Waiting for machine config will be 
applied with huge pages\")\n wait_for_machineconfigpool_status(node_type=constants.WORKER_MACHINE)\n"
] | [
[
"scipy.stats.tmean",
"scipy.stats.scoreatpercentile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
dcronqvist/restberry-api | [
"35a2698ae946fc392e5e7d56dbc22b0719d6f5b6"
] | [
"api_v1/namespaces/ai.py"
] | [
"from datetime import datetime\nfrom flask import request\nfrom flask_restx import Resource, Namespace, fields, reqparse\nfrom api_v1 import privilege_required\nimport pandas as pd\nimport pickle\nfrom db import coll_accounts\n\napi = Namespace(\"ai\", path=\"/ai\", description=\"Endpoints utilizing some of my trained scikit models.\")\n\npost_model = api.model(\"accountant_payload\", {\n \"amount\": fields.Float(example=39.9, required=True, min=0),\n \"date_trans\": fields.Integer(example=round(datetime.now().timestamp()), required=True),\n \"desc\": fields.String(example=\"Transaction for stuff\", required=True),\n \"is_outcome\": fields.Boolean(example=True, required=True),\n \"is_swish\": fields.Boolean(example=False, required=True),\n \"account_names\": fields.Boolean(example=True, default=False, help=\"If true, account names will also be returned.\")\n})\n\naccountant_post_doc = \"\"\"\n### A model for predicting transaction accounts\n\nBy supplying only very little information about a transaction, this model will be able to quite accurately predict both which account the transaction's amount is going FROM, but also TO.\n\"\"\"\n\ndef get_known(desc):\n known_tech_stores = [\n \"webhall\",\n \"elgig\",\n \"clas ohl\",\n \"nintendo\",\n \"steam\",\n \"adobe\",\n \"blizzard\",\n \"komplett\",\n \"inet\",\n \"KJELL & CO\",\n \"Electrokit\",\n \"Billigtekn\",\n \"SLOJD \",\n \"DISCORD\",\n \"Proshop\",\n \"Miss Hosting\"\n ]\n known_grocery_stores = [\n \"coop\",\n \"ica\",\n \"willys\",\n \"hemköp\",\n \"wh götebo\",\n \"SAIGON\",\n \"matse\",\n \"HEMK@P\",\n \"tempo\"\n ]\n known_restaurants = [\n \"sanneg\",\n \"miss faj\",\n \"taco bar\",\n \"tugg\",\n \"max\",\n \"bruncho\",\n \"lucy\",\n \"pizza\",\n \"pizz\",\n \"hamburg\",\n \"foodora\",\n \"UBER *EATS\",\n \"frasses\",\n \"brodernas\",\n \"iZ *DATATEKNOLOG\",\n \"sush\",\n \"plankan\",\n \"dine\",\n \"O LEARYS\",\n \"john sco\",\n \"UBER * EATS\",\n \"taverna\",\n \"W.O.K\",\n \"mat ute\",\n \"restaurang\",\n \"äta ute\",\n \"åt ute\",\n \"restaurant\"\n ]\n known_snacks = [\n \"snacks\",\n \"fika\",\n \"godis\",\n \"glass\",\n \"klubba\",\n \"snickers\",\n \"selecta\",\n \"alltgodis\",\n \"alltigodis\",\n \"pressbyr\",\n \"condeco\",\n \"espresso\",\n \"pomona\",\n \"cafe\",\n \"too good to go\",\n \"7-ELEVEN\",\n \"CIRCLE K\"\n ] \n known_stuff = {\n 1: known_grocery_stores,\n 2: known_snacks,\n 3: known_restaurants,\n 4: known_tech_stores,\n 5: [\"västtrafik\", \"buss\", \"public transport\", \"spårvagn\", \"tunnelbana\", \"tbana\"],\n 6: [\"lyko\", \"salong\", \"levi\", \"zalando\"]\n } \n for known in known_stuff:\n if any([k.lower() in desc.lower() for k in known_stuff[known]]):\n return known\n return 0\n\nwith open(\"scikit-models/from_account_v1.ai\", \"rb\") as f:\n from_account_model = pickle.load(f)\n\nwith open(\"scikit-models/to_account_v1.ai\", \"rb\") as f:\n to_account_model = pickle.load(f)\n\[email protected](\"/accountant\")\nclass TransactionAccountsPredictor(Resource):\n @api.doc(description=accountant_post_doc)\n @api.expect(post_model, validate=True)\n def post(self):\n trans = api.payload\n df = pd.DataFrame()\n\n df[\"Transaktionsdag\"] = [trans[\"date_trans\"]]\n df[\"Belopp\"] = [trans[\"amount\"]]\n df[\"IsOutcome\"] = [trans[\"is_outcome\"]]\n df[\"IsSwish\"] = [trans[\"is_swish\"]]\n df[\"DayOfWeek\"] = [datetime.fromtimestamp(trans[\"date_trans\"]).weekday()]\n df[\"IsWeekend\"] = [datetime.fromtimestamp(trans[\"date_trans\"]) in [5,6]]\n df[\"Known\"] = [get_known(trans[\"desc\"])]\n\n 
predicted_from = from_account_model.predict(df)\n predicted_to = to_account_model.predict(df)\n\n trans[\"from_account\"] = [int(x) for x in list(predicted_from)][0]\n trans[\"to_account\"] = [int(x) for x in list(predicted_to)][0]\n\n if trans[\"account_names\"]:\n trans[\"from_account_info\"] = coll_accounts.find_one({ \"number\": trans[\"from_account\"], \"user\": \"dani\" }, { \"_id\": 0, \"user\": 0, \"number\": 0})\n trans[\"to_account_info\"] = coll_accounts.find_one({ \"number\": trans[\"to_account\"], \"user\": \"dani\" }, { \"_id\": 0, \"user\": 0, \"number\": 0})\n \n\n del trans[\"account_names\"]\n del trans[\"is_outcome\"]\n del trans[\"is_swish\"] \n\n return trans, 200\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mestagtx/deepimpute | [
"a6bb01f6d000d265557f7e681b10b9eaac458fdd"
] | [
"deepimpute/multinet.py"
] | [
"import os\nimport warnings\nimport tempfile\n\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import pearsonr\n\nimport tensorflow.keras as keras\nfrom keras import backend as K\nfrom keras.models import Model,model_from_json\nfrom keras.layers import Dense,Dropout,Input\nfrom keras.callbacks import EarlyStopping\nimport keras.losses\n\nimport tensorflow as tf\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndef get_distance_matrix(raw, n_pred=None):\n\n VMR = raw.std() / raw.mean()\n VMR[np.isinf(VMR)] = 0\n \n if n_pred is None:\n potential_pred = raw.columns[VMR > 0]\n else:\n print(\"Using {} predictors\".format(n_pred))\n potential_pred = VMR.sort_values(ascending=False).index[:n_pred]\n\n covariance_matrix = pd.DataFrame(np.abs(np.corrcoef(raw.T.loc[potential_pred])),\n index=potential_pred,\n columns=potential_pred).fillna(0)\n return covariance_matrix\n\ndef wMSE(y_true, y_pred, binary=False):\n if binary:\n weights = tf.cast(y_true>0, tf.float32)\n else:\n weights = y_true\n return tf.reduce_mean(weights*tf.square(y_true-y_pred))\n\ndef inspect_data(data):\n # Check if there area any duplicated cell/gene labels\n \n if sum(data.index.duplicated()):\n print(\"ERROR: duplicated cell labels. Please provide unique cell labels.\")\n exit(1)\n \n if sum(data.columns.duplicated()):\n print(\"ERROR: duplicated gene labels. Please provide unique gene labels.\")\n exit(1)\n \n max_value = np.max(data.values)\n if max_value < 10:\n print(\"ERROR: max value = {}. Is your data log-transformed? Please provide raw counts\"\n .format(max_value))\n exit(1)\n \n print(\"Input dataset is {} cells (rows) and {} genes (columns)\"\n .format(*data.shape))\n print(\"First 3 rows and columns:\")\n print(data.iloc[:3,:3])\n\nclass MultiNet:\n\n def __init__(self,\n learning_rate=1e-4,\n batch_size=64,\n max_epochs=500,\n patience=5,\n ncores=-1,\n loss=\"wMSE\",\n output_prefix=tempfile.mkdtemp(),\n sub_outputdim=512,\n verbose=1,\n seed=1234,\n architecture=None\n ):\n self.NN_parameters = {\"learning_rate\": learning_rate,\n \"batch_size\": batch_size,\n \"loss\": loss,\n \"architecture\": architecture,\n \"max_epochs\": max_epochs,\n \"patience\": patience}\n self.sub_outputdim = sub_outputdim\n self.outputdir = output_prefix\n self.verbose = verbose\n self.seed = seed\n self.setCores(ncores)\n\n def setCores(self, ncores):\n if ncores > 0:\n self.ncores = ncores\n else:\n self.ncores = os.cpu_count()\n print(\"Using all the cores ({})\".format(self.ncores))\n \n def loadDefaultArchitecture(self):\n self.NN_parameters['architecture'] = [\n {\"type\": \"dense\", \"neurons\": self.sub_outputdim//2, \"activation\": \"relu\"},\n {\"type\": \"dropout\", \"rate\": 0.2},\n ]\n \n def save(self, model):\n os.system(\"mkdir -p {}\".format(self.outputdir))\n \n model_json = model.to_json()\n \n with open(\"{}/model.json\".format(self.outputdir), \"w\") as json_file:\n json_file.write(model_json)\n \n # serialize weights to HDF5\n model.save_weights(\"{}/model.h5\".format(self.outputdir))\n print(\"Saved model to disk in {}\".format(self.outputdir))\n\n def load(self):\n json_file = open('{}/model.json'.format(self.outputdir), 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n model = model_from_json(loaded_model_json)\n model.load_weights('{}/model.h5'.format(self.outputdir))\n\n return model\n \n def build(self, inputdims):\n if self.NN_parameters['architecture'] is None:\n self.loadDefaultArchitecture()\n\n print(self.NN_parameters['architecture'])\n\n inputs = [ 
Input(shape=(inputdim,)) for inputdim in inputdims ]\n outputs = inputs\n\n for layer in self.NN_parameters['architecture']:\n if layer['type'].lower() == 'dense':\n outputs = [ Dense(layer['neurons'], activation=layer['activation'])(output)\n for output in outputs ]\n elif layer['type'].lower() == 'dropout':\n outputs = [ Dropout(layer['rate'], seed=self.seed)(output)\n for output in outputs] \n else:\n print(\"Unknown layer type.\")\n\n outputs = [Dense(self.sub_outputdim, activation=\"softplus\")(output)\n for output in outputs]\n \n model = Model(inputs=inputs, outputs=outputs)\n\n loss = self.NN_parameters['loss']\n\n if loss in [k for k, v in globals().items() if callable(v)]:\n # if loss is a defined function\n loss = eval(self.NN_parameters['loss'])\n \n if not callable(loss):\n # it is defined in Keras\n if hasattr(keras.losses, loss):\n loss = getattr(keras.losses, loss) \n else:\n print('Unknown loss: {}. Aborting.'.format(loss))\n exit(1)\n\n model.compile(optimizer=keras.optimizer_v2.adam.Adam(lr=self.NN_parameters['learning_rate']),\n loss=loss)\n\n return model\n\n def fit(self,\n raw,\n cell_subset=1,\n NN_lim=None,\n genes_to_impute=None,\n n_pred=None,\n ntop=5,\n minVMR=0.5,\n mode='random',\n ):\n \n inspect_data(raw)\n \n if self.seed is not None:\n np.random.seed(self.seed)\n\n if cell_subset != 1:\n if cell_subset < 1:\n raw = raw.sample(frac=cell_subset)\n else:\n raw = raw.sample(cell_subset)\n\n gene_metric = (raw.var()/(1+raw.mean())).sort_values(ascending=False)\n gene_metric = gene_metric[gene_metric > 0]\n\n if genes_to_impute is None:\n genes_to_impute = self.filter_genes(gene_metric, minVMR, NN_lim=NN_lim)\n else:\n # Make the number of genes to impute a multiple of the network output dim\n n_genes = len(genes_to_impute)\n if n_genes % self.sub_outputdim != 0:\n print(\"The number of input genes is not a multiple of {}. Filling with other genes.\".format(n_genes))\n fill_genes = gene_metric.index[:self.sub_outputdim-n_genes]\n\n if len(fill_genes) < self.sub_outputdim-n_genes:\n # Not enough genes in gene_metric. 
Sample with replacement\n rest = self.sub_outputdim - n_genes - len(fill_genes)\n fill_genes = np.concatenate([fill_genes,\n np.random.choice(gene_metric.index, rest, replace=True)])\n\n genes_to_impute = np.concatenate([genes_to_impute, fill_genes])\n\n covariance_matrix = get_distance_matrix(raw, n_pred=n_pred)\n\n self.setTargets(raw.reindex(columns=genes_to_impute), mode=mode)\n self.setPredictors(covariance_matrix, ntop=ntop)\n\n print(\"Normalization\")\n norm_data = np.log1p(raw).astype(np.float32) # normalizer.transform(raw)\n\n np.random.seed(self.seed)\n tf.random.set_seed(self.seed)\n \n tf.config.threading.set_inter_op_parallelism_threads(self.ncores)\n tf.config.threading.set_intra_op_parallelism_threads(self.ncores)\n\n print(\"Building network\")\n model = self.build([len(genes) for genes in self.predictors])\n\n test_cells = np.random.choice(norm_data.index, int(0.05 * norm_data.shape[0]), replace=False)\n train_cells = np.setdiff1d(norm_data.index, test_cells)\n\n X_train = [norm_data.loc[train_cells, inputgenes].values for inputgenes in self.predictors]\n Y_train = [norm_data.loc[train_cells, targetgenes].values for targetgenes in self.targets]\n \n X_test = [norm_data.loc[test_cells, inputgenes].values for inputgenes in self.predictors]\n Y_test = [norm_data.loc[test_cells, targetgenes].values for targetgenes in self.targets]\n\n print(\"Fitting with {} cells\".format(norm_data.shape[0]))\n result = model.fit(X_train, Y_train,\n validation_data=(X_test,Y_test),\n epochs=self.NN_parameters[\"max_epochs\"],\n batch_size=self.NN_parameters[\"batch_size\"],\n callbacks=[EarlyStopping(monitor='val_loss',\n patience=self.NN_parameters[\"patience\"])],\n verbose=self.verbose)\n\n self.trained_epochs = len(result.history['loss'])\n print(\"Stopped fitting after {} epochs\".format(self.trained_epochs))\n\n self.save(model)\n\n # Save some metrics on test data\n Y_test_raw = np.hstack(Y_test).flatten()\n Y_test_imputed = np.hstack(model.predict(X_test)).flatten()\n\n # Keep only positive values (since negative values could be dropouts)\n Y_test_imputed = Y_test_imputed[Y_test_raw>0]\n Y_test_raw = Y_test_raw[Y_test_raw>0]\n\n self.test_metrics = {\n 'correlation': pearsonr(Y_test_raw,Y_test_imputed)[0],\n 'MSE': np.sum((Y_test_raw-Y_test_imputed)**2)/len(Y_test_raw)\n } \n\n return self\n\n def predict(self,\n raw,\n imputed_only=False,\n policy=\"restore\"):\n\n norm_raw = np.log1p(raw)\n\n inputs = [ norm_raw.loc[:,predictors].values.astype(np.float32)\n for predictors in self.predictors ]\n\n model = self.load()\n\n predicted = model.predict(inputs)\n if len(inputs)>1:\n predicted = np.hstack(predicted)\n \n predicted = pd.DataFrame(predicted, index=raw.index, columns=self.targets.flatten())\n\n predicted = predicted.groupby(by=predicted.columns, axis=1).mean()\n not_predicted = norm_raw.drop(self.targets.flatten(), axis=1)\n\n imputed = (pd.concat([predicted,not_predicted],axis=1)\n .loc[raw.index, raw.columns]\n .values)\n \n # To prevent overflow\n imputed[ (imputed > 2*norm_raw.values.max()) | (np.isnan(imputed)) ] = 0\n # Convert back to counts\n imputed = np.expm1(imputed)\n\n if policy == \"restore\":\n print(\"Filling zeros\")\n mask = (raw.values > 0)\n imputed[mask] = raw.values[mask]\n elif policy == \"max\":\n print(\"Imputing data with 'max' policy\")\n mask = (raw.values > imputed)\n imputed[mask] = raw.values[mask]\n\n imputed = pd.DataFrame(imputed, index=raw.index, columns=raw.columns)\n\n if imputed_only:\n return imputed.loc[:, predicted.columns]\n else:\n 
return imputed\n \n def filter_genes(self,\n gene_metric, # assumes gene_metric is sorted\n threshold,\n NN_lim=None\n ):\n if not str(NN_lim).isdigit():\n NN_lim = (gene_metric > threshold).sum()\n\n n_subsets = int(np.ceil(NN_lim / self.sub_outputdim))\n genes_to_impute = gene_metric.index[:n_subsets*self.sub_outputdim]\n\n rest = self.sub_outputdim - (len(genes_to_impute) % self.sub_outputdim)\n\n if rest > 0:\n fill_genes = np.random.choice(gene_metric.index, rest)\n genes_to_impute = np.concatenate([genes_to_impute, fill_genes])\n\n print(\"{} genes selected for imputation\".format(len(genes_to_impute)))\n\n return genes_to_impute\n\n def setTargets(self,data, mode='random'):\n \n n_subsets = int(data.shape[1]/self.sub_outputdim)\n\n if mode == 'progressive':\n self.targets = data.columns.values.reshape([n_subsets, self.sub_outputdim])\n else:\n self.targets = np.random.choice(data.columns,\n [n_subsets, self.sub_outputdim],\n replace=False)\n \n def setPredictors(self, covariance_matrix, ntop=5):\n self.predictors = []\n \n for i,targets in enumerate(self.targets):\n\n genes_not_in_target = np.setdiff1d(covariance_matrix.columns, targets)\n\n if genes_not_in_target.size == 0:\n warnings.warn('Warning: number of target genes lower than output dim. Consider lowering down the sub_outputdim parameter',\n UserWarning)\n genes_not_in_target = covariance_matrix.columns\n \n subMatrix = ( covariance_matrix\n .loc[targets, genes_not_in_target]\n )\n sorted_idx = np.argsort(-subMatrix.values, axis=1)\n predictors = subMatrix.columns[sorted_idx[:,:ntop].flatten()]\n\n self.predictors.append(predictors.unique())\n\n print(\"Net {}: {} predictors, {} targets\"\n .format(i,len(np.unique(predictors)),len(targets)))\n\n def score(self, data, policy=None):\n warnings.warn(\n \"This method is deprecated. Please use model.test_metrics to measure model accuracy instead\",\n DeprecationWarning)\n Y_hat = self.predict(data, policy=policy)\n Y = data.loc[Y_hat.index, Y_hat.columns]\n\n return pearsonr(Y_hat.values.reshape(-1), Y.values.reshape(-1))\n \n\n\n\n \n"
] | [
[
"tensorflow.cast",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.max",
"tensorflow.config.threading.set_intra_op_parallelism_threads",
"tensorflow.random.set_seed",
"numpy.hstack",
"numpy.unique",
"numpy.ceil",
"tensorflow.square",
"tensorflow.config.threading.set_inter_op_parallelism_threads",
"numpy.log1p",
"pandas.concat",
"numpy.random.choice",
"numpy.isnan",
"scipy.stats.pearsonr",
"numpy.corrcoef",
"numpy.argsort",
"numpy.sum",
"numpy.isinf",
"numpy.random.seed",
"numpy.setdiff1d",
"numpy.expm1",
"tensorflow.keras.optimizer_v2.adam.Adam"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
searchsolved/sentence-transformers-master | [
"50f345322d602ebab9e6d2b5e2a98e7e9d0cf9a3"
] | [
"sentence_transformers/models/Pooling.py"
] | [
"import torch\nfrom torch import Tensor\nfrom torch import nn\nfrom typing import Union, Tuple, List, Iterable, Dict\nimport os\nimport json\n\n\nclass Pooling(nn.Module):\n \"\"\"Performs pooling (max or mean) on the token embeddings.\n\n Using pooling, it generates from a variable sized sentence a fixed sized sentence embedding. This layer also allows to use the CLS token if it is returned by the underlying word embedding model.\n You can concatenate multiple poolings together.\n\n :param word_embedding_dimension: Dimensions for the word embeddings\n :param pooling_mode: Can be a string: mean/max/cls. If set, overwrites the other pooling_mode_* settings\n :param pooling_mode_cls_token: Use the first token (CLS token) as text representations\n :param pooling_mode_max_tokens: Use max in each dimension over all tokens.\n :param pooling_mode_mean_tokens: Perform mean-pooling\n :param pooling_mode_mean_sqrt_len_tokens: Perform mean-pooling, but devide by sqrt(input_length).\n \"\"\"\n def __init__(self,\n word_embedding_dimension: int,\n pooling_mode: str = None,\n pooling_mode_cls_token: bool = False,\n pooling_mode_max_tokens: bool = False,\n pooling_mode_mean_tokens: bool = True,\n pooling_mode_mean_sqrt_len_tokens: bool = False,\n ):\n super(Pooling, self).__init__()\n\n self.config_keys = ['word_embedding_dimension', 'pooling_mode_cls_token', 'pooling_mode_mean_tokens', 'pooling_mode_max_tokens', 'pooling_mode_mean_sqrt_len_tokens']\n\n if pooling_mode is not None: #Set pooling mode by string\n pooling_mode = pooling_mode.lower()\n assert pooling_mode in ['mean', 'max', 'cls']\n pooling_mode_cls_token = (pooling_mode == 'cls')\n pooling_mode_max_tokens = (pooling_mode == 'max')\n pooling_mode_mean_tokens = (pooling_mode == 'mean')\n\n self.word_embedding_dimension = word_embedding_dimension\n self.pooling_mode_cls_token = pooling_mode_cls_token\n self.pooling_mode_mean_tokens = pooling_mode_mean_tokens\n self.pooling_mode_max_tokens = pooling_mode_max_tokens\n self.pooling_mode_mean_sqrt_len_tokens = pooling_mode_mean_sqrt_len_tokens\n\n pooling_mode_multiplier = sum([pooling_mode_cls_token, pooling_mode_max_tokens, pooling_mode_mean_tokens, pooling_mode_mean_sqrt_len_tokens])\n self.pooling_output_dimension = (pooling_mode_multiplier * word_embedding_dimension)\n\n\n def __repr__(self):\n return \"Pooling({})\".format(self.get_config_dict())\n\n def get_pooling_mode_str(self) -> str:\n \"\"\"\n Returns the pooling mode as string\n \"\"\"\n modes = []\n if self.pooling_mode_cls_token:\n modes.append('cls')\n if self.pooling_mode_mean_tokens:\n modes.append('mean')\n if self.pooling_mode_max_tokens:\n modes.append('max')\n if self.pooling_mode_mean_sqrt_len_tokens:\n modes.append('mean_sqrt_len_tokens')\n\n return \"+\".join(modes)\n\n def forward(self, features: Dict[str, Tensor]):\n token_embeddings = features['token_embeddings']\n attention_mask = features['attention_mask']\n\n ## Pooling strategy\n output_vectors = []\n if self.pooling_mode_cls_token:\n cls_token = features.get('cls_token_embeddings', token_embeddings[:, 0]) # Take first token by default\n output_vectors.append(cls_token)\n if self.pooling_mode_max_tokens:\n input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n token_embeddings[input_mask_expanded == 0] = -1e9 # Set padding tokens to large negative value\n max_over_time = torch.max(token_embeddings, 1)[0]\n output_vectors.append(max_over_time)\n if self.pooling_mode_mean_tokens or self.pooling_mode_mean_sqrt_len_tokens:\n 
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)\n\n #If tokens are weighted (by WordWeights layer), feature 'token_weights_sum' will be present\n if 'token_weights_sum' in features:\n sum_mask = features['token_weights_sum'].unsqueeze(-1).expand(sum_embeddings.size())\n else:\n sum_mask = input_mask_expanded.sum(1)\n\n sum_mask = torch.clamp(sum_mask, min=1e-9)\n\n if self.pooling_mode_mean_tokens:\n output_vectors.append(sum_embeddings / sum_mask)\n if self.pooling_mode_mean_sqrt_len_tokens:\n output_vectors.append(sum_embeddings / torch.sqrt(sum_mask))\n\n output_vector = torch.cat(output_vectors, 1)\n features.update({'sentence_embedding': output_vector})\n return features\n\n def get_sentence_embedding_dimension(self):\n return self.pooling_output_dimension\n\n def get_config_dict(self):\n return {key: self.__dict__[key] for key in self.config_keys}\n\n def save(self, output_path):\n with open(os.path.join(output_path, 'config.json'), 'w') as fOut:\n json.dump(self.get_config_dict(), fOut, indent=2)\n\n @staticmethod\n def load(input_path):\n with open(os.path.join(input_path, 'config.json')) as fIn:\n config = json.load(fIn)\n\n return Pooling(**config)\n"
] | [
[
"torch.max",
"torch.cat",
"torch.sqrt",
"torch.sum",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JanSchulz/statsmodels | [
"a160bbc790ef447ec365651ad01da3cf11e75f7f"
] | [
"statsmodels/stats/tests/test_multi.py"
] | [
"'''Tests for multipletests and fdr pvalue corrections\n\nAuthor : Josef Perktold\n\n\n['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n', 'fdr_tsbh']\nare tested against R:multtest\n\n'hommel' is tested against R stats p_adjust (not available in multtest\n\n'fdr_gbs', 'fdr_2sbky' I did not find them in R, currently tested for\n consistency only\n\n'''\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_equal, assert_\n\nfrom statsmodels.stats.multitest import (multipletests, fdrcorrection,\n fdrcorrection_twostage)\nfrom statsmodels.stats.multicomp import tukeyhsd\n\npval0 = np.array([0.838541367553 , 0.642193923795 , 0.680845947633 ,\n 0.967833824309 , 0.71626938238 , 0.177096952723 , 5.23656777208e-005 ,\n 0.0202732688798 , 0.00028140506198 , 0.0149877310796])\n\nres_multtest1 = np.array([[ 5.2365677720800003e-05, 5.2365677720800005e-04,\n 5.2365677720800005e-04, 5.2365677720800005e-04,\n 5.2353339704891422e-04, 5.2353339704891422e-04,\n 5.2365677720800005e-04, 1.5337740764175588e-03],\n [ 2.8140506198000000e-04, 2.8140506197999998e-03,\n 2.5326455578199999e-03, 2.5326455578199999e-03,\n 2.8104897961789277e-03, 2.5297966317768816e-03,\n 1.4070253098999999e-03, 4.1211324652269442e-03],\n [ 1.4987731079600001e-02, 1.4987731079600000e-01,\n 1.1990184863680001e-01, 1.1990184863680001e-01,\n 1.4016246580579017e-01, 1.1379719679449507e-01,\n 4.9959103598666670e-02, 1.4632862843720582e-01],\n [ 2.0273268879800001e-02, 2.0273268879799999e-01,\n 1.4191288215860001e-01, 1.4191288215860001e-01,\n 1.8520270949069695e-01, 1.3356756197485375e-01,\n 5.0683172199499998e-02, 1.4844940238274187e-01],\n [ 1.7709695272300000e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 8.5760763426056130e-01, 6.8947825122356643e-01,\n 3.5419390544599999e-01, 1.0000000000000000e+00],\n [ 6.4219392379499995e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9996560644133570e-01, 9.9413539782557070e-01,\n 8.9533672797500008e-01, 1.0000000000000000e+00],\n [ 6.8084594763299999e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9998903512635740e-01, 9.9413539782557070e-01,\n 8.9533672797500008e-01, 1.0000000000000000e+00],\n [ 7.1626938238000004e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9999661886871472e-01, 9.9413539782557070e-01,\n 8.9533672797500008e-01, 1.0000000000000000e+00],\n [ 8.3854136755300002e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9999998796038225e-01, 9.9413539782557070e-01,\n 9.3171263061444454e-01, 1.0000000000000000e+00],\n [ 9.6783382430900000e-01, 1.0000000000000000e+00,\n 1.0000000000000000e+00, 9.6783382430900000e-01,\n 9.9999999999999878e-01, 9.9413539782557070e-01,\n 9.6783382430900000e-01, 1.0000000000000000e+00]])\n\n\nres_multtest2_columns = ['rawp', 'Bonferroni', 'Holm', 'Hochberg', 'SidakSS', 'SidakSD',\n 'BH', 'BY', 'ABH', 'TSBH_0.05']\n\nrmethods = {'rawp':(0,'pval'), 'Bonferroni':(1,'b'), 'Holm':(2,'h'),\n 'Hochberg':(3,'sh'), 'SidakSS':(4,'s'), 'SidakSD':(5,'hs'),\n 'BH':(6,'fdr_i'), 'BY':(7,'fdr_n'),\n 'TSBH_0.05':(9, 'fdr_tsbh')}\n\nNA = np.nan\n# all rejections, except for Bonferroni and Sidak\nres_multtest2 = np.array([\n 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.012, 0.024, 0.036, 0.048,\n 0.06, 0.072, 0.012, 0.02, 0.024, 0.024, 0.024, 0.024, 0.012, 0.012,\n 0.012, 0.012, 0.012, 0.012, 0.01194015976019192, 0.02376127616613988,\n 0.03546430060660932, 0.04705017875634587, 0.058519850599,\n 
0.06987425045000606, 0.01194015976019192, 0.01984063872102404,\n 0.02378486270400004, 0.023808512, 0.023808512, 0.023808512, 0.012,\n 0.012, 0.012, 0.012, 0.012, 0.012, 0.0294, 0.0294, 0.0294, 0.0294,\n 0.0294, 0.0294, NA, NA, NA, NA, NA, NA, 0, 0, 0, 0, 0, 0\n ]).reshape(6,10, order='F')\n\nres_multtest3 = np.array([\n 0.001, 0.002, 0.003, 0.004, 0.005, 0.05, 0.06, 0.07, 0.08, 0.09, 0.01,\n 0.02, 0.03, 0.04, 0.05, 0.5, 0.6, 0.7, 0.8, 0.9, 0.01, 0.018, 0.024,\n 0.028, 0.03, 0.25, 0.25, 0.25, 0.25, 0.25, 0.01, 0.018, 0.024, 0.028,\n 0.03, 0.09, 0.09, 0.09, 0.09, 0.09, 0.00995511979025177,\n 0.01982095664805061, 0.02959822305108317, 0.03928762649718986,\n 0.04888986953422814, 0.4012630607616213, 0.4613848859051006,\n 0.5160176928207072, 0.5656115457763677, 0.6105838818818925,\n 0.00995511979025177, 0.0178566699880266, 0.02374950634358763,\n 0.02766623106147537, 0.02962749064373438, 0.2262190625000001,\n 0.2262190625000001, 0.2262190625000001, 0.2262190625000001,\n 0.2262190625000001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.08333333333333334,\n 0.0857142857142857, 0.0875, 0.0888888888888889, 0.09,\n 0.02928968253968254, 0.02928968253968254, 0.02928968253968254,\n 0.02928968253968254, 0.02928968253968254, 0.2440806878306878,\n 0.2510544217687075, 0.2562847222222222, 0.2603527336860670,\n 0.2636071428571428, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0.005,\n 0.005, 0.005, 0.005, 0.005, 0.04166666666666667, 0.04285714285714286,\n 0.04375, 0.04444444444444445, 0.045\n ]).reshape(10,10, order='F')\n\nres0_large = np.array([\n 0.00031612, 0.0003965, 0.00048442, 0.00051932, 0.00101436, 0.00121506,\n 0.0014516, 0.00265684, 0.00430043, 0.01743686, 0.02080285, 0.02785414,\n 0.0327198, 0.03494679, 0.04206808, 0.08067095, 0.23882767, 0.28352304,\n 0.36140401, 0.43565145, 0.44866768, 0.45368782, 0.48282088,\n 0.49223781, 0.55451638, 0.6207473, 0.71847853, 0.72424145, 0.85950263,\n 0.89032747, 0.0094836, 0.011895, 0.0145326, 0.0155796, 0.0304308,\n 0.0364518, 0.043548, 0.0797052, 0.1290129, 0.5231058, 0.6240855,\n 0.8356242, 0.981594, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 0.0094836, 0.0114985, 0.01356376, 0.01402164, 0.02637336,\n 0.0303765, 0.0348384, 0.06110732, 0.09460946, 0.36617406, 0.416057,\n 0.52922866, 0.5889564, 0.59409543, 0.67308928, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 0.0094836, 0.0114985, 0.01356376, 0.01402164,\n 0.02637336, 0.0303765, 0.0348384, 0.06110732, 0.09460946, 0.36617406,\n 0.416057, 0.52922866, 0.5889564, 0.59409543, 0.67308928, 0.89032747,\n 0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.89032747,\n 0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.89032747,\n 0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.009440257627368331,\n 0.01182686507401931, 0.01443098172617119, 0.01546285007478554,\n 0.02998742566629453, 0.03581680249125385, 0.04264369065603335,\n 0.0767094173291795, 0.1212818694859857, 0.410051586220387,\n 0.4677640287633493, 0.5715077903157826, 0.631388450393325,\n 0.656016359012282, 0.724552174001554, 0.919808283456286,\n 0.999721715014484, 0.9999547032674126, 0.9999985652190126,\n 0.999999964809746, 0.999999982525548, 0.999999986719131,\n 0.999999997434160, 0.999999998521536, 0.999999999970829,\n 0.999999999999767, 1, 1, 1, 1, 0.009440257627368331,\n 0.01143489901147732, 0.0134754287611275, 0.01392738605848343,\n 0.0260416568490015, 0.02993768724817902, 0.0342629726119179,\n 0.0593542206208364, 0.09045742964699988, 0.308853956167216,\n 0.343245865702423, 0.4153483370083637, 0.4505333180190900,\n 0.453775200643535, 0.497247406680671, 
0.71681858015803,\n 0.978083969553718, 0.986889206426321, 0.995400461639735,\n 0.9981506396214986, 0.9981506396214986, 0.9981506396214986,\n 0.9981506396214986, 0.9981506396214986, 0.9981506396214986,\n 0.9981506396214986, 0.9981506396214986, 0.9981506396214986,\n 0.9981506396214986, 0.9981506396214986, 0.0038949, 0.0038949,\n 0.0038949, 0.0038949, 0.0060753, 0.0060753, 0.006221142857142857,\n 0.00996315, 0.01433476666666667, 0.05231058, 0.05673504545454545,\n 0.06963535, 0.07488597857142856, 0.07488597857142856, 0.08413616,\n 0.15125803125, 0.421460594117647, 0.4725384, 0.570637910526316,\n 0.6152972625, 0.6152972625, 0.6152972625, 0.6152972625, 0.6152972625,\n 0.665419656, 0.7162468846153845, 0.775972982142857, 0.775972982142857,\n 0.889140651724138, 0.89032747, 0.01556007537622183,\n 0.01556007537622183, 0.01556007537622183, 0.01556007537622183,\n 0.02427074531648065, 0.02427074531648065, 0.02485338565390302,\n 0.0398026560334295, 0.0572672083580799, 0.2089800939109816,\n 0.2266557764630925, 0.2781923271071372, 0.2991685206792373,\n 0.2991685206792373, 0.336122876445059, 0.6042738882921044, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.00220711, 0.00220711, 0.00220711,\n 0.00220711, 0.00344267, 0.00344267, 0.003525314285714285, 0.005645785,\n 0.00812303444444444, 0.029642662, 0.0321498590909091,\n 0.03946003166666667, 0.04243538785714285, 0.04243538785714285,\n 0.0476771573333333, 0.085712884375, 0.23882767, 0.26777176,\n 0.323361482631579, 0.34866844875, 0.34866844875, 0.34866844875,\n 0.34866844875, 0.34866844875, 0.3770711384, 0.4058732346153846,\n 0.4397180232142857, 0.4397180232142857, 0.503846369310345,\n 0.504518899666667, 0.00272643, 0.00272643, 0.00272643, 0.00272643,\n 0.00425271, 0.00425271, 0.0043548, 0.006974205, 0.01003433666666667,\n 0.036617406, 0.03971453181818182, 0.048744745, 0.052420185,\n 0.052420185, 0.058895312, 0.105880621875, 0.295022415882353,\n 0.33077688, 0.399446537368421, 0.43070808375, 0.43070808375,\n 0.43070808375, 0.43070808375, 0.43070808375, 0.4657937592,\n 0.5013728192307692, 0.5431810875, 0.5431810875, 0.622398456206897,\n 0.623229229\n ]).reshape(30,10, order='F')\n\n\nclass CheckMultiTestsMixin(object):\n def test_multi_pvalcorrection(self):\n #test against R package multtest mt.rawp2adjp\n\n res_multtest = self.res2\n pval0 = res_multtest[:,0]\n\n for k,v in rmethods.items():\n if v[1] in self.methods:\n reject, pvalscorr = multipletests(pval0,\n alpha=self.alpha,\n method=v[1])[:2]\n assert_almost_equal(pvalscorr, res_multtest[:,v[0]], 15)\n assert_equal(reject, pvalscorr <= self.alpha)\n\n pvalscorr = np.sort(fdrcorrection(pval0, method='n')[1])\n assert_almost_equal(pvalscorr, res_multtest[:,7], 15)\n pvalscorr = np.sort(fdrcorrection(pval0, method='i')[1])\n assert_almost_equal(pvalscorr, res_multtest[:,6], 15)\n\nclass TestMultiTests1(CheckMultiTestsMixin):\n def __init__(self):\n self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n']\n self.alpha = 0.1\n self.res2 = res_multtest1\n\nclass TestMultiTests2(CheckMultiTestsMixin):\n # case: all hypothesis rejected (except 'b' and 's'\n def __init__(self):\n self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n']\n self.alpha = 0.05\n self.res2 = res_multtest2\n\nclass TestMultiTests3(CheckMultiTestsMixin):\n def __init__(self):\n self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n',\n 'fdr_tsbh']\n self.alpha = 0.05\n self.res2 = res0_large\n\nclass TestMultiTests4(CheckMultiTestsMixin):\n # in simulations, all two stage fdr, fdr_tsbky, fdr_tsbh, fdr_gbs, have in\n # some 
cases (cases with large Alternative) an FDR that looks too large\n # this is the first case #rejected = 12, DGP : has 10 false\n def __init__(self):\n self.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n',\n 'fdr_tsbh']\n self.alpha = 0.05\n self.res2 = res_multtest3\n\ndef test_pvalcorrection_reject():\n # consistency test for reject boolean and pvalscorr\n\n for alpha in [0.01, 0.05, 0.1]:\n for method in ['b', 's', 'sh', 'hs', 'h', 'hommel', 'fdr_i', 'fdr_n',\n 'fdr_tsbky', 'fdr_tsbh', 'fdr_gbs']:\n for ii in range(11):\n pval1 = np.hstack((np.linspace(0.0001, 0.0100, ii),\n np.linspace(0.05001, 0.11, 10 - ii)))\n # using .05001 instead of 0.05 to avoid edge case issue #768\n reject, pvalscorr = multipletests(pval1, alpha=alpha,\n method=method)[:2]\n #print 'reject.sum', v[1], reject.sum()\n msg = 'case %s %3.2f rejected:%d\\npval_raw=%r\\npvalscorr=%r' % (\n method, alpha, reject.sum(), pval1, pvalscorr)\n #assert_equal(reject, pvalscorr <= alpha, err_msg=msg)\n yield assert_equal, reject, pvalscorr <= alpha, msg\n\n\ndef test_hommel():\n #tested agains R stats p_adjust(pval0, method='hommel')\n pval0 = np.array(\n [ 0.00116, 0.00924, 0.01075, 0.01437, 0.01784, 0.01918,\n 0.02751, 0.02871, 0.03054, 0.03246, 0.04259, 0.06879,\n 0.0691 , 0.08081, 0.08593, 0.08993, 0.09386, 0.09412,\n 0.09718, 0.09758, 0.09781, 0.09788, 0.13282, 0.20191,\n 0.21757, 0.24031, 0.26061, 0.26762, 0.29474, 0.32901,\n 0.41386, 0.51479, 0.52461, 0.53389, 0.56276, 0.62967,\n 0.72178, 0.73403, 0.87182, 0.95384])\n\n result_ho = np.array(\n [ 0.0464 , 0.25872 , 0.29025 ,\n 0.3495714285714286, 0.41032 , 0.44114 ,\n 0.57771 , 0.60291 , 0.618954 ,\n 0.6492 , 0.7402725000000001, 0.86749 ,\n 0.86749 , 0.8889100000000001, 0.8971477777777778,\n 0.8993 , 0.9175374999999999, 0.9175374999999999,\n 0.9175374999999999, 0.9175374999999999, 0.9175374999999999,\n 0.9175374999999999, 0.95384 , 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001, 0.9538400000000001, 0.9538400000000001,\n 0.9538400000000001])\n\n rej, pvalscorr, _, _ = multipletests(pval0, alpha=0.1, method='ho')\n assert_almost_equal(pvalscorr, result_ho, 15)\n assert_equal(rej, result_ho < 0.1) #booleans\n\ndef test_fdr_bky():\n # test for fdrcorrection_twostage\n # example from BKY\n pvals = [0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, 0.0459,\n 0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000 ]\n\n #no test for corrected p-values, but they are inherited\n #same number of rejection as in BKY paper:\n #single step-up:4, two-stage:8, iterated two-step:9\n #also alpha_star is the same as theirs for TST\n #print fdrcorrection0(pvals, alpha=0.05, method='indep')\n #print fdrcorrection_twostage(pvals, alpha=0.05, iter=False)\n res_tst = fdrcorrection_twostage(pvals, alpha=0.05, iter=False)\n assert_almost_equal([0.047619, 0.0649], res_tst[-1][:2],3) #alpha_star for stage 2\n assert_equal(8, res_tst[0].sum())\n #print fdrcorrection_twostage(pvals, alpha=0.05, iter=True)\n\ndef test_tukeyhsd():\n #example multicomp in R p 83\n\n res = '''\\\n pair diff lwr upr p adj\n P-M 8.150000 -10.037586 26.3375861 0.670063958\n S-M -3.258333 -21.445919 14.9292527 0.982419709\n T-M 23.808333 5.620747 41.9959194 0.006783701\n V-M 4.791667 -13.395919 22.9792527 0.931020848\n S-P -11.408333 -29.595919 6.7792527 0.360680099\n T-P 
15.658333 -2.529253 33.8459194 0.113221634\n V-P -3.358333 -21.545919 14.8292527 0.980350080\n T-S 27.066667 8.879081 45.2542527 0.002027122\n V-S 8.050000 -10.137586 26.2375861 0.679824487\n V-T -19.016667 -37.204253 -0.8290806 0.037710044\n '''\n\n res = np.array([[ 8.150000, -10.037586, 26.3375861, 0.670063958],\n [-3.258333, -21.445919, 14.9292527, 0.982419709],\n [23.808333, 5.620747, 41.9959194, 0.006783701],\n [ 4.791667, -13.395919, 22.9792527, 0.931020848],\n [-11.408333, -29.595919, 6.7792527, 0.360680099],\n [15.658333, -2.529253, 33.8459194, 0.113221634],\n [-3.358333, -21.545919, 14.8292527, 0.980350080],\n [27.066667, 8.879081, 45.2542527, 0.002027122],\n [ 8.050000, -10.137586, 26.2375861, 0.679824487],\n [-19.016667, -37.204253, -0.8290806, 0.037710044]])\n\n m_r = [94.39167, 102.54167, 91.13333, 118.20000, 99.18333]\n myres = tukeyhsd(m_r, 6, 110.8, alpha=0.05, df=4)\n from numpy.testing import assert_almost_equal, assert_equal\n pairs, reject, meandiffs, std_pairs, confint, q_crit = myres[:6]\n assert_almost_equal(meandiffs, res[:, 0], decimal=5)\n assert_almost_equal(confint, res[:, 1:3], decimal=2)\n assert_equal(reject, res[:, 3]<0.05)\n\n"
] | [
[
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.linspace",
"numpy.testing.assert_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
olantwin/zfit | [
"dae89fd95fc2158c0e7530664d8ca999db4802c5",
"dae89fd95fc2158c0e7530664d8ca999db4802c5"
] | [
"zfit/core/loss.py",
"zfit/core/parameter.py"
] | [
"import abc\nfrom collections import OrderedDict\n\nimport tensorflow as tf\nfrom typing import Optional, Union, List\n\nfrom zfit import ztf\nfrom zfit.util import ztyping\nfrom zfit.util.cache import Cachable\nfrom zfit.util.graph import get_dependents_auto\nfrom .baseobject import BaseObject, BaseDependentsMixin\nfrom .interfaces import ZfitLoss, ZfitSpace, ZfitModel, ZfitData, ZfitPDF\nfrom ..models.functions import SimpleFunc\nfrom ..util.container import convert_to_container, is_container\nfrom ..util.exception import IntentionNotUnambiguousError, NotExtendedPDFError\nfrom zfit.settings import ztypes\n\n\ndef _unbinned_nll_tf(model: ztyping.PDFInputType, data: ztyping.DataInputType, fit_range: ZfitSpace):\n \"\"\"Return unbinned negative log likelihood graph for a PDF\n\n Args:\n model (ZfitModel): PDFs with a `.pdf` method. Has to be as many models as data\n data (ZfitData):\n fit_range ():\n\n Returns:\n graph: the unbinned nll\n\n Raises:\n ValueError: if both `probs` and `log_probs` are specified.\n \"\"\"\n\n if is_container(model):\n nlls = [_unbinned_nll_tf(model=p, data=d, fit_range=r)\n for p, d, r in zip(model, data, fit_range)]\n nll_finished = tf.reduce_sum(nlls)\n else:\n with data.set_data_range(fit_range):\n probs = model.pdf(data, norm_range=fit_range)\n log_probs = tf.log(probs)\n if data.weights is not None:\n log_probs *= data.weights # because it's prob ** weights\n nll = -tf.reduce_sum(log_probs)\n nll_finished = nll\n return nll_finished\n\n\ndef _nll_constraints_tf(constraints):\n if not constraints:\n return ztf.constant(0.) # adding 0 to nll\n probs = []\n for param, dist in constraints.items():\n probs.append(dist.pdf(param))\n # probs = [dist.pdf(param) for param, dist in constraints.items()]\n constraints_neg_log_prob = -tf.reduce_sum(tf.log(probs))\n return constraints_neg_log_prob\n\n\nclass BaseLoss(BaseDependentsMixin, ZfitLoss, Cachable, BaseObject):\n\n def __init__(self, model, data, fit_range: ztyping.LimitsTypeInput = None, constraints: List[tf.Tensor] = None):\n # first doc line left blank on purpose, subclass adds class docstring (Sphinx autodoc adds the two)\n \"\"\"\n\n A \"simultaneous fit\" can be performed by giving one or more `model`, `data`, `fit_range`\n to the loss. The length of each has to match the length of the others.\n\n Args:\n model (Iterable[ZfitModel]): The model or models to evaluate the data on\n data (Iterable[ZfitData]): Data to use\n fit_range (Iterable[:py:class:`~zfit.Space`]): The fitting range. It's the norm_range for the models (if\n they\n have a norm_range) and the data_range for the data.\n constraints (Iterable[tf.Tensor): A Tensor representing a loss constraint. 
Using\n `zfit.constraint.*` allows for easy use of predefined constraints.\n \"\"\"\n super().__init__(name=type(self).__name__)\n model, data, fit_range = self._input_check(pdf=model, data=data, fit_range=fit_range)\n self._model = model\n self._data = data\n self._fit_range = fit_range\n if constraints is None:\n constraints = []\n self._constraints = convert_to_container(constraints, list)\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls._name = \"UnnamedSubBaseLoss\"\n\n def _input_check(self, pdf, data, fit_range):\n if is_container(pdf) ^ is_container(data):\n raise ValueError(\"`pdf` and `data` either both have to be a list or not.\")\n if not is_container(pdf):\n if isinstance(fit_range, list):\n raise TypeError(\"`pdf` and `data` are not a `list`, `fit_range` can't be a `list` then.\")\n if isinstance(pdf, tuple):\n raise TypeError(\"`pdf` has to be a pdf or a list of pdfs, not a tuple.\")\n\n if isinstance(data, tuple):\n raise TypeError(\"`data` has to be a data or a list of data, not a tuple.\")\n\n pdf, data = (convert_to_container(obj, non_containers=[tuple]) for obj in (pdf, data))\n # TODO: data, range consistency?\n if fit_range is None:\n fit_range = []\n for p, d in zip(pdf, data):\n if not p.norm_range == d.data_range:\n raise IntentionNotUnambiguousError(\"No `fit_range` is specified and `pdf` {} as \"\n \"well as `data` {} have different ranges they\"\n \"are defined in. Either make them (all) consistent\"\n \"or specify the `fit_range`\")\n fit_range.append(p.norm_range)\n else:\n fit_range = convert_to_container(fit_range, non_containers=[tuple])\n\n # simultaneous fit\n # if is_container(pdf):\n # if not is_container(fit_range) or not isinstance(fit_range[0], Space):\n # raise ValueError(\n # \"If several pdfs are specified, the `fit_range` has to be given as a list of `Space` \"\n # \"objects and not as pure tuples.\")\n\n # else:\n # fit_range = pdf.convert_sort_space(limits=fit_range) # fit_range may be a tuple\n if not len(pdf) == len(data) == len(fit_range):\n raise ValueError(\"pdf, data and fit_range don't have the same number of components:\"\n \"\\npdf: {}\"\n \"\\ndata: {}\"\n \"\\nfit_range: {}\".format(pdf, data, fit_range))\n\n # sanitize fit_range\n fit_range = [p.convert_sort_space(limits=range_) for p, range_ in zip(pdf, fit_range)]\n # TODO: sanitize pdf, data?\n self.add_cache_dependents(cache_dependents=pdf)\n self.add_cache_dependents(cache_dependents=data)\n self.add_cache_dependents(cache_dependents=fit_range)\n return pdf, data, fit_range\n\n def gradients(self, params: ztyping.ParamTypeInput = None) -> List[tf.Tensor]:\n if params is None:\n params = list(self.get_dependents())\n else:\n params = convert_to_container(params)\n return self._gradients(params=params)\n\n def add_constraints(self, constraints):\n return self._add_constraints(constraints)\n\n def _add_constraints(self, constraints):\n constraints = convert_to_container(constraints, container=list)\n self._constraints.extend(constraints)\n return constraints\n\n @property\n def name(self):\n return self._name\n\n @property\n def model(self):\n return self._model\n\n @property\n def data(self):\n return self._data\n\n @property\n def fit_range(self):\n fit_range = self._fit_range\n return fit_range\n\n @property\n def constraints(self):\n return self._constraints\n\n def _get_dependents(self):\n pdf_dependents = self._extract_dependents(self.model)\n return pdf_dependents\n\n @abc.abstractmethod\n def _loss_func(self, model, data, fit_range, 
constraints):\n raise NotImplementedError\n\n def value(self):\n return self._value()\n\n def _value(self):\n try:\n return self._loss_func(model=self.model, data=self.data, fit_range=self.fit_range,\n constraints=self.constraints)\n except NotImplementedError:\n raise NotImplementedError(\"_loss_func not properly defined!\")\n\n def __add__(self, other):\n if not isinstance(other, BaseLoss):\n raise TypeError(\"Has to be a subclass of `BaseLoss` or overwrite `__add__`.\")\n if not type(other) == type(self):\n raise ValueError(\"cannot safely add two different kind of loss.\")\n model = self.model + other.model\n data = self.data + other.data\n fit_range = self.fit_range + other.fit_range\n constraints = self.constraints + other.constraints\n loss = type(self)(model=model, data=data, fit_range=fit_range, constraints=constraints)\n return loss\n\n def _gradients(self, params):\n return tf.gradients(self.value(), params)\n\n\nclass CachedLoss(BaseLoss):\n\n def __init__(self, model, data, fit_range=None, constraints=None):\n super().__init__(model=model, data=data, fit_range=fit_range, constraints=constraints)\n\n @abc.abstractmethod\n def _cache_add_constraints(self, constraints):\n raise NotImplementedError\n\n def _value(self):\n if self._cache.get('loss') is None:\n loss = super()._value()\n self._cache['loss'] = loss\n else:\n loss = self._cache['loss']\n return loss\n\n def _add_constraints(self, constraints):\n super()._add_constraints(constraints=constraints)\n self._cache_add_constraints(constraints=constraints)\n\n def _gradients(self, params):\n params_cache = self._cache.get('gradients', {})\n params_todo = []\n for param in params:\n if param not in params_cache:\n params_todo.append(param)\n if params_todo:\n gradients = {(p, grad) for p, grad in zip(params_todo, super()._gradients(params_todo))}\n params_cache.update(gradients)\n\n self._cache['gradients'] = params_cache\n\n param_gradients = [params_cache[param] for param in params]\n return param_gradients\n\n\nclass UnbinnedNLL(CachedLoss):\n \"\"\"The Unbinned Negative Log Likelihood.\"\"\"\n\n _name = \"UnbinnedNLL\"\n\n def _loss_func(self, model, data, fit_range, constraints):\n nll = _unbinned_nll_tf(model=model, data=data, fit_range=fit_range)\n if constraints:\n constraints = ztf.reduce_sum(constraints)\n nll += constraints\n return nll\n\n def _cache_add_constraints(self, constraints):\n if self._cache.get('loss') is not None:\n self._cache['loss'] += ztf.reduce_sum(constraints)\n\n @property\n def errordef(self) -> Union[float, int]:\n return 0.5\n\n\nclass ExtendedUnbinnedNLL(UnbinnedNLL):\n \"\"\"An Unbinned Negative Log Likelihood with an additional poisson term for the\"\"\"\n\n def _loss_func(self, model, data, fit_range, constraints):\n nll = super()._loss_func(model=model, data=data, fit_range=fit_range, constraints=constraints)\n poisson_terms = []\n for mod, dat in zip(model, data):\n if not mod.is_extended:\n raise NotExtendedPDFError(\"The pdf {} is not extended but has to be (for an extended fit)\".format(mod))\n nevents = dat.nevents if dat.weights is None else ztf.reduce_sum(dat.weights)\n poisson_terms.append(-mod.get_yield() + ztf.to_real(nevents) * tf.log(mod.get_yield()))\n nll -= tf.reduce_sum(poisson_terms)\n return nll\n\n\nclass SimpleLoss(CachedLoss):\n _name = \"SimpleLoss\"\n\n def __init__(self, func, dependents=None, errordef=None):\n self._simple_func = func\n self._simple_errordef = errordef\n self._simple_func_dependents = convert_to_container(dependents, container=set)\n\n 
super().__init__(model=[], data=[], fit_range=[])\n\n def _get_dependents(self):\n dependents = self._simple_func_dependents\n if dependents is None:\n independent_params = tf.get_collection(\"zfit_independent\")\n dependents = get_dependents_auto(tensor=self.value(), candidates=independent_params)\n self._simple_func_dependents = dependents\n return dependents\n\n @property\n def errordef(self):\n errordef = self._simple_errordef\n if errordef is None:\n errordef = -999\n # raise RuntimeError(\"For this SimpleLoss, no error calculation is possible.\")\n else:\n return errordef\n\n def _loss_func(self, model, data, fit_range, constraints=None):\n loss = self._simple_func\n return loss()\n\n def __add__(self, other):\n raise IntentionNotUnambiguousError(\"Cannot add a SimpleLoss, 'addition' of losses can mean anything.\"\n \"Add them manually\")\n",
"\"\"\"Define Parameter which holds the value.\"\"\"\nfrom contextlib import suppress\n\nimport numpy as np\nimport tensorflow as tf\n\n# TF backwards compatibility\nfrom tensorflow.python import ops, array_ops\n\nimport zfit\nfrom zfit import ztf\n\nfrom tensorflow.python.ops.resource_variable_ops import ResourceVariable as TFBaseVariable\nfrom tensorflow.python.ops.resource_variable_ops import ResourceVariable\n\nfrom ..util.temporary import TemporarilySet\nfrom ..core.baseobject import BaseNumeric, BaseObject\nfrom ..util.cache import Cachable, invalidates_cache\nfrom ..util import ztyping\nfrom ..util.execution import SessionHolderMixin\nfrom .interfaces import ZfitModel, ZfitParameter\nfrom ..util.graph import get_dependents_auto\nfrom ..util.exception import LogicalUndefinedOperationError, NameAlreadyTakenError\nfrom . import baseobject as zbaseobject\nfrom . import interfaces as zinterfaces\nfrom ..settings import ztypes\n\n\nclass MetaBaseParameter(type(TFBaseVariable), type(zinterfaces.ZfitParameter)): # resolve metaclasses\n pass\n\n\n# drop-in replacement for ResourceVariable\nclass ZfitBaseVariable(metaclass=type(TFBaseVariable)):\n\n def __init__(self, variable: tf.Variable, **kwargs):\n self.variable = variable\n\n # @property\n # def name(self):\n # return self.variable.op.name\n\n @property\n def dtype(self):\n return self.variable.dtype\n\n def value(self):\n return self.variable.value()\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n return self.variable.assign(value=value, use_locking=use_locking,\n name=name, read_value=read_value)\n\n def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n del name\n if dtype is not None and dtype != self.dtype:\n return NotImplemented\n if as_ref:\n return self.variable.read_value().op.inputs[0]\n else:\n return self.variable.value()\n\n def _AsTensor(self):\n return self.variable.value()\n\n @staticmethod\n def _OverloadAllOperators(): # pylint: disable=invalid-name\n \"\"\"Register overloads for all operators.\"\"\"\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n ZfitBaseVariable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(ZfitBaseVariable, \"__getitem__\", array_ops._SliceHelperVar)\n\n @staticmethod\n def _OverloadOperator(operator): # pylint: disable=invalid-name\n \"\"\"Defer an operator overload to `ops.Tensor`.\n We pull the operator out of ops.Tensor dynamically to avoid ordering issues.\n Args:\n operator: string. 
The operator name.\n \"\"\"\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ZfitBaseVariable, operator, _run_op)\n\n\ndef _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):\n return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)\n\n\nops.register_tensor_conversion_function(ZfitBaseVariable, _dense_var_to_tensor)\n# ops.register_session_run_conversion_functions()\n\nZfitBaseVariable._OverloadAllOperators()\n\n\nclass ComposedResourceVariable(ResourceVariable):\n def __init__(self, name, initial_value, **kwargs):\n super().__init__(name=name, initial_value=initial_value, **kwargs)\n self._value_tensor = initial_value\n\n def value(self):\n # with tf.control_dependencies([self._value_tensor]):\n # return 5.\n return self._value_tensor\n\n def read_value(self):\n # raise RuntimeError()\n return self._value_tensor\n\n\n# class ComposedVariable(tf.Variable, metaclass=type(tf.Variable)):\nclass ComposedVariable(ResourceVariable, metaclass=type(tf.Variable)):\n\n def __init__(self, name: str, initial_value: tf.Tensor, **kwargs):\n # super().__init__(initial_value=initial_value, **kwargs, use_resource=True)\n super().__init__(initial_value=initial_value, **kwargs)\n self._value_tensor = tf.convert_to_tensor(initial_value, preferred_dtype=ztypes.float)\n # self._name = name\n\n @property\n def name(self):\n return self.op.name\n\n @property\n def dtype(self):\n return self._value_tensor.dtype\n\n def value(self):\n return self._value_tensor\n\n def read_value(self):\n return self.value()\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n raise LogicalUndefinedOperationError(\"Cannot assign to a fixed/composed parameter\")\n\n def load(self, value, session=None):\n raise LogicalUndefinedOperationError(\"Cannot load to a fixed/composed parameter\")\n\n def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n del name\n if dtype is not None and dtype != self.dtype:\n return NotImplemented\n if as_ref:\n # return \"NEVER READ THIS\"\n raise LogicalUndefinedOperationError(\"There is no ref for the fixed/composed parameter\")\n else:\n return self._value_tensor\n\n def _AsTensor(self):\n return self._value_tensor\n\n @staticmethod\n def _OverloadAllOperators(): # pylint: disable=invalid-name\n \"\"\"Register overloads for all operators.\"\"\"\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n ComposedVariable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(ComposedVariable, \"__getitem__\", array_ops._SliceHelperVar)\n\n @staticmethod\n def _OverloadOperator(operator): # pylint: disable=invalid-name\n \"\"\"Defer an operator overload to `ops.Tensor`.\n We pull the operator out of ops.Tensor dynamically to avoid ordering issues.\n Args:\n operator: string. 
The operator name.\n \"\"\"\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ComposedVariable, operator, _run_op)\n\n\ndef _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):\n return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)\n\n\nops.register_tensor_conversion_function(ComposedVariable, _dense_var_to_tensor)\nfetch_function = lambda variable: ([variable.read_value()],\n lambda val: val[0])\nfeed_function = lambda feed, feed_val: [(feed.read_value(), feed_val)]\nfeed_function_for_partial_run = lambda feed: [feed.read_value()]\n\nfrom tensorflow.python.client.session import register_session_run_conversion_functions\n\n# ops.register_dense_tensor_like_type()\nregister_session_run_conversion_functions(tensor_type=ComposedResourceVariable, fetch_function=fetch_function,\n feed_function=feed_function,\n feed_function_for_partial_run=feed_function_for_partial_run)\n\nregister_session_run_conversion_functions(tensor_type=ComposedVariable, fetch_function=fetch_function,\n feed_function=feed_function,\n feed_function_for_partial_run=feed_function_for_partial_run)\n\nComposedVariable._OverloadAllOperators()\n\n\nclass BaseParameter(ZfitParameter, metaclass=MetaBaseParameter):\n pass\n\n\nclass ZfitParameterMixin(BaseNumeric):\n _existing_names = set()\n\n def __init__(self, name, initial_value, floating=True, **kwargs):\n if name in self._existing_names:\n raise NameAlreadyTakenError(\"Another parameter is already named {}. \"\n \"Use a different, unique one.\".format(name))\n self._existing_names.update((name,))\n self._name = name\n super().__init__(initial_value=initial_value, name=name, **kwargs)\n # try:\n # new_name = self.op.name\n # except AttributeError: # no `op` attribute -> take normal name\n # new_name = self.name\n # new_name = self.name.rsplit(':', 1)[0] # get rid of tf node\n # new_name = self.name # get rid of tf node\n # new_name = new_name.rsplit('/', 1)[-1] # get rid of the scope preceding the name\n # if not new_name == name: # name has been mangled because it already exists\n # raise NameAlreadyTakenError(\"Another parameter is already named {}. \"\n # \"Use a different, unique one.\".format(name))\n\n @property\n def name(self):\n return self._name\n\n @property\n def floating(self):\n if self._floating and not self.trainable:\n raise RuntimeError(\"Floating is set to true but tf Variable is not trainable.\")\n return self._floating\n\n @floating.setter\n def floating(self, value):\n if not isinstance(value, bool):\n raise TypeError(\"floating has to be a boolean.\")\n self._floating = value\n\n def __add__(self, other):\n if isinstance(other, (ZfitModel, ZfitParameter)):\n from . import operations\n with suppress(NotImplementedError):\n return operations.add(self, other)\n return super().__add__(other)\n\n def __radd__(self, other):\n if isinstance(other, (ZfitModel, ZfitParameter)):\n from . import operations\n with suppress(NotImplementedError):\n return operations.add(other, self)\n return super().__radd__(other)\n\n def __mul__(self, other):\n if isinstance(other, (ZfitModel, ZfitParameter)):\n from . 
import operations\n with suppress(NotImplementedError):\n return operations.multiply(self, other)\n return super().__mul__(other)\n\n def __rmul__(self, other):\n if isinstance(other, (ZfitModel, ZfitParameter)):\n from . import operations\n with suppress(NotImplementedError):\n return operations.multiply(other, self)\n return super().__rmul__(other)\n\n\nclass Parameter(SessionHolderMixin, ZfitParameterMixin, TFBaseVariable, BaseParameter):\n \"\"\"Class for fit parameters, derived from TF Variable class.\n \"\"\"\n _independent = True\n\n def __init__(self, name, value, lower_limit=None, upper_limit=None, step_size=None, floating=True,\n dtype=ztypes.float, **kwargs):\n \"\"\"\n Constructor.\n name : name of the parameter,\n value : starting value\n lower_limit : lower limit\n upper_limit : upper limit\n step_size : step size (set to 0 for fixed parameters)\n \"\"\"\n\n # TODO: sanitize input\n if lower_limit is None:\n lower_limit = -np.infty\n if upper_limit is None:\n upper_limit = np.infty\n # no_limits = -lower_limit == upper_limit == np.infty\n value = tf.cast(value, dtype=ztypes.float)\n\n def constraint(x):\n return tf.clip_by_value(x, clip_value_min=self.lower_limit,\n clip_value_max=self.upper_limit)\n\n # self.constraint = constraint\n\n super().__init__(initial_value=value, dtype=dtype, name=name, constraint=constraint,\n params={}, **kwargs)\n self.lower_limit = tf.cast(lower_limit, dtype=ztypes.float)\n self.upper_limit = tf.cast(upper_limit, dtype=ztypes.float)\n if self.independent:\n tf.add_to_collection(\"zfit_independent\", self)\n else:\n tf.add_to_collection(\"zfit_dependent\", self)\n # value = tf.cast(value, dtype=ztypes.float) # TODO: init value mandatory?\n self.floating = floating\n self.step_size = step_size\n zfit.run.auto_initialize(self)\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls._independent = True # overwriting independent only for subclass/instance\n\n @property\n def lower_limit(self):\n return self._lower_limit\n\n @lower_limit.setter\n @invalidates_cache\n def lower_limit(self, value):\n self._lower_limit = value\n\n @property\n def upper_limit(self):\n return self._upper_limit\n\n @upper_limit.setter\n @invalidates_cache\n def upper_limit(self, value):\n self._upper_limit = value\n\n @property\n def has_limits(self):\n no_limits = -self.lower_limit == self.upper_limit == np.infty\n return not no_limits\n\n def value(self):\n value = super().value()\n if self.has_limits:\n value = self.constraint(value)\n return value\n\n def read_value(self):\n value = super().read_value()\n if self.has_limits:\n value = self.constraint(value)\n return value\n\n def _get_dependents(self):\n return {self}\n\n @property\n def independent(self):\n return self._independent\n\n @property\n def step_size(self): # TODO: improve default step_size?\n step_size = self._step_size\n if step_size is None:\n # auto-infer from limits\n step_splits = 1e4\n # step_size = (self.upper_limit - self.lower_limit) / step_splits # TODO improve? can be tensor?\n step_size = 0.001\n if step_size == np.nan:\n if self.lower_limit == -np.infty or self.upper_limit == np.infty:\n step_size = 0.001\n else:\n raise ValueError(\"Could not set step size. 
Is NaN.\")\n # TODO: how to deal with infinities?\n step_size = ztf.to_real(step_size)\n self.step_size = step_size\n\n return step_size\n\n @step_size.setter\n def step_size(self, value):\n if value is not None:\n value = ztf.convert_to_tensor(value)\n self._step_size = value\n\n def load(self, value: ztyping.NumericalScalarType):\n \"\"\":py:class:`~zfit.Parameter` takes on the `value`. Is not part of the graph, does a session run.\n\n Args:\n value (numerical):\n \"\"\"\n return super().load(value=value, session=self.sess)\n\n def set_value(self, value: ztyping.NumericalScalarType):\n \"\"\"Set the :py:class:`~zfit.Parameter` to `value` (temporarily if used in a context manager).\n\n Args:\n value (float): The value the parameter will take on.\n \"\"\"\n super_load = super().load\n\n def getter():\n return self.sess.run(self)\n\n def setter(value):\n super_load(value=value, session=self.sess)\n\n return TemporarilySet(value=value, setter=setter, getter=getter)\n\n # TODO: make it a random variable? return tensor that evaluates new all the time?\n def randomize(self, minval=None, maxval=None):\n \"\"\"Update the value with a randomised value between minval and maxval.\n\n Args:\n minval (Numerical):\n maxval (Numerical):\n \"\"\"\n if minval is None:\n minval = self.sess.run(self.lower_limit)\n # else:\n # minval = tf.cast(minval, dtype=self.dtype)\n if maxval is None:\n maxval = self.sess.run(self.upper_limit)\n # else:\n # maxval = tf.cast(maxval, dtype=self.dtype)\n\n # value = ztf.random_uniform(shape=self.shape, minval=minval, maxval=maxval, dtype=self.dtype)\n shape = self.shape.as_list()\n # if shape == []:\n # size = 1\n # value = self.sess.run(value)\n eps = 1e-8\n value = np.random.uniform(size=self.shape, low=minval + eps, high=maxval - eps)\n # value = np.random.uniform(size=size, low=minval, high=maxval)\n # if shape == []:\n # value = value[0]\n self.load(value=value)\n return value\n\n\nclass BaseComposedParameter(ZfitParameterMixin, ComposedVariable, BaseParameter):\n\n def __init__(self, params, value, name=\"BaseComposedParameter\", **kwargs):\n super().__init__(initial_value=value, name=name, params=params, **kwargs)\n # self.params = params\n\n def _get_dependents(self):\n dependents = self._extract_dependents(list(self.params.values()))\n return dependents\n\n @property\n def floating(self):\n raise LogicalUndefinedOperationError(\"Cannot be floating or not. 
Look at the dependents.\")\n\n @property\n def params(self):\n return self._params\n\n @params.setter\n def params(self, value):\n if not isinstance(value, dict):\n raise TypeError(\"Parameters has to be a dict\")\n self._params = value\n\n @property\n def independent(self):\n return False\n\n\nclass ComposedParameter(BaseComposedParameter):\n def __init__(self, name, tensor, dtype=ztypes.float, **kwargs):\n tensor = ztf.convert_to_tensor(tensor, dtype=dtype)\n independent_params = tf.get_collection(\"zfit_independent\")\n params = get_dependents_auto(tensor=tensor, candidates=independent_params)\n # params_init_op = [param.initializer for param in params]\n params = {p.name: p for p in params}\n # with tf.control_dependencies(params_init_op):\n super().__init__(params=params, value=tensor, name=name, dtype=dtype, **kwargs)\n\n\nclass ComplexParameter(ComposedParameter):\n def __init__(self, name, value, dtype=ztypes.complex, **kwargs):\n self._conj = None\n self._mod = None\n self._arg = None\n self._imag = None\n self._real = None\n\n super().__init__(name, value, dtype, **kwargs)\n\n @staticmethod\n def from_cartesian(name, real, imag, dtype=ztypes.complex, floating=True,\n **kwargs): # TODO: correct dtype handling, also below\n real = convert_to_parameter(real, name=name + \"_real\", prefer_floating=floating)\n imag = convert_to_parameter(imag, name=name + \"_imag\", prefer_floating=floating)\n param = ComplexParameter(name=name, value=tf.cast(tf.complex(real, imag), dtype=dtype),\n **kwargs)\n param._real = real\n param._imag = imag\n return param\n\n @staticmethod\n def from_polar(name, mod, arg, dtype=ztypes.complex, floating=True, **kwargs):\n mod = convert_to_parameter(mod, name=name + \"_mod\", prefer_floating=floating)\n arg = convert_to_parameter(arg, name=name + \"_arg\", prefer_floating=floating)\n param = ComplexParameter(name=name, value=tf.cast(tf.complex(mod * tf.math.cos(arg),\n mod * tf.math.sin(arg)),\n dtype=dtype), **kwargs)\n param._mod = mod\n param._arg = arg\n return param\n\n @property\n def conj(self):\n if self._conj is None:\n self._conj = ComplexParameter(name='{}_conj'.format(self.name), value=tf.math.conj(self),\n dtype=self.dtype)\n return self._conj\n\n @property\n def real(self):\n real = self._real\n if real is None:\n real = tf.real(self)\n return real\n\n @property\n def imag(self):\n imag = self._imag\n if imag is None:\n imag = tf.imag(self)\n return imag\n\n @property\n def mod(self):\n mod = self._mod\n if mod is None:\n mod = tf.math.abs(self)\n return mod\n\n @property\n def arg(self):\n arg = self._arg\n if arg is None:\n arg = tf.math.atan(self.imag / self.real)\n return arg\n\n\n_auto_number = 0\n\n\ndef get_auto_number():\n global _auto_number\n auto_number = _auto_number\n _auto_number += 1\n return auto_number\n\n\ndef convert_to_parameter(value, name=None, prefer_floating=False) -> \"ZfitParameter\":\n \"\"\"Convert a *numerical* to a fixed parameter or return if already a parameter.\n\n Args:\n value ():\n \"\"\"\n floating = False\n is_python = False\n if name is not None:\n name = str(name)\n\n if isinstance(value, ZfitParameter): # TODO(Mayou36): autoconvert variable. 
TF 2.0?\n return value\n elif isinstance(value, tf.Variable):\n raise TypeError(\"Currently, cannot autoconvert tf.Variable to zfit.Parameter.\")\n\n # convert to Tensor if not yet\n if not isinstance(value, tf.Tensor):\n is_python = True\n if isinstance(value, complex):\n value = ztf.to_complex(value)\n else:\n floating = prefer_floating\n value = ztf.to_real(value)\n\n if value.dtype.is_complex:\n if name is None:\n name = \"FIXED_complex_autoparam_\" + str(get_auto_number())\n value = ComplexParameter(name, value=value, floating=False)\n\n else:\n # value = Parameter(\"FIXED_autoparam_\" + str(get_auto_number()), value=value, floating=False)\n if is_python:\n params = {}\n else:\n independend_params = tf.get_collection(\"zfit_independent\")\n params = get_dependents_auto(tensor=value, candidates=independend_params)\n if params:\n if name is None:\n name = \"composite_autoparam_\" + str(get_auto_number())\n value = ComposedParameter(name, tensor=value)\n else:\n if name is None:\n name = \"FIXED_autoparam_\" + str(get_auto_number())\n value = Parameter(name, value=value, floating=floating)\n\n # value.floating = False\n return value\n"
] | [
[
"tensorflow.get_collection",
"tensorflow.reduce_sum",
"tensorflow.log"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.clip_by_value",
"tensorflow.math.abs",
"tensorflow.math.cos",
"tensorflow.get_collection",
"tensorflow.real",
"tensorflow.cast",
"tensorflow.math.atan",
"tensorflow.python.ops.register_tensor_conversion_function",
"tensorflow.math.conj",
"tensorflow.math.sin",
"tensorflow.complex",
"numpy.random.uniform",
"tensorflow.imag",
"tensorflow.add_to_collection",
"tensorflow.python.client.session.register_session_run_conversion_functions"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
HuangShiqing/Paddle-Lite | [
"061f94e4624c7cc657ff914c0e589bf1192d73c5",
"061f94e4624c7cc657ff914c0e589bf1192d73c5"
] | [
"lite/tests/unittest_py/op/test_scale_op.py",
"lite/tests/unittest_py/op/test_fill_constant_op.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('../')\n\nfrom auto_scan_test import AutoScanTest, IgnoreReasons\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport unittest\n\nimport hypothesis\nfrom hypothesis import given, settings, seed, example, assume\nimport hypothesis.strategies as st\nimport numpy as np\nfrom functools import partial\nimport argparse\n\n\nclass TestScaleOp(AutoScanTest):\n def __init__(self, *args, **kwargs):\n AutoScanTest.__init__(self, *args, **kwargs)\n self.enable_testing_on_place(\n TargetType.ARM,\n PrecisionType.FP32,\n DataLayoutType.NCHW,\n thread=[1, 4])\n self.enable_testing_on_place(\n TargetType.X86,\n PrecisionType.FP32,\n DataLayoutType.NCHW,\n thread=[1, 4])\n opencl_places = [\n Place(TargetType.OpenCL, PrecisionType.FP16,\n DataLayoutType.ImageDefault), Place(\n TargetType.OpenCL, PrecisionType.FP16,\n DataLayoutType.ImageFolder),\n Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),\n Place(TargetType.OpenCL, PrecisionType.Any,\n DataLayoutType.ImageDefault), Place(\n TargetType.OpenCL, PrecisionType.Any,\n DataLayoutType.ImageFolder),\n Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),\n Place(TargetType.Host, PrecisionType.FP32)\n ]\n self.enable_testing_on_place(places=opencl_places)\n metal_places = [\n Place(TargetType.Metal, PrecisionType.FP32,\n DataLayoutType.MetalTexture2DArray),\n Place(TargetType.Metal, PrecisionType.FP16,\n DataLayoutType.MetalTexture2DArray),\n Place(TargetType.ARM, PrecisionType.FP32),\n Place(TargetType.Host, PrecisionType.FP32)\n ]\n self.enable_testing_on_place(places=metal_places)\n self.enable_testing_on_place(\n TargetType.ARM,\n PrecisionType.FP16,\n DataLayoutType.NCHW,\n thread=[1, 4])\n self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)\n self.enable_devices_on_nnadapter(device_names=[\n \"kunlunxin_xtcl\", \"cambricon_mlu\", \"nvidia_tensorrt\"\n ])\n\n def is_program_valid(self,\n program_config: ProgramConfig,\n predictor_config: CxxConfig) -> bool:\n x_dtype = program_config.inputs[\"input_data\"].dtype\n target_type = predictor_config.target()\n if target_type in [TargetType.ARM]:\n if predictor_config.precision(\n ) == PrecisionType.FP16 and x_dtype != np.float32:\n return False\n if target_type == TargetType.NNAdapter:\n if program_config.inputs[\"input_data\"].dtype != np.float32:\n return False\n return True\n\n def sample_program_configs(self, draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=1, max_value=8), min_size=1, max_size=4))\n bias = draw(st.floats(min_value=-5, max_value=5))\n bias_after_scale = draw(st.booleans())\n scale = draw(st.floats(min_value=-5, max_value=5))\n input_type = draw(st.sampled_from([\"int32\", \"int64\", \"float32\"]))\n has_scale_tensor = False # draw(st.booleans())\n\n def generate_data(*args, **kwargs):\n low, high = 
-10, 10\n dtype = \"float32\"\n shape = kwargs[\"shape\"]\n if \"low\" in kwargs:\n low = kwargs[\"low\"]\n if \"high\" in kwargs:\n high = kwargs[\"high\"]\n if \"dtype\" in kwargs:\n dtype = kwargs[\"dtype\"]\n\n if dtype == \"int32\":\n if low == high:\n return low * np.ones(shape).astype(np.int32)\n else:\n return np.random.randint(low, high, shape).astype(np.int32)\n elif dtype == \"int64\":\n if low == high:\n return low * np.ones(shape).astype(np.int64)\n else:\n return np.random.randint(low, high, shape).astype(np.int64)\n elif dtype == \"float32\":\n return (high - low\n ) * np.random.random(shape).astype(np.float32) + low\n\n input_dict = {\"X\": [\"input_data\"]}\n input_data_dict = {\n \"input_data\": TensorConfig(data_gen=partial(\n generate_data, dtype=input_type, shape=in_shape))\n }\n if has_scale_tensor:\n input_dict[\"ScaleTensor\"] = \"scale_tensor_data\"\n input_data_dict[\"scale_tensor_data\"] = TensorConfig(shape=[1, ])\n\n scale_op = OpConfig(\n type=\"scale\",\n inputs=input_dict,\n outputs={\"Out\": [\"output_data\"]},\n attrs={\n \"bias\": bias,\n \"bias_after_scale\": bias_after_scale,\n \"scale\": scale\n })\n\n program_config = ProgramConfig(\n ops=[scale_op],\n weights={},\n inputs=input_data_dict,\n outputs=[\"output_data\"])\n\n return program_config\n\n def sample_predictor_configs(self):\n atol, rtol = 1e-5, 1e-5\n target_str = self.get_target()\n if target_str == \"Metal\":\n atol, rtol = 1e-2, 1e-2\n return self.get_predictor_configs(), [\"scale\"], (atol, rtol)\n\n def add_ignore_pass_case(self):\n def _teller1(program_config, predictor_config):\n target_type = predictor_config.target()\n in_shape = list(program_config.inputs[\"input_data\"].shape)\n in_data_type = program_config.inputs[\"input_data\"].dtype\n if target_type == TargetType.Metal:\n if len(in_shape) != 4 or in_data_type != \"float32\":\n return True\n\n def _teller2(program_config, predictor_config):\n target_type = predictor_config.target()\n if target_type == TargetType.Metal:\n return True\n\n def _teller3(program_config, predictor_config):\n target_type = predictor_config.target()\n x_dtype = program_config.inputs[\"input_data\"].dtype\n if target_type == TargetType.OpenCL:\n if x_dtype == np.int32 or x_dtype == np.int64:\n return True\n\n self.add_ignore_check_case(\n _teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,\n \"Lite does not support this op in a specific case. We need to fix it as soon as possible.\"\n )\n self.add_ignore_check_case(\n _teller2, IgnoreReasons.ACCURACY_ERROR,\n \"The op output has diff in a specific case on metal. We need to fix it as soon as possible.\"\n )\n self.add_ignore_check_case(\n _teller3, IgnoreReasons.PADDLELITE_NOT_SUPPORT,\n \"Lite does not support this op when dtype is int32 or int64 on Opencl. \"\n )\n\n def test(self, *args, **kwargs):\n target_str = self.get_target()\n max_examples = 100\n if target_str in [\"OpenCL\", \"Metal\"]:\n # Make sure to generate enough valid cases for specific targets\n max_examples = 2000\n elif target_str in [\"NNAdapter\"]:\n # Make sure to generate enough valid cases for specific targets\n max_examples = 300\n self.run_and_statis(\n quant=False, min_success_num=25, max_examples=max_examples)\n\n\nif __name__ == \"__main__\":\n unittest.main(argv=[''])\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('../')\n\nfrom auto_scan_test import AutoScanTest, IgnoreReasons\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport unittest\n\nimport hypothesis\nfrom hypothesis import given, settings, seed, example, assume\n\nimport numpy as np\nfrom functools import partial\nimport hypothesis.strategies as st\n\n\nclass TestFillConstantOp(AutoScanTest):\n def __init__(self, *args, **kwargs):\n AutoScanTest.__init__(self, *args, **kwargs)\n self.enable_testing_on_place(\n TargetType.Host,\n PrecisionType.FP32,\n DataLayoutType.NCHW,\n thread=[1, 2, 4])\n self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)\n self.enable_devices_on_nnadapter(\n device_names=[\"cambricon_mlu\", \"nvidia_tensorrt\"])\n\n def is_program_valid(self,\n program_config: ProgramConfig,\n predictor_config: CxxConfig) -> bool:\n return True\n\n def sample_program_configs(self, draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=1, max_value=10), min_size=1, max_size=4))\n tensor_shape = draw(\n st.lists(\n st.integers(\n min_value=1, max_value=10), min_size=1, max_size=4))\n dtype = draw(st.sampled_from([2, 3, 5]))\n\n with_value_tensor = draw(st.sampled_from([True, False]))\n with_shape_tensor = draw(st.sampled_from([True, False]))\n if self.get_nnadapter_device_name() == \"nvidia_tensorrt\":\n with_shape_tensor = False\n # nvidia_tensorrt now just supports shape is from attr\n\n def generate_shape_tensor(*args, **kwargs):\n return np.array(tensor_shape).astype(np.int32)\n\n def generate_input(*args, **kwargs):\n if kwargs[\"type\"] == \"int32\":\n return np.random.randint(kwargs[\"low\"], kwargs[\"high\"],\n kwargs[\"shape\"]).astype(np.int32)\n elif kwargs[\"type\"] == \"int64\":\n return np.random.randint(kwargs[\"low\"], kwargs[\"high\"],\n kwargs[\"shape\"]).astype(np.int64)\n elif kwargs[\"type\"] == \"float32\":\n return (kwargs[\"high\"] - kwargs[\"low\"]) * np.random.random(\n kwargs[\"shape\"]).astype(np.float32) + kwargs[\"low\"]\n\n if dtype == 2:\n input_type = \"int32\"\n elif dtype == 3:\n input_type = \"int64\"\n else:\n input_type = \"float32\"\n\n value = draw(st.floats(min_value=-10, max_value=10))\n op_inputs = {}\n program_inputs = {}\n\n #ShapeTensorList not support now \n if (with_value_tensor and with_shape_tensor):\n op_inputs = {\n \"ValueTensor\": [\"value_data\"],\n \"ShapeTensor\": [\"shape_data\"]\n }\n program_inputs = {\n \"value_data\": TensorConfig(data_gen=partial(\n generate_input,\n type=input_type,\n low=-10,\n high=10,\n shape=[1])),\n \"shape_data\":\n TensorConfig(data_gen=partial(generate_shape_tensor))\n }\n elif ((not with_value_tensor) and with_shape_tensor):\n op_inputs = {\"ShapeTensor\": [\"shape_data\"]}\n program_inputs = {\n \"shape_data\":\n TensorConfig(data_gen=partial(generate_shape_tensor))\n }\n elif (with_value_tensor 
and (not with_shape_tensor)):\n op_inputs = {\"ValueTensor\": [\"value_data\"]}\n program_inputs = {\n \"value_data\": TensorConfig(data_gen=partial(\n generate_input,\n type=input_type,\n low=-10,\n high=10,\n shape=[1]))\n }\n\n fill_constant_op = OpConfig(\n type=\"fill_constant\",\n inputs=op_inputs,\n outputs={\"Out\": [\"output_data\"]},\n attrs={\n \"dtype\": dtype,\n \"shape\": in_shape,\n \"value\": value,\n \"force_cpu\": False\n })\n program_config = ProgramConfig(\n ops=[fill_constant_op],\n weights={},\n inputs=program_inputs,\n outputs=[\"output_data\"])\n return program_config\n\n def sample_predictor_configs(self):\n return self.get_predictor_configs(), [\"fill_constant\"], (1e-5, 1e-5)\n\n def add_ignore_pass_case(self):\n def _teller1(program_config, predictor_config):\n dtype = program_config.ops[0].attrs[\"dtype\"]\n is_shape_tensor = \"ShapeTensor\" in list(program_config.inputs.keys(\n ))\n if self.get_nnadapter_device_name(\n ) == \"nvidia_tensorrt\" and dtype != 5:\n return True\n\n self.add_ignore_check_case(\n _teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,\n \"nvidia_tensorrt does now support shape is form tensor now and dtype must be float\"\n )\n\n def test(self, *args, **kwargs):\n self.run_and_statis(quant=False, max_examples=25)\n\n\nif __name__ == \"__main__\":\n unittest.main(argv=[''])\n"
] | [
[
"numpy.ones",
"numpy.random.random",
"numpy.random.randint"
],
[
"numpy.array",
"numpy.random.random",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nuttamas/PycQED_py3 | [
"1ee35c7428d36ed42ba4afb5d4bda98140b2283e",
"1ee35c7428d36ed42ba4afb5d4bda98140b2283e",
"1ee35c7428d36ed42ba4afb5d4bda98140b2283e"
] | [
"pycqed/measurement/CBox_sweep_functions.py",
"pycqed/measurement/pulse_sequences/fluxing_sequences.py",
"pycqed/instrument_drivers/meta_instrument/LutMans/base_lutman.py"
] | [
"import numpy as np\nimport logging\nfrom pycqed.measurement import sweep_functions as swf\nfrom pycqed.measurement.sweep_functions import Soft_Sweep\nfrom pycqed.measurement.waveform_control_CC import waveform as wf\n\n# FIXME: Commented out as there is no module named Experiments.CLEAR.prepare_for_CLEAR.prepare_for_CLEAR\n# from Experiments.CLEAR.prepare_for_CLEAR import prepare_for_CLEAR\n\nimport time\nimport imp\ngauss_width = 10\nimp.reload(wf)\n\n\nclass CBox_Sweep(swf.Hard_Sweep):\n\n def __init__(self, Duplexer=False, **kw):\n self.sweep_control = 'hard'\n if not hasattr(self, 'cal_points'):\n self.cal_points = kw.pop('cal_points', 10)\n\n def prepare(self, **kw):\n pass\n\n def finish(self, **kw):\n pass\n\n######################\n# Time domain sweeps #\n######################\n\n\nclass T1(CBox_Sweep):\n '''\n Performs a T1 measurement using a tektronix and the CBox.\n The tektronix is used for timing the pulses, a Pi-pulse is loaded onto the\n CBox.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, stepsize=2000,\n amp180=4000, f_modulation=-0.02, **kw):\n self.name = 'T1'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! this is copy from AWG_swf\n self.available_stepsizes = [50, 100, 200, 500, 1000, 1500, 2000, 3000]\n\n self.filename = 'FPGA_T1_%i_5014' % (stepsize)\n\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.NoSegments = NoSegments\n self.amp180 = amp180\n self.gauss_width = gauss_width\n self.f_modulation = f_modulation\n\n if stepsize not in self.available_stepsizes:\n logging.error('Stepsize not available')\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_awg_mode(0, 1)\n self.CBox.set_awg_mode(1, 1)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n Wave_I, Wave_Q = wf.mod_gauss(self.amp180, self.gauss_width,\n self.f_modulation)\n self.CBox.set_awg_lookuptable(0, 0, 1, np.round(Wave_I))\n self.CBox.set_awg_lookuptable(0, 0, 0, np.round(Wave_Q))\n # additionally loading to AWG1 for scope\n self.CBox.set_awg_lookuptable(1, 0, 1, np.round(Wave_I))\n self.CBox.set_awg_lookuptable(1, 0, 0, np.round(Wave_Q))\n\n # These two lines should be combined to CBox.set_No_Samples but is\n # untested\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass Lutman_par_with_reload(Soft_Sweep):\n\n def __init__(self, LutMan, parameter):\n '''\n Generic sweep function that combines setting a LutMan parameter\n with reloading lookuptables.\n '''\n super().__init__()\n self.LutMan = LutMan\n self.parameter = parameter\n self.name = parameter.name\n self.parameter_name = parameter.label\n self.unit = parameter.unit\n\n def set_parameter(self, val):\n self.parameter.set(val)\n self.LutMan.load_pulses_onto_AWG_lookuptable()\n\n\nclass Lutman_par_with_reload_single_pulse(Soft_Sweep):\n\n def __init__(self, LutMan, parameter, pulse_names=['X180']):\n '''\n Generic sweep function that combines setting a LutMan parameter\n with reloading lookuptables.\n '''\n super().__init__()\n self.LutMan = LutMan\n self.parameter = parameter\n self.name = parameter.name\n self.parameter_name = parameter.label\n self.unit = parameter.unit\n self.pulse_names = pulse_names\n self.label = parameter.label\n\n def set_parameter(self, val):\n self.parameter.set(val)\n for pulse_name in self.pulse_names:\n self.LutMan.load_pulse_onto_AWG_lookuptable(pulse_name)\n\n\nclass 
LutMan_amp180_90(Soft_Sweep):\n '''\n Sweeps both the amp180 parameter and the amp90 of the CBox_lut_man\n Automatically sets amp90 to half of amp180.\n The amp180 is the sweep parameter that is set and tracked.\n '''\n\n def __init__(self, LutMan, reload_pulses=True, awg_nr=0):\n super(self.__class__, self).__init__()\n self.awg_nr = awg_nr\n self.reload_pulses = reload_pulses\n self.name = 'lookuptable amp180'\n self.parameter_name = 'amp180'\n self.unit = 'mV'\n self.LutMan = LutMan\n\n def set_parameter(self, val):\n self.LutMan.set('Q_amp180', val)\n self.LutMan.set('Q_amp90', val/2.0)\n if self.reload_pulses:\n self.LutMan.load_pulses_onto_AWG_lookuptable()\n\n\nclass DAC_offset(CBox_Sweep):\n '''\n Varies DAC offsets in CBox AWG's. Additionally identity pulses are loaded\n in the lookuptable 0, of I and Q channels\n '''\n\n def __init__(self, AWG_nr, dac_ch, CBox):\n super(self.__class__, self).__init__()\n self.sweep_control = 'soft' # Overwrites 'hard sweep part'\n self.name = 'CBox DAC offset'\n self.parameter_name = 'Voltage'\n self.unit = 'mV'\n self.filename = 'FPGA_DAC_offset_sweep_5014'\n self.dac_channel = dac_ch\n self.AWG_nr = AWG_nr\n self.CBox = CBox\n # any arbitrary sequence that is not time dependent on the pulse\n # trigger will do\n\n def set_parameter(self, val):\n self.CBox.set_dac_offset(self.AWG_nr, self.dac_channel, val)\n\n\nclass Ramsey(CBox_Sweep):\n '''\n Performs a T2 Ramsey measurement using a tektronix and the CBox.\n The tektronix is used for timing the pulses.\n Codewords are used to determine what pulse will be used.\n\n WARNING:\n The artificial detuning is applied by delaying the pi/2 pulses as the\n sideband modulation is the same for every pulse.\n This creates an error in the x-values of the sweep points<50ns.\n This should be taken care of when interpreting data for shorter timescales.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, stepsize=50, **kw):\n print('WARNING, this function is deprecated. 
use Ramsey_tape()')\n # self.name = 'Ramsey'\n # self.parameter_name = 'time'\n # self.unit = 'ns'\n # self.available_stepsizes = [50, 100, 200, 500, 1000, 1500]\n # # NOTE: stepsizes below 50ns are not available because of SBmod freq\n # # self.available_stepsizes = [5, 10, 30, 100, 200, 500, 1000, 1500]\n # self.CBox_lut_man = qt.instruments['CBox_lut_man']\n # self.filename = 'FPGA_Codeword_Ramsey_%i_5014' % (stepsize)\n\n # base_pulse_delay = 200\n # self.sweep_points = np.linspace(stepsize+base_pulse_delay,\n # NoSegments*stepsize + base_pulse_delay,\n # NoSegments)\n # self.NoSegments = NoSegments\n\n # if stepsize not in self.available_stepsizes:\n # raise Exception('Stepsize not available')\n # super(self.__class__, self).__init__(**kw)\n\n # def prepare(self):\n # self.CBox.set_acquisition_mode(0)\n # self.CBox.set_awg_mode(0, 0)\n # self.CBox.set_awg_mode(1, 0)\n # self.AWG.stop()\n # self.AWG.set_setup_filename(self.filename,\n # force_load=False)\n\n # self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n # self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n # self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass Ramsey_tape(CBox_Sweep):\n '''\n Performs a T2 Ramsey measurement using a tektronix and the CBox.\n The tektronix is used for timing the pulses.\n Codewords are used to determine what pulse will be used.\n\n Artificial detuning is applied by delaying the triggers 5 ns\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, stepsize=50, **kw):\n self.name = 'Ramsey'\n print('Using tape mode Ramsey')\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.available_stepsizes = [50, 100, 200, 500, 1000, 1500]\n # NOTE: stepsizes below 50ns are not available because of SBmod freq\n # self.available_stepsizes = [5, 10, 30, 100, 200, 500, 1000, 1500]\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.TD_Meas = qt.instruments['TD_Meas']\n self.filename = 'FPGA_Ramsey_%i_5014' % (stepsize)\n\n base_pulse_delay = 200\n self.sweep_points = np.arange(stepsize+base_pulse_delay,\n NoSegments*(stepsize+5) +\n base_pulse_delay,\n stepsize + 5, dtype=float)\n\n self.NoSegments = NoSegments\n self.NoCalpoints = 10\n\n if stepsize not in self.available_stepsizes:\n raise Exception('Stepsize not available')\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(1, 2)\n self.AWG.stop()\n self.TD_Meas.set_CBox_tape_mode(True)\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n ramsey_tape = [3, 3] * int((self.NoSegments - self.NoCalpoints))\n cal_zero_tape = [0] * int(self.NoCalpoints/2)\n cal_one_tape = [1] * int(self.NoCalpoints/2)\n tape = np.array(ramsey_tape+cal_zero_tape+cal_one_tape)\n\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(1, len(tape), tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass Echo(CBox_Sweep):\n '''\n Performs a T2 Echo measurement using a tektronix and the CBox.\n The tektronix is used for timing the pulses, a Pi-pulse is loaded onto the\n CBox.\n '''\n\n def __init__(self, stepsize,\n amp180, amp90, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, f_modulation=-0.02, **kw):\n print(\"amp180\", amp180)\n self.name = 'Echo'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! 
this is copy from AWG_swf\n self.available_stepsizes = [50, 100, 200, 500, 1000, 1500, 2000, 3000]\n\n self.filename = 'FPGA_Echo_%i_5014' % (stepsize)\n\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.NoSegments = NoSegments\n self.amp180 = amp180\n self.amp90 = amp90\n self.gauss_width = gauss_width\n self.f_modulation = f_modulation\n\n if stepsize not in self.available_stepsizes:\n logging.error('Stepsize not available')\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 0)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n Wave_I_180, Wave_Q_180 = wf.mod_gauss(self.amp180, self.gauss_width,\n self.f_modulation)\n Wave_I_90, Wave_Q_90 = wf.mod_gauss(self.amp90, self.gauss_width,\n self.f_modulation)\n self.CBox.set_awg_lookuptable(0, 7, 1, np.round(Wave_I_180))\n self.CBox.set_awg_lookuptable(0, 7, 0, np.round(Wave_Q_180))\n self.CBox.set_awg_lookuptable(0, 0, 1, np.round(Wave_I_90))\n self.CBox.set_awg_lookuptable(0, 0, 0, np.round(Wave_Q_90))\n\n # These two lines should be combined to CBox.set_No_Samples but is\n # untested\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass T1_tape(CBox_Sweep):\n '''\n Performs a T1 measurement using a tektronix for metronome and the CBox to\n produce pulses in tape mode. The tektronix is used for timing the pulses, a\n Pi-pulse is loaded onto the CBox.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=70, stepsize=4000,\n amp180=4000, amp90=2000, f_modulation=-0.02, cal_points=10, **kw):\n self.name = 'T1_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! 
this is copy from AWG_swf\n self.available_stepsizes = [50, 100, 200,\n 500, 1000, 1500, 2000, 3000, 4000]\n\n self.filename = 'FPGA_Tape_T1_%i_5014' % (stepsize)\n\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.NoSegments = NoSegments\n self.amp180 = amp180\n self.amp90 = amp90\n self.gauss_width = gauss_width\n self.f_modulation = f_modulation\n\n if stepsize not in self.available_stepsizes:\n logging.error('Stepsize not available')\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n print(\"CBox set to mode 0\")\n self.CBox.set_awg_mode(0, 0)\n self.CBox.set_awg_mode(1, 0)\n self.AWG.stop()\n print(\"AWG is stopped\")\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n Wave_I_180, Wave_Q_180 = wf.mod_gauss(self.amp180, self.gauss_width,\n self.f_modulation)\n Wave_I_0 = Wave_I_180*0\n Wave_Q_0 = Wave_I_0\n\n self.CBox.set_awg_lookuptable(0, 0, 1, np.round(Wave_I_180))\n self.CBox.set_awg_lookuptable(0, 0, 0, np.round(Wave_Q_180))\n print(\"1\")\n self.CBox.set_awg_lookuptable(0, 7, 1, np.round(Wave_I_0))\n self.CBox.set_awg_lookuptable(0, 7, 0, np.round(Wave_Q_0))\n # copying the tables to AWG2 for scope\n self.CBox.set_awg_lookuptable(1, 0, 1, np.round(Wave_I_180))\n self.CBox.set_awg_lookuptable(1, 0, 0, np.round(Wave_Q_180))\n print(\"2\")\n self.CBox.set_awg_lookuptable(1, 7, 1, np.round(Wave_I_0))\n self.CBox.set_awg_lookuptable(1, 7, 0, np.round(Wave_Q_0))\n sequence_points = self.NoSegments-self.cal_points\n tape_length = (sequence_points)*self.NoSegments\n tape = 7*np.ones(tape_length)\n print(\"tape_length\", tape_length)\n for i in range(sequence_points):\n tape[(i+1)*(sequence_points)-i-1] = 0\n print(tape[i*(sequence_points):i *\n (sequence_points)+sequence_points])\n print(\"done first part\")\n # adding calibration points\n for i in range(self.cal_points):\n first_cal_segment = (sequence_points)**2\n segment = first_cal_segment+(i+1)*(sequence_points)-1\n # print segment\n if i > (self.cal_points/2-1):\n tape[segment] = 0\n # print segment-(sequence_points)+1\n print(tape[segment-sequence_points+1:segment+1])\n print(i)\n print(\"3\")\n self.CBox.set_awg_tape(0, len(tape), tape)\n print(\"tape length\", len(tape))\n # copying the tables to AWG2 for scope\n self.CBox.set_awg_tape(1, len(tape), tape)\n # These two lines should be combined to CBox.set_No_Samples but is\n # untested\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n print(\"tape is loaded\")\n # for i in range(len(tape)):\n # if np.mod(i,20) == 0:\n # print (\"#\\n\")\n\n\nclass OnOff_touch_n_go(CBox_Sweep):\n '''\n Performs OnOff measurement using the CBox to produce pulses in codeword\n tape mode.\n '''\n\n def __init__(self,\n NoSegments=2, stepsize=2000, pulses='OffOn',\n NoShots=8000, **kw):\n self.name = 'FPGA_touch_n_go_calibration'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.NoSegments = NoSegments\n self.NoShots = NoShots\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, stepsize*NoSegments,\n NoSegments)\n self.pulses = pulses\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_log_length(self.NoShots)\n self.CBox.set_awg_mode(0, 2)\n self.AWG.stop()\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n if self.pulses == 'OffOn':\n tape = np.array([0, 1])\n if self.pulses == 'OffOff':\n tape = np.array([0, 0])\n if 
self.pulses == 'OnOn':\n tape = np.array([1, 1])\n self.CBox.set_awg_tape(0, len(tape), tape)\n print(\"tape\", tape)\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass custom_tape_touch_n_go(CBox_Sweep):\n\n def __init__(self,\n NoSegments=2, stepsize=2000,\n custom_tape=None, NoShots=8000, **kw):\n self.name = 'custom_tape_touch_n_go'\n self.parameter_name = 'msmt index'\n self.unit = ''\n self.NoSegments = NoSegments\n self.NoShots = NoShots\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.arange(NoSegments)\n self.custom_tape = custom_tape\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_run_mode(0)\n print('setting nr of shots to', self.NoShots)\n self.CBox.set_log_length(self.NoShots)\n self.CBox.set_awg_mode(0, 2)\n self.AWG.stop()\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n if self.custom_tape is None:\n tape = np.array([0, 0])\n else:\n tape = self.custom_tape\n print(\"using the custom tape \", tape)\n self.CBox.set_awg_tape(0, len(tape), tape)\n\n\nclass random_telegraph_tape_touch_n_go(CBox_Sweep):\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=2, stepsize=2000,\n p_switch_us=0, NoShots=8000, pulse_a=0, pulse_b=1, **kw):\n self.name = 'random_telegraph_tape_touch_n_go'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.NoSegments = NoSegments\n self.NoShots = NoShots\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, stepsize*NoSegments,\n NoSegments)\n self.p_switch_us = p_switch_us\n self.p_switch = 1-(1-self.p_switch_us)**(stepsize/1000)\n self.pulse_a = pulse_a\n self.pulse_b = pulse_b\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_log_length(self.NoShots)\n self.CBox.set_awg_mode(0, 2)\n self.AWG.stop()\n\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n if self.NoShots > 4000:\n tape_elements = 4000\n else:\n tape_elements = self.NoShots\n tape = np.zeros(tape_elements)\n tape[0] = self.pulse_a\n for i in range(tape_elements-1):\n if np.random.rand(1) < self.p_switch: # flipping with chance p_switch\n if tape[i] == self.pulse_a:\n tape[i+1] = self.pulse_b\n else:\n tape[i+1] = self.pulse_a\n else: # no flipping event\n tape[i+1] = tape[i]\n self.CBox.set_awg_tape(0, len(tape), tape)\n\n\nclass AllXY(CBox_Sweep):\n '''\n Performs AllXY measurement using the CBox to produce pulses in codeword\n trigger mode. The tektronix is used for the coded trigges.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", NoSegments=42, stepsize=1, **kw):\n print('Deprecated, recommend using AllXY_tape() instead')\n\n self.name = 'AllXY'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! 
this is copy from AWG_swf\n self.filename = 'FPGA_AllXY_5014'\n self.cal_points = [list(range(10)), list(range(-8, 0))]\n self.NoSegments = NoSegments\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 0)\n self.CBox.set_awg_mode(1, 0)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass AllXY_tape(CBox_Sweep):\n '''\n Performs AllXY measurement using the CBox toproduce pulses in tape mode.\n The tektronix is used to time the pulses.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=42, stepsize=1, **kw):\n self.name = 'AllXY_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.filename = 'FPGA_Tape_AllXY_5014'\n self.cal_points = [list(range(10)), list(range(-8, 0))]\n self.NoSegments = NoSegments\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.TD_Meas = qt.instruments['TD_Meas']\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(2, 2)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(2)\n self.TD_Meas.set_CBox_tape_mode(True)\n # print \"AWG 1 luts loaded\"\n tape = np.array([0, 0, 0, 0, 1, 1, 1, 1, # 1, 3\n 2, 2, 2, 2, 1, 2, 1, 2, # 5, 7\n 2, 1, 2, 1, 3, 0, 3, 0, # 9, 11\n 4, 0, 4, 0, 3, 4, 3, 4, # 13, 15\n 4, 3, 4, 3, 3, 2, 3, 2, # 17, 19\n 4, 1, 4, 1, 1, 4, 1, 4, # 21,23\n 2, 3, 2, 3, 3, 1, 3, 1, # 25, 27\n 1, 3, 1, 3, 4, 2, 4, 2, # 29, 31\n 2, 4, 2, 4, 1, 0, 1, 0, # 33, 35\n 2, 0, 2, 0, 3, 3, 3, 3, # 37, 39\n 4, 4, 4, 4]) # 41\n\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(2, len(tape), tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass OnOff_tape(CBox_Sweep):\n '''\n Performs OnOff measurement using the CBox toproduce pulses in tape mode.\n The tektronix is used to time the pulses.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=2, stepsize=1, pulses='OffOn', **kw):\n self.name = 'OnOff_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.filename = 'FPGA_Tape_OnOff_5014'\n self.NoSegments = NoSegments\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.pulses = pulses\n print('New version tape')\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(1, 2)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n # print \"AWG 1 luts loaded\"\n if self.pulses == 'OffOn':\n tape = np.array([0, 1])\n if self.pulses == 'OffOff':\n tape = np.array([0, 0])\n if self.pulses == 'OnOn':\n tape = np.array([1, 1])\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(1, len(tape), 
tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass OnOff_transients(CBox_Sweep):\n '''\n Performs OnOff measurement using the CBox toproduce pulses in tape mode.\n The tektronix is used to time the pulses.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=2, stepsize=1, pulses='OffOn', **kw):\n self.name = 'OnOff_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.filename = 'FPGA_Tape_OnOff_5014'\n self.NoSegments = NoSegments\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.pulses = pulses\n print('New version tape')\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(1, 2)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n # print \"AWG 1 luts loaded\"\n if self.pulses == 'OffOn':\n tape = np.array([0, 1])\n if self.pulses == 'OffOff':\n tape = np.array([0, 0])\n if self.pulses == 'OnOn':\n tape = np.array([1, 1])\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(1, len(tape), tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass single_element_tape_test(CBox_Sweep):\n '''\n Performs a measurement similar to AllXY in the syndrome it produces\n but only uses a single pulse per segment.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=42, stepsize=1, **kw):\n self.name = 'Single_element_test_tape'\n self.parameter_name = 'time'\n self.unit = 'ns'\n self.filename = 'FPGA_tape_single_test_5014'\n self.cal_points = [list(range(10)), list(range(-8, 0))]\n self.NoSegments = NoSegments\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n print('New version tape')\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 2)\n self.CBox.set_awg_mode(1, 2)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n tape = np.array([0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, # 10 times identity\n\n 3, 4, 3, 4, 3,\n 4, 3, 4, 3, 4,\n 3, 4, 3, 4, 3,\n 4, 3, 4, 3, 4,\n 3, 4, 3, 4, # 24 times pi/2 pulses\n 1, 2, 1, 2, 1,\n 2, 1, 2 # 8 times pi pulse\n ])\n\n tape = np.array([0, 0, 1, 1, 2,\n 2, 1, 2, 2, 1, # 10 times identity\n\n 3, 0, 4, 0, 3,\n 4, 4, 3, 3, 2,\n 4, 1, 1, 4, 2,\n 3, 3, 1, 1, 3,\n 4, 2, 2, 4, # 24 times pi/2 pulses\n\n # 1, 2, 1, 2, 1,\n # 2, 1, 2\n 1, 0, 2, 0, 3,\n 3, 4, 4 # 8 times pi pulse\n ])\n\n self.CBox.set_awg_tape(0, len(tape), tape)\n self.CBox.set_awg_tape(1, len(tape), tape)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\nclass drag_detuning(CBox_Sweep):\n '''\n Performs drag_detuning measurement using the CBox to produce pulses in codeword\n trigger mode. The tektronix is used for the coded trigges.\n '''\n\n def __init__(self,\n qubit_suffix=\"\", NoSegments=2, stepsize=1, **kw):\n self.name = 'drag_detuning'\n self.parameter_name = 'time'\n self.unit = 'ns'\n # Available stepsizes needs to be verified! 
this is copy from AWG_swf\n self.filename = 'FPGA_DragDetuning_5014'\n self.NoSegments = NoSegments\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n super(self.__class__, self).__init__(**kw)\n self.sweep_points = np.linspace(stepsize, NoSegments*stepsize,\n NoSegments)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 0)\n self.CBox.set_awg_mode(1, 0)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n\n NoAvg = self.CBox.get_avg_size()\n self.CBox.set_averaging_parameters(self.NoSegments, NoAvg)\n\n\nclass flipping_sequence(CBox_Sweep):\n '''\n Loads a codeword trigger sequence that consists of applying a X90 pulse\n follwed by N X180 pulses. With 1<N<50 followed by 10 calibration points.\n '''\n\n def __init__(self, gauss_width=25,\n qubit_suffix=\"\", **kw):\n self.name = 'Flipping sequence'\n self.parameter_name = 'number of X180 pulses '\n self.unit = 'N'\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.filename = 'FPGA_X90_N_X180_5014'\n self.NoSegments = 60\n self.sweep_points = np.linspace(\n 1, 2 * self.NoSegments, self.NoSegments)\n super(self.__class__, self).__init__(**kw)\n\n def prepare(self):\n self.CBox.set_acquisition_mode(0)\n self.CBox.set_awg_mode(0, 0)\n self.CBox.set_awg_mode(1, 0)\n self.AWG.stop()\n self.AWG.set_setup_filename(self.filename,\n force_load=False)\n\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(1)\n self.CBox.set_nr_samples(self.NoSegments)\n\n\n######################\n# CLEAR sweeps #\n######################\n\n# Rampdown sweepfunctions\nclass CBox_CLEAR_amplitude_1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude 1'\n self.parameter_name = 'CLEAR pulse amplitude 1'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude 2'\n self.parameter_name = 'CLEAR pulse amplitude 2'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_a1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_a1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude a1'\n self.parameter_name = 'CLEAR pulse amplitude a1'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_a1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_a2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, 
**kw):\n super(CBox_CLEAR_amplitude_a2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude a2'\n self.parameter_name = 'CLEAR pulse amplitude a2'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_a2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_b1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_b1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude b1'\n self.parameter_name = 'CLEAR pulse amplitude b1'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_b1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_amplitude_b2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_amplitude_b2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse amplitude b2'\n self.parameter_name = 'CLEAR pulse amplitude b2'\n self.unit = 'mV'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_amp_CLEAR_b2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase 1'\n self.parameter_name = 'CLEAR pulse phase 1'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase 2'\n self.parameter_name = 'CLEAR pulse phase 2'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_a1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_a1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase a1'\n self.parameter_name = 'CLEAR pulse phase a1'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_a1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_a2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_a2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase a2'\n self.parameter_name = 'CLEAR pulse phase a2'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n 
self.CBox_lut_man_2.set_M_phase_CLEAR_a2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_b1(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_b1, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase b1'\n self.parameter_name = 'CLEAR pulse phase b1'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_b1(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_phase_b2(Soft_Sweep):\n '''\n Setting the amplitude of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_phase_b2, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse phase b2'\n self.parameter_name = 'CLEAR pulse phase b2'\n self.unit = 'deg'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_phase_CLEAR_b2(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_length_unc(Soft_Sweep):\n '''\n Setting the length of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_length_unc, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse length unconditional'\n self.parameter_name = 'CLEAR pulse length unconditional'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_length_CLEAR_unc(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_CLEAR_length_c(Soft_Sweep):\n '''\n Setting the length of the CBox CLEAR pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_CLEAR_length_unc, self).__init__()\n self.CBox_lut_man_2 = qt.instruments['CBox_lut_man_2']\n self.name = 'CBox CLEAR pulse length conditional'\n self.parameter_name = 'CLEAR pulse length conditional'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n self.CBox_lut_man_2.set_M_length_CLEAR_c(val)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(1)\n self.CBox_lut_man_2.load_pulses_onto_AWG_lookuptable(2)\n\n\nclass CBox_tng_RO_Pulse_length(Soft_Sweep):\n '''\n Setting the length of the tng Readout Pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_tng_RO_Pulse_length, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.name = 'CBox_tng_RO_Pulse_length'\n self.parameter_name = 'Readout pulse length'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n self.CBox.set_tng_readout_pulse_length(val)\n\n\nclass CBox_integration_length(Soft_Sweep):\n '''\n Setting the length of the tng Readout Pulse\n '''\n\n def __init__(self, **kw):\n super(CBox_integration_length, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.name = 'CBox_integration_length'\n self.parameter_name = 'Readout integration length'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n self.CBox.set_integration_length(int(val/5))\n\n\nclass CBox_tng_heartbeat_interval(Soft_Sweep):\n '''\n Setting the length of the tng heartbeat interval\n '''\n\n def __init__(self, **kw):\n super(CBox_tng_heartbeat_interval, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.name = 'CBox_tng_heartbeat_interval'\n self.parameter_name = 'heartbeat_interval'\n self.unit = 
'ns'\n\n def set_parameter(self, val):\n self.CBox.set_tng_heartbeat_interval(val)\n\n\nclass CBox_tng_burst_heartbeat_and_heartbeat_interval(Soft_Sweep):\n '''\n Setting the length burst heartbeat interval\n Setting the heartbeat to: burst heartbeat interval * iterations\n +200000 for relaxation to steady state\n '''\n\n def __init__(self, **kw):\n super(CBox_tng_burst_heartbeat_and_heartbeat_interval, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.name = 'CBox_tng_burst_heartbeat_interval'\n self.parameter_name = 'burst_heartbeat_interval'\n self.unit = 'ns'\n\n def set_parameter(self, val):\n iterations = self.CBox.get_tng_burst_heartbeat_n()\n self.CBox.set_tng_heartbeat_interval(val*iterations+200000)\n self.CBox.set_tng_burst_heartbeat_interval(val)\n\n\nclass CBox_tng_Ramsey_idle_and_heartbeat(Soft_Sweep):\n '''\n Setting the length of the tng Readout Pulse\n '''\n\n def __init__(self, heartbeat_start, **kw):\n super(CBox_tng_Ramsey_idle_and_heartbeat, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.name = 'CBox_tng_Ramsey_idle_and_heartbeat'\n self.parameter_name = 'Ramsey_idle'\n self.unit = 'ns'\n self.heartbeat_start = heartbeat_start\n\n def set_parameter(self, val):\n self.CBox_lut_man.set_Ramsey_idling(val)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n self.CBox.set_tng_readout_delay(100+val)\n self.CBox.set_tng_heartbeat_interval(self.heartbeat_start+val)\n\n\nclass CBox_tng_Ramsey_idle_and_heartbeat_v2(Soft_Sweep):\n '''\n Setting the length of the tng Readout Pulse\n Differs from old version that it uses 2! pulses with a delay between them\n '''\n\n def __init__(self, burst_heartbeat_start, **kw):\n super(CBox_tng_Ramsey_idle_and_heartbeat_v2, self).__init__()\n self.CBox = qt.instruments['CBox']\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.name = 'CBox_tng_Ramsey_idle_and_heartbeat'\n self.parameter_name = 'Ramsey_idle'\n self.unit = 'ns'\n self.burst_heartbeat_start = burst_heartbeat_start\n\n def set_parameter(self, val):\n self.CBox.set_tng_readout_delay(100)\n self.CBox.set_tng_second_pre_rotation_delay(100+val)\n self.CBox.set_tng_burst_heartbeat_interval(self.burst_heartbeat_start\n + val)\n\n\nclass None_Sweep_tape_restart(Soft_Sweep):\n\n def __init__(self, sweep_control='soft', **kw):\n super(None_Sweep_tape_restart, self).__init__()\n self.sweep_control = sweep_control\n self.name = 'None_Sweep_tape_restart'\n self.parameter_name = 'pts'\n self.unit = 'arb. unit'\n self.CBox = qt.instruments['CBox']\n\n def set_parameter(self, val):\n '''\n Set the parameter(s) to be sweeped. 
Differs per sweep function\n '''\n self.CBox.restart_awg_tape(0)\n self.CBox.restart_awg_tape(1)\n self.CBox.restart_awg_tape(2)\n\n\nclass prepare_for_conditional_depletion(Soft_Sweep):\n\n def __init__(self, AllXY_trigger=200, sweep_control='soft', double_pulse_Ramsey_idling=100, RTF_qubit_pulses=False, **kw):\n super(prepare_for_conditional_depletion, self).__init__()\n import Experiments.CLEAR.prepare_for_CLEAR as pfC # FIXME: import error\n self.pfC = pfC\n self.sweep_control = sweep_control\n self.name = 'prepare_for_conditional_depletion'\n self.parameter_name = 'depletion_pulse_length'\n self.unit = 'ns'\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.AllXY_trigger = AllXY_trigger\n self.double_pulse_Ramsey_idling = double_pulse_Ramsey_idling\n self.CBox = qt.instruments['CBox']\n self.RTF_qubit_pulses = RTF_qubit_pulses\n\n def set_parameter(self, val):\n '''\n Set the parameter(s) to be sweeped. Differs per sweep function\n '''\n\n self.pfC.prepare_for_CLEAR(length=300, depletion=True,\n integration=400,\n conditional=True, CLEAR_length=val,\n CLEAR_double_segment=False,\n CLEAR_double_frequency=True,\n cost_function='AllXY',\n AllXY_trigger=self.AllXY_trigger)\n if self.RTF_qubit_pulses:\n self.CBox_lut_man.set_lut_mapping(['I', 'X180', 'X90_X180_mX90',\n 'X90_X90', 'X90_X180_X90'])\n # This sets the idling in the X90_X90 element\n self.CBox_lut_man.set_Ramsey_idling(\n self.double_pulse_Ramsey_idling)\n self.CBox.set_tng_readout_delay(\n 100 + self.double_pulse_Ramsey_idling)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n\n\nclass prepare_for_unconditional_depletion(Soft_Sweep):\n\n def __init__(self, AllXY_trigger=200, sweep_control='soft', RTF_qubit_pulses=False, double_pulse_Ramsey_idling=100, **kw):\n super(prepare_for_unconditional_depletion, self).__init__()\n import Experiments.CLEAR.prepare_for_CLEAR as pfC # FIXME: import error\n self.pfC = pfC\n self.sweep_control = sweep_control\n self.name = 'prepare_for_unconditional_depletion'\n self.parameter_name = 'depletion_pulse_length'\n self.unit = 'ns'\n self.CBox_lut_man = qt.instruments['CBox_lut_man']\n self.CBox = qt.instruments['CBox']\n self.AllXY_trigger = AllXY_trigger\n self.RTF_qubit_pulses = RTF_qubit_pulses\n self.double_pulse_Ramsey_idling = double_pulse_Ramsey_idling\n\n def set_parameter(self, val):\n '''\n Set the parameter(s) to be sweeped. Differs per sweep function\n '''\n self.pfC.prepare_for_CLEAR(length=300, depletion=True,\n integration=460,\n conditional=False,\n CLEAR_length=val,\n CLEAR_double_segment=True,\n CLEAR_double_frequency=True,\n cost_function='AllXY',\n AllXY_trigger=self.AllXY_trigger)\n if self.RTF_qubit_pulses:\n self.CBox_lut_man.set_lut_mapping(['I', 'X180', 'X90_X180_mX90',\n 'X90_X90', 'X90_X180_X90'])\n # This sets the idling in the X90_X90 element\n self.CBox_lut_man.set_Ramsey_idling(\n self.double_pulse_Ramsey_idling)\n self.CBox.set_tng_readout_delay(\n 100 + self.double_pulse_Ramsey_idling)\n self.CBox_lut_man.load_pulses_onto_AWG_lookuptable(0)\n",
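Editor's note (illustrative sketch, not part of the dataset row above): the `random_telegraph_tape_touch_n_go` sweep in the sweep-functions source above converts a per-microsecond switching probability into a per-segment one, `p_switch = 1 - (1 - p_switch_us)**(stepsize/1000)`, and then walks the tape, flipping between two pulse indices with that chance at each step. The snippet below is a minimal, self-contained Python re-statement of that tape-generation logic; the helper name `make_telegraph_tape` and the seeded NumPy generator are assumptions for illustration, not part of the original PycQED code.

import numpy as np


def make_telegraph_tape(n_elements, p_switch_us, stepsize_ns=2000,
                        pulse_a=0, pulse_b=1, rng=None):
    """Sketch of the tape construction performed in
    random_telegraph_tape_touch_n_go.prepare() above (hypothetical helper)."""
    rng = np.random.default_rng() if rng is None else rng
    # per-segment switching probability derived from the per-microsecond one
    p_switch = 1 - (1 - p_switch_us) ** (stepsize_ns / 1000)
    tape = np.zeros(n_elements, dtype=int)
    tape[0] = pulse_a
    for i in range(n_elements - 1):
        if rng.random() < p_switch:  # flipping event with chance p_switch
            tape[i + 1] = pulse_b if tape[i] == pulse_a else pulse_a
        else:                        # no flip: repeat the previous pulse
            tape[i + 1] = tape[i]
    return tape


# Example: a 4000-element tape with a 1% switching chance per microsecond
tape = make_telegraph_tape(4000, p_switch_us=0.01)

In the original sweep class the resulting array is what gets uploaded with `self.CBox.set_awg_tape(0, len(tape), tape)`; here it is simply returned so the logic can be inspected in isolation.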
"import logging\nimport os\nimport numpy as np\nfrom copy import deepcopy\ntry:\n from math import gcd\nexcept: # Moved to math in python 3.5, this is to be 3.4 compatible\n from fractions import gcd\nimport qcodes as qc\nfrom pycqed.measurement.waveform_control import element\nfrom pycqed.measurement.waveform_control import pulse\nfrom pycqed.measurement.waveform_control import sequence\nfrom pycqed.utilities.general import add_suffix_to_dict_keys\nfrom pycqed.measurement.waveform_control import pulsar\nfrom pycqed.measurement.waveform_control.element import calculate_time_correction\nfrom pycqed.measurement.pulse_sequences.standard_elements import multi_pulse_elt\nfrom pycqed.measurement.pulse_sequences.standard_elements import distort_and_compensate\n\nfrom importlib import reload\nreload(pulse)\nfrom pycqed.measurement.waveform_control import pulse_library\nreload(pulse_library)\n\nstation = qc.station\n\n\ndef single_pulse_seq(pulse_pars=None,\n comp_pulse=True,\n verbose=False,\n distortion_dict=None,\n return_seq=False):\n '''\n\n '''\n if pulse_pars is None:\n pulse_pars = {'pulse_type': 'SquarePulse',\n 'pulse_delay': .1e-6,\n 'channel': 'ch3',\n 'amplitude': 0.5,\n 'length': .1e-6,\n 'dead_time_length': 10e-6}\n minus_pulse_pars = {'pulse_type': 'SquarePulse',\n 'pulse_delay': 3e-6 + pulse_pars['length'] + pulse_pars['pulse_delay'],\n 'channel': pulse_pars['channel'],\n 'amplitude': -pulse_pars['amplitude'],\n 'length': pulse_pars['length'],\n 'dead_time_length': 10e-6}\n\n dead_time_pulse = {'pulse_type': 'SquarePulse',\n 'pulse_delay': (minus_pulse_pars['length']),\n 'channel': pulse_pars['channel'],\n 'amplitude': 0,\n 'length': pulse_pars['dead_time_length']}\n\n trig_marker = {'pulse_type': 'SquarePulse',\n 'pulse_delay': 0.,\n 'channel': 'ch1_marker1',\n 'amplitude': 1.,\n 'length': .1e-6}\n # 'length': 5e-6}\n seq_name = 'Square_seq'\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n for i, iter in enumerate([0, 1]): # seq has to have at least 2 elts\n\n if comp_pulse:\n pulse_list = [\n pulse_pars, trig_marker, minus_pulse_pars, dead_time_pulse]\n else:\n pulse_list = [pulse_pars, trig_marker, dead_time_pulse]\n # pulse_list = [pulse_pars, dead_time_pulse]\n\n el = multi_pulse_elt(i, station, pulse_list)\n el_list.append(el)\n\n else:\n preloaded_kernels = []\n for i, el in enumerate(el_list):\n if distortion_dict is not None:\n el = distort_and_compensate(\n el, distortion_dict)\n el_list[i] = el\n seq.append_element(el, trigger_wait=True)\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n if return_seq:\n return seq, el_list\n else:\n return seq\n\n\ndef Ram_Z_seq(operation_dict, q0, distortion_dict,\n times,\n recovery_phase=0,\n RO_delay=3e-6,\n artificial_detuning=None,\n operation_name='Z',\n cal_points=True,\n verbose=False, upload=True):\n '''\n Performs a Ram-Z sequence similar to a conventional echo sequence.\n\n Timings of sequence\n <--- tau --->\n |mX90| | Z | |recPi2|---|RO|\n '''\n seq_name = 'Ram-Z-seq_{}'.format(q0)\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n sequencer_config = operation_dict['sequencer_config']\n # Allows using some other flux pulse to perform the RamZ with\n if (('Z '+q0) not in operation_dict) or (operation_name != 'Z'):\n operation_dict['Z ' + q0] = deepcopy(\n operation_dict[operation_name + ' ' + q0])\n\n recPi2 = deepcopy(operation_dict['X90 '+q0])\n\n operation_dict['recPi2 ' + q0] = recPi2\n\n for i, tau in 
enumerate(times): # seq has to have at least 2 elts\n if artificial_detuning is not None:\n D_phase = ((tau-times[0]) * artificial_detuning * 360) % 360\n operation_dict['recPi2 ' + q0]['phase'] = D_phase\n operation_dict['Z ' + q0]['length'] = tau\n pulse_list = ['mX90 '+q0, 'Z '+q0, 'recPi2 ' + q0, 'RO ' + q0]\n\n # calibration points overwrite the pulse_combinations list\n if cal_points and ((i == (len(times)-4) or i == (len(times)-3))):\n pulse_list = ['I '+q0, 'RO ' + q0]\n\n elif cal_points and ((i == (len(times)-2) or i == (len(times)-1))):\n pulse_list = ['X180 '+q0, 'RO ' + q0]\n pulses = []\n for p in pulse_list:\n pulses += [operation_dict[p]]\n el = multi_pulse_elt(i, station, pulses, sequencer_config)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n if distortion_dict is not None:\n print('\\r Distorting element {}/{} '.format(i+1, len(times)),\n end='')\n el = distort_and_compensate(\n el, distortion_dict)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n\n\ndef Echo_Z_seq(operation_dict, q0, distortion_dict,\n times,\n recovery_phases=[0],\n RO_delay=3e-6,\n operation_name='Z ',\n Z_signs=[+1, +1],\n echo_MW_pulse=True,\n artificial_detuning=None,\n cal_points=True,\n verbose=False, upload=True):\n '''\n Performs a Ram-Z sequence similar to a conventional echo sequence.\n\n Timings of sequence\n <--- tau --->\n |mX90| | Z | |recPi2|---|RO|\n '''\n seq_name = 'Echo_Z_seq_{}'.format(q0)\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n sequencer_config = operation_dict['sequencer_config']\n ########################################################\n ########################################################\n pulse_list = ['mX90 '+q0,\n 'Z0 '+q0, 'mid_pulse '+q0, 'Z1 '+q0,\n 'recPi2 ' + q0, 'RO ' + q0]\n ########################################################\n ########################################################\n operation_dict['Z0 ' + q0] = deepcopy(\n operation_dict[operation_name + ' ' + q0])\n operation_dict['Z1 ' + q0] = deepcopy(\n operation_dict[operation_name + ' ' + q0])\n\n operation_dict['Z0 ' + q0]['amplitude'] *= Z_signs[0]\n operation_dict['Z1 ' + q0]['amplitude'] *= Z_signs[1]\n if echo_MW_pulse:\n pulse_list[2] = 'X180 '+q0\n else:\n pulse_list[2] = 'I '+q0\n\n recPi2 = deepcopy(operation_dict['X90 '+q0])\n\n operation_dict['recPi2 ' + q0] = recPi2\n\n # allows sweeping any variable\n if len(recovery_phases) == 1: # assumes it is a list\n recovery_phases *= len(times)\n if len(times) == 1: # assumes it is a list\n times *= len(recovery_phases)\n\n for i, tau in enumerate(times): # seq has to have at least 2 elts\n operation_dict['Z0 ' + q0]['length'] = tau/2\n operation_dict['Z1 ' + q0]['length'] = tau/2\n D_phase = recovery_phases[i]\n if artificial_detuning is not None:\n D_phase += ((tau-times[0]) * artificial_detuning * 360) % 360\n operation_dict['recPi2 ' + q0]['phase'] = D_phase % 360\n\n # calibration points overwrite the pulse_combinations list\n if cal_points and ((i == (len(times)-4) or i == (len(times)-3))):\n pulse_list = ['I '+q0, 'RO ' + q0]\n\n elif cal_points and ((i == (len(times)-2) or i == (len(times)-1))):\n pulse_list = ['X180 '+q0, 'RO ' + q0]\n pulses = []\n for p in pulse_list:\n pulses += [operation_dict[p]]\n el = multi_pulse_elt(i, station, pulses, sequencer_config)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n if distortion_dict is not None:\n print('\\r Distorting element {}/{} '.format(i+1, 
len(times)),\n end='')\n el = distort_and_compensate(\n el, distortion_dict)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n\n\ndef Ram_Z_delay_seq(operation_dict, q0, distortion_dict,\n inter_pulse_delay=300e-9, recovery_phase=0,\n RO_delay=3e-6,\n operation_name='Z',\n times=np.arange(-100e-9, 400e-9, 25e-9),\n cal_points=True,\n verbose=False, upload=True,\n return_seq=False):\n '''\n Performs a Ram-Z sequence useful for calibrating timings of flux pulses\n\n Timings of sequence\n <-tau_inter_pulse->\n |mX90| --- |Z| --- |recPi2|---|RO|\n <- t -> <-- dt1 -->\n\n '''\n seq_name = 'Ram-Z-seq_{}'.format(q0)\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n sequencer_config = operation_dict['sequencer_config']\n # Allows using some other flux pulse to perform the RamZ with\n if (('Z '+q0) not in operation_dict) or (operation_name != 'Z'):\n operation_dict['Z ' + q0] = deepcopy(\n operation_dict[operation_name + ' ' + q0])\n\n # Setting the RO very high to ensure no overlap when moving the flux pulse\n operation_dict['RO ' + q0]['pulse_delay'] = RO_delay\n for i, tau in enumerate(times): # seq has to have at least 2 elts\n # Calibration points\n if (i == (len(times)-4) or i == (len(times)-3)):\n pulse_list = ['I '+q0, 'RO ' + q0]\n elif (i == (len(times)-2) or i == (len(times)-1)):\n pulse_list = ['X180 '+q0, 'RO ' + q0]\n else:\n operation_dict['Z '+q0]['pulse_delay'] = tau\n t1 = inter_pulse_delay - tau # refpoint is start of flux pulse\n\n recPi2 = deepcopy(operation_dict['X90 '+q0])\n recPi2['refpoint'] = 'start'\n recPi2['phase'] = recovery_phase\n recPi2['pulse_delay'] = t1\n operation_dict['recPi2 ' + q0] = recPi2\n\n pulse_list = ['mX90 '+q0, 'Z '+q0, 'recPi2 ' + q0, 'RO ' + q0]\n\n pulses = []\n for p in pulse_list:\n pulses += [operation_dict[p]]\n el = multi_pulse_elt(i, station, pulses, sequencer_config)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n if distortion_dict is not None:\n print('\\r Distorting element {}/{}'.format(i+1, len(times)),\n end='')\n el = distort_and_compensate(\n el, distortion_dict)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n if return_seq:\n return seq, el_list\n else:\n return seq_name\n\n\ndef chevron_seq(operation_dict, q0,\n pulse_lengths=np.arange(0, 120e-9, 2e-9),\n verbose=False,\n distortion_dict=None,\n upload=True,\n cal_points=True):\n '''\n Chevron sequence where length of the \"SWAP\" operation is varied\n X180 - SWAP(l) - RO\n\n\n verbose=False: (bool) used for verbosity printing in the pulsar\n distortion_dict=None: (dict) flux_pulse predistortion kernels\n upload=True: (bool) uploads to AWG, set False for testing purposes\n cal_points=True: (bool) wether to use calibration points\n '''\n\n seq_name = 'Chevron_seq'\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n sequencer_config = operation_dict['sequencer_config']\n\n SWAP_amp = operation_dict['SWAP '+q0]['amplitude']\n # seq has to have at least 2 elts\n for i, pulse_length in enumerate(pulse_lengths):\n # this converts negative pulse lenghts to negative pulse amplitudes\n if pulse_length < 0:\n operation_dict['SWAP '+q0]['amplitude'] = -SWAP_amp\n else:\n operation_dict['SWAP '+q0]['amplitude'] = SWAP_amp\n\n if cal_points and (i == (len(pulse_lengths)-4) or\n i == (len(pulse_lengths)-3)):\n pulse_combinations = ['RO '+q0]\n elif cal_points and (i == (len(pulse_lengths)-2) or\n i == 
(len(pulse_lengths)-1)):\n pulse_combinations = ['X180 ' + q0, 'RO ' + q0]\n else:\n operation_dict[\n 'SWAP '+q0]['square_pulse_length'] = abs(pulse_length)\n pulse_combinations = ['X180 '+q0, 'SWAP ' + q0, 'RO '+q0]\n\n pulses = []\n for p in pulse_combinations:\n pulses += [operation_dict[p]]\n\n el = multi_pulse_elt(i, station, pulses, sequencer_config)\n if distortion_dict is not None:\n print('\\r Distorting element {}/{}'.format(i+1, len(pulse_lengths)),\n end='')\n if i == len(pulse_lengths):\n print()\n el = distort_and_compensate(\n el, distortion_dict)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n\n\ndef SwapN(operation_dict, q0,\n nr_pulses_list=[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20],\n alpha=1.,\n verbose=False,\n distortion_dict=None,\n upload=True,\n cal_points=True,\n inter_swap_wait=10e-9):\n '''\n Sequence of N swap operations\n (N_max-N)* FluxID - X180 - N*SWAP - X180 RO\n\n pulse_dict: (dict) dictionary containing the pulse parameters\n q0 (str) name of the target qubit\n nr_pulses_list (list) nr of swaps gates for each element\n verbose=False: (bool) used for verbosity printing in the pulsar\n distortion_dict=None: (dict) flux_pulse predistortion kernels\n upload=True: (bool) uploads to AWG, set False for testing purposes\n cal_points=True: (bool) wether to use calibration points\n '''\n seq_name = 'SWAPN_seq'\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n\n sequencer_config = operation_dict['sequencer_config']\n\n # Create the correction pulses\n operation_dict['FluxId '+q0] = deepcopy(operation_dict['SWAP ' + q0])\n # Flux identity\n operation_dict['FluxId '+q0]['amplitude'] = 0\n n_max = nr_pulses_list[-1]\n for j in range(n_max):\n SWAP_pulse_j = deepcopy(operation_dict['SWAP '+q0])\n SWAP_pulse_j['amplitude'] = SWAP_pulse_j['amplitude']*(alpha**j)\n # SWAP_pulse_train.append(SWAP_pulse_j)\n operation_dict['SWAP_{} {}'.format(j, q0)] = SWAP_pulse_j\n\n for i, n in enumerate(nr_pulses_list):\n # SWAP_pulse_train = []\n pulse_combinations = (['X180 ' + q0] +\n ['FluxId '+q0]*(n_max-n))\n for j in range(n):\n pulse_combinations += ['SWAP_{} {}'.format(j, q0)]\n pulse_combinations += ['RO '+q0]\n\n # calibration points overwrite the pulse_combinations list\n # All pulses are replaced with identities.\n if cal_points and (i == (len(nr_pulses_list)-4) or\n i == (len(nr_pulses_list)-3)):\n pulse_combinations = (['RO '+q0])\n elif cal_points and (i == (len(nr_pulses_list)-2) or\n i == (len(nr_pulses_list)-1)):\n pulse_combinations = (['X180 ' + q0] + ['RO '+q0])\n\n pulses = []\n for p in pulse_combinations:\n pulses += [operation_dict[p]]\n\n el = multi_pulse_elt(i, station, pulses, sequencer_config)\n if distortion_dict is not None:\n print('\\rDistorting element {}/{} '.format(i+1,\n len(nr_pulses_list)),\n end='')\n if i == len(nr_pulses_list):\n print()\n el = distort_and_compensate(\n el, distortion_dict)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n\n\ndef swap_swap_wait(mw_pulse_pars, RO_pars,\n flux_pulse_pars,\n phases=np.linspace(0, 720, 41),\n inter_swap_wait=100e-9,\n verbose=False,\n distortion_dict=None,\n upload=True,\n cal_points=True):\n '''\n Sequence of 2 swap operations with varying recovery pulse\n mY90 - swap - idle- swap - rphi90 - RO\n\n mw_pulse_pars: (dict) 
qubit control pulse pars\n RO_pars: (dict) qubit RO pars\n flux_pulse_pars: (dict) flux puplse pars\n inter_swap_wait (float) wait time in seconds between the two swaps\n phases (list) phases used for the recovery pulse\n verbose=False: (bool) used for verbosity printing in the pulsar\n distortion_dict=None: (dict) flux_pulse predistortion kernels\n upload=True: (bool) uploads to AWG, set False for testing purposes\n cal_points=True: (bool) wether to use calibration points\n '''\n\n # To be merged with swap-CP-swap\n logging.warning('Do not use, I have marked this for deletion -MAR')\n\n preloaded_kernels_vec = preload_kernels_func(distortion_dict)\n # renamed as the dict contains the pulse directly\n minus_flux_pulse_pars = deepcopy(flux_pulse_pars)\n minus_flux_pulse_pars['amplitude'] = -flux_pulse_pars['amplitude']\n\n # Pulse is used to set the starting refpoint for the compensation pulses\n dead_time_pulse_pars = {'pulse_type': 'SquarePulse',\n 'pulse_delay': flux_pulse_pars['dead_time'],\n 'channel': flux_pulse_pars['channel'],\n 'amplitude': 0,\n 'length': 0.}\n\n seq_name = 'swap_swap_wait_seq'\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n pulses = get_pulse_dict_from_pars(mw_pulse_pars)\n\n # seq has to have at least 2 elts\n for i, phase in enumerate(phases):\n if cal_points and (i == (len(phases)-4) or i == (len(phases)-3)):\n el = multi_pulse_elt(i, station, [pulses['I'], RO_pars])\n elif cal_points and (i == (len(phases)-2) or i == (len(phases)-1)):\n el = multi_pulse_elt(i, station, [pulses['X180'], RO_pars])\n else:\n # correcting timings\n recovery_pi2 = deepcopy(pulses['X90'])\n recovery_pi2['phase'] = phase\n second_flux_pulse = deepcopy(flux_pulse_pars)\n second_flux_pulse['pulse_delay'] = inter_swap_wait\n\n pulse_list = [pulses['mY90']] + [flux_pulse_pars] + [second_flux_pulse] \\\n + [recovery_pi2]+[RO_pars] + \\\n [dead_time_pulse_pars] + [minus_flux_pulse_pars]*2\n\n el = multi_pulse_elt(i, station, pulse_list)\n if distortion_dict is not None:\n el = distort_and_compensate(\n el, distortion_dict)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n\n\ndef chevron_with_excited_bus_2Qubits(mw_pulse_pars_qCP, mw_pulse_pars_qS,\n flux_pulse_pars_qCP, flux_pulse_pars_qS,\n RO_pars,\n distortion_dict,\n chevron_pulse_lengths=np.arange(\n 0, 120e-9, 2e-9),\n excitations=1,\n verbose=False,\n upload=True,\n cal_points=True):\n '''\n Sequence that swaps an excitation from qS into the bus and does chevron\n type measurement between qCPhase qubit and the bus. 
In the end, X180 gate\n to qCP is applied to invert population 0 and 1 to maximize 1-2 discrimination.\n\n X180 qS - swap qS,B - X180 qCP - chevron pulse qCP,B -\n - X180 qCP - RO\n\n qS is the \"swap qubit\"\n qCP is the \"CPhase qubit\"\n\n mw_pulse_pars qCP: (dict) qubit control pulse pars\n mw_pulse_pars qS: (dict) qubit control pulse pars\n flux_pulse_pars qCP: (dict) flux puplse pars\n flux_pulse_pars qS: (dict) flux puplse pars\n RO_pars: (dict) qubit RO pars, ideally a multiplexed readout\n distortion_dict=None: (dict) flux_pulse predistortion kernels\n excitations: (enum) [0, 1, 'both'] whether to put an excitation in\n the swap qubit, both does the sequence both ways.\n chevron_pulse_lengths (list) amplitudes for the chevron pulse\n verbose=False: (bool) used for verbosity printing in the pulsar\n upload=True: (bool) uploads to AWG, set False for testing purposes\n cal_points=True: (bool) wether to use calibration points\n\n TODO:\n - move getting the pulse dict to a single function\n '''\n\n # ############ This getting pulse dict should be a single function\n mw_pulses_qCP = add_suffix_to_dict_keys(\n get_pulse_dict_from_pars(mw_pulse_pars_qCP), ' qCP')\n mw_pulses_qS = add_suffix_to_dict_keys(\n get_pulse_dict_from_pars(mw_pulse_pars_qS), ' qS')\n # This should come out of this dict in a smarter way\n swap_qCP = {'swap qCP': flux_pulse_pars_qCP}\n swap_qS = {'swap qS': flux_pulse_pars_qS}\n RO_dict = {'RO': RO_pars}\n pulse_dict = {}\n pulse_dict.update(mw_pulses_qCP)\n pulse_dict.update(mw_pulses_qS)\n pulse_dict.update(swap_qCP)\n pulse_dict.update(swap_qS)\n pulse_dict.update(RO_dict)\n # End of the getting pulse dict\n\n # Getting the minus flux pulses should also be in the get pulse dict\n minus_flux_pulse_pars = deepcopy(flux_pulse_pars_qCP)\n pulse_dict['mswap qCP'] = deepcopy(pulse_dict['swap qCP'])\n pulse_dict['mswap qS'] = deepcopy(pulse_dict['swap qS'])\n pulse_dict['mswap qCP']['amplitude'] = -pulse_dict['swap qCP']['amplitude']\n pulse_dict['mswap qS']['amplitude'] = -pulse_dict['swap qS']['amplitude']\n\n pulse_dict.update({'mFlux_pulse': minus_flux_pulse_pars})\n pulse_dict.update({'dead_time_pulse':\n {'pulse_type': 'SquarePulse',\n 'pulse_delay': flux_pulse_pars_qCP['dead_time'],\n 'channel': flux_pulse_pars_qCP['channel'],\n 'amplitude': 0,\n 'length': 0.}})\n\n # Pulse is used to set the starting refpoint for the compensation pulses\n\n seq_name = 'chevron_with_excited_bus_2Qubits'\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n\n # seq has to have at least 2 elts\n for i, chevron_pulse_length in enumerate(chevron_pulse_lengths):\n pulse_dict['swap qCP']['square_pulse_length'] = chevron_pulse_length\n pulse_dict['mswap qCP']['square_pulse_length'] = chevron_pulse_length\n if excitations == 'both':\n if (i < (len(chevron_pulse_lengths)-4*cal_points)/2):\n excitation = False\n else:\n excitation = True\n elif excitations == 0:\n excitation = False\n elif excitations == 1:\n excitation = True\n else:\n raise ValueError(\n 'excitations {} not recognized'.format(excitations))\n if cal_points and (i == (len(chevron_pulse_lengths)-4)):\n pulse_combinations = ['I qCP', 'I qS', 'RO']\n elif cal_points and (i == (len(chevron_pulse_lengths)-3)):\n pulse_combinations = ['I qCP', 'X180 qS', 'RO']\n elif cal_points and ((i == len(chevron_pulse_lengths)-2)):\n pulse_combinations = ['X180 qCP', 'I qS', 'RO']\n elif cal_points and (i == (len(chevron_pulse_lengths)-1)):\n pulse_combinations = ['X180 qCP', 'X180 qS', 'RO']\n 
else:\n if excitation:\n pulse_combinations = ['X180 qCP', 'X180 qS'] +\\\n [ 'swap qS'] +\\\n ['swap qCP'] +\\\n ['swap qS'] +\\\n ['X180 qCP', 'X180 qS'] +\\\n ['RO'] +\\\n ['dead_time_pulse']+['mswap qS']*2+['mswap qCP']\n else:\n pulse_combinations = ['X180 qCP', 'I qS'] +\\\n ['swap qS'] +\\\n ['swap qCP'] +\\\n ['swap qS'] +\\\n ['X180 qCP', 'I qS'] +\\\n ['RO'] +\\\n ['dead_time_pulse']+['mswap qS']*2+['mswap qCP']\n # correcting timings\n pulses = []\n for p in pulse_combinations:\n pulses += [pulse_dict[p]]\n\n el = multi_pulse_elt(i, station, pulses)\n if distortion_dict is not None:\n el = distort_and_compensate(\n el, distortion_dict)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n\n\ndef XSWAPxy(phis, mw_pulse_pars, RO_pars, flux_pulse_pars=None,\n excite=True,\n verbose=False,\n distortion_dict=None,\n upload=True,\n return_seq=False):\n '''\n\n '''\n if flux_pulse_pars is None:\n flux_pulse_pars = {'pulse_type': 'SquarePulse',\n 'pulse_delay': .1e-6,\n 'channel': 'ch3',\n 'amplitude': 0.5,\n 'length': .1e-6}\n # flux_pulse_pars['amplitude'] = 0.\n minus_flux_pulse_pars = {'pulse_type': 'SquarePulse',\n 'pulse_delay': 0., # will be overwritten\n 'channel': flux_pulse_pars['channel'],\n 'amplitude': -flux_pulse_pars['amplitude'],\n 'length': flux_pulse_pars['length']}\n original_delay = deepcopy(RO_pars)['pulse_delay']\n\n dead_time_pulse = {'pulse_type': 'SquarePulse',\n 'pulse_delay': (minus_flux_pulse_pars['length']),\n 'channel': flux_pulse_pars['channel'],\n 'amplitude': 0,\n 'length': 0.}\n\n seq_name = 'Chevron_seq'\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n pulses = get_pulse_dict_from_pars(mw_pulse_pars)\n lngt = flux_pulse_pars['length']\n minus_flux_pulse_pars['length'] = lngt\n for i in range(rep_max): # seq has to have at least 2 elts\n # correcting timings\n pulse_buffer = 50e-9\n flux_pulse_pars['pulse_delay'] = pulse_buffer + (mw_pulse_pars['sigma'] *\n mw_pulse_pars['nr_sigma'])\n msmt_buffer = 50e-9\n RO_pars['pulse_delay'] = msmt_buffer + lngt\n dead_time_pulse['pulse_delay'] = RO_pars['pulse_delay']\n\n dead_time = 3e-6\n minus_flux_pulse_pars['pulse_delay'] = dead_time + RO_pars['length']\n if excite:\n init_pulse = pulses['X180']\n else:\n init_pulse = pulses['I']\n\n buffer_swap = 50e-9\n\n sec_flux_pulse_pars = deepcopy(flux_pulse_pars)\n flux_pulse_pars['pulse_delay'] = flux_pulse_pars[\n 'length'] + buffer_swap\n sec_minus_flux_pulse_pars = deepcopy(minus_flux_pulse_pars)\n sec_minus_flux_pulse_pars['pulse_delay'] = minus_flux_pulse_pars[\n 'length'] + buffer_swap\n if i == 0:\n pulse_list = [init_pulse,\n flux_pulse_pars,\n RO_pars,\n minus_flux_pulse_pars,\n dead_time_pulse]\n else:\n pulse_list = [init_pulse,\n flux_pulse_pars] + [sec_flux_pulse_pars]*(2*i) + [RO_pars,\n minus_flux_pulse_pars] + [sec_minus_flux_pulse_pars]*(2*i) + [dead_time_pulse]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n # + ((-int(lngt*1e9)) % 50)*1e-9\n pulse_list[0]['pulse_delay'] += 0.01e-6\n\n el = multi_pulse_elt(i, station, pulse_list)\n el_list.append(el)\n\n for i, el in enumerate(el_list):\n if distortion_dict is not None:\n el = distort_and_compensate(\n el, distortion_dict)\n el_list[i] = el\n seq.append_element(el, trigger_wait=True)\n cal_points = 4\n RO_pars['pulse_delay'] = original_delay\n for i in range(int(cal_points/2)):\n pulse_list = 
[pulses['I'], RO_pars]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n pulse_list[0]['pulse_delay'] += 0.01e-6\n\n el = multi_pulse_elt(len(np.arange(rep_max))+i, station, pulse_list)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n for i in range(int(cal_points/2)):\n pulse_list = [pulses['X180'], RO_pars]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n pulse_list[0]['pulse_delay'] += 0.01e-6\n\n el = multi_pulse_elt(\n len(np.arange(rep_max))+int(cal_points/2)+i, station, pulse_list)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n if return_seq:\n return seq, el_list\n else:\n return seq\n\n\ndef chevron_seq_cphase(lengths, mw_pulse_pars, RO_pars, flux_pulse_pars=None,\n cphase_pulse_pars=None, artificial_detuning=None,\n phase_2=None,\n verbose=False,\n distortion_dict=None,\n upload=True,\n return_seq=False):\n '''\n\n '''\n if flux_pulse_pars is None:\n flux_pulse_pars = {'pulse_type': 'SquarePulse',\n 'pulse_delay': .1e-6,\n 'channel': 'ch3',\n 'amplitude': 0.5,\n 'length': .1e-6}\n # flux_pulse_pars['amplitude'] = 0.\n minus_flux_pulse_pars = {'pulse_type': 'SquarePulse',\n 'pulse_delay': 0., # will be overwritten\n 'channel': flux_pulse_pars['channel'],\n 'amplitude': -flux_pulse_pars['amplitude'],\n 'length': flux_pulse_pars['length']}\n original_delay = deepcopy(RO_pars)['pulse_delay']\n\n dead_time_pulse = {'pulse_type': 'SquarePulse',\n 'pulse_delay': (minus_flux_pulse_pars['length']),\n 'channel': flux_pulse_pars['channel'],\n 'amplitude': 0,\n 'length': 0.}\n\n seq_name = 'Chevron_seq'\n minus_cphase_pulse_pars = deepcopy(cphase_pulse_pars)\n minus_cphase_pulse_pars['amplitude'] = - \\\n minus_cphase_pulse_pars['amplitude']\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n pulses = get_pulse_dict_from_pars(mw_pulse_pars)\n for i, lngt in enumerate(lengths): # seq has to have at least 2 elts\n cphase_pulse_pars['length'] = lngt\n minus_cphase_pulse_pars['length'] = lngt\n cphase_pulse_pars['frequency'] = 0.5/lngt\n minus_cphase_pulse_pars['frequency'] = 0.5/lngt\n # cphase_pulse_pars['phase'] = -(90./np.pi)*(cphase_pulse_pars['pulse_delay'])/lngt\n # minus_cphase_pulse_pars['phase'] = -(90./np.pi)*(minus_cphase_pulse_pars['pulse_delay'])/lngt\n # correcting timings\n pulse_buffer = 50e-9\n flux_pulse_pars['pulse_delay'] = pulse_buffer + (mw_pulse_pars['sigma'] *\n mw_pulse_pars['nr_sigma'])\n msmt_buffer = 50e-9\n RO_pars['pulse_delay'] = msmt_buffer + lngt\n dead_time_pulse['pulse_delay'] = RO_pars['pulse_delay']\n\n dead_time = 3e-6\n minus_flux_pulse_pars['pulse_delay'] = dead_time + RO_pars['length']\n firstY90m = deepcopy(pulses['Y90'])\n firstY90m['pulse_delay'] = flux_pulse_pars['length'] + 30e-9\n secondY90m = deepcopy(pulses['X90'])\n\n if phase_2 is not None:\n secondY90m['phase'] = phase_2\n elif artificial_detuning is not None:\n secondY90m['phase'] = (lngt-lengths[0]) * artificial_detuning * 360\n secondY90m['pulse_delay'] = lngt + 20e-9\n pulse_list = [pulses['X180'],\n flux_pulse_pars,\n firstY90m,\n cphase_pulse_pars,\n secondY90m,\n RO_pars,\n minus_flux_pulse_pars,\n pulses['I'],\n minus_cphase_pulse_pars,\n dead_time_pulse]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n pulse_list[0]['pulse_delay'] += 0.01e-6 + ((-int(lngt*1e9)) % 50)*1e-9\n\n el = multi_pulse_elt(i, station, 
pulse_list)\n el_list.append(el)\n\n for i, el in enumerate(el_list):\n if distortion_dict is not None:\n el = distort_and_compensate(\n el, distortion_dict)\n el_list[i] = el\n seq.append_element(el, trigger_wait=True)\n cal_points = 4\n RO_pars['pulse_delay'] = original_delay\n for i in range(int(cal_points/2)):\n pulse_list = [pulses['I'], RO_pars]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n pulse_list[0]['pulse_delay'] += 0.01e-6\n\n el = multi_pulse_elt(len(lengths)+i, station, pulse_list)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n for i in range(int(cal_points/2)):\n pulse_list = [pulses['X180'], RO_pars]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n pulse_list[0]['pulse_delay'] += 0.01e-6\n\n el = multi_pulse_elt(\n len(lengths)+int(cal_points/2)+i, station, pulse_list)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n if return_seq:\n return seq, el_list\n else:\n return seq\n\n\ndef BusT2(times, mw_pulse_pars, RO_pars, flux_pulse_pars=None,\n verbose=False, distortion_dict=None,\n upload=True, return_seq=False):\n '''\n\n '''\n if flux_pulse_pars is None:\n raise ValueError('Need flux parameters for the gate.')\n minus_flux_pulse_pars = deepcopy(flux_pulse_pars)\n minus_flux_pulse_pars['amplitude'] = -minus_flux_pulse_pars['amplitude']\n\n original_delay = deepcopy(RO_pars)['pulse_delay']\n\n seq_name = 'BusT2_seq'\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n pulses = get_pulse_dict_from_pars(mw_pulse_pars)\n\n dead_time_pulse = {'pulse_type': 'SquarePulse',\n 'pulse_delay': (minus_flux_pulse_pars['length']),\n 'channel': flux_pulse_pars['channel'],\n 'amplitude': 0,\n 'length': 0.}\n for i, tt in enumerate(times):\n # correcting timings\n pulse_buffer = 50e-9\n flux_pulse_pars['pulse_delay'] = pulse_buffer + (mw_pulse_pars['sigma'] *\n mw_pulse_pars['nr_sigma'])\n\n flux_pulse_pars_2 = deepcopy(flux_pulse_pars)\n # flux_pulse_pars_2['amplitude'] = 0.\n flux_pulse_pars_2['pulse_delay'] = tt + flux_pulse_pars['length']\n\n msmt_buffer = 50e-9\n RO_pars['pulse_delay'] = msmt_buffer + flux_pulse_pars['length']\n\n dead_time = 3e-6\n minus_flux_pulse_pars['pulse_delay'] = dead_time + RO_pars['length']\n\n minus_flux_pulse_pars_2 = deepcopy(flux_pulse_pars_2)\n minus_flux_pulse_pars_2['amplitude'] = - \\\n minus_flux_pulse_pars_2['amplitude']\n\n dead_time_pulse['pulse_delay'] = RO_pars['pulse_delay']\n\n pulse_list = [pulses['Y90'], flux_pulse_pars, flux_pulse_pars_2, pulses['Y90'],\n RO_pars, minus_flux_pulse_pars, minus_flux_pulse_pars_2,\n dead_time_pulse]\n\n # This ensures fixed point\n pulse_list[0]['pulse_delay'] += 0.01e-6 # + ((-int(tt*1e9)) % 50)*1e-9\n\n el = multi_pulse_elt(i, station, pulse_list)\n el_list.append(el)\n\n for i, el in enumerate(el_list):\n if distortion_dict is not None:\n el = distort_and_compensate(\n el, distortion_dict)\n el_list[i] = el\n seq.append_element(el, trigger_wait=True)\n cal_points = 4\n RO_pars['pulse_delay'] = original_delay\n for i in range(int(cal_points/2)):\n pulse_list = [pulses['I'], RO_pars]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n pulse_list[0]['pulse_delay'] += 0.01e-6\n\n el = multi_pulse_elt(len(times)+i, station, pulse_list)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n for i in range(int(cal_points/2)):\n 
pulse_list = [pulses['X180'], RO_pars]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n pulse_list[0]['pulse_delay'] += 0.01e-6\n\n el = multi_pulse_elt(\n len(times)+int(cal_points/2)+i, station, pulse_list)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n if return_seq:\n return seq, el_list\n else:\n return seq\n\n\ndef BusEcho(times, mw_pulse_pars, RO_pars, artificial_detuning=None, flux_pulse_pars=None,\n verbose=False, distortion_dict=None,\n upload=True, return_seq=False):\n '''\n\n '''\n if flux_pulse_pars is None:\n raise ValueError('Need flux parameters for the gate.')\n minus_flux_pulse_pars = deepcopy(flux_pulse_pars)\n minus_flux_pulse_pars['amplitude'] = -minus_flux_pulse_pars['amplitude']\n\n original_delay = deepcopy(RO_pars)['pulse_delay']\n\n seq_name = 'BusEcho_seq'\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n pulses = get_pulse_dict_from_pars(mw_pulse_pars)\n pulse_pars_x2 = deepcopy(pulses['X90'])\n\n dead_time_pulse = {'pulse_type': 'SquarePulse',\n 'pulse_delay': (minus_flux_pulse_pars['length']),\n 'channel': flux_pulse_pars['channel'],\n 'amplitude': 0,\n 'length': 0.}\n for i, tt in enumerate(times):\n # correcting timings\n pulse_buffer = 50e-9\n flux_pulse_pars['pulse_delay'] = pulse_buffer + (mw_pulse_pars['sigma'] *\n mw_pulse_pars['nr_sigma'])\n\n flux_pulse_pars_2 = deepcopy(flux_pulse_pars)\n # flux_pulse_pars_2['amplitude'] = 0.\n flux_pulse_pars_2['pulse_delay'] = tt*0.5 + flux_pulse_pars['length']\n\n msmt_buffer = 50e-9\n RO_pars['pulse_delay'] = msmt_buffer + flux_pulse_pars['length']\n\n dead_time = 3e-6\n minus_flux_pulse_pars['pulse_delay'] = dead_time + RO_pars['length']\n\n minus_flux_pulse_pars_2 = deepcopy(flux_pulse_pars_2)\n minus_flux_pulse_pars_2['amplitude'] = - \\\n minus_flux_pulse_pars_2['amplitude']\n\n dead_time_pulse['pulse_delay'] = RO_pars['pulse_delay']\n if artificial_detuning is not None:\n pulse_pars_x2['phase'] = (tt-times[0]) * artificial_detuning * 360\n\n pulse_list = [pulses['Y90'], flux_pulse_pars, flux_pulse_pars_2, pulses['X180'],\n flux_pulse_pars, flux_pulse_pars_2, pulse_pars_x2,\n RO_pars, minus_flux_pulse_pars, minus_flux_pulse_pars_2, pulses[\n 'I'],\n minus_flux_pulse_pars, minus_flux_pulse_pars_2, dead_time_pulse]\n\n # This ensures fixed point\n pulse_list[0]['pulse_delay'] += 0.01e-6 # + ((-int(tt*1e9)) % 50)*1e-9\n\n el = multi_pulse_elt(i, station, pulse_list)\n el_list.append(el)\n\n for i, el in enumerate(el_list):\n if distortion_dict is not None:\n el = distort_and_compensate(\n el, distortion_dict)\n el_list[i] = el\n seq.append_element(el, trigger_wait=True)\n cal_points = 4\n RO_pars['pulse_delay'] = original_delay\n for i in range(int(cal_points/2)):\n pulse_list = [pulses['I'], RO_pars]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n pulse_list[0]['pulse_delay'] += 0.01e-6\n\n el = multi_pulse_elt(len(times)+i, station, pulse_list)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n for i in range(int(cal_points/2)):\n pulse_list = [pulses['X180'], RO_pars]\n # copy first element and set extra wait\n pulse_list[0] = deepcopy(pulse_list[0])\n pulse_list[0]['pulse_delay'] += 0.01e-6\n\n el = multi_pulse_elt(\n len(times)+int(cal_points/2)+i, station, pulse_list)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n 
station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n if return_seq:\n return seq, el_list\n else:\n return seq\n\n\ndef preload_kernels_func(distortion_dict):\n logging.warning('deprecated. you should use the kernel object directly')\n # output_dict = {ch: [] for ch in distortion_dict['ch_list']}\n # for ch in distortion_dict['ch_list']:\n # for kernel in distortion_dict[ch]:\n # if kernel is not '':\n # if kernel in cached_kernels.keys():\n # print('Cached {}'.format(kernel_dir+kernel))\n # output_dict[ch].append(cached_kernels[kernel])\n # else:\n # print('Loading {}'.format(kernel_dir+kernel))\n # # print(os.path.isfile('kernels/'+kernel))\n # kernel_vec = np.loadtxt(kernel_dir+kernel)\n # output_dict[ch].append(kernel_vec)\n # cached_kernels.update({kernel: kernel_vec})\n # return output_dict\n return None\n\n\ndef get_pulse_dict_from_pars(pulse_pars):\n '''\n Returns a dictionary containing pulse_pars for all the primitive pulses\n based on a single set of pulse_pars.\n Using this function deepcopies the pulse parameters preventing accidently\n editing the input dictionary.\n\n input args:\n pulse_pars: dictionary containing pulse_parameters\n return:\n pulses: dictionary of pulse_pars dictionaries\n '''\n pi_amp = pulse_pars['amplitude']\n pi2_amp = pulse_pars['amplitude']*pulse_pars['amp90_scale']\n\n pulses = {'I': deepcopy(pulse_pars),\n 'X180': deepcopy(pulse_pars),\n 'mX180': deepcopy(pulse_pars),\n 'X90': deepcopy(pulse_pars),\n 'mX90': deepcopy(pulse_pars),\n 'Y180': deepcopy(pulse_pars),\n 'mY180': deepcopy(pulse_pars),\n 'Y90': deepcopy(pulse_pars),\n 'mY90': deepcopy(pulse_pars)}\n\n pulses['I']['amplitude'] = 0\n pulses['mX180']['amplitude'] = -pi_amp\n pulses['X90']['amplitude'] = pi2_amp\n pulses['mX90']['amplitude'] = -pi2_amp\n pulses['Y180']['phase'] = 90\n pulses['mY180']['phase'] = 90\n pulses['mY180']['amplitude'] = -pi_amp\n\n pulses['Y90']['amplitude'] = pi2_amp\n pulses['Y90']['phase'] = 90\n pulses['mY90']['amplitude'] = -pi2_amp\n pulses['mY90']['phase'] = 90\n\n return pulses\n\n\ndef SWAP_CZ_SWAP_phase_corr_swp(operation_dict, qS, qCZ,\n sweep_qubit,\n RO_target='all',\n rec_phases=None,\n phase_corr_amps=None,\n distortion_dict=None,\n CZ_disabled=False,\n excitations='both_cases',\n # 0 in 1st half and 1 in 2nd\n cal_points_with_flux_pulses=True,\n verbose=False,\n upload=True):\n '''\n Sequence that swaps qS with the bus and does CPhase between qCZ and the bus\n X180 qS - Ym90 qCZ - swap qS,B - CPhase qCZ,B - swap qS,B - fphi90 qCZ\n - X180 qS - RO\n\n the keyword swap target and control reverses the\n qubit roles during a second sweep:\n\n X180 qCZ - Ym90 qS - swap qS,B - CPhase qCZ,B - swap qS,B - fphi90 qS\n - X180 qCZ - RO\n\n qS is the \"SWAP qubit\"\n qCZ is the \"C-Phase qubit\"\n '''\n sequencer_config = operation_dict['sequencer_config']\n\n seq_name = 'SWAP_CZ_SWAP_phase_corr_swp_{}_{}'.format(qS, qCZ)\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n\n if CZ_disabled:\n operation_dict['CZ '+qCZ]['amplitude'] = 0\n operation_dict['CZ '+qCZ]['phase_corr_pulse_amp'] = 0\n\n ################################################\n # Creating additional pulses for this sequence #\n ################################################\n # the recovery SWAP is identical to the regular SWAP operation, unless\n # an rSWAP is explicitly contained in the operation dict\n\n operation_dict['phi90 ' + qCZ] = deepcopy(operation_dict['Y90 ' + qCZ])\n operation_dict['phi90 ' + qS] = deepcopy(operation_dict['Y90 ' + 
qS])\n # operation_dict['rSWAP ' + qS] = deepcopy(operation_dict['SWAP ' + qS])\n if ('rSWAP ' + qS) not in operation_dict.keys():\n operation_dict['rSWAP ' + qS] = deepcopy(operation_dict['SWAP ' + qS])\n operation_dict['CZ_corr ' + qCZ]['refpoint'] = 'simultaneous'\n\n # seq has to have at least 2 elts\n # mid_point_phase_amp = phase_corr_amps[len(phase_corr_amps[:-4])//4]\n\n if (rec_phases is None) and (phase_corr_amps is None):\n raise Exception('Must sweep either recovery phase or phase corr amp')\n if rec_phases is None:\n rec_phases = [90]*len(phase_corr_amps)\n if phase_corr_amps is None:\n if sweep_qubit == qCZ:\n phase_corr_amp = operation_dict[\n 'CZ_corr ' + qCZ]['amplitude']\n else:\n phase_corr_amp = operation_dict[\n 'SWAP_corr ' + qS]['amplitude']\n phase_corr_amps = [phase_corr_amp]*len(rec_phases)\n\n ############################################\n # Generating the elements #\n ############################################\n for i in range(len(phase_corr_amps)):\n operation_dict['phi90 ' + qCZ]['phase'] = rec_phases[i]\n operation_dict['phi90 ' + qS]['phase'] = rec_phases[i]\n if sweep_qubit == qCZ:\n operation_dict['CZ_corr ' + qCZ]['amplitude'] \\\n = phase_corr_amps[i]\n else:\n operation_dict['SWAP_corr ' + qS]['amplitude'] \\\n = phase_corr_amps[i]\n ######################\n # The base seqeunce #\n ######################\n if sweep_qubit == qCZ:\n pulse_combinations = (\n ['I ' + qS, 'mY90 ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'phi90 ' + qCZ, 'I '+qCZ, 'I '+qS, 'RO '+RO_target])\n if (excitations == 1 or (excitations == 'both_cases' and\n i >= (len(phase_corr_amps)-4)/2)):\n # Put a single excitation in the Swap qubit by replacing Id\n pulse_combinations[0] = 'X180 ' + qS\n pulse_combinations[-2] = 'X180 ' + qS\n\n elif sweep_qubit == qS:\n pulse_combinations = (\n ['mY90 ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'mY90 ' + qCZ, 'Y90 '+qCZ, 'phi90 '+qS, 'RO '+RO_target])\n # Two pulses on the CZ qubit are to emulate tomo pulses\n if (excitations == 1 or (excitations == 'both_cases' and\n (i >= len(phase_corr_amps)-4)/2)):\n # Put a single excitation in the CZ qubit by replacing Id\n pulse_combinations[1] = 'X180 ' + qCZ\n pulse_combinations[-3] = 'X180 ' + qCZ\n else:\n raise ValueError('Sweep qubit \"{}\" must be either \"{}\" or \"{}\"'.format(\n sweep_qubit, qS, qCZ))\n ############################################\n # calibration points #\n ############################################\n if i == (len(rec_phases) - 4):\n pulse_combinations = (\n ['I ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'I ' + qCZ, 'I '+qCZ, 'I '+qS, 'RO '+RO_target])\n elif i == (len(rec_phases) - 3):\n pulse_combinations = (\n ['I ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'I ' + qCZ, 'I '+qCZ, 'X180 '+qS, 'RO '+RO_target])\n elif i == (len(rec_phases) - 2):\n pulse_combinations = (\n ['I ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'I ' + qCZ, 'X180 '+qCZ, 'I '+qS, 'RO '+RO_target])\n elif i == (len(rec_phases) - 1):\n pulse_combinations = (\n ['I ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'I ' + qCZ, 'X180 '+qCZ, 'X180 '+qS, 'RO '+RO_target])\n\n pulses = []\n for p in pulse_combinations:\n pulses += [operation_dict[p]]\n el = multi_pulse_elt(i, 
station, pulses, sequencer_config)\n if distortion_dict is not None:\n print('\\rDistorting element {}/{} '.format(i+1,\n len(phase_corr_amps)),\n end='')\n if i == len(phase_corr_amps):\n print()\n el = distort_and_compensate(\n el, distortion_dict)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n\n\ndef rSWAP_amp_sweep(operation_dict, qS, qCZ,\n RO_target='all',\n recovery_swap_amps=np.arange(0.3, 0.55, 0.01),\n distortion_dict=None,\n CZ_disabled=False,\n emulate_cross_driving=False,\n cal_points_with_flux_pulses=True,\n verbose=False,\n upload=True, **kw):\n '''\n Sequence that swaps qS with the bus and does CPhase between qCZ and the bus\n X180 qS - Ym90 qCZ - swap qS,B - CPhase qCZ,B - swap qS,B - fphi90 qCZ\n - X180 qS - RO\n\n kw is not used, but implemented to avoid crashing when passed argument name\n\n the keyword swap target and control reverses the\n qubit roles during a second sweep:\n\n X180 qCZ - Ym90 qS - swap qS,B - CPhase qCZ,B - swap qS,B - fphi90 qS\n - X180 qCZ - RO\n\n qS is the \"SWAP qubit\"\n qCZ is the \"C-Phase qubit\"\n '''\n sequencer_config = operation_dict['sequencer_config']\n\n seq_name = 'SWAP_CZ_SWAP_phase_corr_swp_{}_{}'.format(qS, qCZ)\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n\n if CZ_disabled:\n operation_dict['CZ '+qCZ]['amplitude'] = 0\n operation_dict['CZ '+qCZ]['phase_corr_pulse_amp'] = 0\n\n ################################################\n # Creating additional pulses for this sequence #\n ################################################\n # the recovery SWAP is identical to the regular SWAP operation, unless\n # an rSWAP is explicitly contained in the operation dict\n if ('rSWAP ' + qS) not in operation_dict.keys():\n operation_dict['rSWAP ' + qS] = deepcopy(operation_dict['SWAP ' + qS])\n operation_dict['CZ_corr ' + qCZ]['refpoint'] = 'simultaneous'\n\n rSWAP_cals = np.mean(recovery_swap_amps[:-4])\n\n ############################################\n # Generating the elements #\n ############################################\n for i in range(len(recovery_swap_amps)):\n operation_dict['rSWAP ' + qS]['amplitude'] = recovery_swap_amps[i]\n ######################\n # The base sequence #\n ######################\n pulse_combinations = (\n ['X180 ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'I ' + qCZ, 'I '+qCZ, 'I '+qS, 'RO '+RO_target])\n if emulate_cross_driving is True:\n pulse_combinations[1] = 'Y90 ' + qCZ\n pulse_combinations[7] = 'mY90 ' + qCZ\n ############################################\n # calibration points #\n ############################################\n if i == (len(recovery_swap_amps) - 4):\n operation_dict['rSWAP ' + qS]['amplitude'] = rSWAP_cals\n pulse_combinations = (\n ['I ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'I ' + qCZ, 'I '+qCZ, 'I '+qS, 'RO '+RO_target])\n elif i == (len(recovery_swap_amps) - 3):\n operation_dict['rSWAP ' + qS]['amplitude'] = rSWAP_cals\n pulse_combinations = (\n ['I ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'I ' + qCZ, 'I '+qCZ, 'X180 '+qS, 'RO '+RO_target])\n elif i == (len(recovery_swap_amps) - 2):\n operation_dict['rSWAP ' + qS]['amplitude'] = rSWAP_cals\n pulse_combinations = (\n ['I ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 
'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'I ' + qCZ, 'X180 '+qCZ, 'I '+qS, 'RO '+RO_target])\n elif i == (len(recovery_swap_amps) - 1):\n operation_dict['rSWAP ' + qS]['amplitude'] = rSWAP_cals\n pulse_combinations = (\n ['I ' + qS, 'I ' + qCZ,\n 'SWAP '+qS, 'CZ ' + qCZ, 'rSWAP ' + qS,\n 'SWAP_corr ' + qS, 'CZ_corr ' + qCZ,\n 'I ' + qCZ, 'X180 '+qCZ, 'X180 '+qS, 'RO '+RO_target])\n\n pulses = []\n for p in pulse_combinations:\n pulses += [operation_dict[p]]\n el = multi_pulse_elt(i, station, pulses, sequencer_config)\n if distortion_dict is not None:\n print('\\rDistorting element {}/{} '.format(i+1,\n len(recovery_swap_amps)),\n end='')\n if i == len(recovery_swap_amps):\n print()\n el = distort_and_compensate(\n el, distortion_dict)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n\n\ndef BusT1(operation_dict, q0,\n times,\n distortion_dict=None,\n verbose=False,\n upload=True):\n '''\n\n '''\n sequencer_config = operation_dict['sequencer_config']\n\n seq_name = 'BusT1_{}'.format(q0)\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n\n base_sequence = ['X180 ' + q0, 'SWAP '+q0, 'rSWAP '+q0, 'RO '+q0]\n\n for i, tau in enumerate(times):\n operation_dict['rSWAP '+q0] = deepcopy(operation_dict['SWAP '+q0])\n operation_dict['rSWAP '+q0]['pulse_delay'] = tau\n pulse_combinations = base_sequence\n ############################################\n # calibration points #\n ############################################\n if (i == (len(times) - 4)) or (i == (len(times)-3)):\n pulse_combinations = (['RO ' + q0])\n elif i == (len(times) - 2) or i == (len(times) - 1):\n pulse_combinations = (['X180 '+q0, 'RO ' + q0])\n pulses = []\n for p in pulse_combinations:\n pulses += [operation_dict[p]]\n el = multi_pulse_elt(i, station, pulses, sequencer_config)\n if distortion_dict is not None:\n print('\\rDistorting element {}/{} \\t'.format(i+1, len(times)),\n end='')\n el = distort_and_compensate(\n el, distortion_dict)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n\n\ndef FluxTrack(operation_dict, q0,\n pulse_lengths=np.arange(0, 120e-9, 2e-9),\n verbose=False,\n distortion_dict=None,\n upload=True,\n cal_points=True):\n '''\n FluxTrack sequence where a poitive and negative SWAP are implemented\n amplitude and length are not varied.\n X180 - SWAP(l) - RO\n\n\n verbose=False: (bool) used for verbosity printing in the pulsar\n distortion_dict=None: (dict) flux_pulse predistortion kernels\n upload=True: (bool) uploads to AWG, set False for testing purposes\n cal_points=True: (bool) wether to use calibration points\n '''\n\n seq_name = 'FluxTrack_seq'\n seq = sequence.Sequence(seq_name)\n station.pulsar.update_channel_settings()\n el_list = []\n sequencer_config = operation_dict['sequencer_config']\n\n # SWAP_amp = operation_dict['SWAP '+q0]['amplitude']\n mSWAP = deepcopy(operation_dict['SWAP ' + q0])\n mSWAP['amplitude'] *= -1\n operation_dict['mSWAP ' + q0] = mSWAP\n # seq has to have at least 2 elts\n total_elts = 2 + cal_points*4\n for i in range(total_elts):\n if i == 0:\n pulse_combinations = ['X180 ' + q0, 'SWAP ' + q0, 'RO '+q0]\n elif i == 1:\n pulse_combinations = ['X180 ' + q0, 'mSWAP ' + q0, 'RO '+q0]\n # Calibration points\n elif i == 2 or i == 3:\n pulse_combinations = ['RO '+q0]\n elif i == 4 or i == 5:\n 
pulse_combinations = ['X180 ' + q0, 'RO '+q0]\n else:\n raise Exception('larger index than expected')\n\n # # this converts negative pulse lenghts to negative pulse amplitudes\n # operation_dict[\n # 'SWAP '+q0]['amplitude'] = np.abs(operation_dict['SWAP '+q0]['amplitude'])*(-1)**i\n # if cal_points and (i == (len(pulse_lengths)-4) or\n # i == (len(pulse_lengths)-3)):\n # pulse_combinations = ['RO '+q0]\n # elif cal_points and (i == (len(pulse_lengths)-2) or\n # i == (len(pulse_lengths)-1)):\n # pulse_combinations = ['X180 ' + q0, 'RO ' + q0]\n # else:\n # pulse_combinations = ['X180 '+q0, 'SWAP ' + q0, 'RO '+q0]\n\n pulses = []\n for p in pulse_combinations:\n pulses += [operation_dict[p]]\n\n el = multi_pulse_elt(i, station, pulses, sequencer_config)\n if distortion_dict is not None:\n print('\\r Distorting element {}/{} '.format(i+1, total_elts),\n end='')\n el = distort_and_compensate(\n el, distortion_dict)\n el_list.append(el)\n seq.append_element(el, trigger_wait=True)\n if upload:\n station.pulsar.program_awgs(seq, *el_list, verbose=verbose)\n\n return seq, el_list\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport logging\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.instrument.parameter import ManualParameter\nfrom qcodes.instrument.parameter import InstrumentRefParameter\nfrom qcodes.utils import validators as vals\nfrom pycqed.analysis.fit_toolbox.functions import PSD\nfrom pycqed.analysis.tools.plotting import set_xlabel, set_ylabel\n\n\nclass Base_LutMan(Instrument):\n \"\"\"\n The base LutMan is an abstract base class for the individual LutMans.\n The idea of the Lookuptable Manager (LutMan) is to provide a convenient\n interface to manage the waveforms loaded on specific lookuptables in\n an AWG.\n\n The LutMan provides\n - A set of basic waveforms that are generated based on\n parameters specified in the LutMan\n - A LutMap, relating waveform names to specific lookuptable indices\n - Methods to upload and regenerate these waveforms.\n - Methods to render waves.\n\n The Base LutMan does not provide a set of FIXME: comment ends\n\n\n \"\"\"\n\n def __init__(self, name, **kw):\n logging.info(__name__ + \" : Initializing instrument\")\n super().__init__(name, **kw)\n # FIXME: rename to instr_AWG to be consistent with other instr refs\n self.add_parameter(\n \"AWG\",\n parameter_class=InstrumentRefParameter,\n docstring=(\n \"Name of the AWG instrument used, note that this can also be \"\n \"a UHFQC or a CBox as these also contain AWG's\"\n ),\n vals=vals.Strings(),\n )\n self._add_cfg_parameters()\n self._add_waveform_parameters()\n self.add_parameter(\n \"LutMap\",\n docstring=(\n \"Dictionary containing the mapping between waveform\"\n \" names and parameter names (codewords).\"\n ),\n initial_value={},\n vals=vals.Dict(),\n parameter_class=ManualParameter,\n )\n self.add_parameter(\n \"sampling_rate\",\n unit=\"Hz\",\n vals=vals.Numbers(1, 100e10),\n initial_value=1e9,\n parameter_class=ManualParameter,\n )\n\n # Used to determine bounds in plotting.\n # overwrite in child classes if used.\n self._voltage_min = None\n self._voltage_max = None\n\n # initialize the _wave_dict to an empty dictionary\n self._wave_dict = {}\n self.set_default_lutmap()\n\n def time_to_sample(self, time):\n \"\"\"\n Takes a time in seconds and returns the corresponding sample\n \"\"\"\n return int(time * self.sampling_rate())\n\n def set_default_lutmap(self):\n \"\"\"\n Sets the \"LutMap\" parameter to\n\n \"\"\"\n raise NotImplementedError()\n\n def _add_waveform_parameters(self):\n \"\"\"\n Adds the parameters required to generate the standard waveforms\n \"\"\"\n raise NotImplementedError()\n\n def _add_cfg_parameters(self):\n pass\n\n def generate_standard_waveforms(self):\n \"\"\"\n Generates all the standard waveforms and populates self._wave_dict\n\n \"\"\"\n raise NotImplementedError()\n\n def load_waveform_onto_AWG_lookuptable(\n self, waveform_name: str, regenerate_waveforms: bool = False\n ):\n \"\"\"\n Loads a specific waveform to the AWG\n \"\"\"\n raise NotImplementedError()\n\n def load_waveforms_onto_AWG_lookuptable(\n self, regenerate_waveforms: bool = True, stop_start: bool = True\n ):\n \"\"\"\n Loads all waveforms specified in the LutMap to an AWG.\n\n Args:\n regenerate_waveforms (bool): if True calls\n generate_standard_waveforms before uploading.\n stop_start (bool): if True stops and starts the AWG.\n \"\"\"\n AWG = self.AWG.get_instr()\n\n if stop_start:\n AWG.stop()\n if regenerate_waveforms:\n self.generate_standard_waveforms()\n\n for waveform_name, lookuptable in self.LutMap().items():\n 
self.load_waveform_onto_AWG_lookuptable(waveform_name)\n\n if stop_start:\n AWG.start()\n\n def render_wave(\n self, wave_id, show=True, time_units=\"lut_index\", reload_pulses=True\n ):\n \"\"\"\n Render a waveform.\n\n Args:\n wave_id: can be either the \"name\" of a waveform or\n the integer key in self._wave_dict.\n \"\"\"\n if wave_id not in self.LutMap().keys():\n wave_id = get_wf_idx_from_name(wave_id, self.LutMap())\n\n if reload_pulses:\n self.generate_standard_waveforms()\n fig, ax = plt.subplots(1, 1)\n if time_units == \"lut_index\":\n x = np.arange(len(self._wave_dict[wave_id][0]))\n ax.set_xlabel(\"Lookuptable index (i)\")\n if self._voltage_min is not None:\n ax.vlines(2048, self._voltage_min, self._voltage_max, linestyle=\"--\")\n elif time_units == \"s\":\n x = np.arange(len(self._wave_dict[wave_id][0])) / self.sampling_rate.get()\n\n if self._voltage_min is not None:\n ax.vlines(\n 2048 / self.sampling_rate.get(),\n self._voltage_min,\n self._voltage_max,\n linestyle=\"--\",\n )\n\n if len(self._wave_dict[wave_id]) == 2:\n ax.plot(x, self._wave_dict[wave_id][0], marker=\".\", label=\"chI\")\n ax.plot(x, self._wave_dict[wave_id][1], marker=\".\", label=\"chQ\")\n elif len(self._wave_dict[wave_id]) == 4:\n ax.plot(x, self._wave_dict[wave_id][0], marker=\".\", label=\"chGI\")\n ax.plot(x, self._wave_dict[wave_id][1], marker=\".\", label=\"chGQ\")\n ax.plot(x, self._wave_dict[wave_id][2], marker=\".\", label=\"chDI\")\n ax.plot(x, self._wave_dict[wave_id][3], marker=\".\", label=\"chDQ\")\n else:\n raise ValueError(\"waveform shape not understood\")\n ax.legend()\n if self._voltage_min is not None:\n ax.set_facecolor(\"gray\")\n ax.axhspan(self._voltage_min, self._voltage_max, facecolor=\"w\", linewidth=0)\n ax.set_ylim(self._voltage_min * 1.1, self._voltage_max * 1.1)\n\n ax.set_xlim(0, x[-1])\n if time_units == \"s\":\n set_xlabel(ax, \"time\", \"s\")\n set_ylabel(ax, \"Amplitude\", \"V\")\n if show:\n plt.show()\n return fig, ax\n\n def render_wave_PSD(\n self, wave_id, show=True, reload_pulses=True, f_bounds=None, y_bounds=None\n ):\n if wave_id not in self.LutMap().keys():\n wave_id = get_wf_idx_from_name(wave_id, self.LutMap())\n if reload_pulses:\n self.generate_standard_waveforms()\n fig, ax = plt.subplots(1, 1)\n f_axis, PSD_I = PSD(self._wave_dict[wave_id][0], 1 / self.sampling_rate())\n f_axis, PSD_Q = PSD(self._wave_dict[wave_id][1], 1 / self.sampling_rate())\n\n ax.plot(f_axis, PSD_I, marker=\",\", label=\"chI\")\n ax.plot(f_axis, PSD_Q, marker=\",\", label=\"chQ\")\n ax.legend()\n\n ax.set_yscale(\"log\", nonposy=\"clip\")\n if y_bounds is not None:\n ax.set_ylim(y_bounds[0], y_bounds[1])\n if f_bounds is not None:\n ax.set_xlim(f_bounds[0], f_bounds[1])\n set_xlabel(ax, \"Frequency\", \"Hz\")\n set_ylabel(ax, \"Spectral density\", \"V^2/Hz\")\n if show:\n plt.show()\n return fig, ax\n\n\ndef get_redundant_codewords(codeword: int, bit_width: int = 4, bit_shift: int = 0):\n \"\"\"\n Takes in a desired codeword and generates the redundant codewords.\n\n Example A:\n Codeword = 5 -> '101'\n bit_width = 4 -> '0101'\n bit_shift = 0 -> xxxx0101\n The function should return all combinations for all\n xxxx0101\n\n Example B:\n Codeword = 5 -> '101'\n bit_width = 4 -> '0101'\n bit_shift = 4 -> 0101xxxx\n The function should return all combinations for all\n 0101xxxx\n\n Args:\n codeword (int) : the desired codeword\n bit_width (int): the number of bits in the codeword, determines\n how many redundant combinations are generated.\n bit_shift (int): determines how many bits 
the codeword is shifted.\n\n returns:\n redundant_codewords (list): all redundant combinations of the codeword\n see example above.\n \"\"\"\n codeword_shifted = codeword << bit_shift\n redundant_codewords = []\n for i in range(2 ** bit_width):\n if bit_shift == 0: # assumes the higher bits are used\n redundant_codewords.append(codeword_shifted + (i << bit_width))\n else: # assumes the lower bits are used\n redundant_codewords.append(codeword_shifted + i)\n return redundant_codewords\n\n\ndef get_wf_idx_from_name(name, lutmap):\n \"\"\"Find first match to a name in a lutmap.\"\"\"\n for idx_key, waveform in lutmap.items():\n if waveform[\"name\"] == name:\n return idx_key\n else:\n return False\n"
] | [
[
"numpy.linspace",
"numpy.arange",
"numpy.ones",
"numpy.round",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
],
[
"numpy.arange",
"numpy.mean",
"numpy.linspace"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
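Note on the record above: its last code cell ends with the helper `get_redundant_codewords`, whose docstring explains how a codeword is expanded over the unused bits of a wider codeword field. As a quick illustration of that bit logic, here is a standalone sketch that re-derives the behaviour shown in the record (it is not an import from PyCQED, and the function name `expand_codeword` is my own); it reproduces example A from the docstring, codeword 5 with bit_width 4 and bit_shift 0, which expands to every value of the form xxxx0101.

# Standalone sketch of the redundant-codeword expansion shown in the record above.
# Assumption: this mirrors the logic of get_redundant_codewords but is written
# independently, purely for illustration.
def expand_codeword(codeword: int, bit_width: int = 4, bit_shift: int = 0):
    shifted = codeword << bit_shift
    if bit_shift == 0:
        # The higher bits are free: vary them above the fixed low bits.
        return [shifted + (i << bit_width) for i in range(2 ** bit_width)]
    # Otherwise the lower bits are free: vary them below the fixed high bits.
    return [shifted + i for i in range(2 ** bit_width)]

if __name__ == "__main__":
    # Example A from the docstring: 5 -> '0101', so all xxxx0101 patterns.
    print([format(c, "08b") for c in expand_codeword(5, bit_width=4, bit_shift=0)])
    # first and last entries: '00000101' ... '11110101'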
darya-chyzhyk/nilearn | [
"c2a597586314ee1fe9da260d7d7ee00ef8b4eef5",
"7e413a51676fea8bdcbab12e20482da6c417567c",
"c2a597586314ee1fe9da260d7d7ee00ef8b4eef5"
] | [
"nilearn/decoding/tests/test_space_net.py",
"nilearn/input_data/tests/test_masker_validation.py",
"nilearn/decoding/objective_functions.py"
] | [
"import itertools\nfrom functools import partial\nfrom nose import SkipTest\nfrom nose.tools import (assert_equal, assert_true, assert_false,\n assert_raises)\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.utils import extmath\nfrom sklearn.linear_model import Lasso\nfrom sklearn.utils import check_random_state\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom nilearn._utils.testing import assert_raises_regex, assert_warns\nfrom nilearn.decoding.space_net import (\n _EarlyStoppingCallback, _space_net_alpha_grid, path_scores, BaseSpaceNet,\n _crop_mask, _univariate_feature_screening, SpaceNetClassifier,\n SpaceNetRegressor)\nfrom nilearn._utils.param_validation import _adjust_screening_percentile\nfrom nilearn.decoding.space_net_solvers import (_graph_net_logistic,\n _graph_net_squared_loss)\n\nmni152_brain_mask = (\n \"/usr/share/fsl/data/standard/MNI152_T1_1mm_brain_mask.nii.gz\")\nlogistic_path_scores = partial(path_scores, is_classif=True)\nsquared_loss_path_scores = partial(path_scores, is_classif=False)\n\n# Data used in almost all tests\nfrom .test_same_api import to_niimgs\nsize = 4\nfrom .simulate_graph_net_data import create_graph_net_simulation_data\nX_, y, w, mask = create_graph_net_simulation_data(\n snr=1., n_samples=10, size=size, n_points=5, random_state=42)\nX, mask = to_niimgs(X_, [size] * 3)\n\n\ndef test_space_net_alpha_grid(n_samples=4, n_features=3):\n rng = check_random_state(42)\n X = rng.randn(n_samples, n_features)\n y = np.arange(n_samples)\n\n for l1_ratio, is_classif in itertools.product([.5, 1.], [True, False]):\n alpha_max = np.max(np.abs(np.dot(X.T, y))) / l1_ratio\n np.testing.assert_almost_equal(_space_net_alpha_grid(\n X, y, n_alphas=1, l1_ratio=l1_ratio,\n logistic=is_classif), alpha_max)\n\n for l1_ratio, is_classif in itertools.product([.5, 1.], [True, False]):\n alpha_max = np.max(np.abs(np.dot(X.T, y))) / l1_ratio\n for n_alphas in range(1, 10):\n alphas = _space_net_alpha_grid(\n X, y, n_alphas=n_alphas, l1_ratio=l1_ratio,\n logistic=is_classif)\n np.testing.assert_almost_equal(alphas.max(), alpha_max)\n np.testing.assert_almost_equal(n_alphas, len(alphas))\n\n\ndef test_space_net_alpha_grid_same_as_sk():\n try:\n from sklearn.linear_model.coordinate_descent import _alpha_grid\n iris = load_iris()\n X = iris.data\n y = iris.target\n np.testing.assert_almost_equal(_space_net_alpha_grid(\n X, y, n_alphas=5), X.shape[0] * _alpha_grid(X, y, n_alphas=5,\n fit_intercept=False))\n except ImportError:\n raise SkipTest\n\n\ndef test_early_stopping_callback_object(n_samples=10, n_features=30):\n # This test evolves w so that every line of th _EarlyStoppingCallback\n # code is executed a some point. 
This a kind of code fuzzing.\n rng = check_random_state(42)\n X_test = rng.randn(n_samples, n_features)\n y_test = np.dot(X_test, np.ones(n_features))\n w = np.zeros(n_features)\n escb = _EarlyStoppingCallback(X_test, y_test, False)\n for counter in range(50):\n k = min(counter, n_features - 1)\n w[k] = 1\n\n # jitter\n if k > 0 and rng.rand() > .9:\n w[k - 1] = 1 - w[k - 1]\n\n escb(dict(w=w, counter=counter))\n assert_equal(len(escb.test_scores), counter + 1)\n\n # restart\n if counter > 20:\n w *= 0.\n\n\ndef test_params_correctly_propagated_in_constructors():\n for (penalty, is_classif, n_alphas, l1_ratio, n_jobs,\n cv, perc) in itertools.product([\"graph-net\", \"tv-l1\"],\n [True, False], [.1, .01],\n [.5, 1.], [1, -1], [2, 3],\n [5, 10]):\n cvobj = BaseSpaceNet(\n mask=\"dummy\", n_alphas=n_alphas, n_jobs=n_jobs, l1_ratios=l1_ratio,\n cv=cv, screening_percentile=perc, penalty=penalty,\n is_classif=is_classif)\n assert_equal(cvobj.n_alphas, n_alphas)\n assert_equal(cvobj.l1_ratios, l1_ratio)\n assert_equal(cvobj.n_jobs, n_jobs)\n assert_equal(cvobj.cv, cv)\n assert_equal(cvobj.screening_percentile, perc)\n\n\ndef test_screening_space_net():\n for verbose in [0, 2]:\n screening_percentile = assert_warns(UserWarning,\n _adjust_screening_percentile, 10,\n mask, verbose)\n screening_percentile = assert_warns(UserWarning,\n _adjust_screening_percentile, 10, mask)\n # We gave here a very small mask, judging by standards of brain size\n # thus the screening_percentile_ corrected for brain size should\n # be 100%\n assert_equal(screening_percentile, 100)\n\n\ndef test_logistic_path_scores():\n iris = load_iris()\n X, y = iris.data, iris.target\n _, mask = to_niimgs(X, [2, 2, 2])\n mask = mask.get_data().astype(np.bool)\n alphas = [1., .1, .01]\n test_scores, best_w = logistic_path_scores(\n _graph_net_logistic, X, y, mask, alphas, .5,\n np.arange(len(X)), np.arange(len(X)), {})[:2]\n test_scores = test_scores[0]\n assert_equal(len(test_scores), len(alphas))\n assert_equal(X.shape[1] + 1, len(best_w))\n\n\ndef test_squared_loss_path_scores():\n iris = load_iris()\n X, y = iris.data, iris.target\n _, mask = to_niimgs(X, [2, 2, 2])\n mask = mask.get_data().astype(np.bool)\n alphas = [1., .1, .01]\n test_scores, best_w = squared_loss_path_scores(\n _graph_net_squared_loss, X, y, mask, alphas, .5,\n np.arange(len(X)), np.arange(len(X)), {})[:2]\n test_scores = test_scores[0]\n assert_equal(len(test_scores), len(alphas))\n assert_equal(X.shape[1] + 1, len(best_w))\n\n\ndef test_tv_regression_simple():\n rng = check_random_state(42)\n dim = (4, 4, 4)\n W_init = np.zeros(dim)\n W_init[2:3, 1:2, -2:] = 1\n n = 10\n p = np.prod(dim)\n X = np.ones((n, 1)) + W_init.ravel().T\n X += rng.randn(n, p)\n y = np.dot(X, W_init.ravel())\n X, mask = to_niimgs(X, dim)\n print(\"%s %s\" % (X.shape, mask.get_data().sum()))\n alphas = [.1, 1.]\n\n for l1_ratio in [1.]:\n for debias in [True]:\n BaseSpaceNet(mask=mask, alphas=alphas, l1_ratios=l1_ratio,\n penalty=\"tv-l1\", is_classif=False, max_iter=10,\n debias=debias).fit(X, y)\n\n\ndef test_tv_regression_3D_image_doesnt_crash():\n rng = check_random_state(42)\n dim = (3, 4, 5)\n W_init = np.zeros(dim)\n W_init[2:3, 3:, 1:3] = 1\n\n n = 10\n p = dim[0] * dim[1] * dim[2]\n X = np.ones((n, 1)) + W_init.ravel().T\n X += rng.randn(n, p)\n y = np.dot(X, W_init.ravel())\n alpha = 1.\n X, mask = to_niimgs(X, dim)\n\n for l1_ratio in [0., .5, 1.]:\n BaseSpaceNet(mask=mask, alphas=alpha, l1_ratios=l1_ratio,\n penalty=\"tv-l1\", is_classif=False, max_iter=10).fit(X, 
y)\n\n\ndef test_graph_net_classifier_score():\n iris = load_iris()\n X, y = iris.data, iris.target\n y = 2 * (y > 0) - 1\n X_, mask = to_niimgs(X, (2, 2, 2))\n gnc = SpaceNetClassifier(mask=mask, alphas=1. / .01 / X.shape[0],\n l1_ratios=1., tol=1e-10,\n standardize=False, verbose=0,\n screening_percentile=100.).fit(X_, y)\n accuracy = gnc.score(X_, y)\n assert_equal(accuracy, accuracy_score(y, gnc.predict(X_)))\n\n\ndef test_log_reg_vs_graph_net_two_classes_iris(C=.01, tol=1e-10,\n zero_thr=1e-4):\n # Test for one of the extreme cases of Graph-Net: That is, with\n # l1_ratio = 1 (pure Lasso), we compare Graph-Net's coefficients'\n # performance with the coefficients obtained from Scikit-Learn's\n # LogisticRegression, with L1 penalty, in a 2 classes classification task\n iris = load_iris()\n X, y = iris.data, iris.target\n y = 2 * (y > 0) - 1\n X_, mask = to_niimgs(X, (2, 2, 2))\n tvl1 = SpaceNetClassifier(\n mask=mask, alphas=1. / C / X.shape[0], l1_ratios=1., tol=tol,\n verbose=0, max_iter=1000, penalty=\"tv-l1\", standardize=False,\n screening_percentile=100.).fit(X_, y)\n sklogreg = LogisticRegression(penalty=\"l1\",\n fit_intercept=True,\n solver='liblinear',\n tol=tol,\n C=C,\n ).fit(X, y)\n\n # compare supports\n np.testing.assert_array_equal((np.abs(tvl1.coef_) < zero_thr),\n (np.abs(sklogreg.coef_) < zero_thr))\n\n # compare predictions\n np.testing.assert_array_equal(tvl1.predict(X_), sklogreg.predict(X))\n\n\ndef test_lasso_vs_graph_net():\n # Test for one of the extreme cases of Graph-Net: That is, with\n # l1_ratio = 1 (pure Lasso), we compare Graph-Net's performance with\n # Scikit-Learn lasso\n lasso = Lasso(max_iter=100, tol=1e-8, normalize=False)\n graph_net = BaseSpaceNet(mask=mask, alphas=1. * X_.shape[0],\n l1_ratios=1, is_classif=False,\n penalty=\"graph-net\", max_iter=100)\n lasso.fit(X_, y)\n graph_net.fit(X, y)\n lasso_perf = 0.5 / y.size * extmath.norm(np.dot(\n X_, lasso.coef_) - y) ** 2 + np.sum(np.abs(lasso.coef_))\n graph_net_perf = 0.5 * ((graph_net.predict(X) - y) ** 2).mean()\n np.testing.assert_almost_equal(graph_net_perf, lasso_perf, decimal=3)\n\n\ndef test_params_correctly_propagated_in_constructors_biz():\n for penalty, is_classif, alpha, l1_ratio in itertools.product(\n [\"graph-net\", \"tv-l1\"], [True, False], [.4, .01], [.5, 1.]):\n cvobj = BaseSpaceNet(\n mask=\"dummy\", penalty=penalty, is_classif=is_classif, alphas=alpha,\n l1_ratios=l1_ratio)\n assert_equal(cvobj.alphas, alpha)\n assert_equal(cvobj.l1_ratios, l1_ratio)\n\n\ndef test_crop_mask():\n rng = np.random.RandomState(42)\n mask = np.zeros((3, 4, 5), dtype=np.bool)\n box = mask[:2, :3, :4]\n box[rng.rand(*box.shape) < 3.] = 1 # mask covers 30% of brain\n idx = np.where(mask)\n assert_true(idx[1].max() < 3)\n tight_mask = _crop_mask(mask)\n assert_equal(mask.sum(), tight_mask.sum())\n assert_true(np.prod(tight_mask.shape) <= np.prod(box.shape))\n\n\ndef test_univariate_feature_screening(dim=(11, 12, 13), n_samples=10):\n rng = np.random.RandomState(42)\n mask = rng.rand(*dim) > 100. 
/ np.prod(dim)\n assert_true(mask.sum() >= 100.)\n mask[dim[0] // 2, dim[1] // 3:, -dim[2] // 2:] = 1 # put spatial structure\n n_features = mask.sum()\n X = rng.randn(n_samples, n_features)\n w = rng.randn(n_features)\n w[rng.rand(n_features) > .8] = 0.\n y = X.dot(w)\n for is_classif in [True, False]:\n X_, mask_, support_ = _univariate_feature_screening(\n X, y, mask, is_classif, 20.)\n n_features_ = support_.sum()\n assert_equal(X_.shape[1], n_features_)\n assert_equal(mask_.sum(), n_features_)\n assert_true(n_features_ <= n_features)\n\n\ndef test_space_net_classifier_subclass():\n for penalty, alpha, l1_ratio, verbose in itertools.product(\n [\"graph-net\", \"tv-l1\"], [.4, .01], [.5, 1.], [True, False]):\n cvobj = SpaceNetClassifier(\n mask=\"dummy\", penalty=penalty, alphas=alpha, l1_ratios=l1_ratio,\n verbose=verbose)\n assert_equal(cvobj.alphas, alpha)\n assert_equal(cvobj.l1_ratios, l1_ratio)\n\n\ndef test_space_net_regressor_subclass():\n for penalty, alpha, l1_ratio, verbose in itertools.product(\n [\"graph-net\", \"tv-l1\"], [.4, .01], [.5, 1.], [True, False]):\n cvobj = SpaceNetRegressor(\n mask=\"dummy\", penalty=penalty, alphas=alpha, l1_ratios=l1_ratio,\n verbose=verbose)\n assert_equal(cvobj.alphas, alpha)\n assert_equal(cvobj.l1_ratios, l1_ratio)\n\n\ndef test_space_net_alpha_grid_pure_spatial():\n rng = check_random_state(42)\n X = rng.randn(10, 100)\n y = np.arange(X.shape[0])\n for is_classif in [True, False]:\n assert_false(np.any(np.isnan(_space_net_alpha_grid(\n X, y, l1_ratio=0., logistic=is_classif))))\n\n\ndef test_string_params_case():\n # penalty\n assert_raises(ValueError, BaseSpaceNet, penalty='TV-L1')\n assert_raises(ValueError, BaseSpaceNet, penalty='Graph-Net')\n\n\ndef test_crop_mask_empty_mask():\n assert_raises_regex(ValueError, \"Empty mask:.\", _crop_mask, np.array([]))\n assert_raises_regex(ValueError, \"Empty mask:\", _crop_mask,\n np.zeros((2, 2, 2)))\n\n\ndef test_space_net_no_crash_not_fitted():\n \"\"\"Regression test.\"\"\"\n iris = load_iris()\n X, y = iris.data, iris.target\n X, mask = to_niimgs(X, [2, 2, 2])\n for model in [SpaceNetRegressor, SpaceNetClassifier]:\n assert_raises_regex(RuntimeError,\n \"This %s instance is not fitted yet\" % (\n model.__name__), model().predict, X)\n model(mask=mask, alphas=1.).fit(X, y).predict(X)\n\n\ndef test_space_net_one_alpha_no_crash():\n \"\"\"Regression test.\"\"\"\n iris = load_iris()\n X, y = iris.data, iris.target\n X, mask = to_niimgs(X, [2, 2, 2])\n for model in [SpaceNetRegressor, SpaceNetClassifier]:\n model(n_alphas=1, mask=mask).fit(X, y)\n model(alphas=None, n_alphas=2, mask=mask).fit(X, y)\n\n\ndef test_checking_inputs_length():\n iris = load_iris()\n X, y = iris.data, iris.target\n y = 2 * (y > 0) - 1\n X_, mask = to_niimgs(X, (2, 2, 2))\n\n # Remove ten samples from y\n y = y[:-10]\n\n for model in [SpaceNetRegressor, SpaceNetClassifier]:\n\n assert_raises(ValueError, model(mask=mask,\n alphas=1. / .01 / X.shape[0],\n l1_ratios=1., tol=1e-10,\n screening_percentile=100.).fit, X_, y)\n\n\ndef test_targets_in_y_space_net_regressor():\n # This tests whether raises an error when unique targets given in y\n # are single.\n iris = load_iris()\n X, _ = iris.data, iris.target\n y = np.ones((iris.target.shape))\n\n imgs, mask = to_niimgs(X, (2, 2, 2))\n regressor = SpaceNetRegressor(mask=mask)\n assert_raises_regex(ValueError,\n \"The given input y must have atleast 2 targets\",\n regressor.fit, imgs, y)\n",
"from nose.tools import assert_true, assert_equal\nimport nibabel\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.externals.joblib import Memory\n\nfrom nilearn._utils.testing import assert_warns\nfrom nilearn.input_data.masker_validation import check_embedded_nifti_masker\nfrom nilearn.input_data import MultiNiftiMasker, NiftiMasker\n\n\nclass OwningClass(BaseEstimator):\n\n def __init__(self, mask=None, smoothing_fwhm=None,\n standardize=False, detrend=False,\n low_pass=None, high_pass=None, t_r=None,\n target_affine=None, target_shape=None,\n mask_strategy='background', mask_args=None,\n memory=Memory(cachedir=None), memory_level=0,\n n_jobs=1, verbose=0,\n dummy=None):\n self.mask = mask\n\n self.smoothing_fwhm = smoothing_fwhm\n self.standardize = standardize\n self.detrend = detrend\n self.low_pass = low_pass\n self.high_pass = high_pass\n self.t_r = t_r\n self.target_affine = target_affine\n self.target_shape = target_shape\n self.mask_strategy = mask_strategy\n self.mask_args = mask_args\n self.memory = memory\n self.memory_level = memory_level\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.dummy = dummy\n\n\ndef test_check_embedded_nifti_masker():\n owner = OwningClass()\n masker = check_embedded_nifti_masker(owner)\n assert_true(type(masker) is MultiNiftiMasker)\n\n for mask, multi_subject in (\n (MultiNiftiMasker(), True), (NiftiMasker(), False)):\n owner = OwningClass(mask=mask)\n masker = check_embedded_nifti_masker(owner,\n multi_subject=multi_subject)\n assert_equal(type(masker), type(mask))\n for param_key in masker.get_params():\n if param_key not in ['memory', 'memory_level', 'n_jobs',\n 'verbose']:\n assert_equal(getattr(masker, param_key),\n getattr(mask, param_key))\n else:\n assert_equal(getattr(masker, param_key),\n getattr(owner, param_key))\n\n # Check use of mask as mask_img\n shape = (6, 8, 10, 5)\n affine = np.eye(4)\n mask = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine)\n owner = OwningClass(mask=mask)\n masker = check_embedded_nifti_masker(owner)\n assert_true(masker.mask_img is mask)\n\n # Check attribute forwarding\n data = np.zeros((9, 9, 9))\n data[2:-2, 2:-2, 2:-2] = 10\n imgs = nibabel.Nifti1Image(data, np.eye(4))\n mask = MultiNiftiMasker()\n mask.fit([[imgs]])\n owner = OwningClass(mask=mask)\n masker = check_embedded_nifti_masker(owner)\n assert_true(masker.mask_img is mask.mask_img_)\n\n # Check conflict warning\n mask = NiftiMasker(mask_strategy='epi')\n owner = OwningClass(mask=mask)\n assert_warns(UserWarning, check_embedded_nifti_masker, owner)\n",
"\n\"\"\"\nCommon functions and base classes.\n\n\"\"\"\n# Author: DOHMATOB Elvis Dopgima,\n# PIZARRO Gaspar,\n# VAROQUAUX Gael,\n# GRAMFORT Alexandre,\n# PEDREGOSA Fabian\n# License: simplified BSD\n\nfrom functools import partial\nimport numpy as np\nfrom scipy import linalg\n\n\ndef spectral_norm_squared(X):\n \"\"\"Computes square of the operator 2-norm (spectral norm) of X\n\n This corresponds to the Lipschitz constant of the gradient of the\n squared-loss function:\n\n w -> .5 * ||y - Xw||^2\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n Design matrix.\n\n Returns\n -------\n lipschitz_constant : float\n The square of the spectral norm of X.\n\n \"\"\"\n # On big matrices like those that we have in neuroimaging, svdvals\n # is faster than a power iteration (even when using arpack's)\n return linalg.svdvals(X)[0] ** 2\n\n\ndef _logistic_loss_lipschitz_constant(X):\n \"\"\"Compute the Lipschitz constant (upper bound) for the gradient of the\n logistic sum:\n\n w -> \\sum_i log(1+exp(-y_i*(x_i*w + v)))\n\n \"\"\"\n # N.B: we handle intercept!\n X = np.hstack((X, np.ones((X.shape[0], 1))))\n return spectral_norm_squared(X)\n\n\ndef _squared_loss(X, y, w, compute_energy=True, compute_grad=False):\n \"\"\"Compute the MSE error, and optionally, its gradient too.\n\n The cost / energy function is\n\n MSE = .5 * ||y - Xw||^2\n\n A (1 / n_samples) factor is applied to the MSE.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n Design matrix.\n\n y : ndarray, shape (n_samples,)\n Target / response vector.\n\n w : ndarray shape (n_features,)\n Unmasked, ravelized weights map.\n\n compute_energy : bool, optional (default True)\n If set then energy is computed, otherwise only gradient is computed.\n\n compute_grad : bool, optional (default False)\n If set then gradient is computed, otherwise only energy is computed.\n\n Returns\n -------\n energy : float\n Energy (returned if `compute_energy` is set).\n\n gradient : ndarray, shape (n_features,)\n Gradient of energy (returned if `compute_grad` is set).\n\n \"\"\"\n if not (compute_energy or compute_grad):\n raise RuntimeError(\n \"At least one of compute_energy or compute_grad must be True.\")\n\n residual = np.dot(X, w) - y\n\n # compute energy\n if compute_energy:\n energy = .5 * np.dot(residual, residual)\n if not compute_grad:\n return energy\n\n grad = np.dot(X.T, residual)\n\n if not compute_energy:\n return grad\n\n return energy, grad\n\n\ndef _tv_l1_from_gradient(spatial_grad):\n \"\"\"Energy contribution due to penalized gradient, in TV-L1 model.\n\n Parameters\n ----------\n spatial_grad : ndarray, shape (4, nx, ny, nx)\n precomputed \"gradient + id\" array\n\n Returns\n -------\n out : float\n Energy contribution due to penalized gradient.\n \"\"\"\n\n tv_term = np.sum(np.sqrt(np.sum(spatial_grad[:-1] * spatial_grad[:-1],\n axis=0)))\n l1_term = np.abs(spatial_grad[-1]).sum()\n return l1_term + tv_term\n\n\ndef _div_id(grad, l1_ratio=.5):\n \"\"\"Compute divergence + id of image gradient + id\n\n Parameters\n ----------\n grad : ndarray, shape (4, nx, ny, nz, ...)\n where `img_shape` is the shape of the brain bounding box, and\n n_axes = len(img_shape).\n\n l1_ratio : float in the interval [0, 1]; optional (default .5)\n Constant that mixes L1 and spatial prior terms in the penalization.\n\n Returns\n -------\n res : ndarray, shape (nx, ny, nz, ...)\n The computed divergence + id operator.\n\n Raises\n ------\n RuntimeError\n\n \"\"\"\n\n if not (0. 
<= l1_ratio <= 1.):\n raise RuntimeError(\n \"l1_ratio must be in the interval [0, 1]; got %s\" % l1_ratio)\n\n res = np.zeros(grad.shape[1:])\n\n # the divergence part\n for d in range((grad.shape[0] - 1)):\n this_grad = np.rollaxis(grad[d], d)\n this_res = np.rollaxis(res, d)\n this_res[:-1] += this_grad[:-1]\n this_res[1:-1] -= this_grad[:-2]\n if len(this_grad) > 1:\n this_res[-1] -= this_grad[-2]\n\n res *= (1. - l1_ratio)\n\n # the identity part\n res -= l1_ratio * grad[-1]\n\n return res\n\n\ndef _gradient_id(img, l1_ratio=.5):\n \"\"\"Compute gradient + id of an image\n\n Parameters\n ----------\n img : ndarray, shape (nx, ny, nz, ...)\n N-dimensional image\n\n l1_ratio : float in the interval [0, 1]; optional (default .5)\n Constant that mixes L1 and spatial prior terms in the penalization.\n\n Returns\n -------\n gradient : ndarray, shape (4, nx, ny, nz, ...).\n Spatial gradient of the image: the i-th component along the first\n axis is the gradient along the i-th axis of the original array img.\n\n Raises\n ------\n RuntimeError\n\n \"\"\"\n\n if not (0. <= l1_ratio <= 1.):\n raise RuntimeError(\n \"l1_ratio must be in the interval [0, 1]; got %s\" % l1_ratio)\n\n shape = [img.ndim + 1] + list(img.shape)\n gradient = np.zeros(shape, dtype=np.float)\n\n # the gradient part: 'Clever' code to have a view of the gradient\n # with dimension i stop at -1\n slice_all = [0, slice(None, -1)]\n for d in range(img.ndim):\n gradient[tuple(slice_all)] = np.diff(img, axis=d)\n slice_all[0] = d + 1\n slice_all.insert(1, slice(None))\n\n gradient[:-1] *= (1. - l1_ratio)\n\n # the identity part\n gradient[-1] = l1_ratio * img\n\n return gradient\n\n\ndef _unmask(w, mask):\n \"\"\"Unmask an image into whole brain, with off-mask voxels set to 0.\n\n Parameters\n ----------\n w : ndarray, shape (n_features,)\n The image to be unmasked.\n\n mask : ndarray, shape (nx, ny, nz)\n The mask used in the unmasking operation. It is required that\n mask.sum() == n_features.\n\n Returns\n -------\n out : 3d of same shape as `mask`.\n The unmasked version of `w`\n \"\"\"\n\n if mask.sum() != len(w):\n raise ValueError(\"Expecting mask.sum() == len(w).\")\n out = np.zeros(mask.shape, dtype=w.dtype)\n out[mask] = w\n return out\n\n\ndef _sigmoid(t, copy=True):\n \"\"\"Helper function: return 1. / (1 + np.exp(-t))\"\"\"\n if copy:\n t = np.copy(t)\n t *= -1.\n t = np.exp(t, t)\n t += 1.\n t = np.reciprocal(t, t)\n return t\n\n\ndef _logistic(X, y, w):\n \"\"\"Compute the logistic function of the data: sum(sigmoid(yXw))\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n Design matrix.\n\n y : ndarray, shape (n_samples,)\n Target / response vector. Each entry must be +1 or -1.\n\n w : ndarray, shape (n_features,)\n Unmasked, ravelized input map.\n\n Returns\n -------\n energy : float\n Energy contribution due to logistic data-fit term.\n \"\"\"\n\n z = np.dot(X, w[:-1]) + w[-1]\n yz = y * z\n idx = yz > 0\n out = np.empty_like(yz)\n out[idx] = np.log1p(np.exp(-yz[idx]))\n out[~idx] = -yz[~idx] + np.log1p(np.exp(yz[~idx]))\n out = out.sum()\n return out\n\n\ndef _logistic_loss_grad(X, y, w):\n \"\"\"Computes the derivative of logistic\"\"\"\n z = np.dot(X, w[:-1]) + w[-1]\n yz = y * z\n z = _sigmoid(yz, copy=False)\n z0 = (z - 1.) 
* y\n grad = np.empty(w.shape)\n grad[:-1] = np.dot(X.T, z0)\n grad[-1] = np.sum(z0)\n return grad\n\n\n# gradient of squared loss function\n_squared_loss_grad = partial(_squared_loss, compute_energy=False,\n compute_grad=True)\n\n\ndef _gradient(w):\n \"\"\"Pure spatial gradient\"\"\"\n return _gradient_id(w, l1_ratio=0.)[:-1] # pure nabla\n\n\ndef _div(v):\n \"\"\"Pure spatial divergence\"\"\"\n return _div_id(np.vstack((v, [np.zeros_like(v[0])])), l1_ratio=0.)\n"
] | [
[
"numpy.dot",
"sklearn.linear_model.LogisticRegression",
"numpy.abs",
"numpy.arange",
"sklearn.linear_model.coordinate_descent._alpha_grid",
"sklearn.datasets.load_iris",
"sklearn.linear_model.Lasso",
"numpy.ones",
"numpy.testing.assert_almost_equal",
"numpy.where",
"numpy.prod",
"numpy.array",
"numpy.random.RandomState",
"numpy.zeros",
"sklearn.utils.check_random_state"
],
[
"numpy.eye",
"sklearn.externals.joblib.Memory",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.rollaxis",
"numpy.dot",
"numpy.abs",
"numpy.empty_like",
"numpy.ones",
"numpy.copy",
"numpy.diff",
"numpy.zeros_like",
"scipy.linalg.svdvals",
"numpy.reciprocal",
"numpy.exp",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
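Note on the record above: the `objective_functions.py` cell evaluates the logistic loss with a sign split so that `log(1 + exp(-yz))` never overflows. The sketch below re-derives that stable evaluation on its own (the name `log1p_exp_neg` is my own and nothing here is imported from nilearn); it is only meant to show why the split in `_logistic` is needed.

import numpy as np

# Standalone sketch of the numerically stable log(1 + exp(-t)) evaluation
# used by _logistic in the record above. Assumption: t plays the role of yz.
def log1p_exp_neg(t):
    t = np.asarray(t, dtype=float)
    out = np.empty_like(t)
    pos = t > 0
    out[pos] = np.log1p(np.exp(-t[pos]))                # safe: exp(-t) <= 1
    out[~pos] = -t[~pos] + np.log1p(np.exp(t[~pos]))    # safe: exp(t) <= 1
    return out

if __name__ == "__main__":
    t = np.array([-800.0, -5.0, 0.0, 5.0, 800.0])
    print(log1p_exp_neg(t))       # finite everywhere
    print(np.log1p(np.exp(-t)))   # naive form overflows to inf at t = -800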
fmamashli/mne-python | [
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9",
"52f064415e7c9fa8fe243d22108dcdf3d86505b9"
] | [
"examples/time_frequency/plot_source_power_spectrum.py",
"mne/_digitization/base.py",
"mne/io/kit/kit.py",
"mne/tests/test_report.py",
"examples/inverse/plot_read_stc.py",
"mne/forward/_lead_dots.py",
"mne/time_frequency/tests/test_stockwell.py",
"mne/viz/_brain/surface.py",
"mne/preprocessing/otp.py",
"mne/gui/tests/test_fiducials_gui.py",
"mne/channels/tests/test_layout.py"
] | [
"\"\"\"\n======================================================\nCompute source power spectral density (PSD) in a label\n======================================================\n\nReturns an STC file containing the PSD (in dB) of each of the sources\nwithin a label.\n\"\"\"\n# Authors: Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne import io\nfrom mne.datasets import sample\nfrom mne.minimum_norm import read_inverse_operator, compute_source_psd\n\nprint(__doc__)\n\n###############################################################################\n# Set parameters\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\nfname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'\nfname_label = data_path + '/MEG/sample/labels/Aud-lh.label'\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname, verbose=False)\nevents = mne.find_events(raw, stim_channel='STI 014')\ninverse_operator = read_inverse_operator(fname_inv)\nraw.info['bads'] = ['MEG 2443', 'EEG 053']\n\n# picks MEG gradiometers\npicks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,\n stim=False, exclude='bads')\n\ntmin, tmax = 0, 120 # use the first 120s of data\nfmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz\nn_fft = 2048 # the FFT size (n_fft). Ideally a power of 2\nlabel = mne.read_label(fname_label)\n\nstc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method=\"dSPM\",\n tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,\n pick_ori=\"normal\", n_fft=n_fft, label=label,\n dB=True)\n\nstc.save('psd_dSPM')\n\n###############################################################################\n# View PSD of sources in label\nplt.plot(1e3 * stc.times, stc.data.T)\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('PSD (dB)')\nplt.title('Source Power Spectrum (PSD)')\nplt.show()\n",
"# -*- coding: utf-8 -*-\n# Authors: Alexandre Gramfort <[email protected]>\n# Eric Larson <[email protected]>\n# Joan Massich <[email protected]>\n#\n# License: BSD (3-clause)\nimport numpy as np\nfrom copy import deepcopy\nfrom collections import Counter\n\nfrom ..transforms import _coord_frame_name\nfrom ..io.constants import FIFF\n\n_dig_kind_dict = {\n 'cardinal': FIFF.FIFFV_POINT_CARDINAL,\n 'hpi': FIFF.FIFFV_POINT_HPI,\n 'eeg': FIFF.FIFFV_POINT_EEG,\n 'extra': FIFF.FIFFV_POINT_EXTRA,\n}\n_dig_kind_ints = tuple(sorted(_dig_kind_dict.values()))\n_dig_kind_proper = {'cardinal': 'Cardinal',\n 'hpi': 'HPI',\n 'eeg': 'EEG',\n 'extra': 'Extra',\n 'unknown': 'Unknown'}\n_dig_kind_rev = {val: key for key, val in _dig_kind_dict.items()}\n_cardinal_kind_rev = {1: 'LPA', 2: 'Nasion', 3: 'RPA', 4: 'Inion'}\n\n\ndef _format_dig_points(dig):\n \"\"\"Format the dig points nicely.\"\"\"\n dig_points = [DigPoint(d) for d in dig] if dig is not None else dig\n return Digitization(dig_points)\n\n\ndef _get_dig_eeg(dig):\n return [d for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG]\n\n\ndef _count_points_by_type(dig):\n \"\"\"Get the number of points of each type.\"\"\"\n occurrences = Counter([d['kind'] for d in dig])\n return dict(\n fid=occurrences[FIFF.FIFFV_POINT_CARDINAL],\n hpi=occurrences[FIFF.FIFFV_POINT_HPI],\n eeg=occurrences[FIFF.FIFFV_POINT_EEG],\n extra=occurrences[FIFF.FIFFV_POINT_EXTRA],\n )\n\n\nclass DigPoint(dict):\n \"\"\"Container for a digitization point.\n\n This is a simple subclass of the standard dict type designed to provide\n a readable string representation.\n\n Parameters\n ----------\n kind : int\n The kind of channel,\n e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``.\n r : array, shape (3,)\n 3D position in m. and coord_frame.\n ident : int\n Number specifying the identity of the point.\n e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``,\n or 42 if kind is ``FIFFV_POINT_EEG``.\n coord_frame : int\n The coordinate frame used, e.g. 
``FIFFV_COORD_HEAD``.\n \"\"\"\n\n def __repr__(self): # noqa: D105\n if self['kind'] == FIFF.FIFFV_POINT_CARDINAL:\n id_ = _cardinal_kind_rev.get(\n self.get('ident', -1), 'Unknown cardinal')\n else:\n id_ = _dig_kind_proper[\n _dig_kind_rev.get(self.get('kind', -1), 'unknown')]\n id_ = ('%s #%s' % (id_, self.get('ident', -1)))\n id_ = id_.rjust(10)\n cf = _coord_frame_name(self['coord_frame'])\n pos = ('(%0.1f, %0.1f, %0.1f) mm' % tuple(1000 * self['r'])).ljust(25)\n return ('<DigPoint | %s : %s : %s frame>' % (id_, pos, cf))\n\n def __eq__(self, other): # noqa: D105\n \"\"\"Compare two DigPoints.\n\n Two digpoints are equal if they are the same kind, share the same\n coordinate frame and position.\n \"\"\"\n my_keys = ['kind', 'ident', 'coord_frame']\n if sorted(self.keys()) != sorted(other.keys()):\n return False\n elif any([self[_] != other[_] for _ in my_keys]):\n return False\n else:\n return np.allclose(self['r'], other['r'])\n\n\nclass Digitization(list):\n \"\"\"Represent a list of DigPoint objects.\n\n Parameters\n ----------\n elements : list | None\n A list of DigPoint objects.\n \"\"\"\n\n def __init__(self, elements=None):\n\n elements = list() if elements is None else elements\n\n if not all([isinstance(_, DigPoint) for _ in elements]):\n _msg = 'Digitization expected a iterable of DigPoint objects.'\n raise ValueError(_msg)\n else:\n super(Digitization, self).__init__(deepcopy(elements))\n\n def __eq__(self, other): # noqa: D105\n if not isinstance(other, (Digitization, list)) or \\\n len(self) != len(other):\n return False\n else:\n return all([ss == oo for ss, oo in zip(self, other)])\n",
"\"\"\"Conversion tool from SQD to FIF.\n\nRawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py.\n\"\"\"\n\n# Authors: Teon Brooks <[email protected]>\n# Joan Massich <[email protected]>\n# Christian Brodbeck <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom collections import defaultdict\nfrom math import sin, cos\nfrom os import SEEK_CUR, path as op\nfrom struct import unpack\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..pick import pick_types\nfrom ...utils import verbose, logger, warn, fill_doc, _check_option\nfrom ...transforms import apply_trans, als_ras_trans\nfrom ..base import BaseRaw\nfrom ..utils import _mult_cal_one\nfrom ...epochs import BaseEpochs\nfrom ..constants import FIFF\nfrom ..meas_info import _empty_info\nfrom .constants import KIT, LEGACY_AMP_PARAMS\nfrom .coreg import read_mrk\nfrom ...event import read_events\n\nfrom ..._digitization._utils import _set_dig_kit\n\n\ndef _call_digitization(info, mrk, elp, hsp):\n # prepare mrk\n if isinstance(mrk, list):\n mrk = [read_mrk(marker) if isinstance(marker, str)\n else marker for marker in mrk]\n mrk = np.mean(mrk, axis=0)\n\n # setup digitization\n if mrk is not None and elp is not None and hsp is not None:\n dig_points, dev_head_t = _set_dig_kit(mrk, elp, hsp)\n info['dig'] = dig_points\n info['dev_head_t'] = dev_head_t\n elif mrk is not None or elp is not None or hsp is not None:\n raise ValueError(\"mrk, elp and hsp need to be provided as a group \"\n \"(all or none)\")\n\n return info\n\n\nclass UnsupportedKITFormat(ValueError):\n \"\"\"Our reader is not guaranteed to work with old files.\"\"\"\n\n def __init__(self, sqd_version, *args, **kwargs): # noqa: D102\n self.sqd_version = sqd_version\n ValueError.__init__(self, *args, **kwargs)\n\n\n@fill_doc\nclass RawKIT(BaseRaw):\n \"\"\"Raw object from KIT SQD file.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10,000 points are in the head shape, they are automatically decimated.\n stim : list of int | '<' | '>' | None\n Channel-value correspondence when converting KIT trigger channels to a\n Neuromag-style stim channel. For '<', the largest values are assigned\n to the first channel (default). For '>', the largest values are\n assigned to the last channel. Can also be specified as a list of\n trigger channel indexes. If None, no synthesized channel is generated.\n slope : '+' | '-'\n How to interpret values on KIT trigger channels when synthesizing a\n Neuromag-style stim channel. With '+', a positive slope (low-to-high)\n is interpreted as an event. With '-', a negative slope (high-to-low)\n is interpreted as an event.\n stimthresh : float\n The threshold level for accepting voltage changes in KIT trigger\n channels as a trigger event. If None, stim must also be set to None.\n %(preload)s\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 
'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(verbose)s\n\n Notes\n -----\n ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the\n Polhemus FastScan system. hsp refers to the headshape surface points. elp\n refers to the points in head-space that corresponds to the HPI points.\n Currently, '*.elp' and '*.hsp' files are NOT supported.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n \"\"\"\n\n @verbose\n def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',\n slope='-', stimthresh=1, preload=False, stim_code='binary',\n allow_unknown_format=False, verbose=None): # noqa: D102\n logger.info('Extracting SQD Parameters from %s...' % input_fname)\n input_fname = op.abspath(input_fname)\n self.preload = False\n logger.info('Creating Raw.info structure...')\n info, kit_info = get_kit_info(input_fname, allow_unknown_format)\n kit_info['slope'] = slope\n kit_info['stimthresh'] = stimthresh\n if kit_info['acq_type'] != KIT.CONTINUOUS:\n raise TypeError('SQD file contains epochs, not raw data. Wrong '\n 'reader.')\n logger.info('Creating Info structure...')\n\n last_samps = [kit_info['n_samples'] - 1]\n self._raw_extras = [kit_info]\n self._set_stimchannels(info, stim, stim_code)\n super(RawKIT, self).__init__(\n info, preload, last_samps=last_samps, filenames=[input_fname],\n raw_extras=self._raw_extras, verbose=verbose)\n\n self.info = _call_digitization(info=self.info,\n mrk=mrk,\n elp=elp,\n hsp=hsp,\n )\n\n logger.info('Ready.')\n\n def read_stim_ch(self, buffer_size=1e5):\n \"\"\"Read events from data.\n\n Parameter\n ---------\n buffer_size : int\n The size of chunk to by which the data are scanned.\n\n Returns\n -------\n events : array, [samples]\n The event vector (1 x samples).\n \"\"\"\n buffer_size = int(buffer_size)\n start = int(self.first_samp)\n stop = int(self.last_samp + 1)\n\n pick = pick_types(self.info, meg=False, ref_meg=False,\n stim=True, exclude=[])\n stim_ch = np.empty((1, stop), dtype=np.int)\n for b_start in range(start, stop, buffer_size):\n b_stop = b_start + buffer_size\n x = self[pick, b_start:b_stop][0]\n stim_ch[:, b_start:b_start + x.shape[1]] = x\n\n return stim_ch\n\n def _set_stimchannels(self, info, stim, stim_code):\n \"\"\"Specify how the trigger channel is synthesized from analog channels.\n\n Has to be done before loading data. For a RawKIT instance that has been\n created with preload=True, this method will raise a\n NotImplementedError.\n\n Parameters\n ----------\n info : instance of MeasInfo\n The measurement info.\n stim : list of int | '<' | '>'\n Can be submitted as list of trigger channels.\n If a list is not specified, the default triggers extracted from\n misc channels will be used with specified directionality.\n '<' means that largest values assigned to the first channel\n in sequence.\n '>' means the largest trigger assigned to the last channel\n in sequence.\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 
'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n \"\"\"\n if self.preload:\n raise NotImplementedError(\"Can't change stim channel after \"\n \"loading data\")\n _check_option('stim_code', stim_code, ['binary', 'channel'])\n\n if stim is not None:\n if isinstance(stim, str):\n picks = _default_stim_chs(info)\n if stim == '<':\n stim = picks[::-1]\n elif stim == '>':\n stim = picks\n else:\n raise ValueError(\"stim needs to be list of int, '>' or \"\n \"'<', not %r\" % str(stim))\n else:\n stim = np.asarray(stim, int)\n if stim.max() >= self._raw_extras[0]['nchan']:\n raise ValueError(\n 'Got stim=%s, but sqd file only has %i channels' %\n (stim, self._raw_extras[0]['nchan']))\n\n # modify info\n nchan = self._raw_extras[0]['nchan'] + 1\n info['chs'].append(dict(\n cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0,\n unit=FIFF.FIFF_UNIT_NONE, unit_mul=0, ch_name='STI 014',\n coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan),\n kind=FIFF.FIFFV_STIM_CH))\n info._update_redundant()\n\n self._raw_extras[0]['stim'] = stim\n self._raw_extras[0]['stim_code'] = stim_code\n\n @verbose\n def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):\n \"\"\"Read a chunk of raw data.\"\"\"\n nchan = self._raw_extras[fi]['nchan']\n data_left = (stop - start) * nchan\n conv_factor = self._raw_extras[fi]['conv_factor']\n\n n_bytes = 2\n # Read up to 100 MB of data at a time.\n blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan)\n with open(self._filenames[fi], 'rb', buffering=0) as fid:\n # extract data\n fid.seek(144)\n # data offset info\n data_offset = unpack('i', fid.read(KIT.INT))[0]\n pointer = start * nchan * KIT.SHORT\n fid.seek(data_offset + pointer)\n stim = self._raw_extras[fi]['stim']\n for blk_start in np.arange(0, data_left, blk_size) // nchan:\n blk_size = min(blk_size, data_left - blk_start * nchan)\n block = np.fromfile(fid, dtype='h', count=blk_size)\n block = block.reshape(nchan, -1, order='F').astype(float)\n blk_stop = blk_start + block.shape[1]\n data_view = data[:, blk_start:blk_stop]\n block *= conv_factor\n\n # Create a synthetic stim channel\n if stim is not None:\n params = self._raw_extras[fi]\n stim_ch = _make_stim_channel(block[stim, :],\n params['slope'],\n params['stimthresh'],\n params['stim_code'], stim)\n block = np.vstack((block, stim_ch))\n\n _mult_cal_one(data_view, block, idx, None, mult)\n # cals are all unity, so can be ignored\n\n\ndef _default_stim_chs(info):\n \"\"\"Return default stim channels for SQD files.\"\"\"\n return pick_types(info, meg=False, ref_meg=False, misc=True,\n exclude=[])[:8]\n\n\ndef _make_stim_channel(trigger_chs, slope, threshold, stim_code,\n trigger_values):\n \"\"\"Create synthetic stim channel from multiple trigger channels.\"\"\"\n if slope == '+':\n trig_chs_bin = trigger_chs > threshold\n elif slope == '-':\n trig_chs_bin = trigger_chs < threshold\n else:\n raise ValueError(\"slope needs to be '+' or '-'\")\n # trigger value\n if stim_code == 'binary':\n trigger_values = 2 ** np.arange(len(trigger_chs))\n elif stim_code != 'channel':\n raise ValueError(\"stim_code must be 'binary' or 'channel', got %s\" %\n repr(stim_code))\n trig_chs = trig_chs_bin * trigger_values[:, np.newaxis]\n return np.array(trig_chs.sum(axis=0), ndmin=2)\n\n\nclass EpochsKIT(BaseEpochs):\n \"\"\"Epochs Array object from KIT SQD file.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n events : str | array, shape (n_events, 3)\n Path to events file. 
If array, it is the events typically returned\n by the read_events function. If some events don't match the events\n of interest as specified by event_id,they will be marked as 'IGNORED'\n in the drop log.\n event_id : int | list of int | dict | None\n The id of the event to consider. If dict,\n the keys can later be used to access associated events. Example:\n dict(auditory=1, visual=3). If int, a dict will be created with\n the id as string. If a list, all events with the IDs specified\n in the list are used. If None, all events will be used with\n and a dict is created with string integer names corresponding\n to the event id integers.\n tmin : float\n Start time before event.\n baseline : None or tuple of length 2 (default (None, 0))\n The time interval to apply baseline correction.\n If None do not apply it. If baseline is (a, b)\n the interval is between \"a (s)\" and \"b (s)\".\n If a is None the beginning of the data is used\n and if b is None then b is set to the end of the interval.\n If baseline is equal to (None, None) all the time\n interval is used.\n The baseline (a, b) includes both endpoints, i.e. all\n timepoints t such that a <= t <= b.\n reject : dict | None\n Rejection parameters based on peak-to-peak amplitude.\n Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.\n If reject is None then no rejection is done. Example::\n\n reject = dict(grad=4000e-13, # T / m (gradiometers)\n mag=4e-12, # T (magnetometers)\n eeg=40e-6, # V (EEG channels)\n eog=250e-6 # V (EOG channels)\n )\n flat : dict | None\n Rejection parameters based on flatness of signal.\n Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values\n are floats that set the minimum acceptable peak-to-peak amplitude.\n If flat is None then no rejection is done.\n reject_tmin : scalar | None\n Start of the time window used to reject epochs (with the default None,\n the window will start with tmin).\n reject_tmax : scalar | None\n End of the time window used to reject epochs (with the default None,\n the window will end with tmax).\n mrk : None | str | array_like, shape = (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape = (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape = (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10`000 points are in the head shape, they are automatically decimated.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(verbose)s\n\n Notes\n -----\n ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the\n Polhemus FastScan system. hsp refers to the headshape surface points. 
elp\n refers to the points in head-space that corresponds to the HPI points.\n Currently, '*.elp' and '*.hsp' files are NOT supported.\n\n See Also\n --------\n mne.Epochs : Documentation of attribute and methods.\n \"\"\"\n\n @verbose\n def __init__(self, input_fname, events, event_id=None, tmin=0,\n baseline=None, reject=None, flat=None, reject_tmin=None,\n reject_tmax=None, mrk=None, elp=None, hsp=None,\n allow_unknown_format=False, verbose=None): # noqa: D102\n\n if isinstance(events, str):\n events = read_events(events)\n\n logger.info('Extracting KIT Parameters from %s...' % input_fname)\n input_fname = op.abspath(input_fname)\n self.info, kit_info = get_kit_info(input_fname, allow_unknown_format)\n kit_info.update(filename=input_fname)\n self._raw_extras = [kit_info]\n self._filenames = []\n if len(events) != self._raw_extras[0]['n_epochs']:\n raise ValueError('Event list does not match number of epochs.')\n\n if self._raw_extras[0]['acq_type'] == KIT.EPOCHS:\n self._raw_extras[0]['data_length'] = KIT.INT\n self._raw_extras[0]['dtype'] = 'h'\n else:\n raise TypeError('SQD file contains raw data, not epochs or '\n 'average. Wrong reader.')\n\n if event_id is None: # convert to int to make typing-checks happy\n event_id = {str(e): int(e) for e in np.unique(events[:, 2])}\n\n for key, val in event_id.items():\n if val not in events[:, 2]:\n raise ValueError('No matching events found for %s '\n '(event id %i)' % (key, val))\n\n data = self._read_kit_data()\n assert data.shape == (self._raw_extras[0]['n_epochs'],\n self.info['nchan'],\n self._raw_extras[0]['frame_length'])\n tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin\n super(EpochsKIT, self).__init__(\n self.info, data, events, event_id, tmin, tmax, baseline,\n reject=reject, flat=flat, reject_tmin=reject_tmin,\n reject_tmax=reject_tmax, filename=input_fname, verbose=verbose)\n\n # XXX: This should be unified with kitraw\n self.info = _call_digitization(info=self.info,\n mrk=mrk,\n elp=elp,\n hsp=hsp,\n )\n\n logger.info('Ready.')\n\n def _read_kit_data(self):\n \"\"\"Read epochs data.\n\n Returns\n -------\n data : array, [channels x samples]\n the data matrix (channels x samples).\n times : array, [samples]\n returns the time values corresponding to the samples.\n \"\"\"\n info = self._raw_extras[0]\n epoch_length = info['frame_length']\n n_epochs = info['n_epochs']\n n_samples = info['n_samples']\n filename = info['filename']\n dtype = info['dtype']\n nchan = info['nchan']\n\n with open(filename, 'rb', buffering=0) as fid:\n fid.seek(144)\n # data offset info\n data_offset = unpack('i', fid.read(KIT.INT))[0]\n count = n_samples * nchan\n fid.seek(data_offset)\n data = np.fromfile(fid, dtype=dtype, count=count)\n data = data.reshape((n_samples, nchan)).T\n data = data * info['conv_factor']\n data = data.reshape((nchan, n_epochs, epoch_length))\n data = data.transpose((1, 0, 2))\n\n return data\n\n\ndef get_kit_info(rawfile, allow_unknown_format):\n \"\"\"Extract all the information from the sqd file.\n\n Parameters\n ----------\n rawfile : str\n KIT file to be read.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. 
Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n\n Returns\n -------\n info : instance of Info\n An Info for the instance.\n sqd : dict\n A dict containing all the sqd parameter settings.\n \"\"\"\n sqd = dict()\n sqd['rawfile'] = rawfile\n unsupported_format = False\n with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug\n fid.seek(16)\n basic_offset = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(basic_offset)\n # check file format version\n version, revision = unpack('2i', fid.read(2 * KIT.INT))\n if version < 2 or (version == 2 and revision < 3):\n version_string = \"V%iR%03i\" % (version, revision)\n if allow_unknown_format:\n unsupported_format = True\n logger.warning(\"Force loading KIT format %s\", version_string)\n else:\n raise UnsupportedKITFormat(\n version_string,\n \"SQD file format %s is not officially supported. \"\n \"Set allow_unknown_format=True to load it anyways.\" %\n (version_string,))\n\n sysid = unpack('i', fid.read(KIT.INT))[0]\n # basic info\n system_name = unpack('128s', fid.read(128))[0].decode()\n # model name\n model_name = unpack('128s', fid.read(128))[0].decode()\n # channels\n sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0]\n comment = unpack('256s', fid.read(256))[0].decode()\n create_time, last_modified_time = unpack('2i', fid.read(2 * KIT.INT))\n fid.seek(KIT.INT * 3, SEEK_CUR) # reserved\n dewar_style = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(KIT.INT * 3, SEEK_CUR) # spare\n fll_type = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(KIT.INT * 3, SEEK_CUR) # spare\n trigger_type = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(KIT.INT * 3, SEEK_CUR) # spare\n adboard_type = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(KIT.INT * 29, SEEK_CUR) # reserved\n\n if version < 2 or (version == 2 and revision <= 3):\n adc_range = float(unpack('i', fid.read(KIT.INT))[0])\n else:\n adc_range = unpack('d', fid.read(KIT.DOUBLE))[0]\n adc_polarity, adc_allocated, adc_stored = unpack('3i',\n fid.read(3 * KIT.INT))\n system_name = system_name.replace('\\x00', '')\n system_name = system_name.strip().replace('\\n', '/')\n model_name = model_name.replace('\\x00', '')\n model_name = model_name.strip().replace('\\n', '/')\n\n logger.debug(\"SQD file basic information:\")\n logger.debug(\"Meg160 version = V%iR%03i\", version, revision)\n logger.debug(\"System ID = %i\", sysid)\n logger.debug(\"System name = %s\", system_name)\n logger.debug(\"Model name = %s\", model_name)\n logger.debug(\"Channel count = %i\", channel_count)\n logger.debug(\"Comment = %s\", comment)\n logger.debug(\"Dewar style = %i\", dewar_style)\n logger.debug(\"FLL type = %i\", fll_type)\n logger.debug(\"Trigger type = %i\", trigger_type)\n logger.debug(\"A/D board type = %i\", adboard_type)\n logger.debug(\"ADC range = +/-%s[V]\", adc_range / 2.)\n logger.debug(\"ADC allocate = %i[bit]\", adc_allocated)\n logger.debug(\"ADC bit = %i[bit]\", adc_stored)\n\n # check that we can read this file\n if fll_type not in KIT.FLL_SETTINGS:\n fll_types = sorted(KIT.FLL_SETTINGS.keys())\n use_fll_type = fll_types[\n np.searchsorted(fll_types, fll_type) - 1]\n warn('Unknown site filter settings (FLL) for system '\n '\"%s\" model \"%s\" (ID %s), will assume FLL %d->%d, check '\n 'your data for correctness, including channel scales and '\n 'filter settings!'\n % (system_name, model_name, sysid, fll_type, use_fll_type))\n fll_type = use_fll_type\n\n # channel information\n fid.seek(64)\n chan_offset, chan_size = unpack('2i', fid.read(2 * KIT.INT))\n 
sqd['channels'] = channels = []\n for i in range(channel_count):\n fid.seek(chan_offset + chan_size * i)\n channel_type, = unpack('i', fid.read(KIT.INT))\n # System 52 mislabeled reference channels as NULL. This was fixed\n # in system 53; not sure about 51...\n if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL:\n channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE\n\n if channel_type in KIT.CHANNELS_MEG:\n if channel_type not in KIT.CH_TO_FIFF_COIL:\n raise NotImplementedError(\n \"KIT channel type %i can not be read. Please contact \"\n \"the mne-python developers.\" % channel_type)\n channels.append({\n 'type': channel_type,\n # (x, y, z, theta, phi) for all MEG channels. Some channel\n # types have additional information which we're not using.\n 'loc': np.fromfile(fid, dtype='d', count=5)\n })\n elif channel_type in KIT.CHANNELS_MISC:\n channel_no, = unpack('i', fid.read(KIT.INT))\n # name, = unpack('64s', fid.read(64))\n fid.seek(64, 1)\n channels.append({\n 'type': channel_type,\n 'no': channel_no,\n })\n elif channel_type == KIT.CHANNEL_NULL:\n channels.append({'type': channel_type})\n else:\n raise IOError(\"Unknown KIT channel type: %i\" % channel_type)\n\n # Channel sensitivity information:\n # only sensor channels requires gain. the additional misc channels\n # (trigger channels, audio and voice channels) are passed\n # through unaffected\n fid.seek(80)\n sensitivity_offset, = unpack('i', fid.read(KIT.INT))\n fid.seek(sensitivity_offset)\n # (offset [Volt], gain [Tesla/Volt]) for each channel\n sensitivity = np.fromfile(fid, dtype='d', count=channel_count * 2)\n sensitivity.shape = (channel_count, 2)\n channel_offset, channel_gain = sensitivity.T\n\n # amplifier gain\n fid.seek(112)\n amp_offset = unpack('i', fid.read(KIT.INT))[0]\n fid.seek(amp_offset)\n amp_data = unpack('i', fid.read(KIT.INT))[0]\n if fll_type >= 100: # Kapper Type\n # gain: mask bit\n gain1 = (amp_data & 0x00007000) >> 12\n gain2 = (amp_data & 0x70000000) >> 28\n gain3 = (amp_data & 0x07000000) >> 24\n amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3])\n # filter settings\n hpf = (amp_data & 0x00000700) >> 8\n lpf = (amp_data & 0x00070000) >> 16\n bef = (amp_data & 0x00000003) >> 0\n else: # Hanger Type\n # gain\n input_gain = (amp_data & 0x1800) >> 11\n output_gain = (amp_data & 0x0007) >> 0\n amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain]\n # filter settings\n hpf = (amp_data & 0x007) >> 4\n lpf = (amp_data & 0x0700) >> 8\n bef = (amp_data & 0xc000) >> 14\n hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type]\n sqd['highpass'] = KIT.HPFS[hpf_options][hpf]\n sqd['lowpass'] = KIT.LPFS[lpf_options][lpf]\n sqd['notch'] = KIT.BEFS[bef_options][bef]\n\n # Acquisition Parameters\n fid.seek(128)\n acqcond_offset, = unpack('i', fid.read(KIT.INT))\n fid.seek(acqcond_offset)\n sqd['acq_type'], = acq_type, = unpack('i', fid.read(KIT.INT))\n sqd['sfreq'], = unpack('d', fid.read(KIT.DOUBLE))\n if acq_type == KIT.CONTINUOUS:\n # samples_count, = unpack('i', fid.read(KIT.INT))\n fid.seek(KIT.INT, 1)\n sqd['n_samples'], = unpack('i', fid.read(KIT.INT))\n elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS:\n sqd['frame_length'], = unpack('i', fid.read(KIT.INT))\n sqd['pretrigger_length'], = unpack('i', fid.read(KIT.INT))\n sqd['average_count'], = unpack('i', fid.read(KIT.INT))\n sqd['n_epochs'], = unpack('i', fid.read(KIT.INT))\n if acq_type == KIT.EVOKED:\n sqd['n_samples'] = sqd['frame_length']\n else:\n sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs']\n 
else:\n raise IOError(\"Invalid acquisition type: %i. Your file is neither \"\n \"continuous nor epoched data.\" % (acq_type,))\n\n # precompute conversion factor for reading data\n if unsupported_format:\n if sysid not in LEGACY_AMP_PARAMS:\n raise IOError(\"Legacy parameters for system ID %i unavailable\" %\n (sysid,))\n adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid]\n is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels])\n ad_to_volt = adc_range / (2. ** adc_stored)\n ad_to_tesla = ad_to_volt / amp_gain * channel_gain\n conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt)\n sqd['conv_factor'] = conv_factor[:, np.newaxis]\n\n # Create raw.info dict for raw fif object with SQD data\n info = _empty_info(float(sqd['sfreq']))\n info.update(meas_date=(create_time, 0), lowpass=sqd['lowpass'],\n highpass=sqd['highpass'], kit_system_id=sysid)\n\n # Creates a list of dicts of meg channels for raw.info\n logger.info('Setting channel info structure...')\n info['chs'] = fiff_channels = []\n channel_index = defaultdict(lambda: 0)\n for idx, ch in enumerate(channels, 1):\n if ch['type'] in KIT.CHANNELS_MEG:\n ch_name = 'MEG %03d' % idx\n # create three orthogonal vector\n # ch_angles[0]: theta, ch_angles[1]: phi\n theta, phi = np.radians(ch['loc'][3:])\n x = sin(theta) * cos(phi)\n y = sin(theta) * sin(phi)\n z = cos(theta)\n vec_z = np.array([x, y, z])\n vec_z /= linalg.norm(vec_z)\n vec_x = np.zeros(vec_z.size, dtype=np.float)\n if vec_z[1] < vec_z[2]:\n if vec_z[0] < vec_z[1]:\n vec_x[0] = 1.0\n else:\n vec_x[1] = 1.0\n elif vec_z[0] < vec_z[2]:\n vec_x[0] = 1.0\n else:\n vec_x[2] = 1.0\n vec_x -= np.sum(vec_x * vec_z) * vec_z\n vec_x /= linalg.norm(vec_x)\n vec_y = np.cross(vec_z, vec_x)\n # transform to Neuromag like coordinate space\n vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z))\n vecs = apply_trans(als_ras_trans, vecs)\n unit = FIFF.FIFF_UNIT_T\n loc = vecs.ravel()\n else:\n ch_type_label = KIT.CH_LABEL[ch['type']]\n channel_index[ch_type_label] += 1\n ch_type_index = channel_index[ch_type_label]\n ch_name = '%s %03i' % (ch_type_label, ch_type_index)\n unit = FIFF.FIFF_UNIT_V\n loc = np.zeros(12)\n fiff_channels.append(dict(\n cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE,\n unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name,\n coord_frame=FIFF.FIFFV_COORD_DEVICE,\n coil_type=KIT.CH_TO_FIFF_COIL[ch['type']],\n kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc))\n info._update_redundant()\n return info, sqd\n\n\n@fill_doc\ndef read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',\n slope='-', stimthresh=1, preload=False, stim_code='binary',\n allow_unknown_format=False, verbose=None):\n \"\"\"Reader function for KIT conversion to FIF.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. 
If more than\n 10,000 points are in the head shape, they are automatically decimated.\n stim : list of int | '<' | '>'\n Channel-value correspondence when converting KIT trigger channels to a\n Neuromag-style stim channel. For '<', the largest values are assigned\n to the first channel (default). For '>', the largest values are\n assigned to the last channel. Can also be specified as a list of\n trigger channel indexes.\n slope : '+' | '-'\n How to interpret values on KIT trigger channels when synthesizing a\n Neuromag-style stim channel. With '+', a positive slope (low-to-high)\n is interpreted as an event. With '-', a negative slope (high-to-low)\n is interpreted as an event.\n stimthresh : float\n The threshold level for accepting voltage changes in KIT trigger\n channels as a trigger event.\n %(preload)s\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(verbose)s\n\n Returns\n -------\n raw : instance of RawKIT\n A Raw object containing KIT data.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n\n Notes\n -----\n If mrk, hsp or elp are array_like inputs, then the numbers in xyz\n coordinates should be in units of meters.\n \"\"\"\n return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,\n stim=stim, slope=slope, stimthresh=stimthresh,\n preload=preload, stim_code=stim_code,\n allow_unknown_format=allow_unknown_format, verbose=verbose)\n\n\n@fill_doc\ndef read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None,\n hsp=None, allow_unknown_format=False, verbose=None):\n \"\"\"Reader function for KIT epochs files.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n events : array, shape (n_events, 3)\n The events typically returned by the read_events function.\n If some events don't match the events of interest as specified\n by event_id, they will be marked as 'IGNORED' in the drop log.\n event_id : int | list of int | dict | None\n The id of the event to consider. If dict,\n the keys can later be used to access associated events. Example:\n dict(auditory=1, visual=3). If int, a dict will be created with\n the id as string. If a list, all events with the IDs specified\n in the list are used. If None, all events will be used with\n and a dict is created with string integer names corresponding\n to the event id integers.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10,000 points are in the head shape, they are automatically decimated.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. 
Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(verbose)s\n\n Returns\n -------\n epochs : instance of Epochs\n The epochs.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n epochs = EpochsKIT(input_fname=input_fname, events=events,\n event_id=event_id, mrk=mrk, elp=elp, hsp=hsp,\n allow_unknown_format=allow_unknown_format,\n verbose=verbose)\n return epochs\n",
"# -*- coding: utf-8 -*-\n# Authors: Mainak Jas <[email protected]>\n# Teon Brooks <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport copy\nimport glob\nimport os\nimport os.path as op\nimport shutil\n\nimport numpy as np\nfrom numpy.testing import assert_equal\nimport pytest\nfrom matplotlib import pyplot as plt\n\nfrom mne import Epochs, read_events, read_evokeds\nfrom mne.io import read_raw_fif\nfrom mne.datasets import testing\nfrom mne.report import Report, open_report, _ReportScraper\nfrom mne.utils import (_TempDir, requires_mayavi, requires_nibabel, Bunch,\n run_tests_if_main, traits_test, requires_h5py)\nfrom mne.viz import plot_alignment\n\ndata_dir = testing.data_path(download=False)\nsubjects_dir = op.join(data_dir, 'subjects')\nreport_dir = op.join(data_dir, 'MEG', 'sample')\nraw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')\nms_fname = op.join(data_dir, 'SSS', 'test_move_anon_raw.fif')\nevent_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')\ncov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')\nfwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')\ntrans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')\ninv_fname = op.join(report_dir,\n 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')\nmri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')\n\nbase_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',\n 'data'))\nevoked_fname = op.join(base_dir, 'test-ave.fif')\n\n\ndef _get_example_figures():\n \"\"\"Create two example figures.\"\"\"\n fig1 = plt.plot([1, 2], [1, 2])[0].figure\n fig2 = plt.plot([3, 4], [3, 4])[0].figure\n return [fig1, fig2]\n\n\[email protected]\[email protected]_testing_data\ndef test_render_report():\n \"\"\"Test rendering -*.fif files for mne report.\"\"\"\n tempdir = _TempDir()\n raw_fname_new = op.join(tempdir, 'temp_raw.fif')\n ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')\n event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')\n cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')\n fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')\n inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')\n for a, b in [[raw_fname, raw_fname_new],\n [ms_fname, ms_fname_new],\n [event_fname, event_fname_new],\n [cov_fname, cov_fname_new],\n [fwd_fname, fwd_fname_new],\n [inv_fname, inv_fname_new]]:\n shutil.copyfile(a, b)\n\n # create and add -epo.fif and -ave.fif files\n epochs_fname = op.join(tempdir, 'temp-epo.fif')\n evoked_fname = op.join(tempdir, 'temp-ave.fif')\n # Speed it up by picking channels\n raw = read_raw_fif(raw_fname_new, preload=True)\n raw.pick_channels(['MEG 0111', 'MEG 0121'])\n raw.del_proj()\n epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)\n epochs.save(epochs_fname, overwrite=True)\n # This can take forever (stall Travis), so let's make it fast\n # Also, make sure crop range is wide enough to avoid rendering bug\n epochs.average().crop(0.1, 0.2).save(evoked_fname)\n\n report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)\n with pytest.warns(RuntimeWarning, match='Cannot render MRI'):\n report.parse_folder(data_path=tempdir, on_error='raise')\n assert repr(report)\n\n # Check correct paths and filenames\n fnames = glob.glob(op.join(tempdir, '*.fif'))\n for fname in fnames:\n assert (op.basename(fname) in\n [op.basename(x) for x in report.fnames])\n assert (''.join(report.html).find(op.basename(fname)) != -1)\n\n assert_equal(len(report.fnames), len(fnames))\n assert_equal(len(report.html), len(report.fnames))\n 
assert_equal(len(report.fnames), len(report))\n\n # Check saving functionality\n report.data_path = tempdir\n fname = op.join(tempdir, 'report.html')\n report.save(fname=fname, open_browser=False)\n assert (op.isfile(fname))\n with open(fname, 'rb') as fid:\n html = fid.read().decode('utf-8')\n assert '(MaxShield on)' in html\n\n assert_equal(len(report.html), len(fnames))\n assert_equal(len(report.html), len(report.fnames))\n\n # Check saving same report to new filename\n report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)\n assert (op.isfile(op.join(tempdir, 'report2.html')))\n\n # Check overwriting file\n report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,\n overwrite=True)\n assert (op.isfile(op.join(tempdir, 'report.html')))\n\n # Check pattern matching with multiple patterns\n pattern = ['*raw.fif', '*eve.fif']\n with pytest.warns(RuntimeWarning, match='Cannot render MRI'):\n report.parse_folder(data_path=tempdir, pattern=pattern)\n assert (repr(report))\n\n fnames = glob.glob(op.join(tempdir, '*.raw')) + \\\n glob.glob(op.join(tempdir, '*.raw'))\n for fname in fnames:\n assert (op.basename(fname) in\n [op.basename(x) for x in report.fnames])\n assert (''.join(report.html).find(op.basename(fname)) != -1)\n\n pytest.raises(ValueError, Report, image_format='foo')\n pytest.raises(ValueError, Report, image_format=None)\n\n # SVG rendering\n report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,\n image_format='svg')\n with pytest.warns(RuntimeWarning, match='Cannot render MRI'):\n report.parse_folder(data_path=tempdir, on_error='raise')\n\n # ndarray support smoke test\n report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')\n\n with pytest.raises(TypeError, match='Each fig must be a'):\n report.add_figs_to_section('foo', 'caption', 'section')\n with pytest.raises(TypeError, match='Each fig must be a'):\n report.add_figs_to_section(['foo'], 'caption', 'section')\n\n\[email protected]_testing_data\ndef test_report_raw_psd_and_date():\n \"\"\"Test report raw PSD and DATE_NONE functionality.\"\"\"\n with pytest.raises(TypeError, match='dict'):\n Report(raw_psd='foo')\n\n tempdir = _TempDir()\n raw = read_raw_fif(raw_fname).crop(0, 1.).load_data()\n raw_fname_new = op.join(tempdir, 'temp_raw.fif')\n raw.save(raw_fname_new)\n report = Report(raw_psd=True)\n report.parse_folder(data_path=tempdir, render_bem=False,\n on_error='raise')\n assert isinstance(report.html, list)\n assert 'PSD' in ''.join(report.html)\n assert 'GMT' in ''.join(report.html)\n\n # DATE_NONE functionality\n report = Report()\n raw.anonymize()\n raw.save(raw_fname_new, overwrite=True)\n report.parse_folder(data_path=tempdir, render_bem=False,\n on_error='raise')\n assert isinstance(report.html, list)\n assert 'GMT' not in ''.join(report.html)\n\n\[email protected]_testing_data\n@requires_mayavi\n@traits_test\ndef test_render_add_sections():\n \"\"\"Test adding figures/images to section.\"\"\"\n tempdir = _TempDir()\n report = Report(subjects_dir=subjects_dir)\n # Check add_figs_to_section functionality\n fig = plt.plot([1, 2], [1, 2])[0].figure\n report.add_figs_to_section(figs=fig, # test non-list input\n captions=['evoked response'], scale=1.2,\n image_format='svg')\n pytest.raises(ValueError, report.add_figs_to_section, figs=[fig, fig],\n captions='H')\n pytest.raises(ValueError, report.add_figs_to_section, figs=fig,\n captions=['foo'], scale=0, image_format='svg')\n pytest.raises(ValueError, report.add_figs_to_section, figs=fig,\n captions=['foo'], 
scale=1e-10, image_format='svg')\n # need to recreate because calls above change size\n fig = plt.plot([1, 2], [1, 2])[0].figure\n\n # Check add_images_to_section with png\n img_fname = op.join(tempdir, 'testimage.png')\n fig.savefig(img_fname)\n report.add_images_to_section(fnames=[img_fname],\n captions=['evoked response'])\n\n report.add_images_to_section(fnames=[img_fname],\n captions=['evoked response'])\n\n pytest.raises(ValueError, report.add_images_to_section,\n fnames=[img_fname, img_fname], captions='H')\n\n pytest.raises(ValueError, report.add_images_to_section,\n fnames=['foobar.xxx'], captions='H')\n\n evoked = read_evokeds(evoked_fname, condition='Left Auditory',\n baseline=(-0.2, 0.0))\n fig = plot_alignment(evoked.info, trans_fname, subject='sample',\n subjects_dir=subjects_dir)\n\n report.add_figs_to_section(figs=fig, # test non-list input\n captions='random image', scale=1.2)\n assert (repr(report))\n\n\[email protected]\[email protected]_testing_data\n@requires_mayavi\n@traits_test\n@requires_nibabel()\ndef test_render_mri():\n \"\"\"Test rendering MRI for mne report.\"\"\"\n tempdir = _TempDir()\n trans_fname_new = op.join(tempdir, 'temp-trans.fif')\n for a, b in [[trans_fname, trans_fname_new]]:\n shutil.copyfile(a, b)\n report = Report(info_fname=raw_fname,\n subject='sample', subjects_dir=subjects_dir)\n report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*')\n report.save(op.join(tempdir, 'report.html'), open_browser=False)\n assert repr(report)\n report.add_bem_to_section('sample', caption='extra', section='foo',\n subjects_dir=subjects_dir, decim=30)\n report.save(op.join(tempdir, 'report.html'), open_browser=False,\n overwrite=True)\n\n\[email protected]_testing_data\n@requires_nibabel()\ndef test_render_mri_without_bem():\n \"\"\"Test rendering MRI without BEM for mne report.\"\"\"\n tempdir = _TempDir()\n os.mkdir(op.join(tempdir, 'sample'))\n os.mkdir(op.join(tempdir, 'sample', 'mri'))\n shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))\n report = Report(info_fname=raw_fname,\n subject='sample', subjects_dir=tempdir)\n report.parse_folder(tempdir, render_bem=False)\n report.save(op.join(tempdir, 'report.html'), open_browser=False)\n\n\[email protected]_testing_data\n@requires_nibabel()\ndef test_add_htmls_to_section():\n \"\"\"Test adding html str to mne report.\"\"\"\n report = Report(info_fname=raw_fname,\n subject='sample', subjects_dir=subjects_dir)\n html = '<b>MNE-Python is AWESOME</b>'\n caption, section = 'html', 'html_section'\n report.add_htmls_to_section(html, caption, section)\n idx = report._sectionlabels.index('report_' + section)\n html_compare = report.html[idx]\n assert (html in html_compare)\n assert (repr(report))\n\n\ndef test_add_slider_to_section():\n \"\"\"Test adding a slider with a series of images to mne report.\"\"\"\n tempdir = _TempDir()\n report = Report(info_fname=raw_fname,\n subject='sample', subjects_dir=subjects_dir)\n section = 'slider_section'\n figs = _get_example_figures()\n report.add_slider_to_section(figs, section=section, title='my title')\n assert report.fnames[0] == 'my title-#-report_slider_section-#-custom'\n report.save(op.join(tempdir, 'report.html'), open_browser=False)\n\n pytest.raises(NotImplementedError, report.add_slider_to_section,\n [figs, figs])\n pytest.raises(ValueError, report.add_slider_to_section, figs, ['wug'])\n pytest.raises(TypeError, report.add_slider_to_section, figs, 'wug')\n # need at least 2\n pytest.raises(ValueError, report.add_slider_to_section, figs[:1], 
'wug')\n\n # Smoke test that SVG w/unicode can be added\n report = Report()\n fig, ax = plt.subplots()\n ax.set_xlabel(u'μ')\n report.add_slider_to_section([fig] * 2, image_format='svg')\n\n\ndef test_validate_input():\n \"\"\"Test Report input validation.\"\"\"\n report = Report()\n items = ['a', 'b', 'c']\n captions = ['Letter A', 'Letter B', 'Letter C']\n section = 'ABCs'\n comments = ['First letter of the alphabet.',\n 'Second letter of the alphabet',\n 'Third letter of the alphabet']\n pytest.raises(ValueError, report._validate_input, items, captions[:-1],\n section, comments=None)\n pytest.raises(ValueError, report._validate_input, items, captions, section,\n comments=comments[:-1])\n values = report._validate_input(items, captions, section, comments=None)\n items_new, captions_new, comments_new = values\n assert_equal(len(comments_new), len(items))\n\n\n@requires_h5py\ndef test_open_report():\n \"\"\"Test the open_report function.\"\"\"\n tempdir = _TempDir()\n hdf5 = op.join(tempdir, 'report.h5')\n\n # Test creating a new report through the open_report function\n fig1 = _get_example_figures()[0]\n with open_report(hdf5, subjects_dir=subjects_dir) as report:\n assert report.subjects_dir == subjects_dir\n assert report._fname == hdf5\n report.add_figs_to_section(figs=fig1, captions=['evoked response'])\n # Exiting the context block should have triggered saving to HDF5\n assert op.exists(hdf5)\n\n # Load the HDF5 version of the report and check equivalence\n report2 = open_report(hdf5)\n assert report2._fname == hdf5\n assert report2.subjects_dir == report.subjects_dir\n assert report2.html == report.html\n assert report2.__getstate__() == report.__getstate__()\n assert '_fname' not in report2.__getstate__()\n\n # Check parameters when loading a report\n pytest.raises(ValueError, open_report, hdf5, foo='bar') # non-existing\n pytest.raises(ValueError, open_report, hdf5, subjects_dir='foo')\n open_report(hdf5, subjects_dir=subjects_dir) # This should work\n\n # Check that the context manager doesn't swallow exceptions\n with pytest.raises(ZeroDivisionError):\n with open_report(hdf5, subjects_dir=subjects_dir) as report:\n 1 / 0\n\n\ndef test_remove():\n \"\"\"Test removing figures from a report.\"\"\"\n r = Report()\n fig1, fig2 = _get_example_figures()\n r.add_figs_to_section(fig1, 'figure1', 'mysection')\n r.add_slider_to_section([fig1, fig2], title='figure1',\n section='othersection')\n r.add_figs_to_section(fig2, 'figure1', 'mysection')\n r.add_figs_to_section(fig2, 'figure2', 'mysection')\n\n # Test removal by caption\n r2 = copy.deepcopy(r)\n removed_index = r2.remove(caption='figure1')\n assert removed_index == 2\n assert len(r2.html) == 3\n assert r2.html[0] == r.html[0]\n assert r2.html[1] == r.html[1]\n assert r2.html[2] == r.html[3]\n\n # Test restricting to section\n r2 = copy.deepcopy(r)\n removed_index = r2.remove(caption='figure1', section='othersection')\n assert removed_index == 1\n assert len(r2.html) == 3\n assert r2.html[0] == r.html[0]\n assert r2.html[1] == r.html[2]\n assert r2.html[2] == r.html[3]\n\n # Test removal of empty sections\n r2 = copy.deepcopy(r)\n r2.remove(caption='figure1', section='othersection')\n assert r2.sections == ['mysection']\n assert r2._sectionvars == {'mysection': 'report_mysection'}\n\n\ndef test_add_or_replace():\n \"\"\"Test replacing existing figures in a report.\"\"\"\n r = Report()\n fig1, fig2 = _get_example_figures()\n r.add_figs_to_section(fig1, 'duplicate', 'mysection')\n r.add_figs_to_section(fig1, 'duplicate', 'mysection')\n 
r.add_figs_to_section(fig1, 'duplicate', 'othersection')\n r.add_figs_to_section(fig2, 'nonduplicate', 'mysection')\n # By default, replace=False, so all figures should be there\n assert len(r.html) == 4\n\n old_r = copy.deepcopy(r)\n\n # Re-add fig1 with replace=True, it should overwrite the last occurrence of\n # fig1 in section 'mysection'.\n r.add_figs_to_section(fig2, 'duplicate', 'mysection', replace=True)\n assert len(r.html) == 4\n assert r.html[1] != old_r.html[1] # This figure should have changed\n # All other figures should be the same\n assert r.html[0] == old_r.html[0]\n assert r.html[2] == old_r.html[2]\n assert r.html[3] == old_r.html[3]\n\n\ndef test_scraper(tmpdir):\n \"\"\"Test report scraping.\"\"\"\n r = Report()\n fig1, fig2 = _get_example_figures()\n r.add_figs_to_section(fig1, 'a', 'mysection')\n r.add_figs_to_section(fig2, 'b', 'mysection')\n # Mock a Sphinx + sphinx_gallery config\n app = Bunch(builder=Bunch(srcdir=str(tmpdir),\n outdir=op.join(str(tmpdir), '_build', 'html')))\n scraper = _ReportScraper()\n scraper.app = app\n gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html')\n img_fname = op.join(app.builder.srcdir, 'auto_examples', 'images',\n 'sg_img.png')\n target_file = op.join(app.builder.srcdir, 'auto_examples', 'sg.py')\n os.makedirs(op.dirname(img_fname))\n os.makedirs(app.builder.outdir)\n block_vars = dict(image_path_iterator=(img for img in [img_fname]),\n example_globals=dict(a=1), target_file=target_file)\n # Nothing yet\n block = None\n rst = scraper(block, block_vars, gallery_conf)\n assert rst == ''\n # Still nothing\n block_vars['example_globals']['r'] = r\n rst = scraper(block, block_vars, gallery_conf)\n # Once it's saved, add it\n assert rst == ''\n fname = op.join(str(tmpdir), 'my_html.html')\n r.save(fname, open_browser=False)\n rst = scraper(block, block_vars, gallery_conf)\n out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html')\n assert not op.isfile(out_html)\n os.makedirs(op.join(app.builder.outdir, 'auto_examples'))\n scraper.copyfiles()\n assert op.isfile(out_html)\n assert rst.count('\"') == 6\n assert \"<iframe\" in rst\n assert op.isfile(img_fname.replace('png', 'svg'))\n\n\nrun_tests_if_main()\n",
"\"\"\"\n===================\nReading an STC file\n===================\n\nSTC files contain activations on cortex ie. source\nreconstructions\n\"\"\"\n# Author: Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne.datasets import sample\n\nprint(__doc__)\n\ndata_path = sample.data_path()\nfname = data_path + '/MEG/sample/sample_audvis-meg'\n\nstc = mne.read_source_estimate(fname)\n\nn_vertices, n_samples = stc.data.shape\nprint(\"stc data size: %s (nb of vertices) x %s (nb of samples)\"\n % (n_vertices, n_samples))\n\n# View source activations\nplt.plot(stc.times, stc.data[::100, :].T)\nplt.xlabel('time (ms)')\nplt.ylabel('Source amplitude')\nplt.show()\n",
"# Authors: Eric Larson <[email protected]>\n# Mainak Jas <[email protected]>\n# Matti Hamalainen <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport os.path as op\n\nimport numpy as np\nfrom numpy.polynomial import legendre\n\nfrom ..fixes import einsum\nfrom ..parallel import parallel_func\nfrom ..utils import logger, verbose, _get_extra_data_path, fill_doc\n\n\n##############################################################################\n# FAST LEGENDRE (DERIVATIVE) POLYNOMIALS USING LOOKUP TABLE\n\ndef _next_legen_der(n, x, p0, p01, p0d, p0dd):\n \"\"\"Compute the next Legendre polynomial and its derivatives.\"\"\"\n # only good for n > 1 !\n old_p0 = p0\n old_p0d = p0d\n p0 = ((2 * n - 1) * x * old_p0 - (n - 1) * p01) / n\n p0d = n * old_p0 + x * old_p0d\n p0dd = (n + 1) * old_p0d + x * p0dd\n return p0, p0d, p0dd\n\n\ndef _get_legen(x, n_coeff=100):\n \"\"\"Get Legendre polynomials expanded about x.\"\"\"\n return legendre.legvander(x, n_coeff - 1)\n\n\ndef _get_legen_der(xx, n_coeff=100):\n \"\"\"Get Legendre polynomial derivatives expanded about x.\"\"\"\n coeffs = np.empty((len(xx), n_coeff, 3))\n for c, x in zip(coeffs, xx):\n p0s, p0ds, p0dds = c[:, 0], c[:, 1], c[:, 2]\n p0s[:2] = [1.0, x]\n p0ds[:2] = [0.0, 1.0]\n p0dds[:2] = [0.0, 0.0]\n for n in range(2, n_coeff):\n p0s[n], p0ds[n], p0dds[n] = _next_legen_der(\n n, x, p0s[n - 1], p0s[n - 2], p0ds[n - 1], p0dds[n - 1])\n return coeffs\n\n\n@verbose\ndef _get_legen_table(ch_type, volume_integral=False, n_coeff=100,\n n_interp=20000, force_calc=False, verbose=None):\n \"\"\"Return a (generated) LUT of Legendre (derivative) polynomial coeffs.\"\"\"\n if n_interp % 2 != 0:\n raise RuntimeError('n_interp must be even')\n fname = op.join(_get_extra_data_path(), 'tables')\n if not op.isdir(fname):\n # Updated due to API change (GH 1167)\n os.makedirs(fname)\n if ch_type == 'meg':\n fname = op.join(fname, 'legder_%s_%s.bin' % (n_coeff, n_interp))\n leg_fun = _get_legen_der\n extra_str = ' derivative'\n lut_shape = (n_interp + 1, n_coeff, 3)\n else: # 'eeg'\n fname = op.join(fname, 'legval_%s_%s.bin' % (n_coeff, n_interp))\n leg_fun = _get_legen\n extra_str = ''\n lut_shape = (n_interp + 1, n_coeff)\n if not op.isfile(fname) or force_calc:\n logger.info('Generating Legendre%s table...' % extra_str)\n x_interp = np.linspace(-1, 1, n_interp + 1)\n lut = leg_fun(x_interp, n_coeff).astype(np.float32)\n if not force_calc:\n with open(fname, 'wb') as fid:\n fid.write(lut.tostring())\n else:\n logger.info('Reading Legendre%s table...' 
% extra_str)\n with open(fname, 'rb', buffering=0) as fid:\n lut = np.fromfile(fid, np.float32)\n lut.shape = lut_shape\n\n # we need this for the integration step\n n_fact = np.arange(1, n_coeff, dtype=float)\n if ch_type == 'meg':\n n_facts = list() # multn, then mult, then multn * (n + 1)\n if volume_integral:\n n_facts.append(n_fact / ((2.0 * n_fact + 1.0) *\n (2.0 * n_fact + 3.0)))\n else:\n n_facts.append(n_fact / (2.0 * n_fact + 1.0))\n n_facts.append(n_facts[0] / (n_fact + 1.0))\n n_facts.append(n_facts[0] * (n_fact + 1.0))\n # skip the first set of coefficients because they are not used\n lut = lut[:, 1:, [0, 1, 1, 2]] # for multiplicative convenience later\n # reshape this for convenience, too\n n_facts = np.array(n_facts)[[2, 0, 1, 1], :].T\n n_facts = np.ascontiguousarray(n_facts)\n n_fact = n_facts\n else: # 'eeg'\n n_fact = (2.0 * n_fact + 1.0) * (2.0 * n_fact + 1.0) / n_fact\n # skip the first set of coefficients because they are not used\n lut = lut[:, 1:].copy()\n return lut, n_fact\n\n\ndef _comp_sum_eeg(beta, ctheta, lut_fun, n_fact):\n \"\"\"Lead field dot products using Legendre polynomial (P_n) series.\"\"\"\n # Compute the sum occurring in the evaluation.\n # The result is\n # sums[:] (2n+1)^2/n beta^n P_n\n n_chunk = 50000000 // (8 * max(n_fact.shape) * 2)\n lims = np.concatenate([np.arange(0, beta.size, n_chunk), [beta.size]])\n s0 = np.empty(beta.shape)\n for start, stop in zip(lims[:-1], lims[1:]):\n coeffs = lut_fun(ctheta[start:stop])\n betans = np.tile(beta[start:stop][:, np.newaxis], (1, n_fact.shape[0]))\n np.cumprod(betans, axis=1, out=betans) # run inplace\n coeffs *= betans\n s0[start:stop] = np.dot(coeffs, n_fact) # == weighted sum across cols\n return s0\n\n\ndef _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral):\n \"\"\"Lead field dot products using Legendre polynomial (P_n) series.\n\n Parameters\n ----------\n beta : array, shape (n_points * n_points, 1)\n Coefficients of the integration.\n ctheta : array, shape (n_points * n_points, 1)\n Cosine of the angle between the sensor integration points.\n lut_fun : callable\n Look-up table for evaluating Legendre polynomials.\n n_fact : array\n Coefficients in the integration sum.\n volume_integral : bool\n If True, compute volume integral.\n\n Returns\n -------\n sums : array, shape (4, n_points * n_points)\n The results.\n \"\"\"\n # Compute the sums occurring in the evaluation.\n # Two point magnetometers on the xz plane are assumed.\n # The four sums are:\n # * sums[:, 0] n(n+1)/(2n+1) beta^(n+1) P_n\n # * sums[:, 1] n/(2n+1) beta^(n+1) P_n'\n # * sums[:, 2] n/((2n+1)(n+1)) beta^(n+1) P_n'\n # * sums[:, 3] n/((2n+1)(n+1)) beta^(n+1) P_n''\n\n # This is equivalent, but slower:\n # sums = np.sum(bbeta[:, :, np.newaxis].T * n_fact * coeffs, axis=1)\n # sums = np.rollaxis(sums, 2)\n # or\n # sums = einsum('ji,jk,ijk->ki', bbeta, n_fact, lut_fun(ctheta)))\n sums = np.empty((n_fact.shape[1], len(beta)))\n # beta can be e.g. 
3 million elements, which ends up using lots of memory\n # so we split up the computations into ~50 MB blocks\n n_chunk = 50000000 // (8 * max(n_fact.shape) * 2)\n lims = np.concatenate([np.arange(0, beta.size, n_chunk), [beta.size]])\n for start, stop in zip(lims[:-1], lims[1:]):\n bbeta = np.tile(beta[start:stop][np.newaxis], (n_fact.shape[0], 1))\n bbeta[0] *= beta[start:stop]\n np.cumprod(bbeta, axis=0, out=bbeta) # run inplace\n einsum('ji,jk,ijk->ki', bbeta, n_fact, lut_fun(ctheta[start:stop]),\n out=sums[:, start:stop])\n return sums\n\n\n###############################################################################\n# SPHERE DOTS\n\n_meg_const = 4e-14 * np.pi # This is \\mu_0^2/4\\pi\n_eeg_const = 1.0 / (4.0 * np.pi)\n\n\ndef _fast_sphere_dot_r0(r, rr1_orig, rr2s, lr1, lr2s, cosmags1, cosmags2s,\n w1, w2s, volume_integral, lut, n_fact, ch_type):\n \"\"\"Lead field dot product computation for M/EEG in the sphere model.\n\n Parameters\n ----------\n r : float\n The integration radius. It is used to calculate beta as:\n beta = (r * r) / (lr1 * lr2).\n rr1 : array, shape (n_points x 3)\n Normalized position vectors of integrations points in first sensor.\n rr2s : list\n Normalized position vector of integration points in second sensor.\n lr1 : array, shape (n_points x 1)\n Magnitude of position vector of integration points in first sensor.\n lr2s : list\n Magnitude of position vector of integration points in second sensor.\n cosmags1 : array, shape (n_points x 1)\n Direction of integration points in first sensor.\n cosmags2s : list\n Direction of integration points in second sensor.\n w1 : array, shape (n_points x 1) | None\n Weights of integration points in the first sensor.\n w2s : list\n Weights of integration points in the second sensor.\n volume_integral : bool\n If True, compute volume integral.\n lut : callable\n Look-up table for evaluating Legendre polynomials.\n n_fact : array\n Coefficients in the integration sum.\n ch_type : str\n The channel type. It can be 'meg' or 'eeg'.\n\n Returns\n -------\n result : float\n The integration sum.\n \"\"\"\n if w1 is None: # operating on surface, treat independently\n out_shape = (len(rr2s), len(rr1_orig))\n sum_axis = 1 # operate along second axis only at the end\n else:\n out_shape = (len(rr2s),)\n sum_axis = None # operate on flattened array at the end\n out = np.empty(out_shape)\n rr2 = np.concatenate(rr2s)\n lr2 = np.concatenate(lr2s)\n cosmags2 = np.concatenate(cosmags2s)\n\n # outer product, sum over coords\n ct = einsum('ik,jk->ij', rr1_orig, rr2)\n np.clip(ct, -1, 1, ct)\n\n # expand axes\n rr1 = rr1_orig[:, np.newaxis, :] # (n_rr1, n_rr2, n_coord) e.g. 
4x4x3\n rr2 = rr2[np.newaxis, :, :]\n lr1lr2 = lr1[:, np.newaxis] * lr2[np.newaxis, :]\n\n beta = (r * r) / lr1lr2\n if ch_type == 'meg':\n sums = _comp_sums_meg(beta.flatten(), ct.flatten(), lut, n_fact,\n volume_integral)\n sums.shape = (4,) + beta.shape\n\n # Accumulate the result, a little bit streamlined version\n # cosmags1 = cosmags1[:, np.newaxis, :]\n # cosmags2 = cosmags2[np.newaxis, :, :]\n # n1c1 = np.sum(cosmags1 * rr1, axis=2)\n # n1c2 = np.sum(cosmags1 * rr2, axis=2)\n # n2c1 = np.sum(cosmags2 * rr1, axis=2)\n # n2c2 = np.sum(cosmags2 * rr2, axis=2)\n # n1n2 = np.sum(cosmags1 * cosmags2, axis=2)\n n1c1 = einsum('ik,ijk->ij', cosmags1, rr1)\n n1c2 = einsum('ik,ijk->ij', cosmags1, rr2)\n n2c1 = einsum('jk,ijk->ij', cosmags2, rr1)\n n2c2 = einsum('jk,ijk->ij', cosmags2, rr2)\n n1n2 = einsum('ik,jk->ij', cosmags1, cosmags2)\n part1 = ct * n1c1 * n2c2\n part2 = n1c1 * n2c1 + n1c2 * n2c2\n\n result = (n1c1 * n2c2 * sums[0] +\n (2.0 * part1 - part2) * sums[1] +\n (n1n2 + part1 - part2) * sums[2] +\n (n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3])\n\n # Give it a finishing touch!\n result *= (_meg_const / lr1lr2)\n if volume_integral:\n result *= r\n else: # 'eeg'\n result = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact)\n result.shape = beta.shape\n # Give it a finishing touch!\n result *= _eeg_const\n result /= lr1lr2\n # now we add them all up with weights\n offset = 0\n result *= np.concatenate(w2s)\n if w1 is not None:\n result *= w1[:, np.newaxis]\n for ii, w2 in enumerate(w2s):\n out[ii] = np.sum(result[:, offset:offset + len(w2)], axis=sum_axis)\n offset += len(w2)\n return out\n\n\n@fill_doc\ndef _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs):\n \"\"\"Perform the lead field dot product integrations.\n\n Parameters\n ----------\n intrad : float\n The integration radius. It is used to calculate beta as:\n beta = (intrad * intrad) / (r1 * r2).\n volume : bool\n If True, perform volume integral.\n coils : list of dict\n The coils.\n r0 : array, shape (3 x 1)\n The origin of the sphere.\n ch_type : str\n The channel type. 
It can be 'meg' or 'eeg'.\n lut : callable\n Look-up table for evaluating Legendre polynomials.\n n_fact : array\n Coefficients in the integration sum.\n %(n_jobs)s\n\n Returns\n -------\n products : array, shape (n_coils, n_coils)\n The integration products.\n \"\"\"\n if ch_type == 'eeg':\n intrad *= 0.7\n # convert to normalized distances from expansion center\n rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]\n rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]\n rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]\n cosmags = [coil['cosmag'] for coil in coils]\n ws = [coil['w'] for coil in coils]\n parallel, p_fun, _ = parallel_func(_do_self_dots_subset, n_jobs)\n prods = parallel(p_fun(intrad, rmags, rlens, cosmags,\n ws, volume, lut, n_fact, ch_type, idx)\n for idx in np.array_split(np.arange(len(rmags)), n_jobs))\n products = np.sum(prods, axis=0)\n return products\n\n\ndef _do_self_dots_subset(intrad, rmags, rlens, cosmags, ws, volume, lut,\n n_fact, ch_type, idx):\n \"\"\"Parallelize.\"\"\"\n # all possible combinations of two magnetometers\n products = np.zeros((len(rmags), len(rmags)))\n for ci1 in idx:\n ci2 = ci1 + 1\n res = _fast_sphere_dot_r0(\n intrad, rmags[ci1], rmags[:ci2], rlens[ci1], rlens[:ci2],\n cosmags[ci1], cosmags[:ci2], ws[ci1], ws[:ci2], volume, lut,\n n_fact, ch_type)\n products[ci1, :ci2] = res\n products[:ci2, ci1] = res\n return products\n\n\ndef _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type,\n lut, n_fact):\n \"\"\"Compute lead field dot product integrations between two coil sets.\n\n The code is a direct translation of MNE-C code found in\n `mne_map_data/lead_dots.c`.\n\n Parameters\n ----------\n intrad : float\n The integration radius. It is used to calculate beta as:\n beta = (intrad * intrad) / (r1 * r2).\n volume : bool\n If True, compute volume integral.\n coils1 : list of dict\n The original coils.\n coils2 : list of dict\n The coils to which data is being mapped.\n r0 : array, shape (3 x 1).\n The origin of the sphere.\n ch_type : str\n The channel type. It can be 'meg' or 'eeg'\n lut : callable\n Look-up table for evaluating Legendre polynomials.\n n_fact : array\n Coefficients in the integration sum.\n\n Returns\n -------\n products : array, shape (n_coils, n_coils)\n The integration products.\n \"\"\"\n rmags1 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils1]\n rmags2 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils2]\n\n rlens1 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags1]\n rlens2 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags2]\n\n rmags1 = [r / rl[:, np.newaxis] for r, rl in zip(rmags1, rlens1)]\n rmags2 = [r / rl[:, np.newaxis] for r, rl in zip(rmags2, rlens2)]\n\n ws1 = [coil['w'] for coil in coils1]\n ws2 = [coil['w'] for coil in coils2]\n\n cosmags1 = [coil['cosmag'] for coil in coils1]\n cosmags2 = [coil['cosmag'] for coil in coils2]\n\n products = np.zeros((len(rmags1), len(rmags2)))\n for ci1 in range(len(coils1)):\n res = _fast_sphere_dot_r0(\n intrad, rmags1[ci1], rmags2, rlens1[ci1], rlens2, cosmags1[ci1],\n cosmags2, ws1[ci1], ws2, volume, lut, n_fact, ch_type)\n products[ci1, :] = res\n return products\n\n\n@fill_doc\ndef _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type,\n lut, n_fact, n_jobs):\n \"\"\"Compute the map construction products.\n\n Parameters\n ----------\n intrad : float\n The integration radius. 
It is used to calculate beta as:\n beta = (intrad * intrad) / (r1 * r2)\n volume : bool\n If True, compute a volume integral.\n coils : list of dict\n The coils.\n surf : dict\n The surface on which the field is interpolated.\n sel : array\n Indices of the surface vertices to select.\n r0 : array, shape (3 x 1)\n The origin of the sphere.\n ch_type : str\n The channel type. It can be 'meg' or 'eeg'.\n lut : callable\n Look-up table for Legendre polynomials.\n n_fact : array\n Coefficients in the integration sum.\n %(n_jobs)s\n\n Returns\n -------\n products : array, shape (n_coils, n_coils)\n The integration products.\n \"\"\"\n # convert to normalized distances from expansion center\n rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]\n rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]\n rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]\n cosmags = [coil['cosmag'] for coil in coils]\n ws = [coil['w'] for coil in coils]\n rref = None\n refl = None\n # virt_ref = False\n if ch_type == 'eeg':\n intrad *= 0.7\n # The virtual ref code is untested and unused, so it is\n # commented out for now\n # if virt_ref:\n # rref = virt_ref[np.newaxis, :] - r0[np.newaxis, :]\n # refl = np.sqrt(np.sum(rref * rref, axis=1))\n # rref /= refl[:, np.newaxis]\n\n rsurf = surf['rr'][sel] - r0[np.newaxis, :]\n lsurf = np.sqrt(np.sum(rsurf * rsurf, axis=1))\n rsurf /= lsurf[:, np.newaxis]\n this_nn = surf['nn'][sel]\n\n # loop over the coils\n parallel, p_fun, _ = parallel_func(_do_surface_dots_subset, n_jobs)\n prods = parallel(p_fun(intrad, rsurf, rmags, rref, refl, lsurf, rlens,\n this_nn, cosmags, ws, volume, lut, n_fact, ch_type,\n idx)\n for idx in np.array_split(np.arange(len(rmags)), n_jobs))\n products = np.sum(prods, axis=0)\n return products\n\n\ndef _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens,\n this_nn, cosmags, ws, volume, lut, n_fact, ch_type,\n idx):\n \"\"\"Parallelize.\n\n Parameters\n ----------\n refl : array | None\n If ch_type is 'eeg', the magnitude of position vector of the\n virtual reference (never used).\n lsurf : array\n Magnitude of position vector of the surface points.\n rlens : list of arrays of length n_coils\n Magnitude of position vector.\n this_nn : array, shape (n_vertices, 3)\n Surface normals.\n cosmags : list of array.\n Direction of the integration points in the coils.\n ws : list of array\n Integration weights of the coils.\n volume : bool\n If True, compute volume integral.\n lut : callable\n Look-up table for evaluating Legendre polynomials.\n n_fact : array\n Coefficients in the integration sum.\n ch_type : str\n 'meg' or 'eeg'\n idx : array, shape (n_coils x 1)\n Index of coil.\n\n Returns\n -------\n products : array, shape (n_coils, n_coils)\n The integration products.\n \"\"\"\n products = _fast_sphere_dot_r0(\n intrad, rsurf, rmags, lsurf, rlens, this_nn, cosmags, None, ws,\n volume, lut, n_fact, ch_type).T\n if rref is not None:\n raise NotImplementedError # we don't ever use this, isn't tested\n # vres = _fast_sphere_dot_r0(\n # intrad, rref, rmags, refl, rlens, this_nn, cosmags, None, ws,\n # volume, lut, n_fact, ch_type)\n # products -= vres\n return products\n",
"# Authors : Denis A. Engemann <[email protected]>\n# Alexandre Gramfort <[email protected]>\n#\n# License : BSD 3-clause\n\nimport os.path as op\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import (assert_array_almost_equal, assert_allclose,\n assert_equal)\n\nfrom scipy import fftpack\n\nfrom mne import read_events, Epochs, make_fixed_length_events\nfrom mne.io import read_raw_fif\nfrom mne.time_frequency._stockwell import (tfr_stockwell, _st,\n _precompute_st_windows,\n _check_input_st,\n _st_power_itc)\n\nfrom mne.time_frequency.tfr import AverageTFR\nfrom mne.utils import run_tests_if_main\n\nbase_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')\nraw_fname = op.join(base_dir, 'test_raw.fif')\nraw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')\n\n\ndef test_stockwell_ctf():\n \"\"\"Test that Stockwell can be calculated on CTF data.\"\"\"\n raw = read_raw_fif(raw_ctf_fname)\n raw.apply_gradient_compensation(3)\n events = make_fixed_length_events(raw, duration=0.5)\n evoked = Epochs(raw, events, tmin=-0.2, tmax=0.3, decim=10,\n preload=True, verbose='error').average()\n tfr_stockwell(evoked, verbose='error') # smoke test\n\n\ndef test_stockwell_check_input():\n \"\"\"Test input checker for stockwell.\"\"\"\n # check for data size equal and unequal to a power of 2\n\n for last_dim in (127, 128):\n data = np.zeros((2, 10, last_dim))\n with pytest.warns(None): # n_fft sometimes\n x_in, n_fft, zero_pad = _check_input_st(data, None)\n\n assert_equal(x_in.shape, (2, 10, 128))\n assert_equal(n_fft, 128)\n assert_equal(zero_pad, 128 - last_dim)\n\n\ndef test_stockwell_st_no_zero_pad():\n \"\"\"Test stockwell power itc.\"\"\"\n data = np.zeros((20, 128))\n start_f = 1\n stop_f = 10\n sfreq = 30\n width = 2\n W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)\n _st_power_itc(data, 10, True, 0, 1, W)\n\n\ndef test_stockwell_core():\n \"\"\"Test stockwell transform.\"\"\"\n # adapted from\n # http://vcs.ynic.york.ac.uk/docs/naf/intro/concepts/timefreq.html\n sfreq = 1000.0 # make things easy to understand\n dur = 0.5\n onset, offset = 0.175, 0.275\n n_samp = int(sfreq * dur)\n t = np.arange(n_samp) / sfreq # make an array for time\n pulse_freq = 15.\n pulse = np.cos(2. * np.pi * pulse_freq * t)\n pulse[0:int(onset * sfreq)] = 0. # Zero before our desired pulse\n pulse[int(offset * sfreq):] = 0. # and zero after our desired pulse\n\n width = 0.5\n freqs = fftpack.fftfreq(len(pulse), 1. / sfreq)\n fmin, fmax = 1.0, 100.0\n start_f, stop_f = [np.abs(freqs - f).argmin() for f in (fmin, fmax)]\n W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)\n\n st_pulse = _st(pulse, start_f, W)\n st_pulse = np.abs(st_pulse) ** 2\n assert_equal(st_pulse.shape[-1], len(pulse))\n st_max_freq = freqs[st_pulse.max(axis=1).argmax(axis=0)] # max freq\n assert_allclose(st_max_freq, pulse_freq, atol=1.0)\n assert (onset < t[st_pulse.max(axis=0).argmax(axis=0)] < offset)\n\n # test inversion to FFT, by averaging local spectra, see eq. 5 in\n # Moukadem, A., Bouguila, Z., Ould Abdeslam, D. 
and Alain Dieterlen.\n # \"Stockwell transform optimization applied on the detection of split in\n # heart sounds.\"\n\n width = 1.0\n start_f, stop_f = 0, len(pulse)\n W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)\n y = _st(pulse, start_f, W)\n # invert stockwell\n y_inv = fftpack.ifft(np.sum(y, axis=1)).real\n assert_array_almost_equal(pulse, y_inv)\n\n\ndef test_stockwell_api():\n \"\"\"Test stockwell functions.\"\"\"\n raw = read_raw_fif(raw_fname)\n event_id, tmin, tmax = 1, -0.2, 0.5\n event_name = op.join(base_dir, 'test-eve.fif')\n events = read_events(event_name)\n epochs = Epochs(raw, events, # XXX pick 2 has epochs of zeros.\n event_id, tmin, tmax, picks=[0, 1, 3])\n for fmin, fmax in [(None, 50), (5, 50), (5, None)]:\n with pytest.warns(RuntimeWarning, match='padding'):\n power, itc = tfr_stockwell(epochs, fmin=fmin, fmax=fmax,\n return_itc=True)\n if fmax is not None:\n assert (power.freqs.max() <= fmax)\n with pytest.warns(RuntimeWarning, match='padding'):\n power_evoked = tfr_stockwell(epochs.average(), fmin=fmin,\n fmax=fmax, return_itc=False)\n # for multitaper these don't necessarily match, but they seem to\n # for stockwell... if this fails, this maybe could be changed\n # just to check the shape\n assert_array_almost_equal(power_evoked.data, power.data)\n assert (isinstance(power, AverageTFR))\n assert (isinstance(itc, AverageTFR))\n assert_equal(power.data.shape, itc.data.shape)\n assert (itc.data.min() >= 0.0)\n assert (itc.data.max() <= 1.0)\n assert (np.log(power.data.max()) * 20 <= 0.0)\n assert (np.log(power.data.max()) * 20 <= 0.0)\n\n\nrun_tests_if_main()\n",
"# Authors: Alexandre Gramfort <[email protected]>\n# Eric Larson <[email protected]>\n# Oleh Kozynets <[email protected]>\n# Guillaume Favelier <[email protected]>\n# jona-sassenhagen <[email protected]>\n#\n# License: Simplified BSD\n\nfrom os import path as path\n\nimport numpy as np\n\n\nclass Surface(object):\n \"\"\"Container for a brain surface.\n\n It is used for storing vertices, faces and morphometric data\n (curvature) of a hemisphere mesh.\n\n Parameters\n ----------\n subject_id : string\n Name of subject\n hemi : {'lh', 'rh'}\n Which hemisphere to load\n surf : string\n Name of the surface to load (eg. inflated, orig ...).\n subjects_dir : str | None\n If not None, this directory will be used as the subjects directory\n instead of the value set using the SUBJECTS_DIR environment variable.\n offset : float | None\n If 0.0, the surface will be offset such that the medial\n wall is aligned with the origin. If None, no offset will\n be applied. If != 0.0, an additional offset will be used.\n units : str\n Can be 'm' or 'mm' (default).\n\n Attributes\n ----------\n bin_curv : numpy.ndarray\n Curvature values stored as non-negative integers.\n coords : numpy.ndarray\n nvtx x 3 array of vertex (x, y, z) coordinates.\n curv : numpy.ndarray\n Vector representation of surface morpometry (curvature) values as\n loaded from a file.\n grey_curv : numpy.ndarray\n Normalized morphometry (curvature) data, used in order to get\n a gray cortex.\n faces : numpy.ndarray\n nfaces x 3 array of defining mesh triangles.\n hemi : {'lh', 'rh'}\n Which hemisphere to load.\n nn : numpy.ndarray\n Vertex normals for a triangulated surface.\n offset : float | None\n If float, align inside edge of each hemisphere to center + offset.\n If None, do not change coordinates (default).\n subject_id : string\n Name of subject.\n surf : string\n Name of the surface to load (eg. 
inflated, orig ...).\n units : str\n Can be 'm' or 'mm' (default).\n \"\"\"\n\n def __init__(self, subject_id, hemi, surf, subjects_dir=None, offset=None,\n units='mm'):\n from surfer.utils import _check_units, _get_subjects_dir\n\n hemis = ('lh', 'rh')\n\n if hemi not in hemis:\n raise ValueError('hemi should be either \"lh\" or \"rh\",' +\n 'given value {0}'.format(hemi))\n\n if offset is not None and ((not isinstance(offset, float)) and\n (not isinstance(offset, int))):\n raise ValueError('offset should either float or int, given ' +\n 'type {0}'.format(type(offset).__name__))\n\n self.subject_id = subject_id\n self.hemi = hemi\n self.surf = surf\n self.offset = offset\n self.units = _check_units(units)\n self.bin_curv = None\n self.coords = None\n self.curv = None\n self.faces = None\n self.grey_curv = None\n self.nn = None\n self.labels = dict()\n\n subjects_dir = _get_subjects_dir(subjects_dir)\n self.data_path = path.join(subjects_dir, subject_id)\n\n def load_geometry(self):\n \"\"\"Load geometry of the surface.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n \"\"\"\n from nibabel import freesurfer\n from surfer.utils import _compute_normals\n\n surf_path = path.join(self.data_path, 'surf',\n '%s.%s' % (self.hemi, self.surf))\n coords, faces = freesurfer.read_geometry(surf_path)\n if self.units == 'm':\n coords /= 1000.\n if self.offset is not None:\n if self.hemi == 'lh':\n coords[:, 0] -= (np.max(coords[:, 0]) + self.offset)\n else:\n coords[:, 0] -= (np.min(coords[:, 0]) + self.offset)\n nn = _compute_normals(coords, faces)\n\n if self.coords is None:\n self.coords = coords\n self.faces = faces\n self.nn = nn\n else:\n self.coords[:] = coords\n self.faces[:] = faces\n self.nn[:] = nn\n\n def __len__(self):\n \"\"\"Return number of vertices.\"\"\"\n return len(self.coords)\n\n @property\n def x(self):\n return self.coords[:, 0]\n\n @property\n def y(self):\n return self.coords[:, 1]\n\n @property\n def z(self):\n return self.coords[:, 2]\n\n def load_curvature(self):\n \"\"\"Load in curvature values from the ?h.curv file.\"\"\"\n from nibabel import freesurfer\n curv_path = path.join(self.data_path, 'surf', '%s.curv' % self.hemi)\n self.curv = freesurfer.read_morph_data(curv_path)\n self.bin_curv = np.array(self.curv > 0, np.int)\n # morphometry (curvature) normalization in order to get gray cortex\n # TODO: delete self.grey_curv after cortex parameter\n # will be fully supported\n color = (self.curv > 0).astype(float)\n color = 0.5 - (color - 0.5) / 3\n color = color[:, np.newaxis] * [1, 1, 1]\n self.grey_curv = color\n\n def load_label(self, name):\n \"\"\"Load in a Freesurfer .label file.\n\n Label files are just text files indicating the vertices included\n in the label. Each Surface instance has a dictionary of labels, keyed\n by the name (which is taken from the file name if not given as an\n argument.\n\n \"\"\"\n from nibabel import freesurfer\n label = freesurfer.read_label(path.join(self.data_path,\n 'label',\n '%s.%s.label' %\n (self.hemi, name)))\n label_array = np.zeros_like(self.x).astype(np.int)\n label_array[label] = 1\n self.labels[name] = label_array\n\n def apply_xfm(self, mtx):\n \"\"\"Apply an affine transformation matrix to the x,y,z vectors.\"\"\"\n self.coords = np.dot(np.c_[self.coords, np.ones(len(self.coords))],\n mtx.T)[:, :3]\n",
"# -*- coding: utf-8 -*-\n# Authors: Samu Taulu <[email protected]>\n# Eric Larson <[email protected]>\n\n# License: BSD (3-clause)\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..io.pick import _picks_to_idx\nfrom ..surface import _normalize_vectors\nfrom ..utils import logger, verbose\nfrom .utils import _get_lims_cola\n\n\ndef _svd_cov(cov, data):\n \"\"\"Use a covariance matrix to compute the SVD faster.\"\"\"\n # This makes use of mathematical equivalences between PCA and SVD\n # on zero-mean data\n s, u = linalg.eigh(cov)\n norm = np.ones((s.size,))\n mask = s > np.finfo(float).eps * s[-1] # largest is last\n s = np.sqrt(s, out=s)\n norm[mask] = 1. / s[mask]\n u *= norm\n v = np.dot(u.T[mask], data)\n return u, s, v\n\n\n@verbose\ndef oversampled_temporal_projection(raw, duration=10., picks=None,\n verbose=None):\n \"\"\"Denoise MEG channels using leave-one-out temporal projection.\n\n Parameters\n ----------\n raw : instance of Raw\n Raw data to denoise.\n duration : float | str\n The window duration (in seconds; default 10.) to use. Can also\n be \"min\" to use as short a window as possible.\n %(picks_all_data)s\n %(verbose)s\n\n Returns\n -------\n raw_clean : instance of Raw\n The cleaned data.\n\n Notes\n -----\n This algorithm is computationally expensive, and can be several times\n slower than realtime for conventional M/EEG datasets. It uses a\n leave-one-out procedure with parallel temporal projection to remove\n individual sensor noise under the assumption that sampled fields\n (e.g., MEG and EEG) are oversampled by the sensor array [1]_.\n\n OTP can improve sensor noise levels (especially under visual\n inspection) and repair some bad channels. This noise reduction is known\n to interact with :func:`tSSS <mne.preprocessing.maxwell_filter>` such\n that increasing the ``st_correlation`` value will likely be necessary.\n\n Channels marked as bad will not be used to reconstruct good channels,\n but good channels will be used to process the bad channels. Depending\n on the type of noise present in the bad channels, this might make\n them usable again.\n\n Use of this algorithm is covered by a provisional patent.\n\n .. versionadded:: 0.16\n\n References\n ----------\n .. [1] Larson E, Taulu S (2017). 
Reducing Sensor Noise in MEG and EEG\n Recordings Using Oversampled Temporal Projection.\n IEEE Transactions on Biomedical Engineering.\n \"\"\"\n logger.info('Processing MEG data using oversampled temporal projection')\n picks = _picks_to_idx(raw.info, picks, exclude=())\n picks_good, picks_bad = list(), list()\n for pi in picks:\n if raw.ch_names[pi] in raw.info['bads']:\n picks_bad.append(pi)\n else:\n picks_good.append(pi)\n del picks\n picks_good = np.array(picks_good, int)\n picks_bad = np.array(picks_bad, int)\n\n n_samp = int(round(float(duration) * raw.info['sfreq']))\n starts, stops, windows = _get_lims_cola(\n n_samp, len(raw.times), raw.info['sfreq'], picks_good)\n min_samp = (stops - starts).min()\n if min_samp < len(picks_good) - 1:\n raise ValueError('duration (%s) yielded %s samples, which is fewer '\n 'than the number of channels -1 (%s)'\n % (n_samp / raw.info['sfreq'], min_samp,\n len(picks_good) - 1))\n raw_orig = raw.copy()\n raw = raw.copy().load_data(verbose=False)\n raw._data[picks_good] = 0.\n raw._data[picks_bad] = 0.\n for start, stop, window in zip(starts, stops, windows):\n logger.info(' Denoising % 8.2f - % 8.2f sec'\n % tuple(raw.times[[start, stop - 1]]))\n data_picked = raw_orig[picks_good, start:stop][0]\n if not np.isfinite(data_picked).all():\n raise RuntimeError('non-finite data (inf or nan) found in raw '\n 'instance')\n # demean our slice and our copy\n data_picked_means = np.mean(data_picked, axis=-1, keepdims=True)\n data_picked -= data_picked_means\n # scale the copy that will be used to form the temporal basis vectors\n # so that _orth_svdvals thresholding should work properly with\n # different channel types (e.g., M-EEG)\n norms = _normalize_vectors(data_picked)\n cov = np.dot(data_picked, data_picked.T)\n if len(picks_bad) > 0:\n full_basis = _svd_cov(cov, data_picked)[2]\n for mi, pick in enumerate(picks_good):\n # operate on original data\n idx = list(range(mi)) + list(range(mi + 1, len(data_picked)))\n # Equivalent: linalg.svd(data[idx], full_matrices=False)[2]\n t_basis = _svd_cov(cov[np.ix_(idx, idx)], data_picked[idx])[2]\n x = np.dot(np.dot(data_picked[mi], t_basis.T), t_basis)\n x *= norms[mi]\n x += data_picked_means[mi]\n x *= window\n raw._data[pick, start:stop] += x\n for pick in picks_bad:\n this_data = raw_orig[pick, start:stop][0][0].copy()\n this_mean = this_data.mean()\n this_data -= this_mean\n x = np.dot(np.dot(this_data, full_basis.T), full_basis)\n x += this_mean\n raw._data[pick, start:stop] += window * x\n return raw\n",
"# Authors: Christian Brodbeck <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport os.path as op\n\nfrom numpy.testing import assert_array_equal\n\nfrom mne.utils import requires_mayavi, run_tests_if_main, traits_test\n\n\n@requires_mayavi\n@traits_test\ndef test_mri_model(subjects_dir_tmp):\n \"\"\"Test MRIHeadWithFiducialsModel Traits Model.\"\"\"\n from mne.gui._fiducials_gui import MRIHeadWithFiducialsModel\n tgt_fname = op.join(subjects_dir_tmp, 'test-fiducials.fif')\n\n # Remove the two files that will make the fiducials okay via MNI estimation\n os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',\n 'sample-fiducials.fif'))\n os.remove(op.join(subjects_dir_tmp, 'sample', 'mri', 'transforms',\n 'talairach.xfm'))\n\n model = MRIHeadWithFiducialsModel(subjects_dir=subjects_dir_tmp)\n model.subject = 'sample'\n assert model.default_fid_fname[-20:] == \"sample-fiducials.fif\"\n assert not model.can_reset\n assert not model.can_save\n model.lpa = [[-1, 0, 0]]\n model.nasion = [[0, 1, 0]]\n model.rpa = [[1, 0, 0]]\n assert not model.can_reset\n assert model.can_save\n\n bem_fname = op.basename(model.bem_high_res.file)\n assert not model.can_reset\n assert bem_fname == 'sample-head-dense.fif'\n\n model.save(tgt_fname)\n assert model.fid_file == tgt_fname\n\n # resetting the file should not affect the model's fiducials\n model.fid_file = ''\n assert_array_equal(model.lpa, [[-1, 0, 0]])\n assert_array_equal(model.nasion, [[0, 1, 0]])\n assert_array_equal(model.rpa, [[1, 0, 0]])\n\n # reset model\n model.lpa = [[0, 0, 0]]\n model.nasion = [[0, 0, 0]]\n model.rpa = [[0, 0, 0]]\n assert_array_equal(model.lpa, [[0, 0, 0]])\n assert_array_equal(model.nasion, [[0, 0, 0]])\n assert_array_equal(model.rpa, [[0, 0, 0]])\n\n # loading the file should assign the model's fiducials\n model.fid_file = tgt_fname\n assert_array_equal(model.lpa, [[-1, 0, 0]])\n assert_array_equal(model.nasion, [[0, 1, 0]])\n assert_array_equal(model.rpa, [[1, 0, 0]])\n\n # after changing from file model should be able to reset\n model.nasion = [[1, 1, 1]]\n assert model.can_reset\n model.reset = True\n assert_array_equal(model.nasion, [[0, 1, 0]])\n\n\nrun_tests_if_main()\n",
"# Authors: Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n# Martin Luessi <[email protected]>\n# Eric Larson <[email protected]>\n#\n# License: Simplified BSD\n\nimport copy\nimport os.path as op\n\nimport numpy as np\nfrom numpy.testing import (assert_array_almost_equal, assert_array_equal,\n assert_allclose, assert_equal)\nimport pytest\nimport matplotlib.pyplot as plt\n\nfrom mne.channels import (make_eeg_layout, make_grid_layout, read_layout,\n find_layout)\nfrom mne.channels.layout import (_box_size, _auto_topomap_coords,\n generate_2d_layout)\nfrom mne.utils import run_tests_if_main\nfrom mne import pick_types, pick_info\nfrom mne.io import read_raw_kit, _empty_info, read_info\nfrom mne.io.constants import FIFF\nfrom mne.bem import fit_sphere_to_headshape\nfrom mne.utils import _TempDir\n\nio_dir = op.join(op.dirname(__file__), '..', '..', 'io')\nfif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')\nlout_path = op.join(io_dir, 'tests', 'data')\nbti_dir = op.join(io_dir, 'bti', 'tests', 'data')\nfname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')\nfname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')\nfname_kit_umd = op.join(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd')\n\n\ndef _get_test_info():\n \"\"\"Make test info.\"\"\"\n test_info = _empty_info(1000)\n loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],\n dtype=np.float32)\n test_info['chs'] = [\n {'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_Frame': 0,\n 'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1,\n 'unit': -1, 'unit_mul': 0},\n {'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_Frame': 0,\n 'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2,\n 'unit': -1, 'unit_mul': 0},\n {'cal': 0.002142000012099743, 'ch_name': 'EOG 061', 'coil_type': 1,\n 'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61,\n 'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}]\n test_info._update_redundant()\n test_info._check_consistency()\n return test_info\n\n\ndef test_io_layout_lout():\n \"\"\"Test IO with .lout files.\"\"\"\n tempdir = _TempDir()\n layout = read_layout('Vectorview-all', scale=False)\n layout.save(op.join(tempdir, 'foobar.lout'))\n layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',\n scale=False)\n assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)\n assert layout.names == layout_read.names\n print(layout) # test repr\n\n\ndef test_io_layout_lay():\n \"\"\"Test IO with .lay files.\"\"\"\n tempdir = _TempDir()\n layout = read_layout('CTF151', scale=False)\n layout.save(op.join(tempdir, 'foobar.lay'))\n layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',\n scale=False)\n assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)\n assert layout.names == layout_read.names\n\n\ndef test_auto_topomap_coords():\n \"\"\"Test mapping of coordinates in 3D space to 2D.\"\"\"\n info = read_info(fif_fname)\n picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)\n\n # Remove extra digitization point, so EEG digitization points match up\n # with the EEG channels\n del info['dig'][85]\n\n # Remove head origin from channel locations, so mapping with digitization\n # points yields the same result\n dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,\n FIFF.FIFFV_POINT_EEG,\n FIFF.FIFFV_POINT_EXTRA)\n _, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds, units='m')\n for ch in info['chs']:\n ch['loc'][:3] -= 
origin_head\n\n # Use channel locations\n l0 = _auto_topomap_coords(info, picks)\n\n # Remove electrode position information, use digitization points from now\n # on.\n for ch in info['chs']:\n ch['loc'].fill(np.nan)\n\n l1 = _auto_topomap_coords(info, picks)\n assert_allclose(l1, l0, atol=1e-3)\n\n # Test plotting mag topomap without channel locations: it should fail\n mag_picks = pick_types(info, meg='mag')\n pytest.raises(ValueError, _auto_topomap_coords, info, mag_picks)\n\n # Test function with too many EEG digitization points: it should fail\n info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})\n pytest.raises(ValueError, _auto_topomap_coords, info, picks)\n\n # Test function with too little EEG digitization points: it should fail\n info['dig'] = info['dig'][:-2]\n pytest.raises(ValueError, _auto_topomap_coords, info, picks)\n\n # Electrode positions must be unique\n info['dig'].append(info['dig'][-1])\n pytest.raises(ValueError, _auto_topomap_coords, info, picks)\n\n # Test function without EEG digitization points: it should fail\n info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]\n pytest.raises(RuntimeError, _auto_topomap_coords, info, picks)\n\n # Test function without any digitization points, it should fail\n info['dig'] = None\n pytest.raises(RuntimeError, _auto_topomap_coords, info, picks)\n info['dig'] = []\n pytest.raises(RuntimeError, _auto_topomap_coords, info, picks)\n\n\ndef test_make_eeg_layout():\n \"\"\"Test creation of EEG layout.\"\"\"\n tempdir = _TempDir()\n tmp_name = 'foo'\n lout_name = 'test_raw'\n lout_orig = read_layout(kind=lout_name, path=lout_path)\n info = read_info(fif_fname)\n info['bads'].append(info['ch_names'][360])\n layout = make_eeg_layout(info, exclude=[])\n assert_array_equal(len(layout.names), len([ch for ch in info['ch_names']\n if ch.startswith('EE')]))\n layout.save(op.join(tempdir, tmp_name + '.lout'))\n lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)\n assert_array_equal(lout_new.kind, tmp_name)\n assert_allclose(layout.pos, lout_new.pos, atol=0.1)\n assert_array_equal(lout_orig.names, lout_new.names)\n\n # Test input validation\n pytest.raises(ValueError, make_eeg_layout, info, radius=-0.1)\n pytest.raises(ValueError, make_eeg_layout, info, radius=0.6)\n pytest.raises(ValueError, make_eeg_layout, info, width=-0.1)\n pytest.raises(ValueError, make_eeg_layout, info, width=1.1)\n pytest.raises(ValueError, make_eeg_layout, info, height=-0.1)\n pytest.raises(ValueError, make_eeg_layout, info, height=1.1)\n\n\ndef test_make_grid_layout():\n \"\"\"Test creation of grid layout.\"\"\"\n tempdir = _TempDir()\n tmp_name = 'bar'\n lout_name = 'test_ica'\n lout_orig = read_layout(kind=lout_name, path=lout_path)\n layout = make_grid_layout(_get_test_info())\n layout.save(op.join(tempdir, tmp_name + '.lout'))\n lout_new = read_layout(kind=tmp_name, path=tempdir)\n assert_array_equal(lout_new.kind, tmp_name)\n assert_array_equal(lout_orig.pos, lout_new.pos)\n assert_array_equal(lout_orig.names, lout_new.names)\n\n # Test creating grid layout with specified number of columns\n layout = make_grid_layout(_get_test_info(), n_col=2)\n # Vertical positions should be equal\n assert layout.pos[0, 1] == layout.pos[1, 1]\n # Horizontal positions should be unequal\n assert layout.pos[0, 0] != layout.pos[1, 0]\n # Box sizes should be equal\n assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])\n\n\ndef test_find_layout():\n \"\"\"Test finding layout.\"\"\"\n pytest.raises(ValueError, find_layout, 
_get_test_info(), ch_type='meep')\n\n sample_info = read_info(fif_fname)\n grads = pick_types(sample_info, meg='grad')\n sample_info2 = pick_info(sample_info, grads)\n\n mags = pick_types(sample_info, meg='mag')\n sample_info3 = pick_info(sample_info, mags)\n\n # mock new convention\n sample_info4 = copy.deepcopy(sample_info)\n for ii, name in enumerate(sample_info4['ch_names']):\n new = name.replace(' ', '')\n sample_info4['chs'][ii]['ch_name'] = new\n\n eegs = pick_types(sample_info, meg=False, eeg=True)\n sample_info5 = pick_info(sample_info, eegs)\n\n lout = find_layout(sample_info, ch_type=None)\n assert lout.kind == 'Vectorview-all'\n assert all(' ' in k for k in lout.names)\n\n lout = find_layout(sample_info2, ch_type='meg')\n assert_equal(lout.kind, 'Vectorview-all')\n\n # test new vector-view\n lout = find_layout(sample_info4, ch_type=None)\n assert_equal(lout.kind, 'Vectorview-all')\n assert all(' ' not in k for k in lout.names)\n\n lout = find_layout(sample_info, ch_type='grad')\n assert_equal(lout.kind, 'Vectorview-grad')\n lout = find_layout(sample_info2)\n assert_equal(lout.kind, 'Vectorview-grad')\n lout = find_layout(sample_info2, ch_type='grad')\n assert_equal(lout.kind, 'Vectorview-grad')\n lout = find_layout(sample_info2, ch_type='meg')\n assert_equal(lout.kind, 'Vectorview-all')\n\n lout = find_layout(sample_info, ch_type='mag')\n assert_equal(lout.kind, 'Vectorview-mag')\n lout = find_layout(sample_info3)\n assert_equal(lout.kind, 'Vectorview-mag')\n lout = find_layout(sample_info3, ch_type='mag')\n assert_equal(lout.kind, 'Vectorview-mag')\n lout = find_layout(sample_info3, ch_type='meg')\n assert_equal(lout.kind, 'Vectorview-all')\n\n lout = find_layout(sample_info, ch_type='eeg')\n assert_equal(lout.kind, 'EEG')\n lout = find_layout(sample_info5)\n assert_equal(lout.kind, 'EEG')\n lout = find_layout(sample_info5, ch_type='eeg')\n assert_equal(lout.kind, 'EEG')\n # no common layout, 'meg' option not supported\n\n lout = find_layout(read_info(fname_ctf_raw))\n assert_equal(lout.kind, 'CTF-275')\n\n fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')\n lout = find_layout(read_info(fname_bti_raw))\n assert_equal(lout.kind, 'magnesWH3600')\n\n raw_kit = read_raw_kit(fname_kit_157)\n lout = find_layout(raw_kit.info)\n assert_equal(lout.kind, 'KIT-157')\n\n raw_kit.info['bads'] = ['MEG 13', 'MEG 14', 'MEG 15', 'MEG 16']\n lout = find_layout(raw_kit.info)\n assert_equal(lout.kind, 'KIT-157')\n\n raw_umd = read_raw_kit(fname_kit_umd)\n lout = find_layout(raw_umd.info)\n assert_equal(lout.kind, 'KIT-UMD-3')\n\n # Test plotting\n lout.plot()\n lout.plot(picks=np.arange(10))\n plt.close('all')\n\n\ndef test_box_size():\n \"\"\"Test calculation of box sizes.\"\"\"\n # No points. Box size should be 1,1.\n assert_allclose(_box_size([]), (1.0, 1.0))\n\n # Create one point. Box size should be 1,1.\n point = [(0, 0)]\n assert_allclose(_box_size(point), (1.0, 1.0))\n\n # Create two points. Box size should be 0.5,1.\n points = [(0.25, 0.5), (0.75, 0.5)]\n assert_allclose(_box_size(points), (0.5, 1.0))\n\n # Create three points. Box size should be (0.5, 0.5).\n points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]\n assert_allclose(_box_size(points), (0.5, 0.5))\n\n # Create a grid of points. Box size should be (0.1, 0.1).\n x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))\n x, y = x.ravel(), y.ravel()\n assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))\n\n # Create a random set of points. 
This should never break the function.\n rng = np.random.RandomState(42)\n points = rng.rand(100, 2)\n width, height = _box_size(points)\n assert width is not None\n assert height is not None\n\n # Test specifying an existing width.\n points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]\n assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))\n\n # Test specifying an existing width that has influence on the calculated\n # height.\n points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]\n assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))\n\n # Test specifying an existing height.\n points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]\n assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))\n\n # Test specifying an existing height that has influence on the calculated\n # width.\n points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]\n assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))\n\n # Test specifying both width and height. The function should simply return\n # these.\n points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]\n assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))\n\n # Test specifying a width that will cause unfixable horizontal overlap and\n # essentially breaks the function (height will be 0).\n points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]\n assert_array_equal(_box_size(points, width=1), (1, 0))\n\n # Test adding some padding.\n # Create three points. Box size should be a little less than (0.5, 0.5).\n points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]\n assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))\n\n\ndef test_generate_2d_layout():\n \"\"\"Test creation of a layout from 2d points.\"\"\"\n snobg = 10\n sbg = 15\n side = range(snobg)\n bg_image = np.random.RandomState(42).randn(sbg, sbg)\n w, h = [.2, .5]\n\n # Generate fake data\n xy = np.array([(i, j) for i in side for j in side])\n lt = generate_2d_layout(xy, w=w, h=h)\n\n # Correct points ordering / minmaxing\n comp_1, comp_2 = [(5, 0), (7, 0)]\n assert lt.pos[:, :2].max() == 1\n assert lt.pos[:, :2].min() == 0\n with np.errstate(invalid='ignore'): # divide by zero\n assert_allclose(xy[comp_2] / float(xy[comp_1]),\n lt.pos[comp_2] / float(lt.pos[comp_1]))\n assert_allclose(lt.pos[0, [2, 3]], [w, h])\n\n # Correct number elements\n assert lt.pos.shape[1] == 4\n assert len(lt.box) == 4\n\n # Make sure background image normalizing is correct\n lt_bg = generate_2d_layout(xy, bg_image=bg_image)\n assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg))\n\n\nrun_tests_if_main()\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.allclose"
],
[
"numpy.fromfile",
"numpy.radians",
"numpy.unique",
"numpy.asarray",
"numpy.arange",
"numpy.vstack",
"numpy.full",
"numpy.mean",
"numpy.zeros",
"scipy.linalg.norm",
"numpy.cross",
"numpy.searchsorted",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.empty"
],
[
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.subplots"
],
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.polynomial.legendre.legvander",
"numpy.dot",
"numpy.fromfile",
"numpy.linspace",
"numpy.clip",
"numpy.ascontiguousarray",
"numpy.arange",
"numpy.tile",
"numpy.concatenate",
"numpy.cumprod",
"numpy.array",
"numpy.sum",
"numpy.empty"
],
[
"numpy.testing.assert_equal",
"numpy.abs",
"numpy.arange",
"numpy.cos",
"numpy.testing.assert_allclose",
"numpy.zeros",
"numpy.sum",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.max",
"numpy.array",
"numpy.zeros_like",
"numpy.min"
],
[
"numpy.dot",
"numpy.ix_",
"numpy.sqrt",
"numpy.isfinite",
"numpy.ones",
"numpy.finfo",
"scipy.linalg.eigh",
"numpy.mean",
"numpy.array"
],
[
"numpy.testing.assert_array_equal"
],
[
"numpy.testing.assert_equal",
"numpy.linspace",
"numpy.arange",
"numpy.testing.assert_array_equal",
"matplotlib.pyplot.close",
"numpy.testing.assert_allclose",
"numpy.errstate",
"numpy.array",
"numpy.random.RandomState",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dopplerchase/GewitterGefahr | [
"d819874d616f98a25187bfd3091073a2e6d5279e",
"d819874d616f98a25187bfd3091073a2e6d5279e",
"d819874d616f98a25187bfd3091073a2e6d5279e",
"d819874d616f98a25187bfd3091073a2e6d5279e",
"d819874d616f98a25187bfd3091073a2e6d5279e",
"4415b08dd64f37eba5b1b9e8cc5aa9af24f96593",
"4415b08dd64f37eba5b1b9e8cc5aa9af24f96593"
] | [
"gewittergefahr/scripts/convert_examples_myrorss_to_gridrad.py",
"gewittergefahr/gg_utils/conus_boundary_test.py",
"gewittergefahr/scripts/subset_predictions_by_space.py",
"gewittergefahr/scripts/train_many_cnns_3d_gridrad.py",
"gewittergefahr/gg_utils/dilation_test.py",
"gewittergefahr/gg_utils/moisture_conversions.py",
"gewittergefahr/scripts/tmp_verify.py"
] | [
"\"\"\"Converts examples from MYRORSS to GridRad format.\"\"\"\n\nimport os.path\nimport argparse\nimport numpy\nfrom gewittergefahr.gg_utils import radar_utils\nfrom gewittergefahr.gg_utils import time_conversion\nfrom gewittergefahr.deep_learning import input_examples\nfrom gewittergefahr.deep_learning import training_validation_io as trainval_io\n\nAZ_SHEAR_TO_VORTICITY = 0.5\nMAX_LL_SHEAR_HEIGHT_M_AGL = 2000\nREFL_HEIGHTS_M_AGL = numpy.array(\n [1000, 2000, 3000, 4000, 5000, 6000], dtype=int\n)\nNEW_RADAR_HEIGHTS_M_AGL = numpy.array(\n [0, 1000, 2000, 3000, 4000, 5000, 6000], dtype=int\n)\n\nINPUT_DIR_ARG_NAME = 'input_example_dir_name'\nFIRST_DATE_ARG_NAME = 'first_spc_date_string'\nLAST_DATE_ARG_NAME = 'last_spc_date_string'\nNUM_EX_PER_BATCH_ARG_NAME = 'num_examples_per_batch'\nOUTPUT_DIR_ARG_NAME = 'output_example_dir_name'\n\nINPUT_DIR_HELP_STRING = (\n 'Name of top-level directory with original examples (in MYRORSS format). '\n 'Files therein will be found by `input_examples.find_example_file` and read'\n ' by `input_examples.read_example_file`.')\n\nSPC_DATE_HELP_STRING = (\n 'SPC date (format \"yyyymmdd\"). Examples will be converted for all SPC '\n 'dates in period `{0:s}`...`{1:s}`.'\n).format(FIRST_DATE_ARG_NAME, LAST_DATE_ARG_NAME)\n\nNUM_EX_PER_BATCH_HELP_STRING = (\n 'Number of examples per batch. Examples will read and written in batches '\n 'of this size.')\n\nOUTPUT_DIR_HELP_STRING = (\n 'Name of top-level directory for new examples (in GridRad format). Files '\n 'will be written by `input_examples.write_example_file` to locations '\n 'therein determined by `input_examples.find_example_file`.')\n\nINPUT_ARG_PARSER = argparse.ArgumentParser()\nINPUT_ARG_PARSER.add_argument(\n '--' + INPUT_DIR_ARG_NAME, type=str, required=True,\n help=INPUT_DIR_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + FIRST_DATE_ARG_NAME, type=str, required=True,\n help=SPC_DATE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + LAST_DATE_ARG_NAME, type=str, required=True,\n help=SPC_DATE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + NUM_EX_PER_BATCH_ARG_NAME, type=int, required=False, default=1000,\n help=NUM_EX_PER_BATCH_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,\n help=OUTPUT_DIR_HELP_STRING)\n\n\ndef _convert_one_file_selected_examples(\n input_file_name, output_file_name, full_storm_id_strings,\n storm_times_unix_sec, append_to_file):\n \"\"\"Converts selected examples in one file from MYRORSS to GridRad format.\n\n E = number of examples\n\n :param input_file_name: See doc for `_convert_one_file`.\n :param output_file_name: Same.\n :param full_storm_id_strings: length-E list of storm IDs.\n :param storm_times_unix_sec: length-E numpy array of storm times.\n :param append_to_file: Boolean flag. If True, will append new examples to\n output file. 
If False, will overwrite output file.\n \"\"\"\n\n print('Reading MYRORSS examples from: \"{0:s}\"...'.format(input_file_name))\n example_dict = input_examples.read_specific_examples(\n netcdf_file_name=input_file_name, read_all_target_vars=True,\n full_storm_id_strings=full_storm_id_strings,\n storm_times_unix_sec=storm_times_unix_sec,\n radar_heights_to_keep_m_agl=REFL_HEIGHTS_M_AGL)\n\n # Add surface reflectivity, then double horizontal resolution.\n reflectivity_matrix_dbz = example_dict[\n input_examples.REFL_IMAGE_MATRIX_KEY][..., 0]\n\n reflectivity_matrix_dbz = numpy.concatenate(\n (reflectivity_matrix_dbz, reflectivity_matrix_dbz[..., [0]]), axis=-1\n )\n\n reflectivity_matrix_dbz = trainval_io.upsample_reflectivity(\n reflectivity_matrix_dbz=reflectivity_matrix_dbz, upsampling_factor=2)\n\n # Create vorticity matrix.\n shear_field_names = example_dict[input_examples.RADAR_FIELDS_KEY]\n ll_shear_index = shear_field_names.index(radar_utils.LOW_LEVEL_SHEAR_NAME)\n ml_shear_index = shear_field_names.index(radar_utils.MID_LEVEL_SHEAR_NAME)\n\n ll_shear_matrix_s01 = example_dict[\n input_examples.AZ_SHEAR_IMAGE_MATRIX_KEY\n ][..., ll_shear_index]\n\n ml_shear_matrix_s01 = example_dict[\n input_examples.AZ_SHEAR_IMAGE_MATRIX_KEY\n ][..., ml_shear_index]\n\n num_radar_heights = len(NEW_RADAR_HEIGHTS_M_AGL)\n these_dimensions = numpy.array(\n ll_shear_matrix_s01.shape + (num_radar_heights,), dtype=int\n )\n vorticity_matrix_s01 = numpy.full(these_dimensions, numpy.nan)\n\n for k in range(num_radar_heights):\n if NEW_RADAR_HEIGHTS_M_AGL[k] > MAX_LL_SHEAR_HEIGHT_M_AGL:\n vorticity_matrix_s01[..., k] = ml_shear_matrix_s01\n else:\n vorticity_matrix_s01[..., k] = ll_shear_matrix_s01\n\n vorticity_matrix_s01 *= AZ_SHEAR_TO_VORTICITY\n radar_matrix = numpy.stack(\n (reflectivity_matrix_dbz, vorticity_matrix_s01), axis=-1\n )\n\n example_dict[input_examples.RADAR_IMAGE_MATRIX_KEY] = radar_matrix\n example_dict[input_examples.RADAR_HEIGHTS_KEY] = NEW_RADAR_HEIGHTS_M_AGL\n example_dict[input_examples.RADAR_FIELDS_KEY] = [\n radar_utils.REFL_NAME, radar_utils.VORTICITY_NAME\n ]\n example_dict[input_examples.ROTATED_GRID_SPACING_KEY] *= 0.5\n\n example_dict.pop(input_examples.REFL_IMAGE_MATRIX_KEY, None)\n example_dict.pop(input_examples.AZ_SHEAR_IMAGE_MATRIX_KEY, None)\n\n print('Writing examples in GridRad format to: \"{0:s}\"...'.format(\n output_file_name\n ))\n\n input_examples.write_example_file(\n netcdf_file_name=output_file_name, example_dict=example_dict,\n append_to_file=append_to_file)\n\n\ndef _convert_one_file(input_file_name, output_file_name,\n num_examples_per_batch):\n \"\"\"Converts examples in one file from MYRORSS to GridRad format.\n\n :param input_file_name: Path to input file (with MYRORSS examples). Will be\n read by `input_examples.read_example_file`.\n :param output_file_name: Path to output file (with the same examples but in\n GridRad format). 
Will be written by\n `input_examples.write_example_file`.\n :param num_examples_per_batch: See documentation at top of file.\n \"\"\"\n\n print('Reading metadata from: \"{0:s}\"...'.format(input_file_name))\n example_dict = input_examples.read_example_file(\n netcdf_file_name=input_file_name, read_all_target_vars=True,\n metadata_only=True)\n\n full_storm_id_strings = example_dict[input_examples.FULL_IDS_KEY]\n storm_times_unix_sec = example_dict[input_examples.STORM_TIMES_KEY]\n num_examples = len(full_storm_id_strings)\n\n for i in range(0, num_examples, num_examples_per_batch):\n this_first_index = i\n this_last_index = min(\n [i + num_examples_per_batch - 1, num_examples - 1]\n )\n\n _convert_one_file_selected_examples(\n input_file_name=input_file_name,\n output_file_name=output_file_name,\n full_storm_id_strings=\n full_storm_id_strings[this_first_index:(this_last_index + 1)],\n storm_times_unix_sec=\n storm_times_unix_sec[this_first_index:(this_last_index + 1)],\n append_to_file=i > 0\n )\n\n\ndef _run(top_input_dir_name, first_spc_date_string, last_spc_date_string,\n num_examples_per_batch, top_output_dir_name):\n \"\"\"Converts examples from MYRORSS to GridRad format.\n\n This is effectively the main method.\n\n :param top_input_dir_name: See documentation at top of file.\n :param first_spc_date_string: Same.\n :param last_spc_date_string: Same.\n :param num_examples_per_batch: Same.\n :param top_output_dir_name: Same.\n \"\"\"\n\n spc_date_strings = time_conversion.get_spc_dates_in_range(\n first_spc_date_string=first_spc_date_string,\n last_spc_date_string=last_spc_date_string)\n\n input_file_names = [\n input_examples.find_example_file(\n top_directory_name=top_input_dir_name, shuffled=False,\n spc_date_string=d, raise_error_if_missing=False\n )\n for d in spc_date_strings\n ]\n\n output_file_names = [\n input_examples.find_example_file(\n top_directory_name=top_output_dir_name, shuffled=False,\n spc_date_string=d, raise_error_if_missing=False\n )\n for d in spc_date_strings\n ]\n\n num_spc_dates = len(spc_date_strings)\n\n for i in range(num_spc_dates):\n if not os.path.isfile(input_file_names[i]):\n continue\n\n _convert_one_file(\n input_file_name=input_file_names[i],\n output_file_name=output_file_names[i],\n num_examples_per_batch=num_examples_per_batch\n )\n\n print('\\n')\n\n\nif __name__ == '__main__':\n INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()\n\n _run(\n top_input_dir_name=getattr(INPUT_ARG_OBJECT, INPUT_DIR_ARG_NAME),\n first_spc_date_string=getattr(INPUT_ARG_OBJECT, FIRST_DATE_ARG_NAME),\n last_spc_date_string=getattr(INPUT_ARG_OBJECT, LAST_DATE_ARG_NAME),\n num_examples_per_batch=getattr(\n INPUT_ARG_OBJECT, NUM_EX_PER_BATCH_ARG_NAME),\n top_output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)\n )\n",
"\"\"\"Unit tests for conus_boundary.py.\"\"\"\n\nimport unittest\nimport numpy\nfrom gewittergefahr.gg_utils import conus_boundary\n\nQUERY_LATITUDES_DEG = numpy.array([\n 33.7, 42.6, 39.7, 34.9, 40.2, 33.6, 36.4, 35.1, 30.8, 47.4, 44.2, 45.1,\n 49.6, 38.9, 35.0, 38.1, 40.7, 47.1, 30.2, 39.2\n])\nQUERY_LONGITUDES_DEG = numpy.array([\n 276.3, 282.7, 286.6, 287.5, 271.0, 266.4, 258.3, 257.3, 286.8, 235.0, 273.5,\n 262.5, 277.2, 255.3, 271.8, 254.3, 262.1, 247.8, 262.9, 251.6\n])\n\nIN_CONUS_FLAGS = numpy.array(\n [1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1], dtype=bool\n)\n\n\nclass ConusBoundaryTests(unittest.TestCase):\n \"\"\"Each method is a unit test for conus_boundary.py.\"\"\"\n\n def test_find_points_in_conus_no_shortcuts(self):\n \"\"\"Ensures correct output from find_points_in_conus.\n\n In this case, does not use shortcuts.\n \"\"\"\n\n conus_latitudes_deg, conus_longitudes_deg = (\n conus_boundary.read_from_netcdf()\n )\n\n these_flags = conus_boundary.find_points_in_conus(\n conus_latitudes_deg=conus_latitudes_deg,\n conus_longitudes_deg=conus_longitudes_deg,\n query_latitudes_deg=QUERY_LATITUDES_DEG,\n query_longitudes_deg=QUERY_LONGITUDES_DEG, use_shortcuts=False)\n\n self.assertTrue(numpy.array_equal(these_flags, IN_CONUS_FLAGS))\n\n def test_find_points_in_conus_with_shortcuts(self):\n \"\"\"Ensures correct output from find_points_in_conus.\n\n In this case, uses shortcuts.\n \"\"\"\n\n conus_latitudes_deg, conus_longitudes_deg = (\n conus_boundary.read_from_netcdf()\n )\n\n these_flags = conus_boundary.find_points_in_conus(\n conus_latitudes_deg=conus_latitudes_deg,\n conus_longitudes_deg=conus_longitudes_deg,\n query_latitudes_deg=QUERY_LATITUDES_DEG,\n query_longitudes_deg=QUERY_LONGITUDES_DEG, use_shortcuts=True)\n\n self.assertTrue(numpy.array_equal(these_flags, IN_CONUS_FLAGS))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"Subsets ungridded predictions by space.\n\nSpecifically, this script groups predictions into cells on an equidistant grid\nand writes one prediction file per grid cell.\n\"\"\"\n\nimport argparse\nimport numpy\nfrom gewittergefahr.gg_io import storm_tracking_io as tracking_io\nfrom gewittergefahr.gg_utils import grids\nfrom gewittergefahr.gg_utils import projections\nfrom gewittergefahr.gg_utils import time_conversion\nfrom gewittergefahr.gg_utils import echo_top_tracking\nfrom gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils\nfrom gewittergefahr.deep_learning import prediction_io\n\nSEPARATOR_STRING = '\\n\\n' + '*' * 50 + '\\n\\n'\nDUMMY_TRACKING_SCALE_METRES2 = echo_top_tracking.DUMMY_TRACKING_SCALE_METRES2\n\nINPUT_FILE_ARG_NAME = 'input_file_name'\nTRACKING_DIR_ARG_NAME = 'input_tracking_dir_name'\nMIN_LATITUDE_ARG_NAME = 'min_latitude_deg'\nMAX_LATITUDE_ARG_NAME = 'max_latitude_deg'\nMIN_LONGITUDE_ARG_NAME = 'min_longitude_deg'\nMAX_LONGITUDE_ARG_NAME = 'max_longitude_deg'\nGRID_SPACING_ARG_NAME = 'grid_spacing_metres'\nOUTPUT_DIR_ARG_NAME = 'output_dir_name'\n\nINPUT_FILE_HELP_STRING = (\n 'Path to main input file (with predictions to be subset). Will be read by '\n '`prediction_io.read_ungridded_predictions`.')\n\nTRACKING_DIR_HELP_STRING = (\n 'Name of top-level tracking directory (will be used to find storm '\n 'locations). Files therein will be found by `storm_tracking_io.find_file` '\n 'and read by `storm_tracking_io.read_file`.')\n\nMIN_LATITUDE_HELP_STRING = 'Minimum latitude (deg N) in equidistant grid.'\nMAX_LATITUDE_HELP_STRING = 'Max latitude (deg N) in equidistant grid.'\nMIN_LONGITUDE_HELP_STRING = 'Minimum longitude (deg E) in equidistant grid.'\nMAX_LONGITUDE_HELP_STRING = 'Max longitude (deg E) in equidistant grid.'\nGRID_SPACING_HELP_SPACING = 'Spacing for equidistant grid.'\n\nOUTPUT_DIR_HELP_STRING = (\n 'Name of output directory. 
Spatially subset predictions (one file per '\n 'equidistant grid cell) will be written here by '\n '`prediction_io.write_ungridded_predictions`, to exact locations determined'\n ' by `prediction_io.find_ungridded_file`.')\n\nINPUT_ARG_PARSER = argparse.ArgumentParser()\nINPUT_ARG_PARSER.add_argument(\n '--' + INPUT_FILE_ARG_NAME, type=str, required=True,\n help=INPUT_FILE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + TRACKING_DIR_ARG_NAME, type=str, required=True,\n help=TRACKING_DIR_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + MIN_LATITUDE_ARG_NAME, type=float, required=False, default=24.,\n help=MIN_LATITUDE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + MAX_LATITUDE_ARG_NAME, type=float, required=False, default=50.,\n help=MAX_LATITUDE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + MIN_LONGITUDE_ARG_NAME, type=float, required=False, default=234.,\n help=MIN_LONGITUDE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + MAX_LONGITUDE_ARG_NAME, type=float, required=False, default=294.,\n help=MAX_LONGITUDE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + GRID_SPACING_ARG_NAME, type=float, required=False, default=1e5,\n help=GRID_SPACING_HELP_SPACING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,\n help=OUTPUT_DIR_HELP_STRING)\n\n\ndef _read_storm_locations_one_time(\n top_tracking_dir_name, valid_time_unix_sec, desired_full_id_strings):\n \"\"\"Reads storm locations at one time.\n\n K = number of storm objects desired\n\n :param top_tracking_dir_name: See documentation at top of file.\n :param valid_time_unix_sec: Valid time.\n :param desired_full_id_strings: length-K list of full storm IDs. Locations\n will be read for these storms only.\n :return: desired_latitudes_deg: length-K numpy array of latitudes (deg N).\n :return: desired_longitudes_deg: length-K numpy array of longitudes (deg E).\n \"\"\"\n\n spc_date_string = time_conversion.time_to_spc_date_string(\n valid_time_unix_sec)\n desired_times_unix_sec = numpy.full(\n len(desired_full_id_strings), valid_time_unix_sec, dtype=int\n )\n\n tracking_file_name = tracking_io.find_file(\n top_tracking_dir_name=top_tracking_dir_name,\n tracking_scale_metres2=DUMMY_TRACKING_SCALE_METRES2,\n source_name=tracking_utils.SEGMOTION_NAME,\n valid_time_unix_sec=valid_time_unix_sec,\n spc_date_string=spc_date_string, raise_error_if_missing=True)\n\n print('Reading storm locations from: \"{0:s}\"...'.format(tracking_file_name))\n storm_object_table = tracking_io.read_file(tracking_file_name)\n\n desired_indices = tracking_utils.find_storm_objects(\n all_id_strings=storm_object_table[\n tracking_utils.FULL_ID_COLUMN].values.tolist(),\n all_times_unix_sec=storm_object_table[\n tracking_utils.VALID_TIME_COLUMN].values,\n id_strings_to_keep=desired_full_id_strings,\n times_to_keep_unix_sec=desired_times_unix_sec, allow_missing=False)\n\n desired_latitudes_deg = storm_object_table[\n tracking_utils.CENTROID_LATITUDE_COLUMN].values[desired_indices]\n desired_longitudes_deg = storm_object_table[\n tracking_utils.CENTROID_LONGITUDE_COLUMN].values[desired_indices]\n\n return desired_latitudes_deg, desired_longitudes_deg\n\n\ndef _run(input_file_name, top_tracking_dir_name, min_latitude_deg,\n max_latitude_deg, min_longitude_deg, max_longitude_deg,\n grid_spacing_metres, output_dir_name):\n \"\"\"Subsets ungridded predictions by space.\n\n This is effectively the main method.\n\n :param input_file_name: See documentation at top of file.\n :param top_tracking_dir_name: 
Same.\n :param min_latitude_deg: Same.\n :param max_latitude_deg: Same.\n :param min_longitude_deg: Same.\n :param max_longitude_deg: Same.\n :param grid_spacing_metres: Same.\n :param output_dir_name: Same.\n \"\"\"\n\n equidistant_grid_dict = grids.create_equidistant_grid(\n min_latitude_deg=min_latitude_deg, max_latitude_deg=max_latitude_deg,\n min_longitude_deg=min_longitude_deg,\n max_longitude_deg=max_longitude_deg,\n x_spacing_metres=grid_spacing_metres,\n y_spacing_metres=grid_spacing_metres, azimuthal=False)\n\n grid_metafile_name = grids.find_equidistant_metafile(\n directory_name=output_dir_name, raise_error_if_missing=False)\n\n print('Writing metadata for equidistant grid to: \"{0:s}\"...'.format(\n grid_metafile_name\n ))\n\n grids.write_equidistant_metafile(grid_dict=equidistant_grid_dict,\n pickle_file_name=grid_metafile_name)\n\n grid_point_x_coords_metres = equidistant_grid_dict[grids.X_COORDS_KEY]\n grid_point_y_coords_metres = equidistant_grid_dict[grids.Y_COORDS_KEY]\n projection_object = equidistant_grid_dict[grids.PROJECTION_KEY]\n\n grid_edge_x_coords_metres = numpy.append(\n grid_point_x_coords_metres - 0.5 * grid_spacing_metres,\n grid_point_x_coords_metres[-1] + 0.5 * grid_spacing_metres\n )\n grid_edge_y_coords_metres = numpy.append(\n grid_point_y_coords_metres - 0.5 * grid_spacing_metres,\n grid_point_y_coords_metres[-1] + 0.5 * grid_spacing_metres\n )\n\n print('Reading input data from: \"{0:s}\"...'.format(input_file_name))\n prediction_dict = prediction_io.read_ungridded_predictions(input_file_name)\n print(SEPARATOR_STRING)\n\n full_id_strings = prediction_dict[prediction_io.STORM_IDS_KEY]\n storm_times_unix_sec = prediction_dict[prediction_io.STORM_TIMES_KEY]\n unique_storm_times_unix_sec = numpy.unique(storm_times_unix_sec)\n\n num_storm_objects = len(storm_times_unix_sec)\n storm_latitudes_deg = numpy.full(num_storm_objects, numpy.nan)\n storm_longitudes_deg = numpy.full(num_storm_objects, numpy.nan)\n\n for this_time_unix_sec in unique_storm_times_unix_sec:\n these_indices = numpy.where(\n storm_times_unix_sec == this_time_unix_sec\n )[0]\n these_full_id_strings = [full_id_strings[k] for k in these_indices]\n\n (storm_latitudes_deg[these_indices],\n storm_longitudes_deg[these_indices]\n ) = _read_storm_locations_one_time(\n top_tracking_dir_name=top_tracking_dir_name,\n valid_time_unix_sec=this_time_unix_sec,\n desired_full_id_strings=these_full_id_strings)\n\n print(SEPARATOR_STRING)\n\n storm_x_coords_metres, storm_y_coords_metres = (\n projections.project_latlng_to_xy(\n latitudes_deg=storm_latitudes_deg,\n longitudes_deg=storm_longitudes_deg,\n projection_object=projection_object)\n )\n\n num_grid_rows = len(grid_point_y_coords_metres)\n num_grid_columns = len(grid_point_x_coords_metres)\n\n for i in range(num_grid_rows):\n for j in range(num_grid_columns):\n these_indices = grids.find_events_in_grid_cell(\n event_x_coords_metres=storm_x_coords_metres,\n event_y_coords_metres=storm_y_coords_metres,\n grid_edge_x_coords_metres=grid_edge_x_coords_metres,\n grid_edge_y_coords_metres=grid_edge_y_coords_metres,\n row_index=i, column_index=j, verbose=True)\n\n if len(these_indices) == 0:\n continue\n\n this_prediction_dict = prediction_io.subset_ungridded_predictions(\n prediction_dict=prediction_dict,\n desired_storm_indices=these_indices)\n\n this_output_file_name = prediction_io.find_ungridded_file(\n directory_name=output_dir_name, grid_row=i, grid_column=j,\n raise_error_if_missing=False)\n\n print('Writing subset to: 
\"{0:s}\"...'.format(this_output_file_name))\n\n prediction_io.write_ungridded_predictions(\n netcdf_file_name=this_output_file_name,\n class_probability_matrix=this_prediction_dict[\n prediction_io.PROBABILITY_MATRIX_KEY],\n storm_ids=this_prediction_dict[prediction_io.STORM_IDS_KEY],\n storm_times_unix_sec=this_prediction_dict[\n prediction_io.STORM_TIMES_KEY],\n observed_labels=this_prediction_dict[\n prediction_io.OBSERVED_LABELS_KEY],\n target_name=this_prediction_dict[prediction_io.TARGET_NAME_KEY],\n model_file_name=this_prediction_dict[\n prediction_io.MODEL_FILE_KEY]\n )\n\n print('\\n')\n\n\nif __name__ == '__main__':\n INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()\n\n _run(\n input_file_name=getattr(INPUT_ARG_OBJECT, INPUT_FILE_ARG_NAME),\n top_tracking_dir_name=getattr(INPUT_ARG_OBJECT, TRACKING_DIR_ARG_NAME),\n min_latitude_deg=getattr(INPUT_ARG_OBJECT, MIN_LATITUDE_ARG_NAME),\n max_latitude_deg=getattr(INPUT_ARG_OBJECT, MAX_LATITUDE_ARG_NAME),\n min_longitude_deg=getattr(INPUT_ARG_OBJECT, MIN_LONGITUDE_ARG_NAME),\n max_longitude_deg=getattr(INPUT_ARG_OBJECT, MAX_LONGITUDE_ARG_NAME),\n grid_spacing_metres=getattr(INPUT_ARG_OBJECT, GRID_SPACING_ARG_NAME),\n output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)\n )\n",
"\"\"\"Trains many convolutional neural nets with native (3-D) GridRad images.\"\"\"\n\nimport os\nimport pickle\nimport argparse\nimport traceback\nfrom multiprocessing import Pool, Manager\nimport numpy\nfrom gewittergefahr.gg_utils import time_conversion\nfrom gewittergefahr.gg_utils import soundings\nfrom gewittergefahr.gg_utils import file_system_utils\n\nSEPARATOR_STRING = '\\n\\n' + '*' * 50 + '\\n\\n'\n\nNUM_GPU_PER_NODE = 8\nTIME_FORMAT = '%Y-%m-%d-%H%M%S'\n\nFIRST_BATCH_NUMBER = 0\nLAST_BATCH_NUMBER = int(1e12)\nRADAR_HEIGHTS_M_AGL = numpy.linspace(1000, 12000, num=12, dtype=int)\nSOUNDING_HEIGHTS_M_AGL = soundings.DEFAULT_HEIGHT_LEVELS_M_AGL + 0\n\nRADAR_FIELDS_KEY = 'radar_field_names'\nREFLECTIVITY_MASK_KEY = 'refl_masking_threshold_dbz'\n\nARGUMENT_FILES_ARG_NAME = 'argument_file_names'\nARGUMENT_FILES_HELP_STRING = (\n '1-D list of paths to input files, each containing a dictionary of '\n 'arguments for the single-CNN script train_cnn_3d_gridrad.py. Each file '\n 'should be a Pickle file, containing only said dictionary.'\n)\n\nINPUT_ARG_PARSER = argparse.ArgumentParser()\nINPUT_ARG_PARSER.add_argument(\n '--' + ARGUMENT_FILES_ARG_NAME, type=str, nargs='+', required=True,\n help=ARGUMENT_FILES_HELP_STRING\n)\n\n\ndef _write_metadata_one_cnn(model_object, argument_dict):\n \"\"\"Writes metadata for one CNN to file.\n\n :param model_object: Untrained CNN (instance of `keras.models.Model` or\n `keras.models.Sequential`).\n :param argument_dict: See doc for `_train_one_cnn`.\n :return: metadata_dict: See doc for `cnn.write_model_metadata`.\n :return: training_option_dict: Same.\n \"\"\"\n\n from gewittergefahr.deep_learning import cnn\n from gewittergefahr.deep_learning import input_examples\n from gewittergefahr.deep_learning import \\\n training_validation_io as trainval_io\n from gewittergefahr.scripts import deep_learning_helper as dl_helper\n\n # Read input args.\n radar_field_names = argument_dict[RADAR_FIELDS_KEY]\n sounding_field_names = argument_dict[dl_helper.SOUNDING_FIELDS_ARG_NAME]\n\n normalization_type_string = (\n argument_dict[dl_helper.NORMALIZATION_TYPE_ARG_NAME]\n )\n normalization_file_name = (\n argument_dict[dl_helper.NORMALIZATION_FILE_ARG_NAME]\n )\n min_normalized_value = argument_dict[dl_helper.MIN_NORM_VALUE_ARG_NAME]\n max_normalized_value = argument_dict[dl_helper.MAX_NORM_VALUE_ARG_NAME]\n\n target_name = argument_dict[dl_helper.TARGET_NAME_ARG_NAME]\n shuffle_target = bool(argument_dict[dl_helper.SHUFFLE_TARGET_ARG_NAME])\n downsampling_classes = numpy.array(\n argument_dict[dl_helper.DOWNSAMPLING_CLASSES_ARG_NAME],\n dtype=int\n )\n downsampling_fractions = numpy.array(\n argument_dict[dl_helper.DOWNSAMPLING_FRACTIONS_ARG_NAME],\n dtype=float\n )\n\n monitor_string = argument_dict[dl_helper.MONITOR_ARG_NAME]\n weight_loss_function = bool(argument_dict[dl_helper.WEIGHT_LOSS_ARG_NAME])\n refl_masking_threshold_dbz = argument_dict[REFLECTIVITY_MASK_KEY]\n\n x_translations_pixels = numpy.array(\n argument_dict[dl_helper.X_TRANSLATIONS_ARG_NAME], dtype=int\n )\n y_translations_pixels = numpy.array(\n argument_dict[dl_helper.Y_TRANSLATIONS_ARG_NAME], dtype=int\n )\n ccw_rotation_angles_deg = numpy.array(\n argument_dict[dl_helper.ROTATION_ANGLES_ARG_NAME], dtype=float\n )\n noise_standard_deviation = argument_dict[dl_helper.NOISE_STDEV_ARG_NAME]\n num_noisings = argument_dict[dl_helper.NUM_NOISINGS_ARG_NAME]\n flip_in_x = bool(argument_dict[dl_helper.FLIP_X_ARG_NAME])\n flip_in_y = bool(argument_dict[dl_helper.FLIP_Y_ARG_NAME])\n\n 
top_training_dir_name = argument_dict[dl_helper.TRAINING_DIR_ARG_NAME]\n first_training_time_string = (\n argument_dict[dl_helper.FIRST_TRAINING_TIME_ARG_NAME]\n )\n last_training_time_string = (\n argument_dict[dl_helper.LAST_TRAINING_TIME_ARG_NAME]\n )\n num_examples_per_train_batch = (\n argument_dict[dl_helper.NUM_EX_PER_TRAIN_ARG_NAME]\n )\n\n top_validation_dir_name = argument_dict[dl_helper.VALIDATION_DIR_ARG_NAME]\n first_validation_time_string = (\n argument_dict[dl_helper.FIRST_VALIDATION_TIME_ARG_NAME]\n )\n last_validation_time_string = (\n argument_dict[dl_helper.LAST_VALIDATION_TIME_ARG_NAME]\n )\n num_examples_per_validn_batch = (\n argument_dict[dl_helper.NUM_EX_PER_VALIDN_ARG_NAME]\n )\n\n num_epochs = argument_dict[dl_helper.NUM_EPOCHS_ARG_NAME]\n num_training_batches_per_epoch = (\n argument_dict[dl_helper.NUM_TRAINING_BATCHES_ARG_NAME]\n )\n num_validation_batches_per_epoch = (\n argument_dict[dl_helper.NUM_VALIDATION_BATCHES_ARG_NAME]\n )\n output_dir_name = argument_dict[dl_helper.OUTPUT_DIR_ARG_NAME]\n\n # Process input args.\n first_training_time_unix_sec = time_conversion.string_to_unix_sec(\n first_training_time_string, TIME_FORMAT\n )\n last_training_time_unix_sec = time_conversion.string_to_unix_sec(\n last_training_time_string, TIME_FORMAT\n )\n first_validation_time_unix_sec = time_conversion.string_to_unix_sec(\n first_validation_time_string, TIME_FORMAT\n )\n last_validation_time_unix_sec = time_conversion.string_to_unix_sec(\n last_validation_time_string, TIME_FORMAT\n )\n\n if sounding_field_names[0] in ['', 'None']:\n sounding_field_names = None\n\n if len(downsampling_classes) > 1:\n downsampling_dict = dict(list(zip(\n downsampling_classes, downsampling_fractions\n )))\n else:\n downsampling_dict = None\n\n translate_flag = (\n len(x_translations_pixels) > 1\n or x_translations_pixels[0] != 0 or y_translations_pixels[0] != 0\n )\n\n if not translate_flag:\n x_translations_pixels = None\n y_translations_pixels = None\n\n if len(ccw_rotation_angles_deg) == 1 and ccw_rotation_angles_deg[0] == 0:\n ccw_rotation_angles_deg = None\n\n if num_noisings <= 0:\n num_noisings = 0\n noise_standard_deviation = None\n\n if refl_masking_threshold_dbz <= 0:\n refl_masking_threshold_dbz = None\n\n # Find training and validation files.\n training_file_names = input_examples.find_many_example_files(\n top_directory_name=top_training_dir_name, shuffled=True,\n first_batch_number=FIRST_BATCH_NUMBER,\n last_batch_number=LAST_BATCH_NUMBER,\n raise_error_if_any_missing=False\n )\n validation_file_names = input_examples.find_many_example_files(\n top_directory_name=top_validation_dir_name, shuffled=True,\n first_batch_number=FIRST_BATCH_NUMBER,\n last_batch_number=LAST_BATCH_NUMBER,\n raise_error_if_any_missing=False\n )\n\n # Write metadata.\n metadata_dict = {\n cnn.NUM_EPOCHS_KEY: num_epochs,\n cnn.NUM_TRAINING_BATCHES_KEY: num_training_batches_per_epoch,\n cnn.NUM_VALIDATION_BATCHES_KEY: num_validation_batches_per_epoch,\n cnn.MONITOR_STRING_KEY: monitor_string,\n cnn.WEIGHT_LOSS_FUNCTION_KEY: weight_loss_function,\n cnn.CONV_2D3D_KEY: False,\n cnn.VALIDATION_FILES_KEY: validation_file_names,\n cnn.FIRST_VALIDN_TIME_KEY: first_validation_time_unix_sec,\n cnn.LAST_VALIDN_TIME_KEY: last_validation_time_unix_sec,\n cnn.NUM_EX_PER_VALIDN_BATCH_KEY: num_examples_per_validn_batch\n }\n\n input_tensor = model_object.input\n if isinstance(input_tensor, list):\n input_tensor = input_tensor[0]\n\n num_grid_rows = input_tensor.get_shape().as_list()[1]\n num_grid_columns = 
input_tensor.get_shape().as_list()[2]\n\n training_option_dict = {\n trainval_io.EXAMPLE_FILES_KEY: training_file_names,\n trainval_io.TARGET_NAME_KEY: target_name,\n trainval_io.SHUFFLE_TARGET_KEY: shuffle_target,\n trainval_io.FIRST_STORM_TIME_KEY: first_training_time_unix_sec,\n trainval_io.LAST_STORM_TIME_KEY: last_training_time_unix_sec,\n trainval_io.NUM_EXAMPLES_PER_BATCH_KEY: num_examples_per_train_batch,\n trainval_io.RADAR_FIELDS_KEY: radar_field_names,\n trainval_io.RADAR_HEIGHTS_KEY: RADAR_HEIGHTS_M_AGL,\n trainval_io.SOUNDING_FIELDS_KEY: sounding_field_names,\n trainval_io.SOUNDING_HEIGHTS_KEY: SOUNDING_HEIGHTS_M_AGL,\n trainval_io.NUM_ROWS_KEY: num_grid_rows,\n trainval_io.NUM_COLUMNS_KEY: num_grid_columns,\n trainval_io.NORMALIZATION_TYPE_KEY: normalization_type_string,\n trainval_io.NORMALIZATION_FILE_KEY: normalization_file_name,\n trainval_io.MIN_NORMALIZED_VALUE_KEY: min_normalized_value,\n trainval_io.MAX_NORMALIZED_VALUE_KEY: max_normalized_value,\n trainval_io.BINARIZE_TARGET_KEY: False,\n trainval_io.SAMPLING_FRACTIONS_KEY: downsampling_dict,\n trainval_io.LOOP_ONCE_KEY: False,\n trainval_io.REFLECTIVITY_MASK_KEY: refl_masking_threshold_dbz,\n trainval_io.X_TRANSLATIONS_KEY: x_translations_pixels,\n trainval_io.Y_TRANSLATIONS_KEY: y_translations_pixels,\n trainval_io.ROTATION_ANGLES_KEY: ccw_rotation_angles_deg,\n trainval_io.NOISE_STDEV_KEY: noise_standard_deviation,\n trainval_io.NUM_NOISINGS_KEY: num_noisings,\n trainval_io.FLIP_X_KEY: flip_in_x,\n trainval_io.FLIP_Y_KEY: flip_in_y\n }\n\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name\n )\n metafile_name = '{0:s}/model_metadata.p'.format(output_dir_name)\n\n print('Writing metadata to: \"{0:s}\"...'.format(metafile_name))\n cnn.write_model_metadata(\n pickle_file_name=metafile_name, metadata_dict=metadata_dict,\n training_option_dict=training_option_dict\n )\n\n return metadata_dict, training_option_dict\n\n\ndef _train_one_cnn(gpu_queue, argument_dict):\n \"\"\"Trains single CNN with 3-D GridRad data.\n\n :param gpu_queue: GPU queue (instance of `multiprocessing.Manager.Queue`).\n :param argument_dict: Dictionary of CNN arguments, where each key is an\n input arg to the script train_cnn_3d_gridrad.py.\n \"\"\"\n\n import keras\n from keras import backend as K\n import tensorflow\n from gewittergefahr.deep_learning import cnn\n from gewittergefahr.deep_learning import cnn_setup\n from gewittergefahr.scripts import deep_learning_helper as dl_helper\n\n gpu_index = -1\n\n try:\n # Deal with GPU business.\n gpu_index = int(gpu_queue.get())\n os.environ['CUDA_VISIBLE_DEVICES'] = '{0:d}'.format(gpu_index)\n\n session_object = tensorflow.Session(\n config=tensorflow.ConfigProto(\n intra_op_parallelism_threads=7, inter_op_parallelism_threads=7,\n allow_soft_placement=False, log_device_placement=False,\n gpu_options=tensorflow.GPUOptions(allow_growth=True)\n )\n )\n\n K.set_session(session_object)\n\n # Read untrained model.\n untrained_model_file_name = (\n argument_dict[dl_helper.INPUT_MODEL_FILE_ARG_NAME]\n )\n\n with tensorflow.device('/gpu:0'):\n print('Reading untrained model from: \"{0:s}\"...'.format(\n untrained_model_file_name\n ))\n model_object = cnn.read_model(untrained_model_file_name)\n\n model_object.compile(\n loss=keras.losses.binary_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=cnn_setup.DEFAULT_METRIC_FUNCTION_LIST\n )\n\n print(SEPARATOR_STRING)\n model_object.summary()\n print(SEPARATOR_STRING)\n\n # Write metadata.\n metadata_dict, 
training_option_dict = _write_metadata_one_cnn(\n model_object=model_object, argument_dict=argument_dict\n )\n\n print('Training CNN on GPU {0:d}...'.format(gpu_index))\n print(SEPARATOR_STRING)\n\n # Train CNN.\n output_dir_name = argument_dict[dl_helper.OUTPUT_DIR_ARG_NAME]\n output_model_file_name = '{0:s}/model.h5'.format(output_dir_name)\n history_file_name = '{0:s}/model_history.csv'.format(output_dir_name)\n tensorboard_dir_name = '{0:s}/tensorboard'.format(output_dir_name)\n\n cnn.train_cnn_2d_or_3d(\n model_object=model_object, model_file_name=output_model_file_name,\n history_file_name=history_file_name,\n tensorboard_dir_name=tensorboard_dir_name,\n num_epochs=metadata_dict[cnn.NUM_EPOCHS_KEY],\n num_training_batches_per_epoch=\n metadata_dict[cnn.NUM_TRAINING_BATCHES_KEY],\n training_option_dict=training_option_dict,\n monitor_string=metadata_dict[cnn.MONITOR_STRING_KEY],\n weight_loss_function=metadata_dict[cnn.WEIGHT_LOSS_FUNCTION_KEY],\n num_validation_batches_per_epoch=\n metadata_dict[cnn.NUM_VALIDATION_BATCHES_KEY],\n validation_file_names=metadata_dict[cnn.VALIDATION_FILES_KEY],\n first_validn_time_unix_sec=metadata_dict[cnn.FIRST_VALIDN_TIME_KEY],\n last_validn_time_unix_sec=metadata_dict[cnn.LAST_VALIDN_TIME_KEY],\n num_examples_per_validn_batch=\n metadata_dict[cnn.NUM_EX_PER_VALIDN_BATCH_KEY]\n )\n\n session_object.close()\n del session_object\n gpu_queue.put(gpu_index)\n\n except Exception as this_exception:\n if gpu_index >= 0:\n gpu_queue.put(gpu_index)\n\n print(traceback.format_exc())\n raise this_exception\n\n\ndef _run(argument_file_names):\n \"\"\"Trains many convolutional neural nets with native (3-D) GridRad images.\n\n This is effectively the main method.\n\n :param argument_file_names: See documentation at top of file.\n \"\"\"\n\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([\n '{0:d}'.format(j) for j in range(NUM_GPU_PER_NODE)\n ])\n\n gpu_manager = Manager()\n gpu_queue = gpu_manager.Queue()\n gpu_pool = Pool(NUM_GPU_PER_NODE, maxtasksperchild=1)\n\n for j in range(NUM_GPU_PER_NODE):\n gpu_queue.put(j)\n\n for this_arg_file_name in argument_file_names:\n print('Reading single-CNN input args from: \"{0:s}\"...'.format(\n this_arg_file_name\n ))\n\n this_file_handle = open(this_arg_file_name, 'rb')\n this_argument_dict = pickle.load(this_file_handle)\n this_file_handle.close()\n\n gpu_pool.apply_async(\n func=_train_one_cnn, args=(gpu_queue, this_argument_dict)\n )\n\n gpu_pool.close()\n gpu_pool.join()\n\n del gpu_pool\n del gpu_queue\n del gpu_manager\n\n\nif __name__ == '__main__':\n INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()\n\n _run(\n argument_file_names=getattr(INPUT_ARG_OBJECT, ARGUMENT_FILES_ARG_NAME)\n )\n",
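The training script above hands GPUs to pool workers through a shared queue: each worker checks a GPU index out, pins CUDA_VISIBLE_DEVICES to it, and returns the index when training finishes or fails. Below is a minimal, self-contained sketch of that pattern; the worker body and argument dicts are placeholders, not the original training code.

import os
import traceback
from multiprocessing import Manager, Pool

NUM_GPUS = 8  # matches NUM_GPU_PER_NODE above


def train_one(gpu_queue, argument_dict):
    gpu_index = -1
    try:
        gpu_index = int(gpu_queue.get())  # block until a GPU index is free
        os.environ['CUDA_VISIBLE_DEVICES'] = '{0:d}'.format(gpu_index)
        # ... build, compile, and train the model here ...
        gpu_queue.put(gpu_index)  # hand the GPU back for the next task
    except Exception:
        if gpu_index >= 0:
            gpu_queue.put(gpu_index)  # never leak a GPU index on failure
        print(traceback.format_exc())
        raise


if __name__ == '__main__':
    gpu_manager = Manager()
    gpu_queue = gpu_manager.Queue()
    for j in range(NUM_GPUS):
        gpu_queue.put(j)

    # maxtasksperchild=1 gives every training job a fresh process (and thus a
    # fresh TensorFlow session) instead of reusing a long-lived worker.
    gpu_pool = Pool(NUM_GPUS, maxtasksperchild=1)
    for argument_dict in [{'job': 0}, {'job': 1}]:  # placeholder job specs
        gpu_pool.apply_async(func=train_one, args=(gpu_queue, argument_dict))

    gpu_pool.close()
    gpu_pool.join()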
"\"\"\"Unit tests for dilation.py.\"\"\"\n\nimport unittest\nimport numpy\nfrom gewittergefahr.gg_utils import dilation\n\nTOLERANCE = 1e-6\nSMALL_PERCENTILE = 12.5\nLARGE_PERCENTILE = 87.5\nDILATION_HALF_WIDTH_IN_PIXELS = 1\n\nINPUT_MATRIX = numpy.array(\n [[-20., -15., -10., -5., 0.],\n [-10., -5., 0., 5., 10.],\n [0., 5., 10., numpy.nan, numpy.nan],\n [10., 15., 20., numpy.nan, numpy.nan]])\n\nOUTPUT_MATRIX_SMALL_PERCENTILE = numpy.array(\n [[-15., -15., -10., -5., numpy.nan],\n [-15., -15., -10., -5., numpy.nan],\n [-5., -5., numpy.nan, numpy.nan, numpy.nan],\n [numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan]])\n\nOUTPUT_MATRIX_LARGE_PERCENTILE = numpy.array(\n [[numpy.nan, numpy.nan, numpy.nan, 5., 5.],\n [numpy.nan, 5., 5., 10., 5.],\n [10., 15., 15., 10., 5.],\n [10., 15., 15., 10., numpy.nan]])\n\nOUTPUT_MATRIX_LARGEST_ABS_VALUE = numpy.array(\n [[-15., -15., -10., 5., 5.],\n [-15., -15., -10., 10., 5.],\n [10., 15., 15., 10., 5.],\n [10., 15., 15., 10., numpy.nan]])\n\n\nclass DilationTests(unittest.TestCase):\n \"\"\"Each method is a unit test for dilation.py.\"\"\"\n\n def test_dilate_2d_matrix_small_percentile(self):\n \"\"\"Ensures correct output from dilate_2d_matrix with small prctile.\"\"\"\n\n this_output_matrix = dilation.dilate_2d_matrix(\n INPUT_MATRIX, percentile_level=SMALL_PERCENTILE,\n half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS)\n\n self.assertTrue(numpy.allclose(\n this_output_matrix, OUTPUT_MATRIX_SMALL_PERCENTILE, atol=TOLERANCE,\n equal_nan=True))\n\n def test_dilate_2d_matrix_large_percentile(self):\n \"\"\"Ensures correct output from dilate_2d_matrix with large prctile.\"\"\"\n\n this_output_matrix = dilation.dilate_2d_matrix(\n INPUT_MATRIX, percentile_level=LARGE_PERCENTILE,\n half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS)\n\n self.assertTrue(numpy.allclose(\n this_output_matrix, OUTPUT_MATRIX_LARGE_PERCENTILE, atol=TOLERANCE,\n equal_nan=True))\n\n def test_dilate_2d_matrix_take_largest_abs_value(self):\n \"\"\"Ensures correct output from dilate_2d_matrix.\n\n In this case, take_largest_absolute_value = True.\n \"\"\"\n\n this_output_matrix = dilation.dilate_2d_matrix(\n INPUT_MATRIX, percentile_level=LARGE_PERCENTILE,\n half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS,\n take_largest_absolute_value=True)\n\n self.assertTrue(numpy.allclose(\n this_output_matrix, OUTPUT_MATRIX_LARGEST_ABS_VALUE, atol=TOLERANCE,\n equal_nan=True))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"Conversion methods for atmospheric-moisture variables.\"\"\"\n\nimport numpy\nfrom gewittergefahr.gg_utils import temperature_conversions as temperature_conv\nfrom gewittergefahr.gg_utils import error_checking\n\nDRY_AIR_GAS_CONSTANT_J_KG01_K01 = 287.04\nWATER_VAPOUR_GAS_CONSTANT_J_KG01_K01 = 461.5\nEPSILON = DRY_AIR_GAS_CONSTANT_J_KG01_K01 / WATER_VAPOUR_GAS_CONSTANT_J_KG01_K01\n\nBASE_VAPOUR_PRESSURE_PASCALS = 610.78\nMAGNUS_NUMERATOR_COEFF_WATER = 17.08085\nMAGNUS_NUMERATOR_COEFF_ICE = 17.84362\nMAGNUS_DENOMINATOR_COEFF_WATER = 234.175\nMAGNUS_DENOMINATOR_COEFF_ICE = 245.425\n\n\ndef specific_humidity_to_mixing_ratio(specific_humidities_kg_kg01):\n \"\"\"Converts each specific humidity to mixing ratio.\n\n :param specific_humidities_kg_kg01: numpy array (any shape) of specific\n humidities (kg per kg).\n :return: mixing_ratios_kg_kg01: numpy array (same shape) of mixing ratios\n (kg per kg).\n \"\"\"\n\n error_checking.assert_is_geq_numpy_array(\n specific_humidities_kg_kg01, 0., allow_nan=True\n )\n\n return specific_humidities_kg_kg01 / (1 - specific_humidities_kg_kg01)\n\n\ndef mixing_ratio_to_specific_humidity(mixing_ratios_kg_kg01):\n \"\"\"Converts each mixing ratio to specific humidity.\n\n :param mixing_ratios_kg_kg01: numpy array (any shape) of mixing ratios\n (kg per kg).\n :return: specific_humidities_kg_kg01: numpy array (same shape) of specific\n humidities (kg per kg).\n \"\"\"\n\n error_checking.assert_is_geq_numpy_array(\n mixing_ratios_kg_kg01, 0., allow_nan=True\n )\n\n return mixing_ratios_kg_kg01 / (1 + mixing_ratios_kg_kg01)\n\n\ndef mixing_ratio_to_vapour_pressure(\n mixing_ratios_kg_kg01, total_pressures_pascals):\n \"\"\"Converts each mixing ratio to vapour pressure.\n\n :param mixing_ratios_kg_kg01: numpy array (any shape) of mixing ratios\n (kg per kg).\n :param total_pressures_pascals: numpy array (same shape) of total air\n pressures.\n :return: vapour_pressures_pascals: numpy array (same shape) of vapour\n pressures.\n \"\"\"\n\n error_checking.assert_is_geq_numpy_array(\n mixing_ratios_kg_kg01, 0., allow_nan=True\n )\n error_checking.assert_is_geq_numpy_array(\n total_pressures_pascals, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n total_pressures_pascals,\n exact_dimensions=numpy.array(mixing_ratios_kg_kg01.shape, dtype=int)\n )\n\n return (\n mixing_ratios_kg_kg01 * total_pressures_pascals /\n (EPSILON + mixing_ratios_kg_kg01)\n )\n\n\ndef vapour_pressure_to_mixing_ratio(\n vapour_pressures_pascals, total_pressures_pascals):\n \"\"\"Converts each vapour pressure to mixing ratio.\n\n :param vapour_pressures_pascals: numpy array (any shape) of vapour\n pressures.\n :param total_pressures_pascals: numpy array (same shape) of total air\n pressures.\n :return: mixing_ratios_kg_kg01: numpy array (same shape) of mixing ratios\n (kg per kg).\n \"\"\"\n\n error_checking.assert_is_geq_numpy_array(\n vapour_pressures_pascals, 0., allow_nan=True\n )\n error_checking.assert_is_geq_numpy_array(\n total_pressures_pascals, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n total_pressures_pascals,\n exact_dimensions=numpy.array(vapour_pressures_pascals.shape, dtype=int)\n )\n error_checking.assert_is_geq_numpy_array(\n total_pressures_pascals - vapour_pressures_pascals, 0., allow_nan=True\n )\n\n denominators = total_pressures_pascals - vapour_pressures_pascals\n mixing_ratios_kg_kg01 = EPSILON * vapour_pressures_pascals / denominators\n mixing_ratios_kg_kg01[denominators <= 0] = 0\n\n return mixing_ratios_kg_kg01\n\n\ndef 
vapour_pressure_to_dewpoint(vapour_pressures_pascals, temperatures_kelvins):\n \"\"\"Converts each vapour pressure to dewpoint.\n\n Source:\n https://content.meteoblue.com/hu/specifications/weather-variables/humidity\n\n :param vapour_pressures_pascals: numpy array (any shape) of vapour\n pressures.\n :param temperatures_kelvins: numpy array (same shape) of temperatures.\n :return: dewpoints_kelvins: numpy array (same shape) of dewpoints.\n \"\"\"\n\n error_checking.assert_is_geq_numpy_array(\n vapour_pressures_pascals, 0., allow_nan=True\n )\n error_checking.assert_is_geq_numpy_array(\n temperatures_kelvins, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n temperatures_kelvins,\n exact_dimensions=numpy.array(vapour_pressures_pascals.shape, dtype=int)\n )\n\n logarithms = numpy.log(\n vapour_pressures_pascals / BASE_VAPOUR_PRESSURE_PASCALS\n )\n\n temperatures_deg_c = temperature_conv.kelvins_to_celsius(\n temperatures_kelvins\n )\n\n numerator_coeffs = numpy.full(\n temperatures_deg_c.shape, MAGNUS_DENOMINATOR_COEFF_WATER\n )\n numerator_coeffs[temperatures_deg_c < 0] = MAGNUS_DENOMINATOR_COEFF_ICE\n numerators = numerator_coeffs * logarithms\n\n denominator_coeffs = numpy.full(\n temperatures_deg_c.shape, MAGNUS_NUMERATOR_COEFF_WATER\n )\n denominator_coeffs[temperatures_deg_c < 0] = MAGNUS_NUMERATOR_COEFF_ICE\n denominators = denominator_coeffs - logarithms\n\n dewpoints_deg_c = numerators / denominators\n dewpoints_deg_c[numpy.invert(numpy.isfinite(dewpoints_deg_c))] = (\n -temperature_conv.CELSIUS_TO_KELVINS_ADDEND\n )\n\n dewpoints_kelvins = temperature_conv.celsius_to_kelvins(dewpoints_deg_c)\n dewpoints_kelvins[dewpoints_deg_c + numerator_coeffs < 0] = 0.\n\n return dewpoints_kelvins\n\n\ndef dewpoint_to_vapour_pressure(dewpoints_kelvins, temperatures_kelvins,\n total_pressures_pascals):\n \"\"\"Converts each dewpoint to vapour pressure.\n\n Source:\n https://content.meteoblue.com/hu/specifications/weather-variables/humidity\n\n :param dewpoints_kelvins: numpy array (any shape) of dewpoints.\n :param temperatures_kelvins: numpy array (same shape) of temperatures.\n :param total_pressures_pascals: numpy array (same shape) of total air\n pressures.\n :return: vapour_pressures_pascals: numpy array (same shape) of vapour\n pressures.\n \"\"\"\n\n error_checking.assert_is_geq_numpy_array(\n dewpoints_kelvins, 0., allow_nan=True\n )\n error_checking.assert_is_geq_numpy_array(\n temperatures_kelvins, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n temperatures_kelvins,\n exact_dimensions=numpy.array(dewpoints_kelvins.shape, dtype=int)\n )\n error_checking.assert_is_geq_numpy_array(\n total_pressures_pascals, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n total_pressures_pascals,\n exact_dimensions=numpy.array(dewpoints_kelvins.shape, dtype=int)\n )\n\n dewpoints_deg_c = temperature_conv.kelvins_to_celsius(dewpoints_kelvins)\n temperatures_deg_c = temperature_conv.kelvins_to_celsius(\n temperatures_kelvins\n )\n\n numerator_coeffs = numpy.full(\n temperatures_deg_c.shape, MAGNUS_NUMERATOR_COEFF_WATER\n )\n numerator_coeffs[temperatures_deg_c < 0] = MAGNUS_NUMERATOR_COEFF_ICE\n numerators = numerator_coeffs * dewpoints_deg_c\n\n denominator_coeffs = numpy.full(\n temperatures_deg_c.shape, MAGNUS_DENOMINATOR_COEFF_WATER\n )\n denominator_coeffs[temperatures_deg_c < 0] = MAGNUS_DENOMINATOR_COEFF_ICE\n denominators = denominator_coeffs + dewpoints_deg_c\n\n vapour_pressures_pascals = (\n BASE_VAPOUR_PRESSURE_PASCALS * numpy.exp(numerators / 
denominators)\n )\n\n vapour_pressures_pascals[\n numpy.invert(numpy.isfinite(vapour_pressures_pascals))\n ] = 0.\n\n vapour_pressures_pascals[denominators <= 0] = 0.\n return numpy.minimum(vapour_pressures_pascals, total_pressures_pascals)\n\n\ndef specific_humidity_to_dewpoint(\n specific_humidities_kg_kg01, temperatures_kelvins,\n total_pressures_pascals):\n \"\"\"Converts each specific humidity to dewpoint.\n\n :param specific_humidities_kg_kg01: numpy array (any shape) of specific\n humidities (kg per kg).\n :param temperatures_kelvins: numpy array (same shape) of temperatures.\n :param total_pressures_pascals: numpy array (same shape) of total air\n pressures.\n :return: dewpoints_kelvins: numpy array (same shape) of dewpoints.\n \"\"\"\n\n mixing_ratios_kg_kg01 = specific_humidity_to_mixing_ratio(\n specific_humidities_kg_kg01\n )\n vapour_pressures_pascals = mixing_ratio_to_vapour_pressure(\n mixing_ratios_kg_kg01=mixing_ratios_kg_kg01,\n total_pressures_pascals=total_pressures_pascals\n )\n\n return vapour_pressure_to_dewpoint(\n vapour_pressures_pascals=vapour_pressures_pascals,\n temperatures_kelvins=temperatures_kelvins\n )\n\n\ndef dewpoint_to_specific_humidity(\n dewpoints_kelvins, temperatures_kelvins, total_pressures_pascals):\n \"\"\"Converts each dewpoint to specific humidity.\n\n :param dewpoints_kelvins: numpy array (any shape) of dewpoints.\n :param temperatures_kelvins: numpy array (same shape) of temperatures.\n :param total_pressures_pascals: numpy array (same shape) of total air\n pressures.\n :return: specific_humidities_kg_kg01: numpy array (same shape) of specific\n humidities (kg per kg).\n \"\"\"\n\n vapour_pressures_pascals = dewpoint_to_vapour_pressure(\n dewpoints_kelvins=dewpoints_kelvins,\n temperatures_kelvins=temperatures_kelvins,\n total_pressures_pascals=total_pressures_pascals\n )\n mixing_ratios_kg_kg01 = vapour_pressure_to_mixing_ratio(\n vapour_pressures_pascals=vapour_pressures_pascals,\n total_pressures_pascals=total_pressures_pascals\n )\n\n return mixing_ratio_to_specific_humidity(mixing_ratios_kg_kg01)\n\n\ndef relative_humidity_to_dewpoint(\n relative_humidities, temperatures_kelvins, total_pressures_pascals):\n \"\"\"Converts each relative humidity to dewpoint.\n\n :param relative_humidities: numpy array (any shape) of relative humidities\n (unitless).\n :param temperatures_kelvins: numpy array (same shape) of temperatures.\n :param total_pressures_pascals: numpy array (same shape) of total air\n pressures.\n :return: dewpoints_kelvins: numpy array (same shape) of dewpoints.\n \"\"\"\n\n error_checking.assert_is_geq_numpy_array(\n relative_humidities, 0., allow_nan=True\n )\n error_checking.assert_is_geq_numpy_array(\n temperatures_kelvins, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n temperatures_kelvins,\n exact_dimensions=numpy.array(relative_humidities.shape, dtype=int)\n )\n error_checking.assert_is_geq_numpy_array(\n total_pressures_pascals, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n total_pressures_pascals,\n exact_dimensions=numpy.array(relative_humidities.shape, dtype=int)\n )\n\n saturated_vapour_pressures_pascals = dewpoint_to_vapour_pressure(\n dewpoints_kelvins=temperatures_kelvins,\n temperatures_kelvins=temperatures_kelvins,\n total_pressures_pascals=total_pressures_pascals\n )\n saturated_mixing_ratios_kg_kg01 = vapour_pressure_to_mixing_ratio(\n vapour_pressures_pascals=saturated_vapour_pressures_pascals,\n total_pressures_pascals=total_pressures_pascals\n )\n 
vapour_pressures_pascals = mixing_ratio_to_vapour_pressure(\n mixing_ratios_kg_kg01=\n relative_humidities * saturated_mixing_ratios_kg_kg01,\n total_pressures_pascals=total_pressures_pascals\n )\n\n return vapour_pressure_to_dewpoint(\n vapour_pressures_pascals=vapour_pressures_pascals,\n temperatures_kelvins=temperatures_kelvins\n )\n\n\ndef dewpoint_to_relative_humidity(\n dewpoints_kelvins, temperatures_kelvins, total_pressures_pascals):\n \"\"\"Converts each dewpoint to specific humidity.\n\n :param dewpoints_kelvins: numpy array (any shape) of dewpoints.\n :param temperatures_kelvins: numpy array (same shape) of temperatures.\n :param total_pressures_pascals: numpy array (same shape) of total air\n pressures.\n :return: relative_humidities: numpy array (same shape) of relative\n humidities (unitless).\n \"\"\"\n\n error_checking.assert_is_geq_numpy_array(\n dewpoints_kelvins, 0., allow_nan=True\n )\n error_checking.assert_is_geq_numpy_array(\n temperatures_kelvins, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n temperatures_kelvins,\n exact_dimensions=numpy.array(dewpoints_kelvins.shape, dtype=int)\n )\n error_checking.assert_is_geq_numpy_array(\n total_pressures_pascals, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n total_pressures_pascals,\n exact_dimensions=numpy.array(dewpoints_kelvins.shape, dtype=int)\n )\n\n vapour_pressures_pascals = dewpoint_to_vapour_pressure(\n dewpoints_kelvins=dewpoints_kelvins,\n temperatures_kelvins=temperatures_kelvins,\n total_pressures_pascals=total_pressures_pascals\n )\n mixing_ratios_kg_kg01 = vapour_pressure_to_mixing_ratio(\n vapour_pressures_pascals=vapour_pressures_pascals,\n total_pressures_pascals=total_pressures_pascals\n )\n saturated_vapour_pressures_pascals = dewpoint_to_vapour_pressure(\n dewpoints_kelvins=temperatures_kelvins,\n temperatures_kelvins=temperatures_kelvins,\n total_pressures_pascals=total_pressures_pascals\n )\n saturated_mixing_ratios_kg_kg01 = vapour_pressure_to_mixing_ratio(\n vapour_pressures_pascals=saturated_vapour_pressures_pascals,\n total_pressures_pascals=total_pressures_pascals\n )\n\n relative_humidities = (\n mixing_ratios_kg_kg01 / saturated_mixing_ratios_kg_kg01\n )\n relative_humidities[numpy.invert(numpy.isfinite(relative_humidities))] = 0.\n\n return relative_humidities\n\n\ndef temperature_to_virtual_temperature(\n temperatures_kelvins, total_pressures_pascals,\n vapour_pressures_pascals):\n \"\"\"Converts each temperature to virtual temperature.\n\n :param temperatures_kelvins: numpy array (any shape) of temperatures.\n :param total_pressures_pascals: numpy array (same shape) of total air\n pressures.\n :param vapour_pressures_pascals: numpy array (same shape) of vapour\n pressures.\n :return: virtual_temperatures_kelvins: numpy array (same shape) of virtual\n temperatures.\n \"\"\"\n\n error_checking.assert_is_geq_numpy_array(\n temperatures_kelvins, 0., allow_nan=True\n )\n error_checking.assert_is_geq_numpy_array(\n total_pressures_pascals, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n total_pressures_pascals,\n exact_dimensions=numpy.array(temperatures_kelvins.shape, dtype=int)\n )\n error_checking.assert_is_geq_numpy_array(\n vapour_pressures_pascals, 0., allow_nan=True\n )\n error_checking.assert_is_numpy_array(\n vapour_pressures_pascals,\n exact_dimensions=numpy.array(temperatures_kelvins.shape, dtype=int)\n )\n error_checking.assert_is_geq_numpy_array(\n total_pressures_pascals - vapour_pressures_pascals, 0., allow_nan=True\n )\n\n 
denominator_values = 1. - (\n (vapour_pressures_pascals / total_pressures_pascals) * (1. - EPSILON)\n )\n\n virtual_temperatures_kelvins = temperatures_kelvins / denominator_values\n virtual_temperatures_kelvins[total_pressures_pascals == 0] = (\n temperatures_kelvins[total_pressures_pascals == 0]\n )\n\n return virtual_temperatures_kelvins\n",
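The dewpoint_to_vapour_pressure routine above uses a Magnus-type relation with separate coefficients over water and ice. A standalone sketch of the over-water branch is given below, with the constants copied from the module; the ice branch and the capping against total pressure are omitted for brevity.

import numpy

BASE_VAPOUR_PRESSURE_PASCALS = 610.78
MAGNUS_NUMERATOR_COEFF_WATER = 17.08085
MAGNUS_DENOMINATOR_COEFF_WATER = 234.175


def dewpoint_celsius_to_vapour_pressure_pascals(dewpoints_deg_c):
    """Vapour pressure (Pa) from dewpoint (deg C), over liquid water only."""
    dewpoints_deg_c = numpy.asarray(dewpoints_deg_c, dtype=float)
    return BASE_VAPOUR_PRESSURE_PASCALS * numpy.exp(
        MAGNUS_NUMERATOR_COEFF_WATER * dewpoints_deg_c /
        (MAGNUS_DENOMINATOR_COEFF_WATER + dewpoints_deg_c)
    )


# A dewpoint of 20 deg C gives roughly 2.3 kPa of vapour pressure.
print(dewpoint_celsius_to_vapour_pressure_pascals([0., 10., 20.]))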
"from gridrad_tools import gridrad\nfrom matplotlib import patheffects\nimport matplotlib.pyplot as plt \nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy as np \nimport xarray as xr\nimport netCDF4 \nimport pandas as pd\nimport cmocean\nfrom tqdm import tqdm \n\nimport warnings \nwarnings.filterwarnings('ignore')\n\nimport argparse\n\nLEARNING_EXAMPLE_FILE_ARG_NAME = 'learning_example_file'\nSTORM_IMAGE_DIR_ARG_NAME = 'storm_image_dir'\nLEVEL_TO_PLOT_ARG_NAME = 'level'\nLINKAGE_DIR_ARG_NAME = 'linkage_dir'\nSEGMOTION_DIR_ARG_NAME = 'seg_dir'\nGRIDRAD_DIR_ARG_NAME = 'rad_dir'\nNEXRAD_LOC_ARG_NAME = 'nexrad_loc_csv'\nSAVE_DIR_ARG_NAME = 'save_dir'\nSAVEFIG_BOOL_ARG_NAME = 'savefig'\nALTER_FILES_BOOL_ARG_NAME = 'alterfiles'\n\nLEARNING_EXAMPLE_FILE_HELP_STRING = (\n 'file you wish to verify')\n\nSTORM_IMAGE_DIR_HELP_STRING = (\n 'directory path where storm images are.')\n\nLEVEL_TO_PLOT_HELP_STRING = (\n 'Which height of radar data to plot')\n\nLINKAGE_DIR_HELP_STRING = (\n 'directory path where linked files are')\n\nSEGMOTION_DIR_HELP_STRING = (\n 'directory path where segmotion tracking files are')\n\nGRIDRAD_DIR_HELP_STRING = (\n 'directory path where gridded gridrad files are')\n\nNEXRAD_LOC_HELP_STRING = (\n 'Location of nexrad locations csv file')\n\nSAVE_DIR_HELP_STRING = (\n 'Path of where to save the .png images')\n\nSAVEFIG_BOOL_HELP_STRING = (\n 'Turn on or off the saving of the .pngs')\n\nALTER_FILES_BOOL_HELP_STRING = (\n 'Turn on off the adding of extra metadata')\n\nINPUT_ARG_PARSER = argparse.ArgumentParser()\n\nINPUT_ARG_PARSER.add_argument(\n '--' + LEARNING_EXAMPLE_FILE_ARG_NAME, type=str, required=True,\n default='', help=LEARNING_EXAMPLE_FILE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + STORM_IMAGE_DIR_ARG_NAME, type=str, required=True,\n default='',\n help=STORM_IMAGE_DIR_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + LEVEL_TO_PLOT_ARG_NAME, type=str,required=False,\n default='04000_metres_agl', help=LEVEL_TO_PLOT_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + LINKAGE_DIR_ARG_NAME, type=str, required=True,\n default='',\n help=LINKAGE_DIR_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + SEGMOTION_DIR_ARG_NAME, type=str, required=True,\n default='',\n help=SEGMOTION_DIR_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + GRIDRAD_DIR_ARG_NAME, type=str,required=True,\n default='',\n help=GRIDRAD_DIR_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + NEXRAD_LOC_ARG_NAME, type=str, required=True,\n help=NEXRAD_LOC_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + SAVE_DIR_ARG_NAME, type=str, required=True,\n default='', help=SAVE_DIR_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + SAVEFIG_BOOL_ARG_NAME, type=bool, required=False,default=True,\n help=SAVEFIG_BOOL_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + ALTER_FILES_BOOL_ARG_NAME, type=bool, required=False,\n default=False, help=ALTER_FILES_BOOL_HELP_STRING)\n\n\n#plot parameters that I personally like, feel free to make these your own.\nmatplotlib.rcParams['axes.facecolor'] = [0.9,0.9,0.9]\nmatplotlib.rcParams['axes.labelsize'] = 14\nmatplotlib.rcParams['axes.titlesize'] = 14\nmatplotlib.rcParams['xtick.labelsize'] = 10\nmatplotlib.rcParams['ytick.labelsize'] = 10\nmatplotlib.rcParams['legend.fontsize'] = 12\nmatplotlib.rcParams['legend.facecolor'] = 'w'\nmatplotlib.rcParams['savefig.transparent'] = False\n\n\npe = [patheffects.withStroke(linewidth=3,\n foreground=\"w\")]\ndef padder(x):\n if x< 10:\n x = '0' + str(x)\n else:\n x = str(x)\n 
return x\n\ndef padder2(x):\n if x< 10:\n x = '00' + str(x)\n elif x < 100:\n x = '0' + str(x)\n else:\n x = str(x)\n return x\n\n\ndef validate_examples(input_example_filename,storm_image_dir,level,linkage_dir,seg_dir,rad_dir,\n nexrad_loc_csv,save_dir,savefig,alterfiles):\n \n \"\"\" This method is intened to buld trust in the user running Dr. Lagerquist's code. \n What this does is that it will go ahead and plot simple maps to show where the reported tor is.\n If you choose to turn on the additional functionality, it will add the following variables to the input_examples_*** files \n \n 1) Distance to nearest NEXRAD (in km), this is the distance from storm centroid to nearest radar \n 2) Time difference (in seconds), this is the time difference between the tornado report and the radar scan. \n 3) Distance between storm and report (in km), this is the distance between the storm centroid and the tornado report.\"\"\"\n \n #load nexrad loc dataframe\n df_nexrad = pd.read_csv(nexrad_loc_csv,index_col=0)\n \n #load example image file \n ds_images = xr.open_dataset(input_example_filename)\n dtime = pd.to_datetime(np.asarray(netCDF4.num2date(ds_images.storm_times_unix_sec,'seconds since 1970-01-01'),dtype=str))\n #assign dtime dimension \n ds_images['dtime'] = xr.DataArray(dtime.to_numpy(),coords=None,dims=ds_images.storm_times_unix_sec.dims)\n \n if alterfiles:\n #store index for easy rebuilding after dropping things as we go \n da = xr.DataArray(data=np.asarray(ds_images.storm_object.values,dtype=int),dims=[\"storm_object\"],attrs=dict(description=\"boring index for rebuilding\",units=\"none\",))\n ds_images['lame_index'] = da\n #preallocate arrays\n time_diff = np.ones(len(ds_images.storm_object.values))*-9999\n dist_to_nexrad_tor = np.ones(len(ds_images.storm_object.values))*-9999\n dist_to_report = np.ones(len(ds_images.storm_object.values))*-9999\n dist_to_nexrad_storm = np.ones(len(ds_images.storm_object.values))*-9999\n #can add grid lat lon if we want later (RJC 15 Jun 2021)\n \n\n #build date string \n #pick start time\n s_time = dtime.min()\n year =padder(s_time.year)\n month = padder(s_time.month)\n day = padder(s_time.day)\n ymd = year+month+day\n \n #load tornado linkage file\n from gewittergefahr.gg_utils import linkage\n #sometimes the file is stored in the previous days dir, so we need this try/except statment \n try:\n this_storm_to_events_table,_,this_tornado_table = linkage.read_linkage_file(linkage_dir+year+'/storm_to_tornadoes_'+ymd+'.p')\n except OSError as e:\n print('no storm/tornado file in the current day, looking one day back')\n #rebuild build date string \n time_alter = pd.to_datetime(s_time) - pd.Timedelta(days=1)\n ymd_alter = time_alter.strftime(\"%Y%m%d\")\n year_alter = time_alter.strftime(\"%Y\")\n file_str = '/storm_to_tornadoes_'+ymd_alter+'.p'\n storm_to_events_table_file = linkage_dir + year_alter + file_str\n print('newfilename: {}'.format(storm_to_events_table_file))\n this_storm_to_events_table,_,this_tornado_table = linkage.read_linkage_file(storm_to_events_table_file)\n \n \n #subset the images to just where the label is 1 (i.e., there was a tornado from LSR)\n #note the target label is currently hard coded. 
\n ds_images_sub = ds_images.where(ds_images.target_matrix[:,2] >= 1).dropna(dim='storm_object')\n \n \n \n #loop over all storms \n unique_storm_strings = np.unique(ds_images_sub.full_storm_id_strings)\n iter_count = -1 \n for storm_string in tqdm(unique_storm_strings):\n #drop all other images but current storm of interest\n ds_images_sub_storm = ds_images_sub.where(ds_images_sub.full_storm_id_strings==storm_string).dropna(dim='storm_object')\n #drop all other storm saved data from the table \n this_storm = this_storm_to_events_table.where(this_storm_to_events_table.full_id_string == storm_string.decode(\"utf-8\")).dropna()\n \n if len(this_storm) == 0:\n print('no storm id in the storm event table, looking in the previous date')\n time_alter = dtime.min() - pd.Timedelta(days=1)\n ymd_alter = time_alter.strftime(\"%Y%m%d\")\n year_alter = time_alter.strftime(\"%Y\")\n this_storm_to_events_table,_,this_tornado_table = linkage.read_linkage_file(linkage_dir+year_alter+'/storm_to_tornadoes_'+ymd_alter+'.p')\n this_storm = this_storm_to_events_table.where(this_storm_to_events_table.full_id_string == storm_string.decode(\"utf-8\")).dropna()\n if len(this_storm) == 0:\n print('CANT FIND STORM ID') \n \n #get current dtime from storm table \n dtime_this_storm = pd.to_datetime(np.asarray(netCDF4.num2date(this_storm.valid_time_unix_sec,'seconds since 1970-01-01'),dtype=str))\n this_storm['dtime'] = dtime_this_storm\n #sort times in the images \n times = ds_images_sub_storm.dtime.values\n times.sort()\n #loop over all times there are images \n for time in times:\n iter_count += 1 #this is for saving purposes. Might change the save strings to be more informative RJC 14/06/21 \n \n #select just one time from the images & table\n ds_images_sub_storm_time = ds_images_sub_storm.where(ds_images_sub_storm.dtime == time).dropna(dim='storm_object')\n this_storm_time = this_storm.where(this_storm.dtime == time).dropna()\n \n #get segmotion tracking to get the storm polygon \n file_str = 'storm-tracking_segmotion_'+pd.to_datetime(time).strftime(\"%Y-%m-%d-%H%M%S\") + '.p'\n tracking_file = seg_dir + year + '/' + ymd + '/scale_314159265m2/' + file_str\n from gewittergefahr.gg_io import storm_tracking_io\n \n #sometimes the file is stored in the previous days dir, so we need this try/except statment \n try:\n tracking_all = storm_tracking_io.read_file(tracking_file)\n except OSError as e:\n print('no segmotion file in current dir, looking one dir back')\n #rebuild build date string \n time_alter = pd.to_datetime(time) - pd.Timedelta(days=1)\n ymd_alter = time_alter.strftime(\"%Y%m%d\")\n year_alter = time_alter.strftime(\"%Y\")\n file_str = 'storm-tracking_segmotion_'+pd.to_datetime(time).strftime(\"%Y-%m-%d-%H%M%S\") + '.p'\n tracking_file = seg_dir + year + '/' + ymd_alter + '/scale_314159265m2/' + file_str\n print('newfilename: {}'.format(tracking_file))\n tracking_all = storm_tracking_io.read_file(tracking_file)\n \n dtime_tracking = pd.to_datetime(np.asarray(netCDF4.num2date(tracking_all.valid_time_unix_sec,'seconds since 1970-01-01'),dtype=str))\n tracking_all['dtime'] = dtime_tracking\n tracking_storm = tracking_all.where(tracking_all.full_id_string == storm_string.decode(\"utf-8\")).dropna()\n tracking_storm_time = tracking_storm.where(tracking_storm.dtime == time).dropna()\n \n #get raw radar (this will add in spatial (lat/lon) info)\n # file_str = 'nexrad_3d_v4_2_'+pd.to_datetime(time).strftime(\"%Y%m%dT%H%M%S\") + 'Z.nc'\n file_str = 'nexrad_3d_4_1_'+pd.to_datetime(time).strftime(\"%Y%m%dT%H%M%S\") + 
'Z.nc'\n radar_file = rad_dir + year + '/' + ymd + '/' + file_str\n gr = gridrad()\n #sometimes the file is stored in the previous days dir, so we need this try/except statment \n try:\n gr.ds = xr.open_dataset(radar_file)\n #if you use the new gridrad files, use this \n #gr = gridrad(filename=radar_file,filter=True,toxr=True)\n except OSError as e:\n print('no gridrad file in current dir, looking one dir back')\n file_str = 'nexrad_3d_4_1_'+pd.to_datetime(time).strftime(\"%Y%m%dT%H%M%S\") + 'Z.nc'\n radar_file = rad_dir + year_alter + '/' + ymd_alter + '/' + file_str\n print('newfilename: {}'.format(radar_file))\n gr.ds = xr.open_dataset(radar_file)\n #if you use the new gridrad files, use this \n #gr = gridrad(filename=radar_file,filter=True,toxr=True)\n \n #subset to just the box around the storm centroid \n x,y = np.meshgrid(gr.ds.Longitude.values,gr.ds.Latitude.values)\n index_mat = np.arange(0,gr.ds.Longitude.shape[0]*gr.ds.Latitude.shape[0]).reshape([gr.ds.Longitude.shape[0],gr.ds.Latitude.shape[0]])\n da = xr.DataArray(data=index_mat,dims=['Longitude','Latitude'])\n gr.ds['index_mat'] = da\n print(tracking_storm_time.centroid_longitude_deg.values,tracking_storm_time.centroid_latitude_deg.values)\n closest = gr.ds.sel(Longitude=tracking_storm_time.centroid_longitude_deg.values,Latitude=tracking_storm_time.centroid_latitude_deg.values,method='nearest')\n closest = closest.squeeze()\n i_x,i_y = np.unravel_index(closest.index_mat.values,[gr.ds.Longitude.shape[0],gr.ds.Latitude.shape[0]])\n\n j = 24 #number of gridpoints in each dir (24 will be 48 total)\n h = 3 #4 km index\n \n #check to see if the storm is near the edge of the gridrad domain. if they are warn the user. Do x index first \n if (i_x < 24):\n print('WARNING: Near min x_edge of gridrad. Defulting to smallest index')\n i_x_min = 0 \n i_x_max = i_x + j \n elif ((i_x + j) > gr.ds.Longitude.shape[0]):\n print('WARNING: Near max x_edge of gridrad. Defulting to largest index')\n i_x_min = i_x - j \n i_x_max = gr.ds.Longitude.shape[0] \n else:\n i_x_min = i_x - j \n i_x_max = i_x + j \n \n #check to see if the storm is near the edge of the gridrad domain. if they are warn the user. Do y index\n if (i_y < 24):\n print('WARNING: Near min y_edge of gridrad. Defulting to smallest index')\n i_y_min = 0 \n i_y_max = i_y + j \n elif ((i_y + j) > gr.ds.Latitude.shape[0]):\n print('WARNING: Near max y_edge of gridrad. 
Defulting to largest index')\n i_y_min = i_y - j \n i_y_max = gr.ds.Latitude.shape[0]\n else:\n i_y_min = i_y - j \n i_y_max = i_y + j \n \n print(i_y_min,i_y_max) \n boxds = gr.ds.sel(Longitude=gr.ds.Longitude[i_x_min:i_x_max],Latitude=gr.ds.Latitude[i_y_min:i_y_max])\n\n \n #extract radar time from file \n# radar_time = pd.to_datetime(np.asarray(netCDF4.num2date(boxds.time.values[0],'seconds since 2001-01-01 00:00:00'),dtype='str'))\n radar_time = boxds.time.values[0]\n print(radar_time)\n\n #cut all NEXRAD locs to just ones in the box \n df_nexrad_adj = df_nexrad.where(df_nexrad.lon >= boxds.Longitude.values.min())\n df_nexrad_adj = df_nexrad_adj.where(df_nexrad_adj.lon <= boxds.Longitude.values.max())\n df_nexrad_adj = df_nexrad_adj.where(df_nexrad_adj.lat >= boxds.Latitude.values.min())\n df_nexrad_adj = df_nexrad_adj.where(df_nexrad_adj.lat <= boxds.Latitude.values.max())\n \n #grab the tornado report info \n this_tornado = this_tornado_table.where(this_tornado_table.tornado_id_string == this_storm_time.tornado_id_strings.values[0][0]).dropna()\n tor_lon = this_tornado.iloc[0].longitude_deg\n tor_lat = this_tornado.iloc[0].latitude_deg\n tor_time = pd.to_datetime(np.asarray(netCDF4.num2date(this_tornado.iloc[0].unix_time_sec,'seconds since 1970-01-01 00:00:00'),dtype='str'))\n\n #determine closest NEXRAD to TOR \n from pyproj import Proj\n p = Proj(proj='aeqd', ellps='WGS84', datum='WGS84', lat_0=tor_lat, lon_0=tor_lon)\n x,y = p(df_nexrad.lon.values,df_nexrad.lat.values)\n R = np.sqrt(x**2 + y**2)/1000\n closest_radar = np.argmin(R)\n closest_distance = np.min(R)\n \n #determine distance from centroid to radar and tor \n p = Proj(proj='aeqd', ellps='WGS84', datum='WGS84', lon_0=tracking_storm_time.centroid_longitude_deg.values[0],lat_0=tracking_storm_time.centroid_latitude_deg.values[0])\n x,y = p(df_nexrad.lon.values,df_nexrad.lat.values)\n R = np.sqrt(x**2 + y**2)/1000\n closest_radar_storm = np.argmin(R)\n closest_distance_storm = np.min(R)\n \n x,y = p(tor_lon,tor_lat)\n R = np.sqrt(x**2 + y**2)/1000\n tor_storm_dist = np.copy(R)\n\n #find range rings \n p = Proj(proj='aeqd', ellps='WGS84', datum='WGS84', lon_0=df_nexrad.lon.values[closest_radar],lat_0 = df_nexrad.lat.values[closest_radar])\n x = np.linspace(df_nexrad.lon.values[closest_radar]-5,df_nexrad.lon.values[closest_radar]+5,100)\n y = np.linspace(df_nexrad.lat.values[closest_radar]-5,df_nexrad.lat.values[closest_radar]+5,100)\n x,y = np.meshgrid(x,y)\n X,Y = p(x,y)\n R = np.sqrt(X**2 + Y**2)/1000\n\n #extract polygon \n polygon1 = tracking_storm_time.polygon_object_latlng_deg.values[0]\n \n if alterfiles:\n specific_index = ds_images_sub_storm_time.lame_index.astype(int).values[0]\n time_diff[specific_index] = (tor_time - radar_time).total_seconds() \n dist_to_nexrad_tor[specific_index] = closest_distance\n dist_to_nexrad_storm[specific_index] = closest_distance_storm\n dist_to_report[specific_index] = tor_storm_dist\n \n if savefig:\n from pathlib import Path\n Path(save_dir + 'figs/' + input_example_filename[-26:-3] + '/').mkdir(parents=True, exist_ok=True)\n #plot it up \n fig,axes = plt.subplots(2,4,figsize=(15,7.5))\n\n fig.set_facecolor('w')\n ax = axes[0,0]\n #axis one is the reflecitivty \n pm = ax.pcolormesh(boxds.Longitude,boxds.Latitude,boxds.ZH.values[0,h,:,:],cmap='Spectral_r')\n ax.plot(tracking_storm_time.centroid_longitude_deg.values,tracking_storm_time.centroid_latitude_deg.values,'*w',ms=10,markeredgecolor='k')\n ax.plot(*polygon1.exterior.xy,ls='--',color='k',path_effects=pe)\n 
plt.colorbar(pm,ax=ax)\n ax.set_title('Z')\n\n\n ax.plot(tor_lon,tor_lat,'vw',ms=10,markeredgecolor='k',markeredgewidth=2)\n ax.plot(df_nexrad_adj.lon,df_nexrad_adj.lat,'o')\n ax.plot(df_nexrad.lon[closest_radar],df_nexrad.lat[closest_radar],'ow',ms=10,markeredgecolor='k',markeredgewidth=2)\n CS = ax.contour(x,y,R,levels=[50,75,100,125,150],colors='k')\n plt.setp(CS.collections, path_effects=pe)\n ax.set_xlim([boxds.Longitude.values.min(),boxds.Longitude.values.max()])\n ax.set_ylim([boxds.Latitude.values.min(),boxds.Latitude.values.max()])\n\n ax = axes[0,1]\n #axis 2 is the Spectrum Width \n pm = ax.pcolormesh(boxds.Longitude,boxds.Latitude,boxds.SW.values[0,h,:,:],cmap='inferno',vmin=0,vmax=6)\n ax.plot(tracking_storm_time.centroid_longitude_deg.values,tracking_storm_time.centroid_latitude_deg.values,'*w',ms=10,markeredgecolor='k')\n ax.plot(*polygon1.exterior.xy,ls='--',color='k',path_effects=pe)\n ax.plot(tor_lon,tor_lat,'vw',ms=10,markeredgecolor='k',markeredgewidth=2)\n ax.plot(df_nexrad.lon[closest_radar],df_nexrad.lat[closest_radar],'ow',ms=10,markeredgecolor='k',markeredgewidth=2)\n CS = ax.contour(x,y,R,levels=[50,75,100,125,150],colors='k')\n plt.setp(CS.collections, path_effects=pe)\n ax.set_xlim([boxds.Longitude.values.min(),boxds.Longitude.values.max()])\n ax.set_ylim([boxds.Latitude.values.min(),boxds.Latitude.values.max()])\n plt.colorbar(pm,ax=ax)\n ax.set_title('SW')\n\n ax = axes[0,2]\n #axis 3 is the vorticity \n pm = ax.pcolormesh(boxds.Longitude,boxds.Latitude,boxds.VOR.values[0,h,:,:],cmap='seismic',vmin=-0.003,vmax=0.003)\n ax.plot(tracking_storm_time.centroid_longitude_deg.values,tracking_storm_time.centroid_latitude_deg.values,'*w',ms=10,markeredgecolor='k')\n ax.plot(*polygon1.exterior.xy,ls='--',color='k',path_effects=pe)\n ax.plot(tor_lon,tor_lat,'vw',ms=10,markeredgecolor='k',markeredgewidth=2)\n ax.plot(df_nexrad.lon[closest_radar],df_nexrad.lat[closest_radar],'ow',ms=10,markeredgecolor='k',markeredgewidth=2)\n CS = ax.contour(x,y,R,levels=[50,75,100,125,150],colors='k')\n plt.setp(CS.collections, path_effects=pe)\n ax.set_xlim([boxds.Longitude.values.min(),boxds.Longitude.values.max()])\n ax.set_ylim([boxds.Latitude.values.min(),boxds.Latitude.values.max()])\n plt.colorbar(pm,ax=ax)\n ax.set_title('VOR')\n\n ax = axes[0,3]\n #axis 4 is the divergence \n pm = ax.pcolormesh(boxds.Longitude,boxds.Latitude,boxds.DIV.values[0,h,:,:],cmap=cmocean.cm.balance,vmin=-0.003,vmax=0.003)\n ax.plot(tracking_storm_time.centroid_longitude_deg.values,tracking_storm_time.centroid_latitude_deg.values,'*w',ms=10,markeredgecolor='k')\n ax.plot(*polygon1.exterior.xy,ls='--',color='k',path_effects=pe)\n ax.plot(tor_lon,tor_lat,'vw',ms=10,markeredgecolor='k',markeredgewidth=2)\n ax.plot(df_nexrad.lon[closest_radar],df_nexrad.lat[closest_radar],'ow',ms=10,markeredgecolor='k',markeredgewidth=2)\n CS = ax.contour(x,y,R,levels=[50,75,100,125,150],colors='k')\n plt.setp(CS.collections, path_effects=pe)\n ax.set_xlim([boxds.Longitude.values.min(),boxds.Longitude.values.max()])\n ax.set_ylim([boxds.Latitude.values.min(),boxds.Latitude.values.max()])\n plt.colorbar(pm,ax=ax)\n ax.set_title('DIV')\n\n ax = axes[1,0]\n #axis 5 is the differential reflectivity \n pm = ax.pcolormesh(boxds.Longitude,boxds.Latitude,boxds.ZDR.values[0,h,:,:],cmap='turbo',vmin=0,vmax=3)\n ax.plot(tracking_storm_time.centroid_longitude_deg.values,tracking_storm_time.centroid_latitude_deg.values,'*w',ms=10,markeredgecolor='k')\n ax.plot(*polygon1.exterior.xy,ls='--',color='k',path_effects=pe)\n 
ax.plot(tor_lon,tor_lat,'vw',ms=10,markeredgecolor='k',markeredgewidth=2)\n ax.plot(df_nexrad.lon[closest_radar],df_nexrad.lat[closest_radar],'ow',ms=10,markeredgecolor='k',markeredgewidth=2)\n CS = ax.contour(x,y,R,levels=[50,75,100,125,150],colors='k')\n plt.setp(CS.collections, path_effects=pe)\n ax.set_xlim([boxds.Longitude.values.min(),boxds.Longitude.values.max()])\n ax.set_ylim([boxds.Latitude.values.min(),boxds.Latitude.values.max()])\n plt.colorbar(pm,ax=ax)\n ax.set_title('ZDR')\n\n ax = axes[1,1]\n #axis 6 is the specific differential phase \n pm = ax.pcolormesh(boxds.Longitude,boxds.Latitude,boxds.KDP.values[0,h,:,:],cmap='cividis',vmin=0,vmax=3)\n ax.plot(tracking_storm_time.centroid_longitude_deg.values,tracking_storm_time.centroid_latitude_deg.values,'*w',ms=10,markeredgecolor='k')\n ax.plot(*polygon1.exterior.xy,ls='--',color='k',path_effects=pe)\n ax.plot(tor_lon,tor_lat,'vw',ms=10,markeredgecolor='k',markeredgewidth=2)\n ax.plot(df_nexrad.lon[closest_radar],df_nexrad.lat[closest_radar],'ow',ms=10,markeredgecolor='k',markeredgewidth=2)\n CS = ax.contour(x,y,R,levels=[50,75,100,125,150],colors='k')\n plt.setp(CS.collections, path_effects=pe)\n ax.set_xlim([boxds.Longitude.values.min(),boxds.Longitude.values.max()])\n ax.set_ylim([boxds.Latitude.values.min(),boxds.Latitude.values.max()])\n plt.colorbar(pm,ax=ax)\n ax.set_title('KDP')\n\n ax = axes[1,2]\n #axis 7 is the correlation coefficient \n pm = ax.pcolormesh(boxds.Longitude,boxds.Latitude,boxds.RHV.values[0,h,:,:],cmap='RdYlBu_r',vmin=0.3,vmax=1)\n storm_center, = ax.plot(tracking_storm_time.centroid_longitude_deg.values,tracking_storm_time.centroid_latitude_deg.values,'*w',ms=10,markeredgecolor='k')\n storm, = ax.plot(*polygon1.exterior.xy,ls='--',color='k',path_effects=pe)\n tor, = ax.plot(tor_lon,tor_lat,'vw',ms=10,markeredgecolor='k',markeredgewidth=2)\n radar, = ax.plot(df_nexrad.lon[closest_radar],df_nexrad.lat[closest_radar],'ow',ms=10,markeredgecolor='k',markeredgewidth=2)\n CS = ax.contour(x,y,R,levels=[50,75,100,125,150],colors='k')\n plt.setp(CS.collections, path_effects=pe)\n ax.set_xlim([boxds.Longitude.values.min(),boxds.Longitude.values.max()])\n ax.set_ylim([boxds.Latitude.values.min(),boxds.Latitude.values.max()])\n plt.colorbar(pm,ax=ax)\n ax.set_title(r'$\\rho_{hv}$')\n\n ax = axes[1,3]\n #axis 8 has the meta data printed out\n ax.legend([storm_center,storm,tor,radar],['Storm Centroid','Storm Polygon','Tornado','Closest 88D: {}km'.format(int(np.round(closest_distance)))],loc=10,fontsize=18)\n ax.axis('off')\n ax.text(-0.2,0.1,'Tor Time:{}'.format(tor_time),transform=ax.transAxes,fontsize=18)\n ax.text(-0.24,0,'Rad Time:{}'.format(pd.to_datetime(radar_time).strftime('%Y-%m-%d %H:%M:%S')),transform=ax.transAxes,fontsize=18)\n plt.tight_layout()\n savestr = save_dir + 'figs/' + input_example_filename[-26:-3] + '/' + padder2(iter_count) + '.png'\n plt.savefig(savestr,dpi=300)\n plt.close()\n \n if alterfiles:\n #For now i will just make companion files, rather than append to already exisiting file. 
\n da = xr.DataArray(data=time_diff,dims=[\"storm_object\"],attrs=dict(description=\"difference between tor time and rad time\",units=\"seconds\",))\n da = da.where(da != -9999)\n ds_images['time_diff'] = da\n da = xr.DataArray(data=dist_to_nexrad_tor,dims=[\"storm_object\"],attrs=dict(description=\"distance from the tornado to the nearest radar\",units=\"km\",))\n da = da.where(da != -9999)\n ds_images['dist_tor_to_nexrad'] = da\n da = xr.DataArray(data=dist_to_nexrad_storm,dims=[\"storm_object\"],attrs=dict(description=\"distance from the tornado to the nearest radar\",units=\"km\",))\n da = da.where(da != -9999)\n ds_images['dist_storm_to_nexrad'] = da\n da = xr.DataArray(data=dist_to_report,dims=[\"storm_object\"],attrs=dict(description=\"distance from storm centroid to tor report\",units=\"km\",))\n da = da.where(da != -9999)\n ds_images['dist_to_report'] = da\n \n #need to drop all the copies of data to save disk space. \n ds_images = ds_images.drop(['full_storm_id_strings','radar_field_names','target_names','storm_times_unix_sec',\n 'target_matrix','radar_heights_m_agl','radar_image_matrix','sounding_field_names',\n 'sounding_heights_m_agl','sounding_matrix','lame_index','dtime'])\n \n ds_images.to_netcdf(save_dir + 'data/' + input_example_filename[-26:-3] + '.nc')\n\nLEARNING_EXAMPLE_FILE_ARG_NAME = 'learning_example_file'\nSTORM_IMAGE_DIR_ARG_NAME = 'storm_image_dir'\nLEVEL_TO_PLOT_ARG_NAME = 'level'\nLINKAGE_DIR_ARG_NAME = 'linkage_dir'\nSEGMOTION_DIR_ARG_NAME = 'seg_dir'\nGRIDRAD_DIR_ARG_NAME = 'rad_dir'\nNEXRAD_LOC_ARG_NAME = 'nexrad_loc_csv'\nSAVE_DIR_ARG_NAME = 'save_dir'\nSAVEFIG_BOOL_ARG_NAME = 'savefig'\nALTER_FILES_BOOL_ARG_NAME = 'alterfiles'\n\nif __name__ == '__main__':\n INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()\n ds_images = validate_examples(input_example_filename=getattr(INPUT_ARG_OBJECT, LEARNING_EXAMPLE_FILE_ARG_NAME),\n storm_image_dir=getattr(INPUT_ARG_OBJECT, STORM_IMAGE_DIR_ARG_NAME),\n level=getattr(INPUT_ARG_OBJECT, LEVEL_TO_PLOT_ARG_NAME),\n linkage_dir=getattr(INPUT_ARG_OBJECT, LINKAGE_DIR_ARG_NAME),\n seg_dir=getattr(INPUT_ARG_OBJECT, SEGMOTION_DIR_ARG_NAME),\n rad_dir=getattr(INPUT_ARG_OBJECT, GRIDRAD_DIR_ARG_NAME),\n nexrad_loc_csv=getattr(INPUT_ARG_OBJECT, NEXRAD_LOC_ARG_NAME),\n save_dir=getattr(INPUT_ARG_OBJECT, SAVE_DIR_ARG_NAME),\n savefig=getattr(INPUT_ARG_OBJECT, SAVEFIG_BOOL_ARG_NAME),\n alterfiles=getattr(INPUT_ARG_OBJECT, ALTER_FILES_BOOL_ARG_NAME),)\n"
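The validation script above finds the nearest NEXRAD site by projecting all radar locations into an azimuthal-equidistant frame centred on the point of interest, so Euclidean distance from the origin equals great-circle distance in metres. A minimal sketch of that step follows; the site coordinates are made-up placeholders, not real NEXRAD locations.

import numpy as np
from pyproj import Proj


def nearest_site_km(point_lon, point_lat, site_lons, site_lats):
    # aeqd projection centred on the query point: (x, y) are metres from it.
    proj = Proj(proj='aeqd', ellps='WGS84', datum='WGS84',
                lon_0=point_lon, lat_0=point_lat)
    x_metres, y_metres = proj(site_lons, site_lats)
    distances_km = np.sqrt(x_metres ** 2 + y_metres ** 2) / 1000
    return int(np.argmin(distances_km)), np.min(distances_km)


site_lons = np.array([-97.3, -98.1, -95.6])  # placeholder longitudes
site_lats = np.array([35.3, 36.2, 34.8])     # placeholder latitudes
index, dist_km = nearest_site_km(-97.0, 35.5, site_lons, site_lats)
print('Closest site is index {0:d} at {1:.1f} km'.format(index, dist_km))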
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.stack",
"numpy.full"
],
[
"numpy.array",
"numpy.array_equal"
],
[
"numpy.append",
"numpy.where",
"numpy.full",
"numpy.unique"
],
[
"tensorflow.device",
"numpy.array",
"tensorflow.GPUOptions",
"numpy.linspace"
],
[
"numpy.array",
"numpy.allclose"
],
[
"numpy.log",
"numpy.minimum",
"numpy.isfinite",
"numpy.full",
"numpy.exp",
"numpy.array"
],
[
"pandas.to_datetime",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.round",
"numpy.argmin",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"numpy.arange",
"numpy.copy",
"matplotlib.patheffects.withStroke",
"matplotlib.pyplot.close",
"numpy.unravel_index",
"numpy.min",
"pandas.Timedelta",
"matplotlib.pyplot.savefig",
"numpy.meshgrid",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.setp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
takuseno/configurable-control-gym | [
"9837e6c03c5a7fe6711b32dc70fe5e432e7d96c3",
"9837e6c03c5a7fe6711b32dc70fe5e432e7d96c3"
] | [
"configurable_control_gym/envs/pendulum.py",
"configurable_control_gym/envs/cartpole.py"
] | [
"import gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\n\nclass PendulumEnv(gym.Env):\n metadata = {\n 'render.modes' : ['human', 'rgb_array'],\n 'video.frames_per_second' : 30\n }\n\n def __init__(self, force=10.0, length=1.0, mass=1.0):\n if isinstance(force, list):\n self.g_set = force\n else:\n self.g_set = None\n self.g = force\n\n if isinstance(length, list):\n self.l_set = length\n else:\n self.l_set = None\n self.l = length\n\n if isinstance(mass, list):\n self.m_set = mass\n else:\n self.m_set = None\n self.m = mass\n\n self.max_speed=8\n self.max_torque=2.\n self.dt=.05\n self.viewer = None\n\n high = np.array([1., 1., self.max_speed])\n self.action_space = spaces.Box(low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32)\n self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32)\n\n self.steps_in_top = 0\n\n self.seed()\n\n def _sample_parameter(self):\n if self.g_set is not None:\n set_index = self.np_random.randint(len(self.g_set))\n self.g = self.np_random.uniform(self.g_set[set_index][0],\n self.g_set[set_index][1])\n if self.l_set is not None:\n set_index = self.np_random.randint(len(self.l_set))\n self.l = self.np_random.uniform(self.l_set[set_index][0],\n self.l_set[set_index][1])\n if self.m_set is not None:\n set_index = self.np_random.randint(len(self.m_set))\n self.m = self.np_random.uniform(self.m_set[set_index][0],\n self.m_set[set_index][1])\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self,u):\n th, thdot = self.state # th := theta\n\n g = self.g\n m = self.m\n l = self.l\n dt = self.dt\n\n u = np.clip(u, -self.max_torque, self.max_torque)[0]\n self.last_u = u # for rendering\n costs = angle_normalize(th)**2 + .1*thdot**2 + .001*(u**2)\n\n newthdot = thdot + (-3*g/(2*l) * np.sin(th + np.pi) + 3./(m*l**2)*u) * dt\n newth = th + newthdot*dt\n newthdot = np.clip(newthdot, -self.max_speed, self.max_speed) #pylint: disable=E1111\n\n self.state = np.array([newth, newthdot])\n\n target = np.pi / 3.0\n _newth = newth\n if np.abs(_newth) > 2.0 * np.pi:\n _newth = np.sign(_newth) * (np.abs(_newth) - 2.0 * np.pi * (_newth // (2.0 * np.pi)))\n if np.abs(_newth) < target or (2.0 * np.pi - np.abs(_newth)) < target:\n self.steps_in_top += 1\n else:\n self.steps_in_top = 0\n\n info = {}\n info['success'] = self.steps_in_top >= 100\n\n return self._get_obs(), -costs, False, info\n\n def reset(self):\n self._sample_parameter()\n high = np.array([np.pi, 1])\n self.state = self.np_random.uniform(low=-high, high=high)\n self.last_u = None\n self.steps_in_top = 0\n return self._get_obs()\n\n def _get_obs(self):\n theta, thetadot = self.state\n return np.array([np.cos(theta), np.sin(theta), thetadot])\n\n def render(self, mode='human'):\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(500,500)\n self.viewer.set_bounds(-2.2,2.2,-2.2,2.2)\n rod = rendering.make_capsule(self.l, .2)\n rod.set_color(.8, .3, .3)\n self.pole_transform = rendering.Transform()\n rod.add_attr(self.pole_transform)\n self.viewer.add_geom(rod)\n axle = rendering.make_circle(.05)\n axle.set_color(0,0,0)\n self.viewer.add_geom(axle)\n fname = path.join(path.dirname(gym.envs.classic_control.pendulum.__file__), \"assets/clockwise.png\")\n self.img = rendering.Image(fname, 1., 1.)\n self.imgtrans = rendering.Transform()\n self.img.add_attr(self.imgtrans)\n\n self.viewer.add_onetime(self.img)\n 
self.pole_transform.set_rotation(self.state[0] + np.pi/2)\n if self.last_u:\n self.imgtrans.scale = (-self.last_u/2, np.abs(self.last_u)/2)\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n\ndef angle_normalize(x):\n return (((x+np.pi) % (2*np.pi)) - np.pi)\n",
"import math\nimport gym\nfrom gym import spaces, logger\nfrom gym.utils import seeding\nimport numpy as np\n\nclass CartPoleEnv(gym.Env):\n \"\"\"\n Description:\n A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum starts upright, and the goal is to prevent it from falling over by increasing and reducing the cart's velocity.\n Source:\n This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson\n Observation: \n Type: Box(4)\n Num\tObservation Min Max\n 0\tCart Position -4.8 4.8\n 1\tCart Velocity -Inf Inf\n 2\tPole Angle -24 deg 24 deg\n 3\tPole Velocity At Tip -Inf Inf\n \n Actions:\n Type: Discrete(2)\n Num\tAction\n 0\tPush cart to the left\n 1\tPush cart to the right\n \n Note: The amount the velocity that is reduced or increased is not fixed; it depends on the angle the pole is pointing. This is because the center of gravity of the pole increases the amount of energy needed to move the cart underneath it\n Reward:\n Reward is 1 for every step taken, including the termination step\n Starting State:\n All observations are assigned a uniform random value in [-0.05..0.05]\n Episode Termination:\n Pole Angle is more than 12 degrees\n Cart Position is more than 2.4 (center of the cart reaches the edge of the display)\n Episode length is greater than 200\n Solved Requirements\n Considered solved when the average reward is greater than or equal to 195.0 over 100 consecutive trials.\n \"\"\"\n \n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 50\n }\n\n def __init__(self, force=10.0, length=0.5, mass=0.1):\n self.gravity = 9.8\n self.masscart = 1.0\n self.tau = 0.02 # seconds between state updates\n self.kinematics_integrator = 'euler'\n\n if isinstance(force, list):\n self.force_mag_set = force\n else:\n self.force_mag_set = None\n self.force_mag = force\n\n if isinstance(length, list):\n self.length_set = length\n else:\n self.length_set = None\n self.length = length\n\n if isinstance(mass, list):\n self.masspole_set = mass\n else:\n self.masspole_set = None\n self.masspole = mass\n\n # Angle at which to fail the episode\n self.theta_threshold_radians = 12 * 2 * math.pi / 360\n self.x_threshold = 2.4\n\n # Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds\n high = np.array([\n self.x_threshold * 2,\n np.finfo(np.float32).max,\n self.theta_threshold_radians * 2,\n np.finfo(np.float32).max])\n\n self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)\n self.observation_space = spaces.Box(-high, high, dtype=np.float32)\n\n self.seed()\n self.viewer = None\n self.state = None\n\n self.steps_beyond_done = None\n self.steps_in_episode = 0\n\n def _sample_parameter(self):\n if self.force_mag_set is not None:\n set_index = self.np_random.randint(len(self.force_mag_set))\n self.force_mag = self.np_random.uniform(\n self.force_mag_set[set_index][0],\n self.force_mag_set[set_index][1])\n if self.length_set is not None:\n set_index = self.np_random.randint(len(self.length_set))\n self.length = self.np_random.uniform(self.length_set[set_index][0],\n self.length_set[set_index][1])\n if self.masspole_set is not None:\n set_index = self.np_random.randint(len(self.masspole_set))\n self.masspole = self.np_random.uniform(\n self.masspole_set[set_index][0],\n self.masspole_set[set_index][1])\n\n self.polemass_length = (self.masspole * self.length)\n self.total_mass = (self.masspole + 
self.masscart)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, action):\n state = self.state\n x, x_dot, theta, theta_dot = state\n force = self.force_mag * action[0]\n costheta = math.cos(theta)\n sintheta = math.sin(theta)\n temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass\n thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n if self.kinematics_integrator == 'euler':\n x = x + self.tau * x_dot\n x_dot = x_dot + self.tau * xacc\n theta = theta + self.tau * theta_dot\n theta_dot = theta_dot + self.tau * thetaacc\n else: # semi-implicit euler\n x_dot = x_dot + self.tau * xacc\n x = x + self.tau * x_dot\n theta_dot = theta_dot + self.tau * thetaacc\n theta = theta + self.tau * theta_dot\n self.state = (x,x_dot,theta,theta_dot)\n done = x < -self.x_threshold \\\n or x > self.x_threshold \\\n or theta < -self.theta_threshold_radians \\\n or theta > self.theta_threshold_radians\n done = bool(done)\n\n self.steps_in_episode += 1\n\n if not done:\n reward = 1.0\n elif self.steps_beyond_done is None:\n # Pole just fell!\n self.steps_beyond_done = 0\n reward = 1.0\n else:\n if self.steps_beyond_done == 0:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n reward = 0.0\n\n info = {}\n info['success'] = self.steps_in_episode >= 195\n\n return np.array(self.state), reward, done, info\n\n def reset(self):\n self._sample_parameter()\n self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))\n self.steps_beyond_done = None\n self.steps_in_episode = 0\n return np.array(self.state)\n\n def render(self, mode='human'):\n screen_width = 600\n screen_height = 400\n\n world_width = self.x_threshold*2\n scale = screen_width/world_width\n carty = 100 # TOP OF CART\n polewidth = 10.0\n polelen = scale * (2 * self.length)\n cartwidth = 50.0\n cartheight = 30.0\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2\n axleoffset =cartheight/4.0\n cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\n self.carttrans = rendering.Transform()\n cart.add_attr(self.carttrans)\n self.viewer.add_geom(cart)\n l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2\n pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\n pole.set_color(.8,.6,.4)\n self.poletrans = rendering.Transform(translation=(0, axleoffset))\n pole.add_attr(self.poletrans)\n pole.add_attr(self.carttrans)\n self.viewer.add_geom(pole)\n self.axle = rendering.make_circle(polewidth/2)\n self.axle.add_attr(self.poletrans)\n self.axle.add_attr(self.carttrans)\n self.axle.set_color(.5,.5,.8)\n self.viewer.add_geom(self.axle)\n self.track = rendering.Line((0,carty), (screen_width,carty))\n self.track.set_color(0,0,0)\n self.viewer.add_geom(self.track)\n\n self._pole_geom = pole\n\n if self.state is None: return None\n\n # Edit the pole polygon vertex\n pole = self._pole_geom\n l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2\n pole.v = [(l,b), (l,t), (r,t), (r,b)]\n\n x = self.state\n cartx = 
x[0]*scale+screen_width/2.0 # MIDDLE OF CART\n self.carttrans.set_translation(cartx, carty)\n self.poletrans.set_rotation(-x[2])\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n"
] | [
[
"numpy.abs",
"numpy.clip",
"numpy.cos",
"numpy.sin",
"numpy.sign",
"numpy.array"
],
[
"numpy.array",
"numpy.finfo"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
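The environments in the record above expose each physical parameter (force, length, mass) either as a fixed scalar or as a list of [low, high] ranges that _sample_parameter() draws from on every reset(). A minimal usage sketch, assuming the class is importable from the configurable_control_gym/envs/cartpole.py path listed above (the specific parameter ranges below are illustrative only):

    # Sketch only: the import path is taken from the file_path entry above.
    from configurable_control_gym.envs.cartpole import CartPoleEnv

    # A scalar fixes a parameter; a list of [low, high] pairs makes reset()
    # resample it uniformly from one of the given ranges.
    env = CartPoleEnv(force=10.0, length=[[0.4, 0.6], [0.8, 1.2]], mass=0.1)
    obs = env.reset()                                # new pole length sampled here
    obs, reward, done, info = env.step(env.action_space.sample())
    print(env.length, info["success"])               # sampled length, success flag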
tpeng/magnitude | [
"aec98628b5547773ca8c4114ec6d1ad51e21b230",
"aec98628b5547773ca8c4114ec6d1ad51e21b230",
"aec98628b5547773ca8c4114ec6d1ad51e21b230",
"aec98628b5547773ca8c4114ec6d1ad51e21b230",
"aec98628b5547773ca8c4114ec6d1ad51e21b230"
] | [
"pymagnitude/third_party/allennlp/semparse/worlds/atis_world.py",
"pymagnitude/third_party/allennlp/models/biaffine_dependency_parser.py",
"pymagnitude/third_party/allennlp/data/fields/span_field.py",
"pymagnitude/third_party/allennlp/modules/text_field_embedders/basic_text_field_embedder.py",
"pymagnitude/third_party/allennlp/data/fields/array_field.py"
] | [
"\nfrom __future__ import absolute_import\nfrom copy import deepcopy\n#typing\nimport numpy\n\nfrom parsimonious.grammar import Grammar\n\nfrom allennlp.semparse.contexts.atis_tables import * # pylint: disable=wildcard-import,unused-wildcard-import\nfrom allennlp.semparse.contexts.sql_table_context import\\\n SqlTableContext, SqlVisitor, generate_one_of_string, format_action\n\nfrom allennlp.data.tokenizers import Token, WordTokenizer\ntry:\n from itertools import izip\nexcept:\n izip = zip\n\n\ndef get_strings_from_utterance(tokenized_utterance ) :\n u\"\"\"\n Based on the current utterance, return a dictionary where the keys are the strings in the utterance\n that map to lists of the token indices that they are linked to.\n \"\"\"\n string_linking_scores = defaultdict(list)\n for index, (first_token, second_token) in enumerate(izip(tokenized_utterance, tokenized_utterance[1:])):\n for string in ATIS_TRIGGER_DICT.get(first_token.text.lower(), []):\n string_linking_scores[string].append(index)\n\n bigram = \"{first_token.text} {second_token.text}\".lower()\n for string in ATIS_TRIGGER_DICT.get(bigram, []):\n string_linking_scores[string].extend([index, index + 1])\n\n if tokenized_utterance[-1].text.lower() in ATIS_TRIGGER_DICT:\n for string in ATIS_TRIGGER_DICT[tokenized_utterance[-1].text.lower()]:\n string_linking_scores[string].append(len(tokenized_utterance)-1)\n\n date = get_date_from_utterance(tokenized_utterance)\n if date:\n for day in DAY_OF_WEEK_INDEX[date.weekday()]:\n string_linking_scores[day] = []\n\n return string_linking_scores\n\nclass AtisWorld(object):\n u\"\"\"\n World representation for the Atis SQL domain. This class has a ``SqlTableContext`` which holds the base\n grammars, it then augments this grammar with the entities that are detected from utterances.\n\n Parameters\n ----------\n utterances: ``List[str]``\n A list of utterances in the interaction, the last element in this list is the\n current utterance that we are interested in.\n \"\"\"\n sql_table_context = SqlTableContext(TABLES)\n\n def __init__(self, utterances , tokenizer=None) :\n self.utterances = utterances\n self.tokenizer = tokenizer if tokenizer else WordTokenizer()\n self.tokenized_utterances = [self.tokenizer.tokenize(utterance) for utterance in self.utterances]\n valid_actions, linking_scores = self.init_all_valid_actions()\n self.valid_actions = valid_actions\n\n # This has shape (num_entities, num_utterance_tokens).\n self.linking_scores: numpy.ndarray = linking_scores\n self.grammar_str: unicode = self.get_grammar_str()\n self.grammar_with_context: Grammar = Grammar(self.grammar_str)\n\n def get_valid_actions(self) :\n return self.valid_actions\n\n def init_all_valid_actions(self) :\n u\"\"\"\n We initialize the valid actions with the global actions. 
We then iterate through the\n utterances up to and including the current utterance and add the valid strings.\n \"\"\"\n\n valid_actions = deepcopy(self.sql_table_context.valid_actions)\n linking_scores = []\n current_tokenized_utterance = [] if not self.tokenized_utterances\\\n else self.tokenized_utterances[-1]\n\n strings = set()\n for tokenized_utterance in self.tokenized_utterances:\n string_linking_dict = get_strings_from_utterance(tokenized_utterance)\n strings.update(list(string_linking_dict.keys()))\n\n # We want to sort things in reverse here to be consistent with the grammar.\n # The parser is greedy which means that if we have a rule that has\n # multiple options for the right hand side, the first one that succeeds is\n # the one that is used. For example, if ``1400`` appears in the query, and\n # both ``1400`` and ``1`` are valid numbers, then we want to try to match\n # ``1400`` first. Otherwise, ``1`` will succeed but nothing will match ``400``.\n # The same applies for strings here.\n strings_list = sorted(strings, reverse=True)\n\n # We construct the linking scores for strings from the ``string_linking_dict`` here.\n string_linking_scores = []\n for string in strings_list:\n entity_linking = [0 for token in current_tokenized_utterance]\n # string_linking_dict has the strings and linking scores from the last utterance.\n # If the string is not in the last utterance, then the linking scores will be all 0.\n for token_index in string_linking_dict.get(string, []):\n entity_linking[token_index] = 1\n string_linking_scores.append(entity_linking)\n linking_scores.extend(string_linking_scores)\n\n for string in strings_list:\n action = format_action(u'string', string)\n if action not in valid_actions[u'string']:\n valid_actions[u'string'].append(action)\n\n numbers = set([u'0', u'1'])\n number_linking_dict = {}\n\n for utterance, tokenized_utterance in izip(self.utterances, self.tokenized_utterances):\n number_linking_dict = get_numbers_from_utterance(utterance, tokenized_utterance)\n numbers.update(list(number_linking_dict.keys()))\n numbers_list = sorted(numbers, reverse=True)\n\n # We construct the linking scores for numbers from the ``number_linking_dict`` here.\n number_linking_scores = []\n for number in numbers_list:\n entity_linking = [0 for token in current_tokenized_utterance]\n # number_linking_scores has the numbers and linking scores from the last utterance.\n # If the number is not in the last utterance, then the linking scores will be all 0.\n for token_index in number_linking_dict.get(number, []):\n entity_linking[token_index] = 1\n number_linking_scores.append(entity_linking)\n linking_scores.extend(number_linking_scores)\n\n for number in list(numbers_list):\n action = format_action(u'number', number)\n valid_actions[u'number'].append(action)\n return valid_actions, numpy.array(linking_scores)\n\n def get_grammar_str(self) :\n u\"\"\"\n Generate a string that can be used to instantiate a ``Grammar`` object. 
The string is a sequence of\n rules that define the grammar.\n \"\"\"\n grammar_str_with_context = self.sql_table_context.grammar_str\n numbers = [number.split(u\" -> \")[1].lstrip(u'[\"').rstrip(u'\"]') for\\\n number in sorted(self.valid_actions[u'number'], reverse=True)]\n strings = [string .split(u\" -> \")[1].lstrip(u'[\"').rstrip(u'\"]') for\\\n string in sorted(self.valid_actions[u'string'], reverse=True)]\n\n grammar_str_with_context += generate_one_of_string(u\"number\", numbers)\n grammar_str_with_context += generate_one_of_string(u\"string\", strings)\n return grammar_str_with_context\n\n\n def get_action_sequence(self, query ) :\n sql_visitor = SqlVisitor(self.grammar_with_context)\n if query:\n action_sequence = sql_visitor.parse(query)\n return action_sequence\n return []\n\n def all_possible_actions(self) :\n u\"\"\"\n Return a sorted list of strings representing all possible actions\n of the form: nonterminal -> [right_hand_side]\n \"\"\"\n all_actions = set()\n for _, action_list in list(self.valid_actions.items()):\n for action in action_list:\n all_actions.add(action)\n return sorted(all_actions)\n\n def __eq__(self, other):\n if isinstance(self, other.__class__):\n return all([self.valid_actions == other.valid_actions,\n numpy.array_equal(self.linking_scores, other.linking_scores),\n self.utterances == other.utterances,\n self.grammar_str == other.grammar_str])\n return False\n",
"\n\nfrom __future__ import division\nfrom __future__ import absolute_import\n#typing\nimport logging\nimport copy\n\n#overrides\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.modules import Dropout\nimport numpy\n\nfrom allennlp.common.checks import check_dimensions_match, ConfigurationError\nfrom allennlp.data import Vocabulary\nfrom allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder, Embedding, InputVariationalDropout\nfrom allennlp.modules.matrix_attention.bilinear_matrix_attention import BilinearMatrixAttention\nfrom allennlp.modules import FeedForward\nfrom allennlp.models.model import Model\nfrom allennlp.nn import InitializerApplicator, RegularizerApplicator, Activation\nfrom allennlp.nn.util import get_text_field_mask, get_range_vector\nfrom allennlp.nn.util import get_device_of, last_dim_log_softmax, get_lengths_from_binary_sequence_mask\nfrom allennlp.nn.decoding.chu_liu_edmonds import decode_mst\nfrom allennlp.training.metrics import AttachmentScores\ntry:\n from itertools import izip\nexcept:\n izip = zip\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\nPOS_TO_IGNORE = set([u'``', u\"''\", u':', u',', u'.', u'PU', u'PUNCT', u'SYM'])\n\nclass BiaffineDependencyParser(Model):\n u\"\"\"\n This dependency parser follows the model of\n ` Deep Biaffine Attention for Neural Dependency Parsing (Dozat and Manning, 2016)\n <https://arxiv.org/abs/1611.01734>`_ .\n\n Word representations are generated using a bidirectional LSTM,\n followed by separate biaffine classifiers for pairs of words,\n predicting whether a directed arc exists between the two words\n and the dependency label the arc should have. Decoding can either\n be done greedily, or the optimial Minimum Spanning Tree can be\n decoded using Edmond's algorithm by viewing the dependency tree as\n a MST on a fully connected graph, where nodes are words and edges\n are scored dependency arcs.\n\n Parameters\n ----------\n vocab : ``Vocabulary``, required\n A Vocabulary, required in order to compute sizes for input/output projections.\n text_field_embedder : ``TextFieldEmbedder``, required\n Used to embed the ``tokens`` ``TextField`` we get as input to the model.\n encoder : ``Seq2SeqEncoder``\n The encoder (with its own internal stacking) that we will use to generate representations\n of tokens.\n tag_representation_dim : ``int``, required.\n The dimension of the MLPs used for dependency tag prediction.\n arc_representation_dim : ``int``, required.\n The dimension of the MLPs used for head arc prediction.\n tag_feedforward : ``FeedForward``, optional, (default = None).\n The feedforward network used to produce tag representations.\n By default, a 1 layer feedforward network with an elu activation is used.\n arc_feedforward : ``FeedForward``, optional, (default = None).\n The feedforward network used to produce arc representations.\n By default, a 1 layer feedforward network with an elu activation is used.\n pos_tag_embedding : ``Embedding``, optional.\n Used to embed the ``pos_tags`` ``SequenceLabelField`` we get as input to the model.\n use_mst_decoding_for_validation : ``bool``, optional (default = True).\n Whether to use Edmond's algorithm to find the optimal minimum spanning tree during validation.\n If false, decoding is greedy.\n dropout : ``float``, optional, (default = 0.0)\n The variational dropout applied to the output of the encoder and MLP layers.\n input_dropout : ``float``, optional, (default = 0.0)\n The dropout applied to the embedded text input.\n initializer : 
``InitializerApplicator``, optional (default=``InitializerApplicator()``)\n Used to initialize the model parameters.\n regularizer : ``RegularizerApplicator``, optional (default=``None``)\n If provided, will be used to calculate the regularization penalty during training.\n \"\"\"\n def __init__(self,\n vocab ,\n text_field_embedder ,\n encoder ,\n tag_representation_dim ,\n arc_representation_dim ,\n tag_feedforward = None,\n arc_feedforward = None,\n pos_tag_embedding = None,\n use_mst_decoding_for_validation = True,\n dropout = 0.0,\n input_dropout = 0.0,\n initializer = InitializerApplicator(),\n regularizer = None) :\n super(BiaffineDependencyParser, self).__init__(vocab, regularizer)\n\n self.text_field_embedder = text_field_embedder\n self.encoder = encoder\n\n encoder_dim = encoder.get_output_dim()\n\n self.head_arc_feedforward = arc_feedforward or\\\n FeedForward(encoder_dim, 1,\n arc_representation_dim,\n Activation.by_name(u\"elu\")())\n self.child_arc_feedforward = copy.deepcopy(self.head_arc_feedforward)\n\n self.arc_attention = BilinearMatrixAttention(arc_representation_dim,\n arc_representation_dim,\n use_input_biases=True)\n\n num_labels = self.vocab.get_vocab_size(u\"head_tags\")\n\n self.head_tag_feedforward = tag_feedforward or\\\n FeedForward(encoder_dim, 1,\n tag_representation_dim,\n Activation.by_name(u\"elu\")())\n self.child_tag_feedforward = copy.deepcopy(self.head_tag_feedforward)\n\n self.tag_bilinear = torch.nn.modules.Bilinear(tag_representation_dim,\n tag_representation_dim,\n num_labels)\n\n self._pos_tag_embedding = pos_tag_embedding or None\n self._dropout = InputVariationalDropout(dropout)\n self._input_dropout = Dropout(input_dropout)\n self._head_sentinel = torch.nn.Parameter(torch.randn([1, 1, encoder.get_output_dim()]))\n\n representation_dim = text_field_embedder.get_output_dim()\n if pos_tag_embedding is not None:\n representation_dim += pos_tag_embedding.get_output_dim()\n\n check_dimensions_match(representation_dim, encoder.get_input_dim(),\n u\"text field embedding dim\", u\"encoder input dim\")\n\n check_dimensions_match(tag_representation_dim, self.head_tag_feedforward.get_output_dim(),\n u\"tag representation dim\", u\"tag feedforward output dim\")\n check_dimensions_match(arc_representation_dim, self.head_arc_feedforward.get_output_dim(),\n u\"arc representation dim\", u\"arc feedforward output dim\")\n\n self.use_mst_decoding_for_validation = use_mst_decoding_for_validation\n\n tags = self.vocab.get_token_to_index_vocabulary(u\"pos\")\n punctuation_tag_indices = dict((tag, index) for tag, index in list(tags.items()) if tag in POS_TO_IGNORE)\n self._pos_to_ignore = set(punctuation_tag_indices.values())\n logger.info(\"Found POS tags correspoding to the following punctuation : {punctuation_tag_indices}. \"\n u\"Ignoring words with these POS tags for evaluation.\")\n\n self._attachment_scores = AttachmentScores()\n initializer(self)\n\n #overrides\n def forward(self, # type: ignore\n words ,\n pos_tags ,\n metadata ,\n head_tags = None,\n head_indices = None) :\n # pylint: disable=arguments-differ\n u\"\"\"\n Parameters\n ----------\n words : Dict[str, torch.LongTensor], required\n The output of ``TextField.as_array()``, which should typically be passed directly to a\n ``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``\n tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{\"tokens\":\n Tensor(batch_size, sequence_length)}``. 
This dictionary will have the same keys as were used\n for the ``TokenIndexers`` when you created the ``TextField`` representing your\n sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,\n which knows how to combine different word representations into a single vector per\n token in your input.\n pos_tags : ``torch.LongTensor``, required.\n The output of a ``SequenceLabelField`` containing POS tags.\n POS tags are required regardless of whether they are used in the model,\n because they are used to filter the evaluation metric to only consider\n heads of words which are not punctuation.\n head_tags : torch.LongTensor, optional (default = None)\n A torch tensor representing the sequence of integer gold class labels for the arcs\n in the dependency parse. Has shape ``(batch_size, sequence_length)``.\n head_indices : torch.LongTensor, optional (default = None)\n A torch tensor representing the sequence of integer indices denoting the parent of every\n word in the dependency parse. Has shape ``(batch_size, sequence_length)``.\n\n Returns\n -------\n An output dictionary consisting of:\n loss : ``torch.FloatTensor``, optional\n A scalar loss to be optimised.\n arc_loss : ``torch.FloatTensor``\n The loss contribution from the unlabeled arcs.\n loss : ``torch.FloatTensor``, optional\n The loss contribution from predicting the dependency\n tags for the gold arcs.\n heads : ``torch.FloatTensor``\n The predicted head indices for each word. A tensor\n of shape (batch_size, sequence_length).\n head_types : ``torch.FloatTensor``\n The predicted head types for each arc. A tensor\n of shape (batch_size, sequence_length).\n mask : ``torch.LongTensor``\n A mask denoting the padded elements in the batch.\n \"\"\"\n embedded_text_input = self.text_field_embedder(words)\n if pos_tags is not None and self._pos_tag_embedding is not None:\n embedded_pos_tags = self._pos_tag_embedding(pos_tags)\n embedded_text_input = torch.cat([embedded_text_input, embedded_pos_tags], -1)\n elif self._pos_tag_embedding is not None:\n raise ConfigurationError(u\"Model uses a POS embedding, but no POS tags were passed.\")\n\n mask = get_text_field_mask(words)\n embedded_text_input = self._input_dropout(embedded_text_input)\n encoded_text = self.encoder(embedded_text_input, mask)\n\n batch_size, _, encoding_dim = encoded_text.size()\n\n head_sentinel = self._head_sentinel.expand(batch_size, 1, encoding_dim)\n # Concatenate the head sentinel onto the sentence representation.\n encoded_text = torch.cat([head_sentinel, encoded_text], 1)\n mask = torch.cat([mask.new_ones(batch_size, 1), mask], 1)\n if head_indices is not None:\n head_indices = torch.cat([head_indices.new_zeros(batch_size, 1), head_indices], 1)\n if head_tags is not None:\n head_tags = torch.cat([head_tags.new_zeros(batch_size, 1), head_tags], 1)\n float_mask = mask.float()\n encoded_text = self._dropout(encoded_text)\n\n # shape (batch_size, sequence_length, arc_representation_dim)\n head_arc_representation = self._dropout(self.head_arc_feedforward(encoded_text))\n child_arc_representation = self._dropout(self.child_arc_feedforward(encoded_text))\n\n # shape (batch_size, sequence_length, tag_representation_dim)\n head_tag_representation = self._dropout(self.head_tag_feedforward(encoded_text))\n child_tag_representation = self._dropout(self.child_tag_feedforward(encoded_text))\n # shape (batch_size, sequence_length, sequence_length)\n attended_arcs = self.arc_attention(head_arc_representation,\n child_arc_representation)\n\n minus_inf 
= -1e8\n minus_mask = (1 - float_mask) * minus_inf\n attended_arcs = attended_arcs + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)\n\n if self.training or not self.use_mst_decoding_for_validation:\n predicted_heads, predicted_head_tags = self._greedy_decode(head_tag_representation,\n child_tag_representation,\n attended_arcs,\n mask)\n else:\n predicted_heads, predicted_head_tags = self._mst_decode(head_tag_representation,\n child_tag_representation,\n attended_arcs,\n mask)\n if head_indices is not None and head_tags is not None:\n\n arc_nll, tag_nll = self._construct_loss(head_tag_representation=head_tag_representation,\n child_tag_representation=child_tag_representation,\n attended_arcs=attended_arcs,\n head_indices=head_indices,\n head_tags=head_tags,\n mask=mask)\n loss = arc_nll + tag_nll\n\n evaluation_mask = self._get_mask_for_eval(mask[:, 1:], pos_tags)\n # We calculate attatchment scores for the whole sentence\n # but excluding the symbolic ROOT token at the start,\n # which is why we start from the second element in the sequence.\n self._attachment_scores(predicted_heads[:, 1:],\n predicted_head_tags[:, 1:],\n head_indices[:, 1:],\n head_tags[:, 1:],\n evaluation_mask)\n else:\n arc_nll, tag_nll = self._construct_loss(head_tag_representation=head_tag_representation,\n child_tag_representation=child_tag_representation,\n attended_arcs=attended_arcs,\n head_indices=predicted_heads.long(),\n head_tags=predicted_head_tags.long(),\n mask=mask)\n loss = arc_nll + tag_nll\n\n output_dict = {\n u\"heads\": predicted_heads,\n u\"head_tags\": predicted_head_tags,\n u\"arc_loss\": arc_nll,\n u\"tag_loss\": tag_nll,\n u\"loss\": loss,\n u\"mask\": mask,\n u\"words\": [meta[u\"words\"] for meta in metadata],\n u\"pos\": [meta[u\"pos\"] for meta in metadata]\n }\n\n return output_dict\n\n #overrides\n def decode(self, output_dict ) :\n\n head_tags = output_dict.pop(u\"head_tags\").cpu().detach().numpy()\n heads = output_dict.pop(u\"heads\").cpu().detach().numpy()\n mask = output_dict.pop(u\"mask\")\n lengths = get_lengths_from_binary_sequence_mask(mask)\n head_tag_labels = []\n head_indices = []\n for instance_heads, instance_tags, length in izip(heads, head_tags, lengths):\n instance_heads = list(instance_heads[1:length])\n instance_tags = instance_tags[1:length]\n labels = [self.vocab.get_token_from_index(label, u\"head_tags\")\n for label in instance_tags]\n head_tag_labels.append(labels)\n head_indices.append(instance_heads)\n\n output_dict[u\"predicted_dependencies\"] = head_tag_labels\n output_dict[u\"predicted_heads\"] = head_indices\n return output_dict\n\n def _construct_loss(self,\n head_tag_representation ,\n child_tag_representation ,\n attended_arcs ,\n head_indices ,\n head_tags ,\n mask ) :\n u\"\"\"\n Computes the arc and tag loss for a sequence given gold head indices and tags.\n\n Parameters\n ----------\n head_tag_representation : ``torch.Tensor``, required.\n A tensor of shape (batch_size, sequence_length, tag_representation_dim),\n which will be used to generate predictions for the dependency tags\n for the given arcs.\n child_tag_representation : ``torch.Tensor``, required\n A tensor of shape (batch_size, sequence_length, tag_representation_dim),\n which will be used to generate predictions for the dependency tags\n for the given arcs.\n attended_arcs : ``torch.Tensor``, required.\n A tensor of shape (batch_size, sequence_length, sequence_length) used to generate\n a distribution over attachements of a given word to all other words.\n head_indices : ``torch.Tensor``, 
required.\n A tensor of shape (batch_size, sequence_length).\n The indices of the heads for every word.\n head_tags : ``torch.Tensor``, required.\n A tensor of shape (batch_size, sequence_length).\n The dependency labels of the heads for every word.\n mask : ``torch.Tensor``, required.\n A mask of shape (batch_size, sequence_length), denoting unpadded\n elements in the sequence.\n\n Returns\n -------\n arc_nll : ``torch.Tensor``, required.\n The negative log likelihood from the arc loss.\n tag_nll : ``torch.Tensor``, required.\n The negative log likelihood from the arc tag loss.\n \"\"\"\n float_mask = mask.float()\n batch_size, sequence_length, _ = attended_arcs.size()\n # shape (batch_size, 1)\n range_vector = get_range_vector(batch_size, get_device_of(attended_arcs)).unsqueeze(1)\n # shape (batch_size, sequence_length, sequence_length)\n normalised_arc_logits = last_dim_log_softmax(attended_arcs,\n mask) * float_mask.unsqueeze(2) * float_mask.unsqueeze(1)\n\n # shape (batch_size, sequence_length, num_head_tags)\n head_tag_logits = self._get_head_tags(head_tag_representation, child_tag_representation, head_indices)\n normalised_head_tag_logits = last_dim_log_softmax(head_tag_logits,\n mask.unsqueeze(-1)) * float_mask.unsqueeze(-1)\n # index matrix with shape (batch, sequence_length)\n timestep_index = get_range_vector(sequence_length, get_device_of(attended_arcs))\n child_index = timestep_index.view(1, sequence_length).expand(batch_size, sequence_length).long()\n # shape (batch_size, sequence_length)\n arc_loss = normalised_arc_logits[range_vector, child_index, head_indices]\n tag_loss = normalised_head_tag_logits[range_vector, child_index, head_tags]\n # We don't care about predictions for the symbolic ROOT token's head,\n # so we remove it from the loss.\n arc_loss = arc_loss[:, 1:]\n tag_loss = tag_loss[:, 1:]\n\n # The number of valid positions is equal to the number of unmasked elements minus\n # 1 per sequence in the batch, to account for the symbolic HEAD token.\n valid_positions = mask.sum() - batch_size\n\n arc_nll = -arc_loss.sum() / valid_positions.float()\n tag_nll = -tag_loss.sum() / valid_positions.float()\n return arc_nll, tag_nll\n\n def _greedy_decode(self,\n head_tag_representation ,\n child_tag_representation ,\n attended_arcs ,\n mask ) :\n u\"\"\"\n Decodes the head and head tag predictions by decoding the unlabeled arcs\n independently for each word and then again, predicting the head tags of\n these greedily chosen arcs indpendently. Note that this method of decoding\n is not guaranteed to produce trees (i.e. 
there maybe be multiple roots,\n or cycles when children are attached to their parents).\n\n Parameters\n ----------\n head_tag_representation : ``torch.Tensor``, required.\n A tensor of shape (batch_size, sequence_length, tag_representation_dim),\n which will be used to generate predictions for the dependency tags\n for the given arcs.\n child_tag_representation : ``torch.Tensor``, required\n A tensor of shape (batch_size, sequence_length, tag_representation_dim),\n which will be used to generate predictions for the dependency tags\n for the given arcs.\n attended_arcs : ``torch.Tensor``, required.\n A tensor of shape (batch_size, sequence_length, sequence_length) used to generate\n a distribution over attachements of a given word to all other words.\n\n Returns\n -------\n heads : ``torch.Tensor``\n A tensor of shape (batch_size, sequence_length) representing the\n greedily decoded heads of each word.\n head_tags : ``torch.Tensor``\n A tensor of shape (batch_size, sequence_length) representing the\n dependency tags of the greedily decoded heads of each word.\n \"\"\"\n # Mask the diagonal, because the head of a word can't be itself.\n attended_arcs = attended_arcs + torch.diag(attended_arcs.new(mask.size(1)).fill_(-numpy.inf))\n # Mask padded tokens, because we only want to consider actual words as heads.\n if mask is not None:\n minus_mask = (1 - mask).byte().unsqueeze(2)\n attended_arcs.masked_fill_(minus_mask, -numpy.inf)\n\n # Compute the heads greedily.\n # shape (batch_size, sequence_length)\n _, heads = attended_arcs.max(dim=2)\n\n # Given the greedily predicted heads, decode their dependency tags.\n # shape (batch_size, sequence_length, num_head_tags)\n head_tag_logits = self._get_head_tags(head_tag_representation,\n child_tag_representation,\n heads)\n _, head_tags = head_tag_logits.max(dim=2)\n return heads, head_tags\n\n def _mst_decode(self,\n head_tag_representation ,\n child_tag_representation ,\n attended_arcs ,\n mask ) :\n u\"\"\"\n Decodes the head and head tag predictions using the Edmonds' Algorithm\n for finding minimum spanning trees on directed graphs. Nodes in the\n graph are the words in the sentence, and between each pair of nodes,\n there is an edge in each direction, where the weight of the edge corresponds\n to the most likely dependency label probability for that arc. 
The MST is\n then generated from this directed graph.\n\n Parameters\n ----------\n head_tag_representation : ``torch.Tensor``, required.\n A tensor of shape (batch_size, sequence_length, tag_representation_dim),\n which will be used to generate predictions for the dependency tags\n for the given arcs.\n child_tag_representation : ``torch.Tensor``, required\n A tensor of shape (batch_size, sequence_length, tag_representation_dim),\n which will be used to generate predictions for the dependency tags\n for the given arcs.\n attended_arcs : ``torch.Tensor``, required.\n A tensor of shape (batch_size, sequence_length, sequence_length) used to generate\n a distribution over attachements of a given word to all other words.\n\n Returns\n -------\n heads : ``torch.Tensor``\n A tensor of shape (batch_size, sequence_length) representing the\n greedily decoded heads of each word.\n head_tags : ``torch.Tensor``\n A tensor of shape (batch_size, sequence_length) representing the\n dependency tags of the optimally decoded heads of each word.\n \"\"\"\n batch_size, sequence_length, tag_representation_dim = head_tag_representation.size()\n\n lengths = mask.data.sum(dim=1).long().cpu().numpy()\n\n expanded_shape = [batch_size, sequence_length, sequence_length, tag_representation_dim]\n head_tag_representation = head_tag_representation.unsqueeze(2)\n head_tag_representation = head_tag_representation.expand(*expanded_shape).contiguous()\n child_tag_representation = child_tag_representation.unsqueeze(1)\n child_tag_representation = child_tag_representation.expand(*expanded_shape).contiguous()\n # Shape (batch_size, sequence_length, sequence_length, num_head_tags)\n pairwise_head_logits = self.tag_bilinear(head_tag_representation, child_tag_representation)\n\n # Note that this log_softmax is over the tag dimension, and we don't consider pairs\n # of tags which are invalid (e.g are a pair which includes a padded element) anyway below.\n # Shape (batch, num_labels,sequence_length, sequence_length)\n normalized_pairwise_head_logits = F.log_softmax(pairwise_head_logits, dim=3).permute(0, 3, 1, 2)\n\n # Mask padded tokens, because we only want to consider actual words as heads.\n minus_inf = -1e8\n minus_mask = (1 - mask.float()) * minus_inf\n attended_arcs = attended_arcs + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)\n\n # Shape (batch_size, sequence_length, sequence_length)\n normalized_arc_logits = F.log_softmax(attended_arcs, dim=2).transpose(1, 2)\n\n # Shape (batch_size, num_head_tags, sequence_length, sequence_length)\n # This energy tensor expresses the following relation:\n # energy[i,j] = \"Score that i is the head of j\". In this\n # case, we have heads pointing to their children.\n batch_energy = torch.exp(normalized_arc_logits.unsqueeze(1) + normalized_pairwise_head_logits)\n return self._run_mst_decoding(batch_energy, lengths)\n\n @staticmethod\n def _run_mst_decoding(batch_energy , lengths ) :\n heads = []\n head_tags = []\n for energy, length in izip(batch_energy.detach().cpu(), lengths):\n scores, tag_ids = energy.max(dim=0)\n # Although we need to include the root node so that the MST includes it,\n # we do not want any word to be the parent of the root node.\n # Here, we enforce this by setting the scores for all word -> ROOT edges\n # edges to be 0.\n scores[0, :] = 0\n # Decode the heads. 
Because we modify the scores to prevent\n # adding in word -> ROOT edges, we need to find the labels ourselves.\n instance_heads, _ = decode_mst(scores.numpy(), length, has_labels=False)\n\n # Find the labels which correspond to the edges in the max spanning tree.\n instance_head_tags = []\n for child, parent in enumerate(instance_heads):\n instance_head_tags.append(tag_ids[parent, child].item())\n # We don't care what the head or tag is for the root token, but by default it's\n # not necesarily the same in the batched vs unbatched case, which is annoying.\n # Here we'll just set them to zero.\n instance_heads[0] = 0\n instance_head_tags[0] = 0\n heads.append(instance_heads)\n head_tags.append(instance_head_tags)\n return torch.from_numpy(numpy.stack(heads)), torch.from_numpy(numpy.stack(head_tags))\n\n def _get_head_tags(self,\n head_tag_representation ,\n child_tag_representation ,\n head_indices ) :\n u\"\"\"\n Decodes the head tags given the head and child tag representations\n and a tensor of head indices to compute tags for. Note that these are\n either gold or predicted heads, depending on whether this function is\n being called to compute the loss, or if it's being called during inference.\n\n Parameters\n ----------\n head_tag_representation : ``torch.Tensor``, required.\n A tensor of shape (batch_size, sequence_length, tag_representation_dim),\n which will be used to generate predictions for the dependency tags\n for the given arcs.\n child_tag_representation : ``torch.Tensor``, required\n A tensor of shape (batch_size, sequence_length, tag_representation_dim),\n which will be used to generate predictions for the dependency tags\n for the given arcs.\n head_indices : ``torch.Tensor``, required.\n A tensor of shape (batch_size, sequence_length). The indices of the heads\n for every word.\n\n Returns\n -------\n head_tag_logits : ``torch.Tensor``\n A tensor of shape (batch_size, sequence_length, num_head_tags),\n representing logits for predicting a distribution over tags\n for each arc.\n \"\"\"\n batch_size = head_tag_representation.size(0)\n # shape (batch_size,)\n range_vector = get_range_vector(batch_size, get_device_of(head_tag_representation)).unsqueeze(1)\n\n # This next statement is quite a complex piece of indexing, which you really\n # need to read the docs to understand. 
See here:\n # https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#advanced-indexing\n # In effect, we are selecting the indices corresponding to the heads of each word from the\n # sequence length dimension for each element in the batch.\n\n # shape (batch_size, sequence_length, tag_representation_dim)\n selected_head_tag_representations = head_tag_representation[range_vector, head_indices]\n selected_head_tag_representations = selected_head_tag_representations.contiguous()\n # shape (batch_size, sequence_length, num_head_tags)\n head_tag_logits = self.tag_bilinear(selected_head_tag_representations,\n child_tag_representation)\n return head_tag_logits\n\n def _get_mask_for_eval(self,\n mask ,\n pos_tags ) :\n u\"\"\"\n Dependency evaluation excludes words are punctuation.\n Here, we create a new mask to exclude word indices which\n have a \"punctuation-like\" part of speech tag.\n\n Parameters\n ----------\n mask : ``torch.LongTensor``, required.\n The original mask.\n pos_tags : ``torch.LongTensor``, required.\n The pos tags for the sequence.\n\n Returns\n -------\n A new mask, where any indices equal to labels\n we should be ignoring are masked.\n \"\"\"\n new_mask = mask.detach()\n for label in self._pos_to_ignore:\n label_mask = pos_tags.eq(label).long()\n new_mask = new_mask * (1 - label_mask)\n return new_mask\n\n #overrides\n def get_metrics(self, reset = False) :\n return self._attachment_scores.get_metric(reset)\n\nBiaffineDependencyParser = Model.register(u\"biaffine_parser\")(BiaffineDependencyParser)\n",
"# pylint: disable=access-member-before-definition\n\nfrom __future__ import absolute_import\n#typing\n\n#overrides\nimport torch\n\nfrom allennlp.data.fields.field import Field\nfrom allennlp.data.fields.sequence_field import SequenceField\n\n\nclass SpanField(Field):\n u\"\"\"\n A ``SpanField`` is a pair of inclusive, zero-indexed (start, end) indices into a\n :class:`~allennlp.data.fields.sequence_field.SequenceField`, used to represent a span of text.\n Because it's a pair of indices into a :class:`SequenceField`, we take one of those as input\n to make the span's dependence explicit and to validate that the span is well defined.\n\n Parameters\n ----------\n span_start : ``int``, required.\n The index of the start of the span in the :class:`SequenceField`.\n span_end : ``int``, required.\n The inclusive index of the end of the span in the :class:`SequenceField`.\n sequence_field : ``SequenceField``, required.\n A field containing the sequence that this ``SpanField`` is a span inside.\n \"\"\"\n def __init__(self, span_start , span_end , sequence_field ) :\n self.span_start = span_start\n self.span_end = span_end\n self.sequence_field = sequence_field\n\n if not isinstance(span_start, int) or not isinstance(span_end, int):\n raise TypeError(\"SpanFields must be passed integer indices. Found span indices: \"\n \"({span_start}, {span_end}) with types \"\n \"({type(span_start)} {type(span_end)})\")\n if span_start > span_end:\n raise ValueError(\"span_start must be less than span_end, \"\n \"but found ({span_start}, {span_end}).\")\n\n if span_end > self.sequence_field.sequence_length() - 1:\n raise ValueError(\"span_end must be < len(sequence_length) - 1, but found \"\n \"{span_end} and {self.sequence_field.sequence_length() - 1} respectively.\")\n\n #overrides\n def get_padding_lengths(self) :\n # pylint: disable=no-self-use\n return {}\n\n #overrides\n def as_tensor(self,\n padding_lengths ,\n cuda_device = -1) :\n # pylint: disable=unused-argument\n tensor = torch.LongTensor([self.span_start, self.span_end])\n return tensor if cuda_device == -1 else tensor.cuda(cuda_device)\n\n #overrides\n def empty_field(self):\n return SpanField(-1, -1, self.sequence_field.empty_field())\n\n def __str__(self) :\n return \"SpanField with spans: ({self.span_start}, {self.span_end}).\"\n",
"\nfrom __future__ import absolute_import\n#typing\nimport warnings\n\nimport torch\n#overrides\n\nfrom allennlp.common import Params\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.data import Vocabulary\nfrom allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder\nfrom allennlp.modules.time_distributed import TimeDistributed\nfrom allennlp.modules.token_embedders.token_embedder import TokenEmbedder\n\n\nclass BasicTextFieldEmbedder(TextFieldEmbedder):\n u\"\"\"\n This is a ``TextFieldEmbedder`` that wraps a collection of :class:`TokenEmbedder` objects. Each\n ``TokenEmbedder`` embeds or encodes the representation output from one\n :class:`~allennlp.data.TokenIndexer`. As the data produced by a\n :class:`~allennlp.data.fields.TextField` is a dictionary mapping names to these\n representations, we take ``TokenEmbedders`` with corresponding names. Each ``TokenEmbedders``\n embeds its input, and the result is concatenated in an arbitrary order.\n\n Parameters\n ----------\n\n token_embedders : ``Dict[str, TokenEmbedder]``, required.\n A dictionary mapping token embedder names to implementations.\n These names should match the corresponding indexer used to generate\n the tensor passed to the TokenEmbedder.\n embedder_to_indexer_map : ``Dict[str, List[str]]``, optional, (default = None)\n Optionally, you can provide a mapping between the names of the TokenEmbedders\n that you are using to embed your TextField and an ordered list of indexer names\n which are needed for running it. In most cases, your TokenEmbedder will only\n require a single tensor, because it is designed to run on the output of a\n single TokenIndexer. For example, the ELMo Token Embedder can be used in\n two modes, one of which requires both character ids and word ids for the\n same text. 
Note that the list of token indexer names is `ordered`, meaning\n that the tensors produced by the indexers will be passed to the embedders\n in the order you specify in this list.\n allow_unmatched_keys : ``bool``, optional (default = False)\n If True, then don't enforce the keys of the ``text_field_input`` to\n match those in ``token_embedders`` (useful if the mapping is specified\n via ``embedder_to_indexer_map``).\n \"\"\"\n def __init__(self,\n token_embedders ,\n embedder_to_indexer_map = None,\n allow_unmatched_keys = False) :\n super(BasicTextFieldEmbedder, self).__init__()\n self._token_embedders = token_embedders\n self._embedder_to_indexer_map = embedder_to_indexer_map\n for key, embedder in list(token_embedders.items()):\n name = u'token_embedder_%s' % key\n self.add_module(name, embedder)\n self._allow_unmatched_keys = allow_unmatched_keys\n\n #overrides\n def get_output_dim(self) :\n output_dim = 0\n for embedder in list(self._token_embedders.values()):\n output_dim += embedder.get_output_dim()\n return output_dim\n\n def forward(self, text_field_input , num_wrapping_dims = 0) :\n if list(self._token_embedders.keys()) != list(text_field_input.keys()):\n if not self._allow_unmatched_keys:\n message = u\"Mismatched token keys: %s and %s\" % (unicode(list(self._token_embedders.keys())),\n unicode(list(text_field_input.keys())))\n raise ConfigurationError(message)\n embedded_representations = []\n keys = sorted(self._token_embedders.keys())\n for key in keys:\n # If we pre-specified a mapping explictly, use that.\n if self._embedder_to_indexer_map is not None:\n tensors = [text_field_input[indexer_key] for\n indexer_key in self._embedder_to_indexer_map[key]]\n else:\n # otherwise, we assume the mapping between indexers and embedders\n # is bijective and just use the key directly.\n tensors = [text_field_input[key]]\n # Note: need to use getattr here so that the pytorch voodoo\n # with submodules works with multiple GPUs.\n embedder = getattr(self, u'token_embedder_{}'.format(key))\n for _ in range(num_wrapping_dims):\n embedder = TimeDistributed(embedder)\n token_vectors = embedder(*tensors)\n embedded_representations.append(token_vectors)\n return torch.cat(embedded_representations, dim=-1)\n\n # This is some unusual logic, it needs a custom from_params.\n @classmethod\n def from_params(cls, vocab , params ) : # type: ignore\n # pylint: disable=arguments-differ,bad-super-call\n\n # The original `from_params` for this class was designed in a way that didn't agree\n # with the constructor. The constructor wants a 'token_embedders' parameter that is a\n # `Dict[str, TokenEmbedder]`, but the original `from_params` implementation expected those\n # key-value pairs to be top-level in the params object.\n #\n # This breaks our 'configuration wizard' and configuration checks. 
Hence, going forward,\n # the params need a 'token_embedders' key so that they line up with what the constructor wants.\n # For now, the old behavior is still supported, but produces a DeprecationWarning.\n\n embedder_to_indexer_map = params.pop(u\"embedder_to_indexer_map\", None)\n if embedder_to_indexer_map is not None:\n embedder_to_indexer_map = embedder_to_indexer_map.as_dict(quiet=True)\n allow_unmatched_keys = params.pop_bool(u\"allow_unmatched_keys\", False)\n\n token_embedder_params = params.pop(u'token_embedders', None)\n\n if token_embedder_params is not None:\n # New way: explicitly specified, so use it.\n token_embedders = dict((\n name, TokenEmbedder.from_params(subparams, vocab=vocab))\n for name, subparams in list(token_embedder_params.items()))\n\n else:\n # Warn that the original behavior is deprecated\n warnings.warn(DeprecationWarning(u\"the token embedders for BasicTextFieldEmbedder should now \"\n u\"be specified as a dict under the 'token_embedders' key, \"\n u\"not as top-level key-value pairs\"))\n\n token_embedders = {}\n keys = list(params.keys())\n for key in keys:\n embedder_params = params.pop(key)\n token_embedders[key] = TokenEmbedder.from_params(vocab=vocab, params=embedder_params)\n\n params.assert_empty(cls.__name__)\n return cls(token_embedders, embedder_to_indexer_map, allow_unmatched_keys)\n\nBasicTextFieldEmbedder = TextFieldEmbedder.register(u\"basic\")(BasicTextFieldEmbedder)\n",
"\nfrom __future__ import absolute_import\n#typing\n\nimport numpy\nimport torch\n#overrides\n\nfrom allennlp.data.fields.field import Field\n\n\nclass ArrayField(Field):\n u\"\"\"\n A class representing an array, which could have arbitrary dimensions.\n A batch of these arrays are padded to the max dimension length in the batch\n for each dimension.\n \"\"\"\n def __init__(self, array , padding_value = 0) :\n self.array = array\n self.padding_value = padding_value\n\n #overrides\n def get_padding_lengths(self) :\n return dict((u\"dimension_\" + unicode(i), shape)\n for i, shape in enumerate(self.array.shape))\n\n #overrides\n def as_tensor(self,\n padding_lengths ,\n cuda_device = -1) :\n max_shape = [padding_lengths[u\"dimension_{}\".format(i)]\n for i in range(len(padding_lengths))]\n\n return_array = numpy.ones(max_shape, u\"float32\") * self.padding_value\n\n # If the tensor has a different shape from the largest tensor, pad dimensions with zeros to\n # form the right shaped list of slices for insertion into the final tensor.\n slicing_shape = list(self.array.shape)\n if len(self.array.shape) < len(max_shape):\n slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]\n slices = tuple([slice(0, x) for x in slicing_shape])\n return_array[slices] = self.array\n tensor = torch.from_numpy(return_array)\n return tensor if cuda_device == -1 else tensor.cuda(cuda_device)\n\n #overrides\n def empty_field(self): # pylint: disable=no-self-use\n # Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the\n # same padding_value in the padded ArrayFields\n return ArrayField(numpy.array([], dtype=u\"float32\"), padding_value=self.padding_value)\n\n\n def __str__(self) :\n return \"ArrayField with shape: {self.array.shape}.\"\n"
] | [
[
"numpy.array",
"numpy.array_equal"
],
[
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.nn.modules.Bilinear",
"numpy.stack",
"torch.nn.modules.Dropout"
],
[
"torch.LongTensor"
],
[
"torch.cat"
],
[
"numpy.array",
"torch.from_numpy",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
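Among the files in the record above, ArrayField.as_tensor pads a variable-shaped array up to the per-dimension maximum shape of its batch before converting it to a tensor. A standalone sketch of that padding step, with toy shapes chosen for illustration:

    import numpy
    import torch

    array = numpy.arange(6, dtype="float32").reshape(2, 3)   # the field's array
    padding_value = 0
    max_shape = [4, 5]        # per-dimension maxima across the batch (assumed here)

    # Fill with the padding value, then copy the array into the leading slices.
    padded = numpy.ones(max_shape, "float32") * padding_value
    slices = tuple(slice(0, dim) for dim in array.shape)
    padded[slices] = array
    tensor = torch.from_numpy(padded)   # shape (4, 5), zero-padded copy of `array`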
mdengler/pandas | [
"ca010142309076bf24a06ca83fb822915e49fa80"
] | [
"pandas/tests/arithmetic/test_datetime64.py"
] | [
"# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\n# Specifically for datetime64 and datetime64tz dtypes\nfrom datetime import (\n datetime,\n time,\n timedelta,\n)\nfrom itertools import (\n product,\n starmap,\n)\nimport operator\nimport warnings\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs.conversion import localize_pydatetime\nfrom pandas._libs.tslibs.offsets import shift_months\nfrom pandas.compat import np_datetime64_compat\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import (\n DateOffset,\n DatetimeIndex,\n NaT,\n Period,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n DatetimeArray,\n TimedeltaArray,\n)\nfrom pandas.core.ops import roperator\nfrom pandas.tests.arithmetic.common import (\n assert_invalid_addsub_type,\n assert_invalid_comparison,\n get_upcast_box,\n)\n\n# ------------------------------------------------------------------\n# Comparisons\n\n\nclass TestDatetime64ArrayLikeComparisons:\n # Comparison tests for datetime64 vectors fully parametrized over\n # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison\n # tests will eventually end up here.\n\n def test_compare_zerodim(self, tz_naive_fixture, box_with_array):\n # Test comparison with zero-dimensional array is unboxed\n tz = tz_naive_fixture\n box = box_with_array\n xbox = (\n box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray\n )\n dti = date_range(\"20130101\", periods=3, tz=tz)\n\n other = np.array(dti.to_numpy()[0])\n\n dtarr = tm.box_expected(dti, box)\n result = dtarr <= other\n expected = np.array([True, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n \"foo\",\n -1,\n 99,\n 4.0,\n object(),\n timedelta(days=2),\n # GH#19800, GH#19301 datetime.date comparison raises to\n # match DatetimeIndex/Timestamp. 
This also matches the behavior\n # of stdlib datetime.datetime\n datetime(2001, 1, 1).date(),\n # GH#19301 None and NaN are *not* cast to NaT for comparisons\n None,\n np.nan,\n ],\n )\n def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):\n # GH#22074, GH#15966\n tz = tz_naive_fixture\n\n rng = date_range(\"1/1/2000\", periods=10, tz=tz)\n dtarr = tm.box_expected(rng, box_with_array)\n assert_invalid_comparison(dtarr, other, box_with_array)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n list(range(10)),\n np.arange(10),\n np.arange(10).astype(np.float32),\n np.arange(10).astype(object),\n pd.timedelta_range(\"1ns\", periods=10).array,\n np.array(pd.timedelta_range(\"1ns\", periods=10)),\n list(pd.timedelta_range(\"1ns\", periods=10)),\n pd.timedelta_range(\"1 Day\", periods=10).astype(object),\n pd.period_range(\"1971-01-01\", freq=\"D\", periods=10).array,\n pd.period_range(\"1971-01-01\", freq=\"D\", periods=10).astype(object),\n ],\n )\n def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):\n # We don't parametrize this over box_with_array because listlike\n # other plays poorly with assert_invalid_comparison reversed checks\n tz = tz_naive_fixture\n\n dta = date_range(\"1970-01-01\", freq=\"ns\", periods=10, tz=tz)._data\n assert_invalid_comparison(dta, other, tm.to_array)\n\n def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):\n tz = tz_naive_fixture\n\n dta = date_range(\"1970-01-01\", freq=\"h\", periods=5, tz=tz)._data\n\n other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])\n result = dta == other\n expected = np.array([False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dta != other\n tm.assert_numpy_array_equal(result, ~expected)\n\n msg = \"Invalid comparison between|Cannot compare type|not supported between\"\n with pytest.raises(TypeError, match=msg):\n dta < other\n with pytest.raises(TypeError, match=msg):\n dta > other\n with pytest.raises(TypeError, match=msg):\n dta <= other\n with pytest.raises(TypeError, match=msg):\n dta >= other\n\n def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):\n # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly\n tz = tz_naive_fixture\n box = box_with_array\n xbox = box if box not in [pd.Index, pd.array] else np.ndarray\n\n ts = Timestamp.now(tz)\n ser = Series([ts, NaT])\n\n obj = tm.box_expected(ser, box)\n\n expected = Series([True, False], dtype=np.bool_)\n expected = tm.box_expected(expected, xbox)\n\n result = obj == ts\n tm.assert_equal(result, expected)\n\n\nclass TestDatetime64SeriesComparison:\n # TODO: moved from tests.series.test_operators; needs cleanup\n\n @pytest.mark.parametrize(\n \"pair\",\n [\n (\n [Timestamp(\"2011-01-01\"), NaT, Timestamp(\"2011-01-03\")],\n [NaT, NaT, Timestamp(\"2011-01-03\")],\n ),\n (\n [Timedelta(\"1 days\"), NaT, Timedelta(\"3 days\")],\n [NaT, NaT, Timedelta(\"3 days\")],\n ),\n (\n [Period(\"2011-01\", freq=\"M\"), NaT, Period(\"2011-03\", freq=\"M\")],\n [NaT, NaT, Period(\"2011-03\", freq=\"M\")],\n ),\n ],\n )\n @pytest.mark.parametrize(\"reverse\", [True, False])\n @pytest.mark.parametrize(\"dtype\", [None, object])\n @pytest.mark.parametrize(\n \"op, expected\",\n [\n (operator.eq, Series([False, False, True])),\n (operator.ne, Series([True, True, False])),\n (operator.lt, Series([False, False, False])),\n (operator.gt, Series([False, False, False])),\n (operator.ge, Series([False, False, True])),\n (operator.le, Series([False, False, True])),\n ],\n )\n 
def test_nat_comparisons(\n self,\n dtype,\n index_or_series,\n reverse,\n pair,\n op,\n expected,\n ):\n box = index_or_series\n l, r = pair\n if reverse:\n # add lhs / rhs switched data\n l, r = r, l\n\n left = Series(l, dtype=dtype)\n right = box(r, dtype=dtype)\n\n result = op(left, right)\n\n tm.assert_series_equal(result, expected)\n\n def test_comparison_invalid(self, tz_naive_fixture, box_with_array):\n # GH#4968\n # invalid date/int comparisons\n tz = tz_naive_fixture\n ser = Series(range(5))\n ser2 = Series(date_range(\"20010101\", periods=5, tz=tz))\n\n ser = tm.box_expected(ser, box_with_array)\n ser2 = tm.box_expected(ser2, box_with_array)\n\n assert_invalid_comparison(ser, ser2, box_with_array)\n\n @pytest.mark.parametrize(\n \"data\",\n [\n [Timestamp(\"2011-01-01\"), NaT, Timestamp(\"2011-01-03\")],\n [Timedelta(\"1 days\"), NaT, Timedelta(\"3 days\")],\n [Period(\"2011-01\", freq=\"M\"), NaT, Period(\"2011-03\", freq=\"M\")],\n ],\n )\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_nat_comparisons_scalar(self, dtype, data, box_with_array):\n box = box_with_array\n if box_with_array is tm.to_array and dtype is object:\n # dont bother testing ndarray comparison methods as this fails\n # on older numpys (since they check object identity)\n return\n\n xbox = box if box not in [pd.Index, pd.array] else np.ndarray\n\n left = Series(data, dtype=dtype)\n left = tm.box_expected(left, box)\n\n expected = [False, False, False]\n expected = tm.box_expected(expected, xbox)\n if box is pd.array and dtype is object:\n expected = pd.array(expected, dtype=\"bool\")\n\n tm.assert_equal(left == NaT, expected)\n tm.assert_equal(NaT == left, expected)\n\n expected = [True, True, True]\n expected = tm.box_expected(expected, xbox)\n if box is pd.array and dtype is object:\n expected = pd.array(expected, dtype=\"bool\")\n tm.assert_equal(left != NaT, expected)\n tm.assert_equal(NaT != left, expected)\n\n expected = [False, False, False]\n expected = tm.box_expected(expected, xbox)\n if box is pd.array and dtype is object:\n expected = pd.array(expected, dtype=\"bool\")\n tm.assert_equal(left < NaT, expected)\n tm.assert_equal(NaT > left, expected)\n tm.assert_equal(left <= NaT, expected)\n tm.assert_equal(NaT >= left, expected)\n\n tm.assert_equal(left > NaT, expected)\n tm.assert_equal(NaT < left, expected)\n tm.assert_equal(left >= NaT, expected)\n tm.assert_equal(NaT <= left, expected)\n\n @pytest.mark.parametrize(\"val\", [datetime(2000, 1, 4), datetime(2000, 1, 5)])\n def test_series_comparison_scalars(self, val):\n series = Series(date_range(\"1/1/2000\", periods=10))\n\n result = series > val\n expected = Series([x > val for x in series])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"left,right\", [(\"lt\", \"gt\"), (\"le\", \"ge\"), (\"eq\", \"eq\"), (\"ne\", \"ne\")]\n )\n def test_timestamp_compare_series(self, left, right):\n # see gh-4982\n # Make sure we can compare Timestamps on the right AND left hand side.\n ser = Series(date_range(\"20010101\", periods=10), name=\"dates\")\n s_nat = ser.copy(deep=True)\n\n ser[0] = Timestamp(\"nat\")\n ser[3] = Timestamp(\"nat\")\n\n left_f = getattr(operator, left)\n right_f = getattr(operator, right)\n\n # No NaT\n expected = left_f(ser, Timestamp(\"20010109\"))\n result = right_f(Timestamp(\"20010109\"), ser)\n tm.assert_series_equal(result, expected)\n\n # NaT\n expected = left_f(ser, Timestamp(\"nat\"))\n result = right_f(Timestamp(\"nat\"), ser)\n tm.assert_series_equal(result, expected)\n\n # 
Compare to Timestamp with series containing NaT\n expected = left_f(s_nat, Timestamp(\"20010109\"))\n result = right_f(Timestamp(\"20010109\"), s_nat)\n tm.assert_series_equal(result, expected)\n\n # Compare to NaT with series containing NaT\n expected = left_f(s_nat, Timestamp(\"nat\"))\n result = right_f(Timestamp(\"nat\"), s_nat)\n tm.assert_series_equal(result, expected)\n\n def test_dt64arr_timestamp_equality(self, box_with_array):\n # GH#11034\n xbox = (\n box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray\n )\n\n ser = Series([Timestamp(\"2000-01-29 01:59:00\"), Timestamp(\"2000-01-30\"), \"NaT\"])\n ser = tm.box_expected(ser, box_with_array)\n\n result = ser != ser\n expected = tm.box_expected([False, False, True], xbox)\n tm.assert_equal(result, expected)\n\n warn = FutureWarning if box_with_array is pd.DataFrame else None\n with tm.assert_produces_warning(warn):\n # alignment for frame vs series comparisons deprecated\n result = ser != ser[0]\n expected = tm.box_expected([False, True, True], xbox)\n tm.assert_equal(result, expected)\n\n with tm.assert_produces_warning(warn):\n # alignment for frame vs series comparisons deprecated\n result = ser != ser[2]\n expected = tm.box_expected([True, True, True], xbox)\n tm.assert_equal(result, expected)\n\n result = ser == ser\n expected = tm.box_expected([True, True, False], xbox)\n tm.assert_equal(result, expected)\n\n with tm.assert_produces_warning(warn):\n # alignment for frame vs series comparisons deprecated\n result = ser == ser[0]\n expected = tm.box_expected([True, False, False], xbox)\n tm.assert_equal(result, expected)\n\n with tm.assert_produces_warning(warn):\n # alignment for frame vs series comparisons deprecated\n result = ser == ser[2]\n expected = tm.box_expected([False, False, False], xbox)\n tm.assert_equal(result, expected)\n\n\nclass TestDatetimeIndexComparisons:\n\n # TODO: moved from tests.indexes.test_base; parametrize and de-duplicate\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],\n )\n def test_comparators(self, op):\n index = tm.makeDateIndex(100)\n element = index[len(index) // 2]\n element = Timestamp(element).to_datetime64()\n\n arr = np.array(index)\n arr_result = op(arr, element)\n index_result = op(index, element)\n\n assert isinstance(index_result, np.ndarray)\n tm.assert_numpy_array_equal(arr_result, index_result)\n\n @pytest.mark.parametrize(\n \"other\",\n [datetime(2016, 1, 1), Timestamp(\"2016-01-01\"), np.datetime64(\"2016-01-01\")],\n )\n def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):\n tz = tz_naive_fixture\n dti = date_range(\"2016-01-01\", periods=2, tz=tz)\n if tz is not None:\n if isinstance(other, np.datetime64):\n # no tzaware version available\n return\n other = localize_pydatetime(other, dti.tzinfo)\n\n result = dti == other\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti > other\n expected = np.array([False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti >= other\n expected = np.array([True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti < other\n expected = np.array([False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti <= other\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_dti_cmp_nat(self, dtype, box_with_array):\n if box_with_array is 
tm.to_array and dtype is object:\n # dont bother testing ndarray comparison methods as this fails\n # on older numpys (since they check object identity)\n return\n\n xbox = (\n box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray\n )\n\n left = DatetimeIndex([Timestamp(\"2011-01-01\"), NaT, Timestamp(\"2011-01-03\")])\n right = DatetimeIndex([NaT, NaT, Timestamp(\"2011-01-03\")])\n\n left = tm.box_expected(left, box_with_array)\n right = tm.box_expected(right, box_with_array)\n\n lhs, rhs = left, right\n if dtype is object:\n lhs, rhs = left.astype(object), right.astype(object)\n\n result = rhs == lhs\n expected = np.array([False, False, True])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n result = lhs != rhs\n expected = np.array([True, True, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n expected = np.array([False, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs == NaT, expected)\n tm.assert_equal(NaT == rhs, expected)\n\n expected = np.array([True, True, True])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs != NaT, expected)\n tm.assert_equal(NaT != lhs, expected)\n\n expected = np.array([False, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs < NaT, expected)\n tm.assert_equal(NaT > lhs, expected)\n\n def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):\n fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])\n fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])\n\n didx1 = DatetimeIndex(\n [\"2014-01-01\", NaT, \"2014-03-01\", NaT, \"2014-05-01\", \"2014-07-01\"]\n )\n didx2 = DatetimeIndex(\n [\"2014-02-01\", \"2014-03-01\", NaT, NaT, \"2014-06-01\", \"2014-07-01\"]\n )\n darr = np.array(\n [\n np_datetime64_compat(\"2014-02-01 00:00Z\"),\n np_datetime64_compat(\"2014-03-01 00:00Z\"),\n np_datetime64_compat(\"nat\"),\n np.datetime64(\"nat\"),\n np_datetime64_compat(\"2014-06-01 00:00Z\"),\n np_datetime64_compat(\"2014-07-01 00:00Z\"),\n ]\n )\n\n cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]\n\n # Check pd.NaT is handles as the same as np.nan\n with tm.assert_produces_warning(None):\n for idx1, idx2 in cases:\n\n result = idx1 < idx2\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 > idx1\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= idx2\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 >= idx1\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == idx2\n expected = np.array([False, False, False, False, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != idx2\n expected = np.array([True, True, True, True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n with tm.assert_produces_warning(None):\n for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:\n result = idx1 < val\n expected = np.array([False, False, False, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 > val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= val\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 >= val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == 
val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != val\n expected = np.array([True, True, True, True, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n # Check pd.NaT is handles as the same as np.nan\n with tm.assert_produces_warning(None):\n for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:\n result = idx1 < val\n expected = np.array([True, False, False, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 > val\n expected = np.array([False, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= val\n expected = np.array([True, False, True, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 >= val\n expected = np.array([False, False, True, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == val\n expected = np.array([False, False, True, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != val\n expected = np.array([True, True, False, True, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n def test_comparison_tzawareness_compat(self, op, box_with_array):\n # GH#18162\n box = box_with_array\n\n dr = date_range(\"2016-01-01\", periods=6)\n dz = dr.tz_localize(\"US/Pacific\")\n\n dr = tm.box_expected(dr, box)\n dz = tm.box_expected(dz, box)\n\n if box is pd.DataFrame:\n tolist = lambda x: x.astype(object).values.tolist()[0]\n else:\n tolist = list\n\n if op not in [operator.eq, operator.ne]:\n msg = (\n r\"Invalid comparison between dtype=datetime64\\[ns.*\\] \"\n \"and (Timestamp|DatetimeArray|list|ndarray)\"\n )\n with pytest.raises(TypeError, match=msg):\n op(dr, dz)\n\n with pytest.raises(TypeError, match=msg):\n op(dr, tolist(dz))\n with pytest.raises(TypeError, match=msg):\n op(dr, np.array(tolist(dz), dtype=object))\n with pytest.raises(TypeError, match=msg):\n op(dz, dr)\n\n with pytest.raises(TypeError, match=msg):\n op(dz, tolist(dr))\n with pytest.raises(TypeError, match=msg):\n op(dz, np.array(tolist(dr), dtype=object))\n\n # The aware==aware and naive==naive comparisons should *not* raise\n assert np.all(dr == dr)\n assert np.all(dr == tolist(dr))\n assert np.all(tolist(dr) == dr)\n assert np.all(np.array(tolist(dr), dtype=object) == dr)\n assert np.all(dr == np.array(tolist(dr), dtype=object))\n\n assert np.all(dz == dz)\n assert np.all(dz == tolist(dz))\n assert np.all(tolist(dz) == dz)\n assert np.all(np.array(tolist(dz), dtype=object) == dz)\n assert np.all(dz == np.array(tolist(dz), dtype=object))\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):\n # GH#18162\n dr = date_range(\"2016-01-01\", periods=6)\n dz = dr.tz_localize(\"US/Pacific\")\n\n dr = tm.box_expected(dr, box_with_array)\n dz = tm.box_expected(dz, box_with_array)\n\n # Check comparisons against scalar Timestamps\n ts = Timestamp(\"2000-03-14 01:59\")\n ts_tz = Timestamp(\"2000-03-14 01:59\", tz=\"Europe/Amsterdam\")\n\n assert np.all(dr > ts)\n msg = r\"Invalid comparison between dtype=datetime64\\[ns.*\\] and Timestamp\"\n if op not in [operator.eq, operator.ne]:\n with pytest.raises(TypeError, match=msg):\n op(dr, ts_tz)\n\n assert np.all(dz > ts_tz)\n if op not in [operator.eq, 
operator.ne]:\n with pytest.raises(TypeError, match=msg):\n op(dz, ts)\n\n if op not in [operator.eq, operator.ne]:\n # GH#12601: Check comparison against Timestamps and DatetimeIndex\n with pytest.raises(TypeError, match=msg):\n op(ts, dz)\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n @pytest.mark.parametrize(\n \"other\",\n [datetime(2016, 1, 1), Timestamp(\"2016-01-01\"), np.datetime64(\"2016-01-01\")],\n )\n # Bug in NumPy? https://github.com/numpy/numpy/issues/13841\n # Raising in __eq__ will fallback to NumPy, which warns, fails,\n # then re-raises the original exception. So we just need to ignore.\n @pytest.mark.filterwarnings(\"ignore:elementwise comp:DeprecationWarning\")\n @pytest.mark.filterwarnings(\"ignore:Converting timezone-aware:FutureWarning\")\n def test_scalar_comparison_tzawareness(\n self, op, other, tz_aware_fixture, box_with_array\n ):\n box = box_with_array\n tz = tz_aware_fixture\n dti = date_range(\"2016-01-01\", periods=2, tz=tz)\n xbox = box if box not in [pd.Index, pd.array] else np.ndarray\n\n dtarr = tm.box_expected(dti, box_with_array)\n if op in [operator.eq, operator.ne]:\n exbool = op is operator.ne\n expected = np.array([exbool, exbool], dtype=bool)\n expected = tm.box_expected(expected, xbox)\n\n result = op(dtarr, other)\n tm.assert_equal(result, expected)\n\n result = op(other, dtarr)\n tm.assert_equal(result, expected)\n else:\n msg = (\n r\"Invalid comparison between dtype=datetime64\\[ns, .*\\] \"\n f\"and {type(other).__name__}\"\n )\n with pytest.raises(TypeError, match=msg):\n op(dtarr, other)\n with pytest.raises(TypeError, match=msg):\n op(other, dtarr)\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n def test_nat_comparison_tzawareness(self, op):\n # GH#19276\n # tzaware DatetimeIndex should not raise when compared to NaT\n dti = DatetimeIndex(\n [\"2014-01-01\", NaT, \"2014-03-01\", NaT, \"2014-05-01\", \"2014-07-01\"]\n )\n expected = np.array([op == operator.ne] * len(dti))\n result = op(dti, NaT)\n tm.assert_numpy_array_equal(result, expected)\n\n result = op(dti.tz_localize(\"US/Pacific\"), NaT)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_dti_cmp_str(self, tz_naive_fixture):\n # GH#22074\n # regardless of tz, we expect these comparisons are valid\n tz = tz_naive_fixture\n rng = date_range(\"1/1/2000\", periods=10, tz=tz)\n other = \"1/1/2000\"\n\n result = rng == other\n expected = np.array([True] + [False] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng != other\n expected = np.array([False] + [True] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng < other\n expected = np.array([False] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng <= other\n expected = np.array([True] + [False] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng > other\n expected = np.array([False] + [True] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng >= other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_dti_cmp_list(self):\n rng = date_range(\"1/1/2000\", periods=10)\n\n result = rng == list(rng)\n expected = rng == rng\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n pd.timedelta_range(\"1D\", periods=10),\n pd.timedelta_range(\"1D\", periods=10).to_series(),\n 
pd.timedelta_range(\"1D\", periods=10).asi8.view(\"m8[ns]\"),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_dti_cmp_tdi_tzawareness(self, other):\n # GH#22074\n # reversion test that we _don't_ call _assert_tzawareness_compat\n # when comparing against TimedeltaIndex\n dti = date_range(\"2000-01-01\", periods=10, tz=\"Asia/Tokyo\")\n\n result = dti == other\n expected = np.array([False] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti != other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n msg = \"Invalid comparison between\"\n with pytest.raises(TypeError, match=msg):\n dti < other\n with pytest.raises(TypeError, match=msg):\n dti <= other\n with pytest.raises(TypeError, match=msg):\n dti > other\n with pytest.raises(TypeError, match=msg):\n dti >= other\n\n def test_dti_cmp_object_dtype(self):\n # GH#22074\n dti = date_range(\"2000-01-01\", periods=10, tz=\"Asia/Tokyo\")\n\n other = dti.astype(\"O\")\n\n result = dti == other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n other = dti.tz_localize(None)\n result = dti != other\n tm.assert_numpy_array_equal(result, expected)\n\n other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)\n result = dti == other\n expected = np.array([True] * 5 + [False] * 5)\n tm.assert_numpy_array_equal(result, expected)\n msg = \">=' not supported between instances of 'Timestamp' and 'Timedelta'\"\n with pytest.raises(TypeError, match=msg):\n dti >= other\n\n\n# ------------------------------------------------------------------\n# Arithmetic\n\n\nclass TestDatetime64Arithmetic:\n # This class is intended for \"finished\" tests that are fully parametrized\n # over DataFrame/Series/Index/DatetimeArray\n\n # -------------------------------------------------------------\n # Addition/Subtraction of timedelta-like\n\n @pytest.mark.arm_slow\n def test_dt64arr_add_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n # GH#22005, GH#22163 check DataFrame doesn't raise TypeError\n tz = tz_naive_fixture\n\n rng = date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = date_range(\"2000-01-01 02:00\", \"2000-02-01 02:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng + two_hours\n tm.assert_equal(result, expected)\n\n def test_dt64arr_iadd_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = date_range(\"2000-01-01 02:00\", \"2000-02-01 02:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n rng += two_hours\n tm.assert_equal(rng, expected)\n\n def test_dt64arr_sub_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = date_range(\"1999-12-31 22:00\", \"2000-01-31 22:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng - two_hours\n tm.assert_equal(result, expected)\n\n def test_dt64arr_isub_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = date_range(\"1999-12-31 22:00\", \"2000-01-31 22:00\", tz=tz)\n\n rng = tm.box_expected(rng, 
box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n rng -= two_hours\n tm.assert_equal(rng, expected)\n\n # TODO: redundant with test_dt64arr_add_timedeltalike_scalar\n def test_dt64arr_add_td64_scalar(self, box_with_array):\n # scalar timedeltas/np.timedelta64 objects\n # operate with np.timedelta64 correctly\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n\n expected = Series(\n [Timestamp(\"20130101 9:01:01\"), Timestamp(\"20130101 9:02:01\")]\n )\n\n dtarr = tm.box_expected(ser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + np.timedelta64(1, \"s\")\n tm.assert_equal(result, expected)\n result = np.timedelta64(1, \"s\") + dtarr\n tm.assert_equal(result, expected)\n\n expected = Series(\n [Timestamp(\"20130101 9:01:00.005\"), Timestamp(\"20130101 9:02:00.005\")]\n )\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + np.timedelta64(5, \"ms\")\n tm.assert_equal(result, expected)\n result = np.timedelta64(5, \"ms\") + dtarr\n tm.assert_equal(result, expected)\n\n def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):\n # GH#23320 special handling for timedelta64(\"NaT\")\n tz = tz_naive_fixture\n\n dti = date_range(\"1994-04-01\", periods=9, tz=tz, freq=\"QS\")\n other = np.timedelta64(\"NaT\")\n expected = DatetimeIndex([\"NaT\"] * 9, tz=tz)\n\n obj = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n msg = \"cannot subtract\"\n with pytest.raises(TypeError, match=msg):\n other - obj\n\n def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):\n\n tz = tz_naive_fixture\n dti = date_range(\"2016-01-01\", periods=3, tz=tz)\n tdi = TimedeltaIndex([\"-1 Day\", \"-1 Day\", \"-1 Day\"])\n tdarr = tdi.values\n\n expected = date_range(\"2015-12-31\", \"2016-01-02\", periods=3, tz=tz)\n\n dtarr = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + tdarr\n tm.assert_equal(result, expected)\n result = tdarr + dtarr\n tm.assert_equal(result, expected)\n\n expected = date_range(\"2016-01-02\", \"2016-01-04\", periods=3, tz=tz)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr - tdarr\n tm.assert_equal(result, expected)\n msg = \"cannot subtract|(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n tdarr - dtarr\n\n # -----------------------------------------------------------------\n # Subtraction of datetime-like scalars\n\n @pytest.mark.parametrize(\n \"ts\",\n [\n Timestamp(\"2013-01-01\"),\n Timestamp(\"2013-01-01\").to_pydatetime(),\n Timestamp(\"2013-01-01\").to_datetime64(),\n ],\n )\n def test_dt64arr_sub_dtscalar(self, box_with_array, ts):\n # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype\n idx = date_range(\"2013-01-01\", periods=3)._with_freq(None)\n idx = tm.box_expected(idx, box_with_array)\n\n expected = TimedeltaIndex([\"0 Days\", \"1 Day\", \"2 Days\"])\n expected = tm.box_expected(expected, box_with_array)\n\n result = idx - ts\n tm.assert_equal(result, expected)\n\n def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):\n # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano\n # for DataFrame operation\n dt64 = np.datetime64(\"2013-01-01\")\n assert 
dt64.dtype == \"datetime64[D]\"\n\n dti = date_range(\"20130101\", periods=3)._with_freq(None)\n dtarr = tm.box_expected(dti, box_with_array)\n\n expected = TimedeltaIndex([\"0 Days\", \"1 Day\", \"2 Days\"])\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr - dt64\n tm.assert_equal(result, expected)\n\n result = dt64 - dtarr\n tm.assert_equal(result, -expected)\n\n def test_dt64arr_sub_timestamp(self, box_with_array):\n ser = date_range(\"2014-03-17\", periods=2, freq=\"D\", tz=\"US/Eastern\")\n ser = ser._with_freq(None)\n ts = ser[0]\n\n ser = tm.box_expected(ser, box_with_array)\n\n delta_series = Series([np.timedelta64(0, \"D\"), np.timedelta64(1, \"D\")])\n expected = tm.box_expected(delta_series, box_with_array)\n\n tm.assert_equal(ser - ts, expected)\n tm.assert_equal(ts - ser, -expected)\n\n def test_dt64arr_sub_NaT(self, box_with_array):\n # GH#18808\n dti = DatetimeIndex([NaT, Timestamp(\"19900315\")])\n ser = tm.box_expected(dti, box_with_array)\n\n result = ser - NaT\n expected = Series([NaT, NaT], dtype=\"timedelta64[ns]\")\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n dti_tz = dti.tz_localize(\"Asia/Tokyo\")\n ser_tz = tm.box_expected(dti_tz, box_with_array)\n\n result = ser_tz - NaT\n expected = Series([NaT, NaT], dtype=\"timedelta64[ns]\")\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n # -------------------------------------------------------------\n # Subtraction of datetime-like array-like\n\n def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):\n dti = date_range(\"2016-01-01\", periods=3, tz=tz_naive_fixture)\n expected = dti - dti\n\n obj = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = obj - obj.astype(object)\n tm.assert_equal(result, expected)\n\n def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):\n dti = date_range(\"2016-01-01\", periods=3, tz=None)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n\n expected = dtarr - dtarr\n result = dtarr - dt64vals\n tm.assert_equal(result, expected)\n result = dt64vals - dtarr\n tm.assert_equal(result, expected)\n\n def test_dt64arr_aware_sub_dt64ndarray_raises(\n self, tz_aware_fixture, box_with_array\n ):\n\n tz = tz_aware_fixture\n dti = date_range(\"2016-01-01\", periods=3, tz=tz)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"subtraction must have the same timezones or\"\n with pytest.raises(TypeError, match=msg):\n dtarr - dt64vals\n with pytest.raises(TypeError, match=msg):\n dt64vals - dtarr\n\n # -------------------------------------------------------------\n # Addition of datetime-like others (invalid)\n\n def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):\n\n tz = tz_naive_fixture\n dti = date_range(\"2016-01-01\", periods=3, tz=tz)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"cannot add\"\n with pytest.raises(TypeError, match=msg):\n dtarr + dt64vals\n with pytest.raises(TypeError, match=msg):\n dt64vals + dtarr\n\n def test_dt64arr_add_timestamp_raises(self, box_with_array):\n # GH#22163 ensure DataFrame doesn't cast Timestamp to i8\n idx = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"])\n idx = tm.box_expected(idx, box_with_array)\n msg = \"cannot add\"\n with pytest.raises(TypeError, match=msg):\n idx + 
Timestamp(\"2011-01-01\")\n with pytest.raises(TypeError, match=msg):\n Timestamp(\"2011-01-01\") + idx\n\n # -------------------------------------------------------------\n # Other Invalid Addition/Subtraction\n\n @pytest.mark.parametrize(\n \"other\",\n [\n 3.14,\n np.array([2.0, 3.0]),\n # GH#13078 datetime +/- Period is invalid\n Period(\"2011-01-01\", freq=\"D\"),\n # https://github.com/pandas-dev/pandas/issues/10329\n time(1, 2, 3),\n ],\n )\n @pytest.mark.parametrize(\"dti_freq\", [None, \"D\"])\n def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):\n dti = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], freq=dti_freq)\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"|\".join(\n [\n \"unsupported operand type\",\n \"cannot (add|subtract)\",\n \"cannot use operands with types\",\n \"ufunc '?(add|subtract)'? cannot use operands with types\",\n \"Concatenation operation is not implemented for NumPy arrays\",\n ]\n )\n assert_invalid_addsub_type(dtarr, other, msg)\n\n @pytest.mark.parametrize(\"pi_freq\", [\"D\", \"W\", \"Q\", \"H\"])\n @pytest.mark.parametrize(\"dti_freq\", [None, \"D\"])\n def test_dt64arr_add_sub_parr(\n self, dti_freq, pi_freq, box_with_array, box_with_array2\n ):\n # GH#20049 subtracting PeriodIndex should raise TypeError\n dti = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], freq=dti_freq)\n pi = dti.to_period(pi_freq)\n\n dtarr = tm.box_expected(dti, box_with_array)\n parr = tm.box_expected(pi, box_with_array2)\n msg = \"|\".join(\n [\n \"cannot (add|subtract)\",\n \"unsupported operand\",\n \"descriptor.*requires\",\n \"ufunc.*cannot use operands\",\n ]\n )\n assert_invalid_addsub_type(dtarr, parr, msg)\n\n def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):\n # https://github.com/pandas-dev/pandas/issues/10329\n\n tz = tz_naive_fixture\n\n obj1 = date_range(\"2012-01-01\", periods=3, tz=tz)\n obj2 = [time(i, i, i) for i in range(3)]\n\n obj1 = tm.box_expected(obj1, box_with_array)\n obj2 = tm.box_expected(obj2, box_with_array)\n\n with warnings.catch_warnings(record=True):\n # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being\n # applied to Series or DatetimeIndex\n # we aren't testing that here, so ignore.\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n\n # If `x + y` raises, then `y + x` should raise here as well\n\n msg = (\n r\"unsupported operand type\\(s\\) for -: \"\n \"'(Timestamp|DatetimeArray)' and 'datetime.time'\"\n )\n with pytest.raises(TypeError, match=msg):\n obj1 - obj2\n\n msg = \"|\".join(\n [\n \"cannot subtract DatetimeArray from ndarray\",\n \"ufunc (subtract|'subtract') cannot use operands with types \"\n r\"dtype\\('O'\\) and dtype\\('<M8\\[ns\\]'\\)\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n obj2 - obj1\n\n msg = (\n r\"unsupported operand type\\(s\\) for \\+: \"\n \"'(Timestamp|DatetimeArray)' and 'datetime.time'\"\n )\n with pytest.raises(TypeError, match=msg):\n obj1 + obj2\n\n msg = \"|\".join(\n [\n r\"unsupported operand type\\(s\\) for \\+: \"\n \"'(Timestamp|DatetimeArray)' and 'datetime.time'\",\n \"ufunc (add|'add') cannot use operands with types \"\n r\"dtype\\('O'\\) and dtype\\('<M8\\[ns\\]'\\)\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n obj2 + obj1\n\n\nclass TestDatetime64DateOffsetArithmetic:\n\n # -------------------------------------------------------------\n # Tick DateOffsets\n\n # TODO: parametrize over timezone?\n def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):\n # GH#4532\n # 
operate with pd.offsets\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n expected = Series(\n [Timestamp(\"20130101 9:01:05\"), Timestamp(\"20130101 9:02:05\")]\n )\n\n ser = tm.box_expected(ser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = ser + pd.offsets.Second(5)\n tm.assert_equal(result, expected)\n\n result2 = pd.offsets.Second(5) + ser\n tm.assert_equal(result2, expected)\n\n def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):\n # GH#4532\n # operate with pd.offsets\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n expected = Series(\n [Timestamp(\"20130101 9:00:55\"), Timestamp(\"20130101 9:01:55\")]\n )\n\n ser = tm.box_expected(ser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = ser - pd.offsets.Second(5)\n tm.assert_equal(result, expected)\n\n result2 = -pd.offsets.Second(5) + ser\n tm.assert_equal(result2, expected)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n pd.offsets.Second(5) - ser\n\n @pytest.mark.parametrize(\n \"cls_name\", [\"Day\", \"Hour\", \"Minute\", \"Second\", \"Milli\", \"Micro\", \"Nano\"]\n )\n def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):\n # GH#4532\n # smoke tests for valid DateOffsets\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n ser = tm.box_expected(ser, box_with_array)\n\n offset_cls = getattr(pd.offsets, cls_name)\n ser + offset_cls(5)\n offset_cls(5) + ser\n ser - offset_cls(5)\n\n def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):\n # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype\n tz = tz_aware_fixture\n if tz == \"US/Pacific\":\n dates = date_range(\"2012-11-01\", periods=3, tz=tz)\n offset = dates + pd.offsets.Hour(5)\n assert dates[0] + pd.offsets.Hour(5) == offset[0]\n\n dates = date_range(\"2010-11-01 00:00\", periods=3, tz=tz, freq=\"H\")\n expected = DatetimeIndex(\n [\"2010-11-01 05:00\", \"2010-11-01 06:00\", \"2010-11-01 07:00\"],\n freq=\"H\",\n tz=tz,\n )\n\n dates = tm.box_expected(dates, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n # TODO: parametrize over the scalar being added? radd? 
sub?\n offset = dates + pd.offsets.Hour(5)\n tm.assert_equal(offset, expected)\n offset = dates + np.timedelta64(5, \"h\")\n tm.assert_equal(offset, expected)\n offset = dates + timedelta(hours=5)\n tm.assert_equal(offset, expected)\n\n # -------------------------------------------------------------\n # RelativeDelta DateOffsets\n\n def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):\n # GH#10699\n vec = DatetimeIndex(\n [\n Timestamp(\"2000-01-05 00:15:00\"),\n Timestamp(\"2000-01-31 00:23:00\"),\n Timestamp(\"2000-01-01\"),\n Timestamp(\"2000-03-31\"),\n Timestamp(\"2000-02-29\"),\n Timestamp(\"2000-12-31\"),\n Timestamp(\"2000-05-15\"),\n Timestamp(\"2001-06-15\"),\n ]\n )\n vec = tm.box_expected(vec, box_with_array)\n vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec\n\n # DateOffset relativedelta fastpath\n relative_kwargs = [\n (\"years\", 2),\n (\"months\", 5),\n (\"days\", 3),\n (\"hours\", 5),\n (\"minutes\", 10),\n (\"seconds\", 2),\n (\"microseconds\", 5),\n ]\n for i, (unit, value) in enumerate(relative_kwargs):\n off = DateOffset(**{unit: value})\n\n expected = DatetimeIndex([x + off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + off)\n\n expected = DatetimeIndex([x - off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - off)\n\n off = DateOffset(**dict(relative_kwargs[: i + 1]))\n\n expected = DatetimeIndex([x + off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + off)\n\n expected = DatetimeIndex([x - off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - off)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n off - vec\n\n # -------------------------------------------------------------\n # Non-Tick, Non-RelativeDelta DateOffsets\n\n # TODO: redundant with test_dt64arr_add_sub_DateOffset? 
that includes\n # tz-aware cases which this does not\n @pytest.mark.parametrize(\n \"cls_and_kwargs\",\n [\n \"YearBegin\",\n (\"YearBegin\", {\"month\": 5}),\n \"YearEnd\",\n (\"YearEnd\", {\"month\": 5}),\n \"MonthBegin\",\n \"MonthEnd\",\n \"SemiMonthEnd\",\n \"SemiMonthBegin\",\n \"Week\",\n (\"Week\", {\"weekday\": 3}),\n \"Week\",\n (\"Week\", {\"weekday\": 6}),\n \"BusinessDay\",\n \"BDay\",\n \"QuarterEnd\",\n \"QuarterBegin\",\n \"CustomBusinessDay\",\n \"CDay\",\n \"CBMonthEnd\",\n \"CBMonthBegin\",\n \"BMonthBegin\",\n \"BMonthEnd\",\n \"BusinessHour\",\n \"BYearBegin\",\n \"BYearEnd\",\n \"BQuarterBegin\",\n (\"LastWeekOfMonth\", {\"weekday\": 2}),\n (\n \"FY5253Quarter\",\n {\n \"qtr_with_extra_week\": 1,\n \"startingMonth\": 1,\n \"weekday\": 2,\n \"variation\": \"nearest\",\n },\n ),\n (\"FY5253\", {\"weekday\": 0, \"startingMonth\": 2, \"variation\": \"nearest\"}),\n (\"WeekOfMonth\", {\"weekday\": 2, \"week\": 2}),\n \"Easter\",\n (\"DateOffset\", {\"day\": 4}),\n (\"DateOffset\", {\"month\": 5}),\n ],\n )\n @pytest.mark.parametrize(\"normalize\", [True, False])\n @pytest.mark.parametrize(\"n\", [0, 5])\n def test_dt64arr_add_sub_DateOffsets(\n self, box_with_array, n, normalize, cls_and_kwargs\n ):\n # GH#10699\n # assert vectorized operation matches pointwise operations\n\n if isinstance(cls_and_kwargs, tuple):\n # If cls_name param is a tuple, then 2nd entry is kwargs for\n # the offset constructor\n cls_name, kwargs = cls_and_kwargs\n else:\n cls_name = cls_and_kwargs\n kwargs = {}\n\n if n == 0 and cls_name in [\n \"WeekOfMonth\",\n \"LastWeekOfMonth\",\n \"FY5253Quarter\",\n \"FY5253\",\n ]:\n # passing n = 0 is invalid for these offset classes\n return\n\n vec = DatetimeIndex(\n [\n Timestamp(\"2000-01-05 00:15:00\"),\n Timestamp(\"2000-01-31 00:23:00\"),\n Timestamp(\"2000-01-01\"),\n Timestamp(\"2000-03-31\"),\n Timestamp(\"2000-02-29\"),\n Timestamp(\"2000-12-31\"),\n Timestamp(\"2000-05-15\"),\n Timestamp(\"2001-06-15\"),\n ]\n )\n vec = tm.box_expected(vec, box_with_array)\n vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec\n\n offset_cls = getattr(pd.offsets, cls_name)\n\n with warnings.catch_warnings(record=True):\n # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being\n # applied to Series or DatetimeIndex\n # we aren't testing that here, so ignore.\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n\n offset = offset_cls(n, normalize=normalize, **kwargs)\n\n expected = DatetimeIndex([x + offset for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + offset)\n\n expected = DatetimeIndex([x - offset for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - offset)\n\n expected = DatetimeIndex([offset + x for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, offset + vec)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n offset - vec\n\n def test_dt64arr_add_sub_DateOffset(self, box_with_array):\n # GH#10699\n s = date_range(\"2000-01-01\", \"2000-01-31\", name=\"a\")\n s = tm.box_expected(s, box_with_array)\n result = s + DateOffset(years=1)\n result2 = DateOffset(years=1) + s\n exp = date_range(\"2001-01-01\", \"2001-01-31\", name=\"a\")._with_freq(None)\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n result = s - DateOffset(years=1)\n exp = 
date_range(\"1999-01-01\", \"1999-01-31\", name=\"a\")._with_freq(None)\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n\n s = DatetimeIndex(\n [\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n s = tm.box_expected(s, box_with_array)\n result = s + pd.offsets.Day()\n result2 = pd.offsets.Day() + s\n exp = DatetimeIndex(\n [\n Timestamp(\"2000-01-16 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-16\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n s = DatetimeIndex(\n [\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n s = tm.box_expected(s, box_with_array)\n result = s + pd.offsets.MonthEnd()\n result2 = pd.offsets.MonthEnd() + s\n exp = DatetimeIndex(\n [\n Timestamp(\"2000-01-31 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-29\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),\n np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),\n np.array( # matching offsets\n [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]\n ),\n ],\n )\n @pytest.mark.parametrize(\"op\", [operator.add, roperator.radd, operator.sub])\n @pytest.mark.parametrize(\"box_other\", [True, False])\n def test_dt64arr_add_sub_offset_array(\n self, tz_naive_fixture, box_with_array, box_other, op, other\n ):\n # GH#18849\n # GH#10699 array of offsets\n\n tz = tz_naive_fixture\n dti = date_range(\"2017-01-01\", periods=2, tz=tz)\n dtarr = tm.box_expected(dti, box_with_array)\n\n other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])\n expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])\n expected = tm.box_expected(expected, box_with_array)\n\n if box_other:\n other = tm.box_expected(other, box_with_array)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = op(dtarr, other)\n\n tm.assert_equal(res, expected)\n\n @pytest.mark.parametrize(\n \"op, offset, exp, exp_freq\",\n [\n (\n \"__add__\",\n DateOffset(months=3, days=10),\n [\n Timestamp(\"2014-04-11\"),\n Timestamp(\"2015-04-11\"),\n Timestamp(\"2016-04-11\"),\n Timestamp(\"2017-04-11\"),\n ],\n None,\n ),\n (\n \"__add__\",\n DateOffset(months=3),\n [\n Timestamp(\"2014-04-01\"),\n Timestamp(\"2015-04-01\"),\n Timestamp(\"2016-04-01\"),\n Timestamp(\"2017-04-01\"),\n ],\n \"AS-APR\",\n ),\n (\n \"__sub__\",\n DateOffset(months=3, days=10),\n [\n Timestamp(\"2013-09-21\"),\n Timestamp(\"2014-09-21\"),\n Timestamp(\"2015-09-21\"),\n Timestamp(\"2016-09-21\"),\n ],\n None,\n ),\n (\n \"__sub__\",\n DateOffset(months=3),\n [\n Timestamp(\"2013-10-01\"),\n Timestamp(\"2014-10-01\"),\n Timestamp(\"2015-10-01\"),\n Timestamp(\"2016-10-01\"),\n ],\n \"AS-OCT\",\n ),\n ],\n )\n def test_dti_add_sub_nonzero_mth_offset(\n self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array\n ):\n # GH 26258\n tz = tz_aware_fixture\n date = date_range(start=\"01 Jan 2014\", end=\"01 Jan 2017\", freq=\"AS\", tz=tz)\n date = tm.box_expected(date, box_with_array, False)\n mth = getattr(date, op)\n result = mth(offset)\n\n expected = DatetimeIndex(exp, tz=tz)\n expected = tm.box_expected(expected, 
box_with_array, False)\n tm.assert_equal(result, expected)\n\n\nclass TestDatetime64OverflowHandling:\n # TODO: box + de-duplicate\n\n def test_dt64_overflow_masking(self, box_with_array):\n # GH#25317\n left = Series([Timestamp(\"1969-12-31\")])\n right = Series([NaT])\n\n left = tm.box_expected(left, box_with_array)\n right = tm.box_expected(right, box_with_array)\n\n expected = TimedeltaIndex([NaT])\n expected = tm.box_expected(expected, box_with_array)\n\n result = left - right\n tm.assert_equal(result, expected)\n\n def test_dt64_series_arith_overflow(self):\n # GH#12534, fixed by GH#19024\n dt = Timestamp(\"1700-01-31\")\n td = Timedelta(\"20000 Days\")\n dti = date_range(\"1949-09-30\", freq=\"100Y\", periods=4)\n ser = Series(dti)\n msg = \"Overflow in int64 addition\"\n with pytest.raises(OverflowError, match=msg):\n ser - dt\n with pytest.raises(OverflowError, match=msg):\n dt - ser\n with pytest.raises(OverflowError, match=msg):\n ser + td\n with pytest.raises(OverflowError, match=msg):\n td + ser\n\n ser.iloc[-1] = NaT\n expected = Series(\n [\"2004-10-03\", \"2104-10-04\", \"2204-10-04\", \"NaT\"], dtype=\"datetime64[ns]\"\n )\n res = ser + td\n tm.assert_series_equal(res, expected)\n res = td + ser\n tm.assert_series_equal(res, expected)\n\n ser.iloc[1:] = NaT\n expected = Series([\"91279 Days\", \"NaT\", \"NaT\", \"NaT\"], dtype=\"timedelta64[ns]\")\n res = ser - dt\n tm.assert_series_equal(res, expected)\n res = dt - ser\n tm.assert_series_equal(res, -expected)\n\n def test_datetimeindex_sub_timestamp_overflow(self):\n dtimax = pd.to_datetime([\"now\", Timestamp.max])\n dtimin = pd.to_datetime([\"now\", Timestamp.min])\n\n tsneg = Timestamp(\"1950-01-01\")\n ts_neg_variants = [\n tsneg,\n tsneg.to_pydatetime(),\n tsneg.to_datetime64().astype(\"datetime64[ns]\"),\n tsneg.to_datetime64().astype(\"datetime64[D]\"),\n ]\n\n tspos = Timestamp(\"1980-01-01\")\n ts_pos_variants = [\n tspos,\n tspos.to_pydatetime(),\n tspos.to_datetime64().astype(\"datetime64[ns]\"),\n tspos.to_datetime64().astype(\"datetime64[D]\"),\n ]\n msg = \"Overflow in int64 addition\"\n for variant in ts_neg_variants:\n with pytest.raises(OverflowError, match=msg):\n dtimax - variant\n\n expected = Timestamp.max.value - tspos.value\n for variant in ts_pos_variants:\n res = dtimax - variant\n assert res[1].value == expected\n\n expected = Timestamp.min.value - tsneg.value\n for variant in ts_neg_variants:\n res = dtimin - variant\n assert res[1].value == expected\n\n for variant in ts_pos_variants:\n with pytest.raises(OverflowError, match=msg):\n dtimin - variant\n\n def test_datetimeindex_sub_datetimeindex_overflow(self):\n # GH#22492, GH#22508\n dtimax = pd.to_datetime([\"now\", Timestamp.max])\n dtimin = pd.to_datetime([\"now\", Timestamp.min])\n\n ts_neg = pd.to_datetime([\"1950-01-01\", \"1950-01-01\"])\n ts_pos = pd.to_datetime([\"1980-01-01\", \"1980-01-01\"])\n\n # General tests\n expected = Timestamp.max.value - ts_pos[1].value\n result = dtimax - ts_pos\n assert result[1].value == expected\n\n expected = Timestamp.min.value - ts_neg[1].value\n result = dtimin - ts_neg\n assert result[1].value == expected\n msg = \"Overflow in int64 addition\"\n with pytest.raises(OverflowError, match=msg):\n dtimax - ts_neg\n\n with pytest.raises(OverflowError, match=msg):\n dtimin - ts_pos\n\n # Edge cases\n tmin = pd.to_datetime([Timestamp.min])\n t1 = tmin + Timedelta.max + Timedelta(\"1us\")\n with pytest.raises(OverflowError, match=msg):\n t1 - tmin\n\n tmax = pd.to_datetime([Timestamp.max])\n t2 = tmax + 
Timedelta.min - Timedelta(\"1us\")\n with pytest.raises(OverflowError, match=msg):\n tmax - t2\n\n\nclass TestTimestampSeriesArithmetic:\n def test_empty_series_add_sub(self):\n # GH#13844\n a = Series(dtype=\"M8[ns]\")\n b = Series(dtype=\"m8[ns]\")\n tm.assert_series_equal(a, a + b)\n tm.assert_series_equal(a, a - b)\n tm.assert_series_equal(a, b + a)\n msg = \"cannot subtract\"\n with pytest.raises(TypeError, match=msg):\n b - a\n\n def test_operators_datetimelike(self):\n\n # ## timedelta64 ###\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n # ## datetime64 ###\n dt1 = Series(\n [\n Timestamp(\"20111230\"),\n Timestamp(\"20120101\"),\n Timestamp(\"20120103\"),\n ]\n )\n dt1.iloc[2] = np.nan\n dt2 = Series(\n [\n Timestamp(\"20111231\"),\n Timestamp(\"20120102\"),\n Timestamp(\"20120104\"),\n ]\n )\n dt1 - dt2\n dt2 - dt1\n\n # datetime64 with timetimedelta\n dt1 + td1\n td1 + dt1\n dt1 - td1\n\n # timetimedelta with datetime64\n td1 + dt1\n dt1 + td1\n\n def test_dt64ser_sub_datetime_dtype(self):\n ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))\n dt = datetime(1993, 6, 22, 13, 30)\n ser = Series([ts])\n result = pd.to_timedelta(np.abs(ser - dt))\n assert result.dtype == \"timedelta64[ns]\"\n\n # -------------------------------------------------------------\n # TODO: This next block of tests came from tests.series.test_operators,\n # needs to be de-duplicated and parametrized over `box` classes\n\n def test_operators_datetimelike_invalid(self, all_arithmetic_operators):\n # these are all TypeEror ops\n op_str = all_arithmetic_operators\n\n def check(get_ser, test_ser):\n\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not\n # defined\n op = getattr(get_ser, op_str, None)\n # Previously, _validate_for_numeric_binop in core/indexes/base.py\n # did this for us.\n with pytest.raises(\n TypeError, match=\"operate|[cC]annot|unsupported operand\"\n ):\n op(test_ser)\n\n # ## timedelta64 ###\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n # ## datetime64 ###\n dt1 = Series(\n [Timestamp(\"20111230\"), Timestamp(\"20120101\"), Timestamp(\"20120103\")]\n )\n dt1.iloc[2] = np.nan\n dt2 = Series(\n [Timestamp(\"20111231\"), Timestamp(\"20120102\"), Timestamp(\"20120104\")]\n )\n if op_str not in [\"__sub__\", \"__rsub__\"]:\n check(dt1, dt2)\n\n # ## datetime64 with timetimedelta ###\n # TODO(jreback) __rsub__ should raise?\n if op_str not in [\"__add__\", \"__radd__\", \"__sub__\"]:\n check(dt1, td1)\n\n # 8260, 10763\n # datetime64 with tz\n tz = \"US/Eastern\"\n dt1 = Series(date_range(\"2000-01-01 09:00:00\", periods=5, tz=tz), name=\"foo\")\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n td1 = Series(pd.timedelta_range(\"1 days 1 min\", periods=5, freq=\"H\"))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n\n if op_str not in [\"__add__\", \"__radd__\", \"__sub__\", \"__rsub__\"]:\n check(dt2, td2)\n\n def test_sub_single_tz(self):\n # GH#12290\n s1 = Series([Timestamp(\"2016-02-10\", tz=\"America/Sao_Paulo\")])\n s2 = Series([Timestamp(\"2016-02-08\", tz=\"America/Sao_Paulo\")])\n result = s1 - s2\n expected = Series([Timedelta(\"2days\")])\n tm.assert_series_equal(result, expected)\n result = s2 - s1\n expected = Series([Timedelta(\"-2days\")])\n tm.assert_series_equal(result, expected)\n\n def test_dt64tz_series_sub_dtitz(self):\n # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series\n # (with same tz) raises, fixed by #19024\n dti = date_range(\"1999-09-30\", 
periods=10, tz=\"US/Pacific\")\n ser = Series(dti)\n expected = Series(TimedeltaIndex([\"0days\"] * 10))\n\n res = dti - ser\n tm.assert_series_equal(res, expected)\n res = ser - dti\n tm.assert_series_equal(res, expected)\n\n def test_sub_datetime_compat(self):\n # see GH#14088\n s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])\n dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)\n exp = Series([Timedelta(\"1 days\"), NaT])\n tm.assert_series_equal(s - dt, exp)\n tm.assert_series_equal(s - Timestamp(dt), exp)\n\n def test_dt64_series_add_mixed_tick_DateOffset(self):\n # GH#4532\n # operate with pd.offsets\n s = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n\n result = s + pd.offsets.Milli(5)\n result2 = pd.offsets.Milli(5) + s\n expected = Series(\n [Timestamp(\"20130101 9:01:00.005\"), Timestamp(\"20130101 9:02:00.005\")]\n )\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)\n expected = Series(\n [Timestamp(\"20130101 9:06:00.005\"), Timestamp(\"20130101 9:07:00.005\")]\n )\n tm.assert_series_equal(result, expected)\n\n def test_datetime64_ops_nat(self):\n # GH#11349\n datetime_series = Series([NaT, Timestamp(\"19900315\")])\n nat_series_dtype_timestamp = Series([NaT, NaT], dtype=\"datetime64[ns]\")\n single_nat_dtype_datetime = Series([NaT], dtype=\"datetime64[ns]\")\n\n # subtraction\n tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)\n msg = \"Unary negative expects\"\n with pytest.raises(TypeError, match=msg):\n -single_nat_dtype_datetime + datetime_series\n\n tm.assert_series_equal(\n -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n with pytest.raises(TypeError, match=msg):\n -single_nat_dtype_datetime + nat_series_dtype_timestamp\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n\n # -------------------------------------------------------------\n # Invalid Operations\n # TODO: this block also needs to be de-duplicated and parametrized\n\n @pytest.mark.parametrize(\n \"dt64_series\",\n [\n Series([Timestamp(\"19900315\"), Timestamp(\"19900315\")]),\n Series([NaT, Timestamp(\"19900315\")]),\n Series([NaT, NaT], dtype=\"datetime64[ns]\"),\n ],\n )\n @pytest.mark.parametrize(\"one\", [1, 1.0, np.array(1)])\n def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):\n # multiplication\n msg = \"cannot perform .* with this index type\"\n with pytest.raises(TypeError, match=msg):\n dt64_series * one\n with pytest.raises(TypeError, match=msg):\n one * dt64_series\n\n # division\n with pytest.raises(TypeError, match=msg):\n dt64_series / one\n with pytest.raises(TypeError, match=msg):\n one / dt64_series\n\n # TODO: parametrize over box\n @pytest.mark.parametrize(\"op\", [\"__add__\", \"__radd__\", \"__sub__\", \"__rsub__\"])\n def test_dt64_series_add_intlike(self, tz_naive_fixture, op):\n # GH#19123\n tz = tz_naive_fixture\n dti = DatetimeIndex([\"2016-01-02\", \"2016-02-03\", \"NaT\"], tz=tz)\n ser = Series(dti)\n\n other = Series([20, 30, 40], dtype=\"uint8\")\n\n method = getattr(ser, op)\n msg = \"|\".join(\n [\n \"Addition/subtraction of integers and integer-arrays\",\n 
\"cannot subtract .* from ndarray\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n method(1)\n with pytest.raises(TypeError, match=msg):\n method(other)\n with pytest.raises(TypeError, match=msg):\n method(np.array(other))\n with pytest.raises(TypeError, match=msg):\n method(pd.Index(other))\n\n # -------------------------------------------------------------\n # Timezone-Centric Tests\n\n def test_operators_datetimelike_with_timezones(self):\n tz = \"US/Eastern\"\n dt1 = Series(date_range(\"2000-01-01 09:00:00\", periods=5, tz=tz), name=\"foo\")\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n\n td1 = Series(pd.timedelta_range(\"1 days 1 min\", periods=5, freq=\"H\"))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n\n result = dt1 + td1[0]\n exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 + td2[0]\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n # odd numpy behavior with scalar timedeltas\n result = td1[0] + dt1\n exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = td2[0] + dt2\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt1 - td1[0]\n exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n td1[0] - dt1\n\n result = dt2 - td2[0]\n exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n with pytest.raises(TypeError, match=msg):\n td2[0] - dt2\n\n result = dt1 + td1\n exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 + td2\n exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt1 - td1\n exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 - td2\n exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n msg = \"cannot (add|subtract)\"\n with pytest.raises(TypeError, match=msg):\n td1 - dt1\n with pytest.raises(TypeError, match=msg):\n td2 - dt2\n\n\nclass TestDatetimeIndexArithmetic:\n\n # -------------------------------------------------------------\n # Binary operations DatetimeIndex and int\n\n def test_dti_addsub_int(self, tz_naive_fixture, one):\n # Variants of `one` for #19012\n tz = tz_naive_fixture\n rng = date_range(\"2000-01-01 09:00\", freq=\"H\", periods=10, tz=tz)\n msg = \"Addition/subtraction of integers\"\n\n with pytest.raises(TypeError, match=msg):\n rng + one\n with pytest.raises(TypeError, match=msg):\n rng += one\n with pytest.raises(TypeError, match=msg):\n rng - one\n with pytest.raises(TypeError, match=msg):\n rng -= one\n\n # -------------------------------------------------------------\n # __add__/__sub__ with integer arrays\n\n @pytest.mark.parametrize(\"freq\", [\"H\", \"D\"])\n @pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_dti_add_intarray_tick(self, int_holder, freq):\n # GH#19959\n dti = date_range(\"2016-01-01\", periods=2, freq=freq)\n other = int_holder([4, -1])\n\n msg = \"Addition/subtraction of integers|cannot subtract DatetimeArray from\"\n assert_invalid_addsub_type(dti, other, msg)\n\n @pytest.mark.parametrize(\"freq\", [\"W\", \"M\", \"MS\", \"Q\"])\n 
@pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_dti_add_intarray_non_tick(self, int_holder, freq):\n # GH#19959\n dti = date_range(\"2016-01-01\", periods=2, freq=freq)\n other = int_holder([4, -1])\n\n msg = \"Addition/subtraction of integers|cannot subtract DatetimeArray from\"\n assert_invalid_addsub_type(dti, other, msg)\n\n @pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_dti_add_intarray_no_freq(self, int_holder):\n # GH#19959\n dti = DatetimeIndex([\"2016-01-01\", \"NaT\", \"2017-04-05 06:07:08\"])\n other = int_holder([9, 4, -1])\n msg = \"|\".join(\n [\"cannot subtract DatetimeArray from\", \"Addition/subtraction of integers\"]\n )\n assert_invalid_addsub_type(dti, other, msg)\n\n # -------------------------------------------------------------\n # Binary operations DatetimeIndex and TimedeltaIndex/array\n\n def test_dti_add_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = date_range(\"2017-01-01\", periods=10, tz=tz)\n expected = expected._with_freq(None)\n\n # add with TimdeltaIndex\n result = dti + tdi\n tm.assert_index_equal(result, expected)\n\n result = tdi + dti\n tm.assert_index_equal(result, expected)\n\n # add with timedelta64 array\n result = dti + tdi.values\n tm.assert_index_equal(result, expected)\n\n result = tdi.values + dti\n tm.assert_index_equal(result, expected)\n\n def test_dti_iadd_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = date_range(\"2017-01-01\", periods=10, tz=tz)\n expected = expected._with_freq(None)\n\n # iadd with TimdeltaIndex\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result += tdi\n tm.assert_index_equal(result, expected)\n\n result = pd.timedelta_range(\"0 days\", periods=10)\n result += dti\n tm.assert_index_equal(result, expected)\n\n # iadd with timedelta64 array\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result += tdi.values\n tm.assert_index_equal(result, expected)\n\n result = pd.timedelta_range(\"0 days\", periods=10)\n result += dti\n tm.assert_index_equal(result, expected)\n\n def test_dti_sub_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = date_range(\"2017-01-01\", periods=10, tz=tz, freq=\"-1D\")\n expected = expected._with_freq(None)\n\n # sub with TimedeltaIndex\n result = dti - tdi\n tm.assert_index_equal(result, expected)\n\n msg = \"cannot subtract .*TimedeltaArray\"\n with pytest.raises(TypeError, match=msg):\n tdi - dti\n\n # sub with timedelta64 array\n result = dti - tdi.values\n tm.assert_index_equal(result, expected)\n\n msg = \"cannot subtract DatetimeArray from\"\n with pytest.raises(TypeError, match=msg):\n tdi.values - dti\n\n def test_dti_isub_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = date_range(\"2017-01-01\", periods=10, tz=tz, freq=\"-1D\")\n expected = expected._with_freq(None)\n\n # isub with TimedeltaIndex\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result -= tdi\n tm.assert_index_equal(result, expected)\n\n msg 
= \"cannot subtract .* from a TimedeltaArray\"\n with pytest.raises(TypeError, match=msg):\n tdi -= dti\n\n # isub with timedelta64 array\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result -= tdi.values\n tm.assert_index_equal(result, expected)\n\n msg = \"|\".join(\n [\n \"cannot perform __neg__ with this index type:\",\n \"ufunc subtract cannot use operands with types\",\n \"cannot subtract DatetimeArray from\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n tdi.values -= dti\n\n # -------------------------------------------------------------\n # Binary Operations DatetimeIndex and datetime-like\n # TODO: A couple other tests belong in this section. Move them in\n # A PR where there isn't already a giant diff.\n\n @pytest.mark.parametrize(\n \"addend\",\n [\n datetime(2011, 1, 1),\n DatetimeIndex([\"2011-01-01\", \"2011-01-02\"]),\n DatetimeIndex([\"2011-01-01\", \"2011-01-02\"]).tz_localize(\"US/Eastern\"),\n np.datetime64(\"2011-01-01\"),\n Timestamp(\"2011-01-01\"),\n ],\n ids=lambda x: type(x).__name__,\n )\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):\n # GH#9631\n dti = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"]).tz_localize(tz)\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"cannot add DatetimeArray and\"\n\n with pytest.raises(TypeError, match=msg):\n dtarr + addend\n with pytest.raises(TypeError, match=msg):\n addend + dtarr\n\n # -------------------------------------------------------------\n\n def test_dta_add_sub_index(self, tz_naive_fixture):\n # Check that DatetimeArray defers to Index classes\n dti = date_range(\"20130101\", periods=3, tz=tz_naive_fixture)\n dta = dti.array\n result = dta - dti\n expected = dti - dti\n tm.assert_index_equal(result, expected)\n\n tdi = result\n result = dta + tdi\n expected = dti + tdi\n tm.assert_index_equal(result, expected)\n\n result = dta - tdi\n expected = dti - tdi\n tm.assert_index_equal(result, expected)\n\n def test_sub_dti_dti(self):\n # previously performed setop (deprecated in 0.16.0), now changed to\n # return subtraction -> TimeDeltaIndex (GH ...)\n\n dti = date_range(\"20130101\", periods=3)\n dti_tz = date_range(\"20130101\", periods=3).tz_localize(\"US/Eastern\")\n dti_tz2 = date_range(\"20130101\", periods=3).tz_localize(\"UTC\")\n expected = TimedeltaIndex([0, 0, 0])\n\n result = dti - dti\n tm.assert_index_equal(result, expected)\n\n result = dti_tz - dti_tz\n tm.assert_index_equal(result, expected)\n msg = \"DatetimeArray subtraction must have the same timezones or\"\n with pytest.raises(TypeError, match=msg):\n dti_tz - dti\n\n with pytest.raises(TypeError, match=msg):\n dti - dti_tz\n\n with pytest.raises(TypeError, match=msg):\n dti_tz - dti_tz2\n\n # isub\n dti -= dti\n tm.assert_index_equal(dti, expected)\n\n # different length raises ValueError\n dti1 = date_range(\"20130101\", periods=3)\n dti2 = date_range(\"20130101\", periods=4)\n msg = \"cannot add indices of unequal length\"\n with pytest.raises(ValueError, match=msg):\n dti1 - dti2\n\n # NaN propagation\n dti1 = DatetimeIndex([\"2012-01-01\", np.nan, \"2012-01-03\"])\n dti2 = DatetimeIndex([\"2012-01-02\", \"2012-01-03\", np.nan])\n expected = TimedeltaIndex([\"1 days\", np.nan, np.nan])\n result = dti2 - dti1\n tm.assert_index_equal(result, expected)\n\n # -------------------------------------------------------------------\n # TODO: Most of this block is moved from series or frame tests, needs\n # cleanup, box-parametrization, 
and de-duplication\n\n @pytest.mark.parametrize(\"op\", [operator.add, operator.sub])\n def test_timedelta64_equal_timedelta_supported_ops(self, op):\n ser = Series(\n [\n Timestamp(\"20130301\"),\n Timestamp(\"20130228 23:00:00\"),\n Timestamp(\"20130228 22:00:00\"),\n Timestamp(\"20130228 21:00:00\"),\n ]\n )\n\n intervals = [\"D\", \"h\", \"m\", \"s\", \"us\"]\n\n def timedelta64(*args):\n # see casting notes in NumPy gh-12927\n return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))\n\n for d, h, m, s, us in product(*([range(2)] * 5)):\n nptd = timedelta64(d, h, m, s, us)\n pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)\n lhs = op(ser, nptd)\n rhs = op(ser, pytd)\n\n tm.assert_series_equal(lhs, rhs)\n\n def test_ops_nat_mixed_datetime64_timedelta64(self):\n # GH#11349\n timedelta_series = Series([NaT, Timedelta(\"1s\")])\n datetime_series = Series([NaT, Timestamp(\"19900315\")])\n nat_series_dtype_timedelta = Series([NaT, NaT], dtype=\"timedelta64[ns]\")\n nat_series_dtype_timestamp = Series([NaT, NaT], dtype=\"datetime64[ns]\")\n single_nat_dtype_datetime = Series([NaT], dtype=\"datetime64[ns]\")\n single_nat_dtype_timedelta = Series([NaT], dtype=\"timedelta64[ns]\")\n\n # subtraction\n tm.assert_series_equal(\n datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp\n )\n\n # without a Series wrapping the NaT, it is ambiguous\n # whether it is a datetime64 or timedelta64\n # defaults to interpreting it as timedelta64\n tm.assert_series_equal(\n nat_series_dtype_timestamp - single_nat_dtype_datetime,\n nat_series_dtype_timedelta,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp - single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n msg = \"cannot subtract a datelike\"\n with pytest.raises(TypeError, match=msg):\n timedelta_series - single_nat_dtype_datetime\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + single_nat_dtype_datetime,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_datetime + nat_series_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n\n def test_ufunc_coercions(self):\n idx = date_range(\"2011-01-01\", periods=3, freq=\"2D\", name=\"x\")\n\n delta = np.timedelta64(1, \"D\")\n exp = date_range(\"2011-01-02\", periods=3, freq=\"2D\", name=\"x\")\n for result in [idx + delta, np.add(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == \"2D\"\n\n exp = date_range(\"2010-12-31\", periods=3, freq=\"2D\", name=\"x\")\n\n for result in [idx - delta, np.subtract(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == 
\"2D\"\n\n # When adding/subtracting an ndarray (which has no .freq), the result\n # does not infer freq\n idx = idx._with_freq(None)\n delta = np.array(\n [np.timedelta64(1, \"D\"), np.timedelta64(2, \"D\"), np.timedelta64(3, \"D\")]\n )\n exp = DatetimeIndex([\"2011-01-02\", \"2011-01-05\", \"2011-01-08\"], name=\"x\")\n\n for result in [idx + delta, np.add(idx, delta)]:\n tm.assert_index_equal(result, exp)\n assert result.freq == exp.freq\n\n exp = DatetimeIndex([\"2010-12-31\", \"2011-01-01\", \"2011-01-02\"], name=\"x\")\n for result in [idx - delta, np.subtract(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == exp.freq\n\n def test_dti_add_series(self, tz_naive_fixture, names):\n # GH#13905\n tz = tz_naive_fixture\n index = DatetimeIndex(\n [\"2016-06-28 05:30\", \"2016-06-28 05:31\"], tz=tz, name=names[0]\n )\n ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])\n expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])\n\n # passing name arg isn't enough when names[2] is None\n expected.name = names[2]\n assert expected.dtype == index.dtype\n result = ser + index\n tm.assert_series_equal(result, expected)\n result2 = index + ser\n tm.assert_series_equal(result2, expected)\n\n expected = index + Timedelta(seconds=5)\n result3 = ser.values + index\n tm.assert_index_equal(result3, expected)\n result4 = index + ser.values\n tm.assert_index_equal(result4, expected)\n\n @pytest.mark.parametrize(\"op\", [operator.add, roperator.radd, operator.sub])\n def test_dti_addsub_offset_arraylike(\n self, tz_naive_fixture, names, op, index_or_series\n ):\n # GH#18849, GH#19744\n box = pd.Index\n other_box = index_or_series\n\n tz = tz_naive_fixture\n dti = date_range(\"2017-01-01\", periods=2, tz=tz, name=names[0])\n other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])\n\n xbox = get_upcast_box(box, other)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = op(dti, other)\n\n expected = DatetimeIndex(\n [op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq=\"infer\"\n )\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(res, expected)\n\n @pytest.mark.parametrize(\"other_box\", [pd.Index, np.array])\n def test_dti_addsub_object_arraylike(\n self, tz_naive_fixture, box_with_array, other_box\n ):\n tz = tz_naive_fixture\n\n dti = date_range(\"2017-01-01\", periods=2, tz=tz)\n dtarr = tm.box_expected(dti, box_with_array)\n other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)])\n xbox = get_upcast_box(box_with_array, other)\n\n expected = DatetimeIndex([\"2017-01-31\", \"2017-01-06\"], tz=tz_naive_fixture)\n expected = tm.box_expected(expected, xbox)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dtarr + other\n tm.assert_equal(result, expected)\n\n expected = DatetimeIndex([\"2016-12-31\", \"2016-12-29\"], tz=tz_naive_fixture)\n expected = tm.box_expected(expected, xbox)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dtarr - other\n tm.assert_equal(result, expected)\n\n\[email protected](\"years\", [-1, 0, 1])\[email protected](\"months\", [-2, 0, 2])\ndef test_shift_months(years, months):\n dti = DatetimeIndex(\n [\n Timestamp(\"2000-01-05 00:15:00\"),\n Timestamp(\"2000-01-31 00:23:00\"),\n Timestamp(\"2000-01-01\"),\n Timestamp(\"2000-02-29\"),\n Timestamp(\"2000-12-31\"),\n ]\n )\n actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))\n\n raw = [x + 
pd.offsets.DateOffset(years=years, months=months) for x in dti]\n expected = DatetimeIndex(raw)\n tm.assert_index_equal(actual, expected)\n\n\ndef test_dt64arr_addsub_object_dtype_2d():\n # block-wise DataFrame operations will require operating on 2D\n # DatetimeArray/TimedeltaArray, so check that specifically.\n dti = date_range(\"1994-02-13\", freq=\"2W\", periods=4)\n dta = dti._data.reshape((4, 1))\n\n other = np.array([[pd.offsets.Day(n)] for n in range(4)])\n assert other.shape == dta.shape\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dta + other\n with tm.assert_produces_warning(PerformanceWarning):\n expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)\n\n assert isinstance(result, DatetimeArray)\n assert result.freq is None\n tm.assert_numpy_array_equal(result._data, expected._data)\n\n with tm.assert_produces_warning(PerformanceWarning):\n # Case where we expect to get a TimedeltaArray back\n result2 = dta - dta.astype(object)\n\n assert isinstance(result2, TimedeltaArray)\n assert result2.shape == (4, 1)\n assert result2.freq is None\n assert (result2.asi8 == 0).all()\n"
] | [
[
"pandas.to_datetime",
"pandas.Series",
"pandas.offsets.Day",
"pandas.tests.arithmetic.common.assert_invalid_addsub_type",
"pandas.offsets.DateOffset",
"numpy.all",
"pandas.tests.arithmetic.common.get_upcast_box",
"pandas._testing.box_expected",
"pandas._testing.makeDateIndex",
"pandas._libs.tslibs.conversion.localize_pydatetime",
"pandas.offsets.Hour",
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.compat.np_datetime64_compat",
"numpy.subtract",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.offsets.MonthEnd",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_produces_warning",
"pandas._libs.tslibs.offsets.shift_months",
"pandas.array",
"pandas.offsets.Milli",
"pandas.Timedelta",
"numpy.timedelta64",
"pandas.date_range",
"pandas.offsets.Second",
"numpy.array",
"pandas.timedelta_range",
"pandas.DateOffset",
"pandas._testing.assert_equal",
"pandas.TimedeltaIndex",
"numpy.abs",
"pandas.period_range",
"pandas.tests.arithmetic.common.assert_invalid_comparison",
"numpy.datetime64",
"pandas.offsets.Minute",
"pandas.Timestamp.now",
"pandas.Period",
"numpy.add",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jhuapl-boss/intern | [
"cd6513e9f3ef3af02d3a82e3dda5d905a4003d2c"
] | [
"intern/convenience/array.py"
] | [
"\"\"\"\nCopyright 2018-2021 The Johns Hopkins University Applied Physics Laboratory.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n# Standard imports\nfrom typing import Optional, Union, Tuple\nimport abc\nimport json\nfrom collections import namedtuple\nfrom urllib.parse import unquote\n\nfrom intern.service.boss.httperrorlist import HTTPErrorList\n\nfrom .uri import parse_fquri\n\n\n# Pip-installable imports\nimport numpy as np\n\nfrom intern.resource.boss.resource import (\n CollectionResource,\n ChannelResource,\n CoordinateFrameResource,\n ExperimentResource,\n)\nfrom intern.service.boss.metadata import MetadataService\nfrom intern.remote.boss import BossRemote\n\n# A named tuple that represents a bossDB URI.\nbossdbURI = namedtuple(\n \"bossdbURI\", [\"collection\", \"experiment\", \"channel\", \"resolution\"]\n)\n\n_DEFAULT_BOSS_OPTIONS = {\n \"protocol\": \"https\",\n \"host\": \"api.bossdb.io\",\n \"token\": \"public\",\n}\n\n\nclass VolumeProvider(abc.ABC):\n \"\"\"\n A provider for the common get/put cutout operations on a Remote.\n\n TODO: This should ultimately be subsumed back into the Remote API.\n\n \"\"\"\n\n def get_channel(self, channel: str, collection: str, experiment: str):\n ...\n\n def get_project(self, resource):\n ...\n\n def create_project(self, resource):\n ...\n\n def get_cutout(\n self,\n channel: ChannelResource,\n resolution: int,\n xs: Tuple[int, int],\n ys: Tuple[int, int],\n zs: Tuple[int, int],\n ):\n ...\n\n def create_cutout(\n self,\n channel: ChannelResource,\n resolution: int,\n xs: Tuple[int, int],\n ys: Tuple[int, int],\n zs: Tuple[int, int],\n data,\n ):\n ...\n\n\nclass _InternVolumeProvider(VolumeProvider):\n \"\"\"\n A VolumeProvider that backends the intern.BossRemote API.\n\n This is used instead of directly accessing the BossRemote so that the\n convenience `array` can be easily stripped out. 
(The array module was\n originally a visitor from another Python package called `emboss`, so moving\n VolumeProvider endpoints back into the Remote API is an outstanding TODO.)\n \"\"\"\n\n def __init__(self, boss: BossRemote = None):\n if boss is None:\n try:\n boss = BossRemote()\n except:\n boss = BossRemote(_DEFAULT_BOSS_OPTIONS)\n self.boss = boss\n\n def get_channel(self, channel: str, collection: str, experiment: str):\n return self.boss.get_channel(channel, collection, experiment)\n\n def get_project(self, resource):\n return self.boss.get_project(resource)\n\n def create_project(self, resource):\n return self.boss.create_project(resource)\n\n def get_cutout(\n self,\n channel: ChannelResource,\n resolution: int,\n xs: Tuple[int, int],\n ys: Tuple[int, int],\n zs: Tuple[int, int],\n ):\n return self.boss.get_cutout(channel, resolution, xs, ys, zs)\n\n def create_cutout(\n self,\n channel: ChannelResource,\n resolution: int,\n xs: Tuple[int, int],\n ys: Tuple[int, int],\n zs: Tuple[int, int],\n data,\n ):\n return self.boss.create_cutout(channel, resolution, xs, ys, zs, data)\n\n\ndef _construct_boss_url(boss, col, exp, chan, res, xs, ys, zs) -> str:\n # TODO: use boss host\n return f\"https://api.theboss.io/v1/cutout/{col}/{exp}/{chan}/{res}/{xs[0]}:{xs[1]}/{ys[0]}:{ys[1]}/{zs[0]}:{zs[1]}\"\n\n\ndef parse_bossdb_uri(uri: str) -> bossdbURI:\n \"\"\"\n Parse a bossDB URI and handle malform errors.\n\n Arguments:\n uri (str): URI of the form bossdb://<collection>/<experiment>/<channel>\n\n Returns:\n bossdbURI\n\n \"\"\"\n t = uri.split(\"://\")[1].split(\"/\")\n if len(t) == 3:\n return bossdbURI(t[0], t[1], t[2], None)\n if len(t) == 4:\n return bossdbURI(t[0], t[1], t[2], int(t[3]))\n raise ValueError(f\"Cannot parse URI {uri}.\")\n\n\nclass AxisOrder:\n XYZ = \"XYZ\"\n ZYX = \"ZYX\"\n\n\nclass _MetadataProvider:\n \"\"\"\n Serves as a dictionary-like API for resource metadata.\n\n \"\"\"\n\n def __init__(self, dataset) -> None:\n \"\"\"\n Create a new metadata provider.\n\n Arguments:\n dataset (array)\n\n \"\"\"\n self._array = dataset\n self._resource = dataset._channel\n self._remote = dataset.volume_provider.boss\n\n def keys(self):\n return self._remote.list_metadata(self._resource)\n\n def items(self):\n for key in self.keys():\n yield (key, self[key])\n\n def __delitem__(self, key):\n return self._remote.delete_metadata(self._resource, [key])\n\n def __contains__(self, key):\n try:\n self[key]\n return True\n except KeyError:\n return False\n\n def __getitem__(self, key):\n try:\n return self._remote.get_metadata(self._resource, [key])[key]\n except HTTPErrorList as err:\n raise KeyError(\n f\"The key {key!s} was not found in the metadata database.\"\n ) from err\n\n def __setitem__(self, key, value):\n return self._remote.create_metadata(self._resource, {key: value})\n\n def update_item(self, key, value):\n return self._remote.update_metadata(self._resource, {key: value})\n\n def bulk_update(self, items: dict):\n return self._remote.create_metadata(self._resource, items)\n\n def bulk_delete(self, keys: list):\n return self._remote.delete_metadata(self._resource, keys)\n\nclass array:\n \"\"\"\n An intern/bossDB-backed numpy array.\n\n Like a numpy.memmap array, an `intern.array` is backed by data that lives\n outside of conventional memory. The data can live in, for example, a bossDB\n that lives in AWS, or it can live in a local or remote bossphorus instance.\n\n Data are downloaded when a request is made. 
This means that even \"simple\"\n commands like `array#[:]sum()` are very network-heavy (don't do this!).\n\n Examples:\n\n >>> import intern.array\n >>> data = array(\"bossdb://collection/experiment/channel\")\n >>> downloaded_sample = data[100, 100:200, 100:200]\n\n \"\"\"\n\n def __init__(\n self,\n channel: Union[ChannelResource, Tuple, str],\n resolution: int = 0,\n volume_provider: VolumeProvider = None,\n axis_order: str = AxisOrder.ZYX,\n create_new: bool = False,\n description: Optional[str] = None,\n dtype: Optional[str] = None,\n extents: Optional[Tuple[int, int, int]] = None,\n voxel_size: Optional[Tuple[int, int, int]] = None,\n voxel_unit: Optional[str] = None,\n downsample_levels: int = 6,\n downsample_method: Optional[str] = \"anisotropic\",\n coordinate_frame_name: Optional[str] = None,\n coordinate_frame_desc: Optional[str] = None,\n collection_desc: Optional[str] = None,\n experiment_desc: Optional[str] = None,\n source_channel: Optional[str] = None,\n boss_config: Optional[dict] = None,\n ) -> None:\n \"\"\"\n Construct a new intern-backed array.\n\n Arguments:\n channel (intern.resource.boss.ChannelResource): The channel from\n which data will be downloaded.\n resolution (int: 0): The native resolution or MIP to use\n volume_provider (VolumeProvider): The remote-like to use\n axis_order (str = AxisOrder.ZYX): The axis-ordering to use for data\n cutouts. Defaults to ZYX. DOES NOT affect the `voxel_size` or\n `extents` arguments to this constructor.\n create_new (bool: False): Whether to create new Resources if they\n do not exist. Does not work with public token.\n dtype (str): Only required if `create_new = True`. Specifies the\n numpy-style datatype for this new dataset (e.g. \"uint8\").\n description (str): Only required if `create_new = True`. Sets the\n description for the newly-created collection, experiment,\n channel, and coordframe resources.\n extents: Optional[Tuple[int, int, int]]: Only required if\n `create_new = True`. Specifies the total dataset extents of\n this new dataset, in ZYX order.\n voxel_size: Optional[Tuple[int, int, int]]: Only required if\n `create_new = True`. Specifies the voxel dimensions of this new\n dataset, in ZYX order.\n voxel_unit: Optional[str]: Only required if `create_new = True`.\n Specifies the voxel-dimension unit. For example, \"nanometers\".\n downsample_levels (int: 6): The number of downsample levels.\n downsample_method (Optional[str]): The type of downsample to use.\n If unset, defaults to 'anisotropic'.\n coordinate_frame_name (Optional[str]): If set, the name to use for\n the newly created coordinate frame. If not set, the name of the\n coordinate frame will be chosen automatically.\n coordinate_frame_desc (Optional[str]): If set, the description text\n to use for the newly created coordinate frame. If not set, the\n description will be chosen automatically.\n collection_desc (Optional[str]): The description text to use for a\n newly created collection. If not set, the description will be\n chosen automatically.\n experiment_desc (Optional[str]): The description text to use for a\n newly created experiment. If not set, the description will be\n chosen automatically.\n source_channel (Optional[str]): The channel to use as the source\n for this new channel, if `create_new` is True and this is\n going to be an annotation channel (dtype!=uint8).\n boss_config (Optional[dict]): The BossRemote configuration dict to\n use in order to authenticate with a BossDB remote. 
This option\n is mutually exclusive with the VolumeProvider configuration. If\n the `volume_provider` arg is set, this will be ignored.\n\n \"\"\"\n self.axis_order = axis_order\n\n # Handle custom Remote:\n self.volume_provider = volume_provider\n if volume_provider is None:\n if boss_config:\n self.volume_provider = _InternVolumeProvider(BossRemote(boss_config))\n else:\n self.volume_provider = _InternVolumeProvider()\n\n if create_new:\n\n # We'll need at least `extents` and `voxel_size`.\n description = description or \"Created with intern\"\n dtype = dtype or \"uint8\"\n\n if extents is None:\n raise ValueError(\n \"If `create_new` is True, you must specify the extents of the new coordinate frame as a [x, y, z].\"\n )\n if voxel_size is None:\n raise ValueError(\n \"If `create_new` is True, you must specify the voxel_size of the new coordinate frame as a [x, y, z].\"\n )\n\n uri = parse_bossdb_uri(channel)\n\n # create collection if it doesn't exist:\n try:\n # Try to get an existing collection:\n collection = self.volume_provider.get_project(\n CollectionResource(uri.collection)\n )\n except:\n # Create the collection:\n collection = CollectionResource(\n uri.collection, description=collection_desc or description\n )\n self.volume_provider.create_project(collection)\n\n # create coordframe if it doesn't exist:\n try:\n # Try to get an existing coordframe:\n coordframe = self.volume_provider.get_project(\n CoordinateFrameResource(\n coordinate_frame_name or f\"CF_{uri.collection}_{uri.experiment}\"\n )\n )\n except:\n # Default to nanometers if a voxel unit isn't provided\n voxel_unit = voxel_unit or \"nanometers\"\n # Create the coordframe:\n coordframe = CoordinateFrameResource(\n coordinate_frame_name or f\"CF_{uri.collection}_{uri.experiment}\",\n description=coordinate_frame_desc or description,\n x_start=0,\n y_start=0,\n z_start=0,\n x_stop=extents[2],\n y_stop=extents[1],\n z_stop=extents[0],\n x_voxel_size=voxel_size[2],\n y_voxel_size=voxel_size[1],\n z_voxel_size=voxel_size[0],\n voxel_unit=voxel_unit,\n )\n self.volume_provider.create_project(coordframe)\n\n # create experiment if it doesn't exist:\n try:\n # Try to get an existing experiment:\n experiment = self.volume_provider.get_project(\n ExperimentResource(uri.experiment, uri.collection)\n )\n except:\n # Create the experiment:\n experiment = ExperimentResource(\n uri.experiment,\n uri.collection,\n description=experiment_desc or description,\n coord_frame=coordframe.name,\n num_hierarchy_levels=downsample_levels,\n hierarchy_method=downsample_method,\n )\n self.volume_provider.create_project(experiment)\n\n # create channel if it doesn't exist:\n try:\n # Try to get an existing channel:\n channel = self.volume_provider.get_project(\n ChannelResource(uri.channel, uri.collection, uri.experiment)\n )\n except:\n # Create the channel:\n channel = ChannelResource(\n uri.channel,\n uri.collection,\n uri.experiment,\n description=description,\n type=\"image\" if dtype in [\"uint8\", \"uint16\"] else \"annotation\",\n datatype=dtype,\n sources=[source_channel] if source_channel else [],\n )\n self.volume_provider.create_project(channel)\n\n self.resolution = resolution\n # If the channel is set as a Resource, then use that resource.\n if isinstance(channel, ChannelResource):\n self._channel = channel\n # If it is set as a string, then parse the channel and generate an\n # intern.Resource from a bossDB URI.\n elif isinstance(channel, str):\n uri = parse_bossdb_uri(channel)\n self.resolution = (\n uri.resolution if not 
(uri.resolution is None) else self.resolution\n )\n self._channel = self.volume_provider.get_channel(\n uri.channel, uri.collection, uri.experiment\n )\n else:\n raise NotImplementedError(\n \"You must specify a channel of the form \"\n \"'bossdb://collection/experiment/channel' or you must \"\n \"provide an intern.Remote.\"\n )\n\n # Set empty experiment (will be dict)\n self._exp = None\n # Set empty coordframe (will be dict)\n self._coord_frame = None\n\n # Set col/exp/chan based upon the channel or URI provided.\n self.collection_name = self._channel.coll_name\n self.experiment_name = self._channel.exp_name\n self.channel_name = self._channel.name\n\n # Create a pointer to the metadata for the channel.\n self._channel_metadata = _MetadataProvider(self)\n\n @property\n def metadata(self):\n \"\"\"\n Returns a pointer to the metadata provider.\n \"\"\"\n return self._channel_metadata\n\n @property\n def dtype(self):\n \"\"\"\n Return the datatype of the array.\n\n Will default to the dtype of the channel.\n \"\"\"\n return self._channel.datatype\n\n @property\n def url(self):\n \"\"\"\n Get a pointer to this Channel on the BossDB page.\n \"\"\"\n return f\"{self.volume_provider.boss._project._base_protocol}://{self.volume_provider.boss._project._base_url}/v1/mgmt/resources/{self.collection_name}/{self.experiment_name}/{self.channel_name}\"\n\n @property\n def visualize(self):\n \"\"\"\n Get a pointer to this Channel on the BossDB page.\n \"\"\"\n return \"https://neuroglancer.bossdb.io/#!{'layers':{'image':{'source':'boss://__replace_me__'}}}\".replace(\n \"__replace_me__\",\n f\"{self.volume_provider.boss._project._base_protocol}://{self.volume_provider.boss._project._base_url}/{self.collection_name}/{self.experiment_name}/{self.channel_name}\",\n )\n\n @property\n def shape(self):\n \"\"\"\n Get the dimensions (numpy-flavored) of the array.\n\n Will return (1, 1, 1) if a coordinate frame does not exist (as in cases\n of pre-v2 bossphorus instances); this will not restrict indexing.\n \"\"\"\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n # From the coordinate frame, get the x, y, and z sizes. 
Note that this\n # is the SIZE, not the extents; in other words, a cframe that starts at\n # x=10 and extends to x=110 will have a size of 100 here.\n if self.axis_order == AxisOrder.XYZ:\n return (\n int(\n (self._coord_frame.y_stop - self._coord_frame.y_start)\n / (2 ** self.resolution)\n ),\n int(\n (self._coord_frame.x_stop - self._coord_frame.x_start)\n / (2 ** self.resolution)\n ),\n (self._coord_frame.z_stop - self._coord_frame.z_start),\n )\n elif self.axis_order == AxisOrder.ZYX:\n return (\n (self._coord_frame.z_stop - self._coord_frame.z_start),\n int(\n (self._coord_frame.y_stop - self._coord_frame.y_start)\n / (2 ** self.resolution)\n ),\n int(\n (self._coord_frame.x_stop - self._coord_frame.x_start)\n / (2 ** self.resolution)\n ),\n )\n\n @property\n def voxel_size(self):\n \"\"\"\n Get the dimensions (numpy-flavored) of the array.\n\n Will return (1, 1, 1) if a coordinate frame does not exist (as in cases\n of pre-v2 bossphorus instances); this will not restrict indexing.\n \"\"\"\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n if self.axis_order == AxisOrder.XYZ:\n vox_size = (\n self._coord_frame.x_voxel_size,\n self._coord_frame.y_voxel_size,\n self._coord_frame.z_voxel_size,\n )\n elif self.axis_order == AxisOrder.ZYX:\n vox_size = (\n self._coord_frame.z_voxel_size,\n self._coord_frame.y_voxel_size,\n self._coord_frame.x_voxel_size,\n )\n return vox_size\n\n @property\n def voxel_unit(self):\n if self._coord_frame is None:\n self._populate_coord_frame()\n return self._coord_frame.voxel_unit\n\n def _populate_exp(self):\n \"\"\"\n Populate the experiment component of this array.\n\n Cache the results for later.\n \"\"\"\n self._exp = self.volume_provider.get_project(\n ExperimentResource(self._channel.exp_name, self._channel.coll_name)\n )\n\n def _populate_coord_frame(self):\n \"\"\"\n Populate the array coordinate frame.\n\n Cache the results for later.\n \"\"\"\n if self._exp is None:\n self._populate_exp()\n self._coord_frame = self.volume_provider.get_project(\n CoordinateFrameResource(self._exp.coord_frame)\n )\n \n @property\n def downsample_status(self):\n \"\"\"\n Return the downsample status of the underlying channel.\n \"\"\"\n return self._channel.downsample_status\n \n @property\n def available_resolutions(self):\n \"\"\"\n Return a list of available resolutions for this channel.\n \n Arguments:\n None\n \n Returns:\n List[int]: A list of resolutions at which this dataset can be downloaded\n \n \"\"\"\n self._populate_exp()\n return list(range(dataset._exp.num_hierarchy_levels))\n\n def __getitem__(self, key: Tuple) -> np.array:\n \"\"\"\n Get a subarray or subvolume.\n\n Uses one of two indexing methods:\n 1. Start/Stop (`int:int`)\n 2. Single index (`int`)\n\n Each element of the key can be one of those two options. For example,\n\n myarray[1, 1:100, 2]\n\n \"\"\"\n # If the user has requested XYZ mode, the first thing to do is reverse\n # the array indices. Then you can continue this fn without any\n # additional changes.\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Next, we need to get the shape of the dataset. We do this currently\n # by getting the coordinate frame, which means that we need the\n # coordframe data and experiment data if we don't have it already. 
In\n # the future, we may also want to allow the user to specify general\n # shape information so that we can avoid calling the API.\n\n # Populate the experiment metadata if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Populate the coordinate frame metadata if not yet set:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n # Now we can begin. There is a wide variety of indexing options\n # available, including single-integer indexing, tuple-of-slices\n # indexing, tuple-of-int indexing...\n\n # First we'll address if the user presents a single integer.\n # ```\n # my_array[500]\n # ```\n # In this case, the user is asking for a single Z slice (or single X\n # slice if in XYZ order... But that's a far less common use case.)\n # We will get the full XY extents and download a single 2D array:\n if isinstance(key, int):\n # Get the full Z slice:\n xs = (0, self.shape[2])\n ys = (0, self.shape[1])\n zs = (key, key + 1)\n else:\n # We also support indexing with units. For example, you can ask for\n # ```\n # my_array[0:10, 0:10, 0:10, \"nanometers\"]\n # ```\n # which will download as many pixels as are required in order to\n # download 10nm in each dimension. We do this by storing a\n # \"normalized units\" measure which is a rescale factor for each\n # dimension (in the same order, e.g. ZYX, as the array).\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n # We will now do the following codeblock three times, for X,Y,Z:\n # First, we check to see if this index is a single integer. If so,\n # the user is requesting a 2D array with zero depth along this\n # dimension. 
For example, if the user asks for\n # ```\n # my_data[0:120, 0:120, 150]\n # ```\n # Then \"150\" suggests that the user just wants one single X slice.\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n # If the key is a Slice, then it has .start and .stop attrs.\n # (The user is requesting an array with more than one slice\n # in this dimension.)\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = int(start / _normalize_units[0])\n stop = int(stop / _normalize_units[0])\n\n # Cast the coords to integers (since Boss needs int coords)\n xs = (int(start), int(stop))\n\n # Do the same thing again for the next dimension: Either a single\n # integer, or a slice...\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n # Do the same thing again for the last dimension: Either a single\n # integer, or a slice...\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n # Finally, we can perform the cutout itself, using the x, y, and z\n # coordinates that we computed in the previous step.\n cutout = self.volume_provider.get_cutout(\n self._channel, self.resolution, xs, ys, zs\n )\n\n # Data are returned in ZYX order:\n if self.axis_order == AxisOrder.XYZ:\n data = np.rollaxis(np.rollaxis(cutout, 1), 2)\n elif self.axis_order == AxisOrder.ZYX:\n data = cutout\n\n # If any of the dimensions are of length 1, it's because the user\n # requested a single slice in their key; flatten the array in that\n # dimension. For example, if you request `[10, 0:10, 0:10]` then the\n # result should be 2D (no Z component).\n _shape = data.shape\n if _shape[0] == 1:\n data = data[0, :, :]\n if _shape[1] == 1:\n data = data[:, 0, :]\n if _shape[2] == 1:\n data = data[:, :, 0]\n return data\n\n def __setitem__(self, key: Tuple, value: np.array) -> np.array:\n \"\"\"\n Set a subarray or subvolume.\n\n Uses one of two indexing methods:\n 1. Start/Stop (`int:int`)\n 2. Single index (`int`)\n\n Each element of the key can be one of those two options. 
For example,\n\n myarray[1, 1:100, 2]\n\n Start-only (`10:`) or stop-only (`:10`) indexing is unsupported.\n \"\"\"\n\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = start / _normalize_units[0]\n stop = stop / _normalize_units[0]\n\n xs = (int(start), int(stop))\n\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n if len(value.shape) == 2:\n # TODO: Support other 2D shapes as well\n value = np.array([value])\n\n cutout = self.volume_provider.create_cutout(\n self._channel, self.resolution, xs, ys, zs, value\n )\n\n\ndef arrays_from_neuroglancer(url: str):\n \"\"\"\n Construct array(s) from a neuroglancer link.\n\n Arguments:\n url (str): The neuroglancer link to parse\n\n Returns:\n Dict[str, array]: A dictionary of arrays, where each is keyed by\n the name of the channel in neuroglancer.\n\n \"\"\"\n ngl_state = json.loads(unquote(url).split(\"#!\")[1])\n\n arrays = {}\n for source in ngl_state[\"layers\"]:\n source_url = \"\"\n if \"boss://\" in source[\"source\"]:\n source_url = source[\"source\"]\n elif (\n isinstance(source[\"source\"], dict) and \"boss://\" in source[\"source\"][\"url\"]\n ):\n source_url = source[\"source\"][\"url\"]\n else:\n continue\n remote, channel = parse_fquri(source_url)\n arrays[source[\"name\"]] = array(\n channel=channel, volume_provider=_InternVolumeProvider(remote)\n )\n return arrays\n\n\ndef volumes_from_neuroglancer(\n url: str, radius_zyx: Tuple[int, int, int] = (10, 1024, 1024)\n):\n \"\"\"\n Download numpy arrays from BossDB based upon a neuroglancer URL.\n\n Arguments:\n url (str): The neuroglancer link to parse\n radius_zyx (Tuple[int, int, int]): The amount of data along each axis\n to download, centered at the position from the URL.\n\n Returns:\n Dict[str, np.ndarray]: A dictionary of np.arrays, where each is keyed\n by the name of the channel in neuroglancer.\n\n\n \"\"\"\n ngl_state = json.loads(unquote(url).split(\"#!\")[1])\n\n x, y, z = ngl_state[\"position\"]\n zr, yr, xr = radius_zyx\n\n arrays = arrays_from_neuroglancer(url)\n return {\n key: dataset[z - zr : z + zr, y - yr : y + yr, x - xr : x + xr]\n for key, dataset in arrays.items()\n }\n"
] | [
[
"numpy.rollaxis",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
annapasca/ephypype | [
"6dbacdd6913234a28b690b401862ff062accecc7",
"6dbacdd6913234a28b690b401862ff062accecc7"
] | [
"examples/plot_inverse.py",
"ephypype/power.py"
] | [
"\"\"\"\n.. _source_reconstruction:\n\n========================\nCompute inverse solution\n========================\nThe inverse solution pipeline performs source reconstruction starting either\nfrom raw/epoched data (*.fif* format) specified by the user or from the output\nof the Preprocessing pipeline (cleaned raw data).\n\"\"\"\n\n# Authors: Annalisa Pascarella <[email protected]>\n# License: BSD (3-clause)\n\n# sphinx_gallery_thumbnail_number = 2\n\nimport os.path as op\nimport numpy as np\nimport nipype.pipeline.engine as pe\nimport nipype.interfaces.io as nio\n\nimport ephypype\nfrom ephypype.nodes import create_iterator\nfrom ephypype.datasets import fetch_omega_dataset\n\n\n###############################################################################\n# Let us fetch the data first. It is around 675 MB download.\n\nbase_path = op.join(op.dirname(ephypype.__file__), '..', 'examples')\ndata_path = fetch_omega_dataset(base_path)\n\n###############################################################################\n# then read the parameters for experiment and inverse problem from a\n# :download:`json <https://github.com/neuropycon/ephypype/tree/master/examples/params.json>`\n# file and print it\n\nimport json # noqa\nimport pprint # noqa\nparams = json.load(open(\"params.json\"))\n\npprint.pprint({'experiment parameters': params[\"general\"]})\nsubject_ids = params[\"general\"][\"subject_ids\"] # sub-003\nsession_ids = params[\"general\"][\"session_ids\"] # ses-0001\nNJOBS = params[\"general\"][\"NJOBS\"]\n\npprint.pprint({'inverse parameters': params[\"inverse\"]})\nspacing = params[\"inverse\"]['spacing'] # ico-5 vs oct-6\nsnr = params[\"inverse\"]['snr'] # use smaller SNR for raw data\ninv_method = params[\"inverse\"]['img_method'] # sLORETA, MNE, dSPM, LCMV\nparc = params[\"inverse\"]['parcellation'] # parcellation to use: 'aparc' vs 'aparc.a2009s' # noqa\n# noise covariance matrix filename template\nnoise_cov_fname = params[\"inverse\"]['noise_cov_fname']\n\n# set sbj dir path, i.e. where the FS folfers are\nsubjects_dir = op.join(data_path, params[\"general\"][\"subjects_dir\"])\n\n###############################################################################\n# Then, we create our workflow and specify the `base_dir` which tells\n# nipype the directory in which to store the outputs.\n\n# workflow directory within the `base_dir`\nsrc_reconstruction_pipeline_name = 'source_reconstruction_' + \\\n inv_method + '_' + parc.replace('.', '')\n\nmain_workflow = pe.Workflow(name=src_reconstruction_pipeline_name)\nmain_workflow.base_dir = data_path\n\n###############################################################################\n# Then we create a node to pass input filenames to DataGrabber from nipype\n\ninfosource = create_iterator(['subject_id', 'session_id'],\n [subject_ids, session_ids])\n\n###############################################################################\n# and a node to grab data. 
The template_args in this node iterate upon\n# the values in the infosource node\n\ndatasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],\n outfields=['raw_file', 'trans_file']), # noqa\n name='datasource')\n\ndatasource.inputs.base_directory = data_path\ndatasource.inputs.template = '*%s/%s/meg/%s*rest*%s.fif'\n\ndatasource.inputs.template_args = dict(\n raw_file=[['subject_id', 'session_id', 'subject_id', '0_60*ica']],\n trans_file=[['subject_id', 'session_id', 'subject_id', \"-trans\"]])\n\ndatasource.inputs.sort_filelist = True\n\n###############################################################################\n# Ephypype creates for us a pipeline which can be connected to these\n# nodes we created. The inverse solution pipeline is implemented by the\n# function\n# :func:`ephypype.pipelines.preproc_meeg.create_pipeline_source_reconstruction`\n# thus to instantiate the inverse pipeline node, we import it and pass our\n# parameters to it.\n# The inverse pipeline contains three nodes that wrap the MNE Python functions\n# that perform the source reconstruction steps.\n#\n# In particular, the three nodes are:\n#\n# * :class:`ephypype.interfaces.mne.LF_computation.LFComputation` compute the\n# Lead Field matrix\n# * :class:`ephypype.interfaces.mne.Inverse_solution.NoiseCovariance` computes\n# the noise covariance matrix\n# * :class:`ephypype.interfaces.mne.Inverse_solution.InverseSolution` estimates\n# the time series of the neural sources on a set of dipoles grid\n\nfrom ephypype.pipelines import create_pipeline_source_reconstruction # noqa\ninv_sol_workflow = create_pipeline_source_reconstruction(\n data_path, subjects_dir, spacing=spacing, inv_method=inv_method, parc=parc,\n noise_cov_fname=noise_cov_fname)\n\n###############################################################################\n# We then connect the nodes two at a time. First, we connect the two outputs\n# (subject_id and session_id) of the infosource node to the datasource node.\n# So, these two nodes taken together can grab data.\n\nmain_workflow.connect(infosource, 'subject_id', datasource, 'subject_id')\nmain_workflow.connect(infosource, 'session_id', datasource, 'session_id')\n\n###############################################################################\n# Similarly, for the inputnode of the preproc_workflow. Things will become\n# clearer in a moment when we plot the graph of the workflow.\n\nmain_workflow.connect(infosource, 'subject_id',\n inv_sol_workflow, 'inputnode.sbj_id')\nmain_workflow.connect(datasource, 'raw_file',\n inv_sol_workflow, 'inputnode.raw')\nmain_workflow.connect(datasource, 'trans_file',\n inv_sol_workflow, 'inputnode.trans_file')\n\n###############################################################################\n# To do so, we first write the workflow graph (optional)\n\nmain_workflow.write_graph(graph2use='colored') # colored\n\n###############################################################################\n# and visualize it. 
Take a moment to pause and notice how the connections\n# here correspond to how we connected the nodes.\n\nimport matplotlib.pyplot as plt # noqa\nimg = plt.imread(op.join(data_path, src_reconstruction_pipeline_name, 'graph.png')) # noqa\nplt.figure(figsize=(8, 8))\nplt.imshow(img)\nplt.axis('off')\n\n###############################################################################\n# Finally, we are now ready to execute our workflow.\n\nmain_workflow.config['execution'] = {'remove_unnecessary_outputs': 'false'}\n\n# Run workflow locally on 1 CPU\nmain_workflow.run(plugin='LegacyMultiProc', plugin_args={'n_procs': NJOBS})\n\n###############################################################################\n# The output is the source reconstruction matrix stored in the workflow\n# directory defined by `base_dir`. This matrix can be used as input of\n# the Connectivity pipeline.\n#\n# .. warning:: To use this pipeline, we need a cortical segmentation of MRI\n# data, that could be provided by Freesurfer\n\n##############################################################################\n\nimport pickle # noqa\nfrom ephypype.gather import get_results # noqa\nfrom visbrain.objects import BrainObj, ColorbarObj, SceneObj # noqa\n\ntime_series_files, label_files = get_results(main_workflow.base_dir,\n main_workflow.name,\n pipeline='inverse')\n\ntime_pts = 30\n\nsc = SceneObj(size=(800, 500), bgcolor=(0, 0, 0))\nlh_file = op.join(subjects_dir, 'fsaverage', 'label/lh.aparc.annot')\nrh_file = op.join(subjects_dir, 'fsaverage', 'label/rh.aparc.annot')\ncmap = 'bwr'\ntxtcolor = 'white'\nfor inverse_file, label_file in zip(time_series_files, label_files):\n # Load files :\n with open(label_file, 'rb') as f:\n ar = pickle.load(f)\n names, xyz, colors = ar['ROI_names'], ar['ROI_coords'], ar['ROI_colors'] # noqa\n ts = np.squeeze(np.load(inverse_file))\n cen = np.array([k.mean(0) for k in xyz])\n\n # Get the data of the left / right hemisphere :\n lh_data, rh_data = ts[::2, time_pts], ts[1::2, time_pts]\n clim = (ts[:, time_pts].min(), ts[:, time_pts].max())\n roi_names = [k[0:-3] for k in np.array(names)[::2]]\n\n # Left hemisphere outside :\n b_obj_li = BrainObj('white', translucent=False, hemisphere='left')\n b_obj_li.parcellize(lh_file, select=roi_names, data=lh_data, cmap=cmap)\n sc.add_to_subplot(b_obj_li, rotate='left')\n\n # Left hemisphere inside :\n b_obj_lo = BrainObj('white', translucent=False, hemisphere='left')\n b_obj_lo.parcellize(lh_file, select=roi_names, data=lh_data, cmap=cmap)\n sc.add_to_subplot(b_obj_lo, col=1, rotate='right')\n\n # Right hemisphere outside :\n b_obj_ro = BrainObj('white', translucent=False, hemisphere='right')\n b_obj_ro.parcellize(rh_file, select=roi_names, data=rh_data, cmap=cmap)\n sc.add_to_subplot(b_obj_ro, row=1, rotate='right')\n\n # Right hemisphere inside :\n b_obj_ri = BrainObj('white', translucent=False, hemisphere='right')\n b_obj_ri.parcellize(rh_file, select=roi_names, data=rh_data, cmap=cmap)\n sc.add_to_subplot(b_obj_ri, row=1, col=1, rotate='left')\n\n # Add the colorbar :\n cbar = ColorbarObj(b_obj_li, txtsz=15, cbtxtsz=20, txtcolor=txtcolor,\n cblabel='Intensity')\n sc.add_to_subplot(cbar, col=2, row_span=2)\n\nsc.preview()\n",
"\"\"\"Power functions.\"\"\"\n\n# Author: Dmitrii Altukhov <[email protected]>\n# Annalisa Pascarella <[email protected]>\nimport os\nimport numpy as np\n\nfrom nipype.utils.filemanip import split_filename\nfrom mne import read_epochs\nfrom mne.io import read_raw_fif\nfrom scipy.signal import welch\n\nfrom .fif2array import _get_raw_array\n\n\ndef _compute_and_save_psd(data_fname, fmin=0, fmax=120,\n method='welch', is_epoched=False,\n n_fft=256, n_overlap=0,\n picks=None, proj=False, n_jobs=1, verbose=None):\n \"\"\"Load epochs/raw from file, compute psd and save the result.\"\"\"\n\n if is_epoched:\n epochs = read_epochs(data_fname)\n else:\n epochs = read_raw_fif(data_fname, preload=True)\n\n epochs_meg = epochs.pick_types(meg=True, eeg=False, eog=False, ecg=False)\n\n if method == 'welch':\n from mne.time_frequency import psd_welch\n psds, freqs = psd_welch(epochs_meg, fmin=fmin, fmax=fmax)\n elif method == 'multitaper':\n from mne.time_frequency import psd_multitaper\n psds, freqs = psd_multitaper(epochs_meg, fmin=fmin, fmax=fmax)\n else:\n raise Exception('nonexistent method for psd computation')\n\n _get_raw_array(data_fname, save_data=False)\n\n psds_fname = _save_psd(data_fname, psds, freqs)\n _save_psd_img(data_fname, psds, freqs, is_epoched, method)\n\n return psds_fname\n\n\ndef _compute_and_save_src_psd(data_fname, sfreq, fmin=0, fmax=120,\n is_epoched=False,\n n_fft=256, n_overlap=0,\n n_jobs=1, verbose=None):\n \"\"\"Load epochs/raw from file, compute psd and save the result.\"\"\"\n src_data = np.load(data_fname)\n dim = src_data.shape\n if len(dim) == 3 and dim[0] == 1:\n src_data = np.squeeze(src_data)\n print(('src data dim: {}'.format(src_data.shape)))\n\n if n_fft > src_data.shape[1]:\n nperseg = src_data.shape[1]\n else:\n nperseg = n_fft\n\n n_freqs = nperseg // 2 + 1\n psds = np.empty([src_data.shape[0], n_freqs])\n for i in range(src_data.shape[0]):\n freqs, Pxx = welch(src_data[i, :], fs=sfreq, window='hamming',\n nperseg=nperseg, noverlap=n_overlap, nfft=None)\n psds[i, :] = Pxx\n\n psds_fname = _save_psd(data_fname, psds, freqs)\n _save_psd_img(data_fname, psds, freqs, is_epoched)\n\n return psds_fname\n\n\ndef _compute_mean_band_psd(psds_file, freq_bands):\n \"\"\"Compute mean band psd.\"\"\"\n npzfile = np.load(psds_file)\n print(('the .npz file contain {} \\n'.format(npzfile.files)))\n\n # is a matrix with dim n_channels(n_voxel) x n_freqs\n psds = npzfile['psds']\n print(('psds is a matrix {} \\n'.format(psds.shape)))\n\n # list of frequencies in which psds was computed;\n # its length = columns of psds\n freqs = npzfile['freqs']\n print(('freqs contains {} frequencies \\n'.format(len(freqs))))\n\n n_row, _ = psds.shape\n n_fr = len(freq_bands)\n\n m_px = np.empty([n_row, n_fr])\n\n for f in range(n_fr):\n min_fr = freq_bands[f][0]\n max_fr = freq_bands[f][1]\n print(('*** frequency band [{}, {}] ***\\n'.format(min_fr, max_fr)))\n m_px[:, f] = np.mean(psds[:, (freqs >= min_fr) * (freqs <= max_fr)], 1)\n\n psds_mean_fname = _save_m_px(psds_file, m_px)\n\n return psds_mean_fname\n\n\ndef _save_m_px(psds_file, m_px):\n data_path, basename, ext = split_filename(psds_file)\n\n psds_mean_fname = basename + '-mean_band.npy'\n psds_mean_fname = os.path.abspath(psds_mean_fname)\n print((m_px.shape))\n np.save(psds_mean_fname, m_px)\n\n return psds_mean_fname\n\n\ndef _save_psd(data_fname, psds, freqs):\n data_path, basename, ext = split_filename(data_fname)\n\n psds_fname = basename + '-psds.npz'\n psds_fname = os.path.abspath(psds_fname)\n print((psds.shape))\n 
print(('*** save {} ***'.format(psds_fname)))\n np.savez(psds_fname, psds=psds, freqs=freqs)\n\n return psds_fname\n\n\ndef _save_psd_img(data_fname, psds, freqs, is_epoched=False, method=''):\n import matplotlib.pyplot as plt\n\n data_path, basename, ext = split_filename(data_fname)\n psds_img_fname = basename + '-psds.png'\n psds_img_fname = os.path.abspath(psds_img_fname)\n\n # save PSD as img\n f, ax = plt.subplots()\n psds = 10 * np.log10(psds)\n if is_epoched:\n psds_mean = psds.mean(0).mean(0)\n psds_std = psds.mean(0).std(0)\n else:\n psds_mean = psds.mean(0)\n psds_std = psds.std(0)\n\n ax.plot(freqs, psds_mean, color='g')\n ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,\n color='g', alpha=.5)\n ax.set(title='{} PSD'.format(method), xlabel='Frequency',\n ylabel='Power Spectral Density (dB)')\n\n print(('*** save {} ***'.format(psds_img_fname)))\n plt.savefig(psds_img_fname)\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"numpy.load",
"numpy.array",
"matplotlib.pyplot.figure"
],
[
"numpy.savez",
"numpy.squeeze",
"scipy.signal.welch",
"matplotlib.pyplot.subplots",
"numpy.save",
"matplotlib.pyplot.savefig",
"numpy.log10",
"numpy.mean",
"numpy.load",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
hanke/nilearn | [
"96a3f0f72b4f25af771116251324cbec4c0d2055",
"96a3f0f72b4f25af771116251324cbec4c0d2055",
"96a3f0f72b4f25af771116251324cbec4c0d2055"
] | [
"nilearn/_utils/cache_mixin.py",
"nilearn/plotting/displays.py",
"nilearn/input_data/base_masker.py"
] | [
"\"\"\"\nMixin for cache with joblib\n\"\"\"\n# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais\n# License: simplified BSD\n\nimport json\nimport warnings\nimport os\nimport shutil\nfrom distutils.version import LooseVersion\n\nimport nibabel\nfrom sklearn.externals.joblib import Memory\n\nMEMORY_CLASSES = (Memory, )\n\ntry:\n from joblib import Memory as JoblibMemory\n MEMORY_CLASSES = (Memory, JoblibMemory)\nexcept ImportError:\n pass\n\nimport nilearn\n\nfrom .compat import _basestring\n\n__CACHE_CHECKED = dict()\n\n\ndef _safe_cache(memory, func, **kwargs):\n \"\"\" A wrapper for mem.cache that flushes the cache if the version\n number of nibabel has changed.\n \"\"\"\n cachedir = memory.cachedir\n\n if cachedir is None or cachedir in __CACHE_CHECKED:\n return memory.cache(func, **kwargs)\n\n version_file = os.path.join(cachedir, 'module_versions.json')\n\n versions = dict()\n if os.path.exists(version_file):\n with open(version_file, 'r') as _version_file:\n versions = json.load(_version_file)\n\n modules = (nibabel, )\n # Keep only the major + minor version numbers\n my_versions = dict((m.__name__, LooseVersion(m.__version__).version[:2])\n for m in modules)\n commons = set(versions.keys()).intersection(set(my_versions.keys()))\n collisions = [m for m in commons if versions[m] != my_versions[m]]\n\n # Flush cache if version collision\n if len(collisions) > 0:\n if nilearn.CHECK_CACHE_VERSION:\n warnings.warn(\"Incompatible cache in %s: \"\n \"different version of nibabel. Deleting \"\n \"the cache. Put nilearn.CHECK_CACHE_VERSION \"\n \"to false to avoid this behavior.\"\n % cachedir)\n try:\n tmp_dir = (os.path.split(cachedir)[:-1]\n + ('old_%i' % os.getpid(), ))\n tmp_dir = os.path.join(*tmp_dir)\n # We use rename + unlink to be more robust to race\n # conditions\n os.rename(cachedir, tmp_dir)\n shutil.rmtree(tmp_dir)\n except OSError:\n # Another process could have removed this dir\n pass\n\n try:\n os.makedirs(cachedir)\n except OSError:\n # File exists?\n pass\n else:\n warnings.warn(\"Incompatible cache in %s: \"\n \"old version of nibabel.\" % cachedir)\n\n # Write json files if configuration is different\n if versions != my_versions:\n with open(version_file, 'w') as _version_file:\n json.dump(my_versions, _version_file)\n\n __CACHE_CHECKED[cachedir] = True\n\n return memory.cache(func, **kwargs)\n\n\ndef cache(func, memory, func_memory_level=None, memory_level=None,\n **kwargs):\n \"\"\" Return a joblib.Memory object.\n\n The memory_level determines the level above which the wrapped\n function output is cached. By specifying a numeric value for\n this level, the user can to control the amount of cache memory\n used. This function will cache the function call or not\n depending on the cache level.\n\n Parameters\n ----------\n func: function\n The function which output is to be cached.\n\n memory: instance of joblib.Memory or string\n Used to cache the function call.\n\n func_memory_level: int, optional\n The memory_level from which caching must be enabled for the wrapped\n function.\n\n memory_level: int, optional\n The memory_level used to determine if function call must\n be cached or not (if user_memory_level is equal of greater than\n func_memory_level the function is cached)\n\n kwargs: keyword arguments\n The keyword arguments passed to memory.cache\n\n Returns\n -------\n mem: joblib.MemorizedFunc\n object that wraps the function func. This object may be\n a no-op, if the requested level is lower than the value given\n to _cache()). 
For consistency, a joblib.Memory object is always\n returned.\n \"\"\"\n verbose = kwargs.get('verbose', 0)\n\n # memory_level and func_memory_level must be both None or both integers.\n memory_levels = [memory_level, func_memory_level]\n both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels)\n both_params_none = all(lvl is None for lvl in memory_levels)\n\n if not (both_params_integers or both_params_none):\n raise ValueError('Reference and user memory levels must be both None '\n 'or both integers.')\n\n if memory is not None and (func_memory_level is None or\n memory_level >= func_memory_level):\n if isinstance(memory, _basestring):\n memory = Memory(cachedir=memory, verbose=verbose)\n if not isinstance(memory, MEMORY_CLASSES):\n raise TypeError(\"'memory' argument must be a string or a \"\n \"joblib.Memory object. \"\n \"%s %s was given.\" % (memory, type(memory)))\n if (memory.cachedir is None and memory_level is not None\n and memory_level > 1):\n warnings.warn(\"Caching has been enabled (memory_level = %d) \"\n \"but no Memory object or path has been provided\"\n \" (parameter memory). Caching deactivated for \"\n \"function %s.\" %\n (memory_level, func.__name__),\n stacklevel=2)\n else:\n memory = Memory(cachedir=None, verbose=verbose)\n return _safe_cache(memory, func, **kwargs)\n\n\nclass CacheMixin(object):\n \"\"\"Mixin to add caching to a class.\n\n This class is a thin layer on top of joblib.Memory, that mainly adds a\n \"caching level\", similar to a \"log level\".\n\n Usage: to cache the results of a method, wrap it in self._cache()\n defined by this class. Caching is performed only if the user-specified\n cache level (self._memory_level) is greater than the value given as a\n parameter to self._cache(). See _cache() documentation for details.\n \"\"\"\n def _cache(self, func, func_memory_level=1, **kwargs):\n \"\"\"Return a joblib.Memory object.\n\n The memory_level determines the level above which the wrapped\n function output is cached. By specifying a numeric value for\n this level, the user can to control the amount of cache memory\n used. This function will cache the function call or not\n depending on the cache level.\n\n Parameters\n ----------\n func: function\n The function the output of which is to be cached.\n\n memory_level: int\n The memory_level from which caching must be enabled for the wrapped\n function.\n\n Returns\n -------\n mem: joblib.Memory\n object that wraps the function func. This object may be\n a no-op, if the requested level is lower than the value given\n to _cache()). For consistency, a joblib.Memory object is always\n returned.\n\n \"\"\"\n\n verbose = getattr(self, 'verbose', 0)\n\n # Creates attributes if they don't exist\n # This is to make creating them in __init__() optional.\n if not hasattr(self, \"memory_level\"):\n self.memory_level = 0\n if not hasattr(self, \"memory\"):\n self.memory = Memory(cachedir=None, verbose=verbose)\n if isinstance(self.memory, _basestring):\n self.memory = Memory(cachedir=self.memory, verbose=verbose)\n\n # If cache level is 0 but a memory object has been provided, set\n # memory_level to 1 with a warning.\n if self.memory_level == 0:\n if (isinstance(self.memory, _basestring)\n or self.memory.cachedir is not None):\n warnings.warn(\"memory_level is currently set to 0 but \"\n \"a Memory object has been provided. 
\"\n \"Setting memory_level to 1.\")\n self.memory_level = 1\n\n return cache(func, self.memory, func_memory_level=func_memory_level,\n memory_level=self.memory_level, **kwargs)\n",
"\"\"\"\nThe Slicer classes.\n\nThe main purpose of these classes is to have auto adjust of axes size to\nthe data with different layout of cuts.\n\"\"\"\n\nimport collections\nimport numbers\n\nimport numpy as np\nfrom scipy import sparse, stats\n\nfrom ..image import new_img_like\nfrom .. import _utils\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import transforms, colors\nfrom matplotlib.colorbar import ColorbarBase\nfrom matplotlib import cm as mpl_cm\nfrom matplotlib import lines\n\n# Local imports\nfrom . import glass_brain, cm\nfrom .find_cuts import find_xyz_cut_coords, find_cut_slices\nfrom .edge_detect import _edge_map\nfrom ..image.resampling import (get_bounds, reorder_img, coord_transform,\n get_mask_bounds)\n\n\n###############################################################################\n# class BaseAxes\n###############################################################################\n\nclass BaseAxes(object):\n \"\"\" An MPL axis-like object that displays a 2D view of 3D volumes\n \"\"\"\n\n def __init__(self, ax, direction, coord):\n \"\"\" An MPL axis-like object that displays a cut of 3D volumes\n\n Parameters\n ==========\n ax: a MPL axes instance\n The axes in which the plots will be drawn\n direction: {'x', 'y', 'z'}\n The directions of the view\n coord: float\n The coordinate along the direction of the cut\n\n \"\"\"\n self.ax = ax\n self.direction = direction\n self.coord = coord\n self._object_bounds = list()\n\n def transform_to_2d(self, data, affine):\n raise NotImplementedError(\"'transform_to_2d' needs to be implemented \"\n \"in derived classes'\")\n\n def add_object_bounds(self, bounds):\n \"\"\"Ensures that axes get rescaled when adding object bounds\n\n \"\"\"\n old_object_bounds = self.get_object_bounds()\n self._object_bounds.append(bounds)\n new_object_bounds = self.get_object_bounds()\n\n if new_object_bounds != old_object_bounds:\n self.ax.axis(self.get_object_bounds())\n\n def draw_2d(self, data_2d, data_bounds, bounding_box,\n type='imshow', **kwargs):\n # kwargs massaging\n kwargs['origin'] = 'upper'\n\n if self.direction == 'y':\n (xmin, xmax), (_, _), (zmin, zmax) = data_bounds\n (xmin_, xmax_), (_, _), (zmin_, zmax_) = bounding_box\n elif self.direction == 'x':\n (_, _), (xmin, xmax), (zmin, zmax) = data_bounds\n (_, _), (xmin_, xmax_), (zmin_, zmax_) = bounding_box\n elif self.direction == 'z':\n (xmin, xmax), (zmin, zmax), (_, _) = data_bounds\n (xmin_, xmax_), (zmin_, zmax_), (_, _) = bounding_box\n else:\n raise ValueError('Invalid value for direction %s' %\n self.direction)\n ax = self.ax\n # Here we need to do a copy to avoid having the image changing as\n # we change the data\n im = getattr(ax, type)(data_2d.copy(),\n extent=(xmin, xmax, zmin, zmax),\n **kwargs)\n\n self.add_object_bounds((xmin_, xmax_, zmin_, zmax_))\n\n return im\n\n def get_object_bounds(self):\n \"\"\" Return the bounds of the objects on this axes.\n \"\"\"\n if len(self._object_bounds) == 0:\n # Nothing plotted yet\n return -.01, .01, -.01, .01\n xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T\n xmax = max(xmaxs.max(), xmins.max())\n xmin = min(xmins.min(), xmaxs.min())\n ymax = max(ymaxs.max(), ymins.max())\n ymin = min(ymins.min(), ymaxs.min())\n\n return xmin, xmax, ymin, ymax\n\n def draw_left_right(self, size, bg_color, **kwargs):\n if self.direction == 'x':\n return\n ax = self.ax\n ax.text(.1, .95, 'L',\n transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='top',\n size=size,\n bbox=dict(boxstyle=\"square,pad=0\",\n 
ec=bg_color, fc=bg_color, alpha=1),\n **kwargs)\n\n ax.text(.9, .95, 'R',\n transform=ax.transAxes,\n horizontalalignment='right',\n verticalalignment='top',\n size=size,\n bbox=dict(boxstyle=\"square,pad=0\", ec=bg_color, fc=bg_color),\n **kwargs)\n\n def draw_position(self, size, bg_color, **kwargs):\n raise NotImplementedError(\"'draw_position' should be implemented \"\n \"in derived classes\")\n\n\n###############################################################################\n# class CutAxes\n###############################################################################\n\nclass CutAxes(BaseAxes):\n \"\"\" An MPL axis-like object that displays a cut of 3D volumes\n \"\"\"\n def transform_to_2d(self, data, affine):\n \"\"\" Cut the 3D volume into a 2D slice\n\n Parameters\n ==========\n data: 3D ndarray\n The 3D volume to cut\n affine: 4x4 ndarray\n The affine of the volume\n \"\"\"\n coords = [0, 0, 0]\n coords['xyz'.index(self.direction)] = self.coord\n x_map, y_map, z_map = [int(np.round(c)) for c in\n coord_transform(coords[0],\n coords[1],\n coords[2],\n np.linalg.inv(affine))]\n if self.direction == 'y':\n cut = np.rot90(data[:, y_map, :])\n elif self.direction == 'x':\n cut = np.rot90(data[x_map, :, :])\n elif self.direction == 'z':\n cut = np.rot90(data[:, :, z_map])\n else:\n raise ValueError('Invalid value for direction %s' %\n self.direction)\n return cut\n\n def draw_position(self, size, bg_color, **kwargs):\n ax = self.ax\n ax.text(0, 0, '%s=%i' % (self.direction, self.coord),\n transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='bottom',\n size=size,\n bbox=dict(boxstyle=\"square,pad=0\",\n ec=bg_color, fc=bg_color, alpha=1),\n **kwargs)\n\n\ndef _coords_3d_to_2d(coords_3d, direction):\n \"\"\"Project 3d coordinates into 2d ones given the direction of a cut\n \"\"\"\n direction_to_index = {'x': [1, 2],\n 'y': [0, 2],\n 'z': [0, 1]}\n index = direction_to_index.get(direction)\n\n if index is None:\n message = (\n '{0} is not a valid direction. 
'\n \"Allowed values are 'x', 'y' and 'z'\").format(direction)\n raise ValueError(message)\n\n return coords_3d[:, index]\n\n\nclass GlassBrainAxes(BaseAxes):\n \"\"\"An MPL axis-like object that displays a 2D projection of 3D\n volumes with a schematic view of the brain.\n\n \"\"\"\n def __init__(self, ax, direction, coord, plot_abs=True, **kwargs):\n super(GlassBrainAxes, self).__init__(ax, direction, coord)\n self._plot_abs = plot_abs\n if ax is not None:\n object_bounds = glass_brain.plot_brain_schematics(ax,\n direction,\n **kwargs)\n self.add_object_bounds(object_bounds)\n\n def transform_to_2d(self, data, affine):\n \"\"\" Returns the maximum of the absolute value of the 3D volume\n along an axis.\n\n Parameters\n ==========\n data: 3D ndarray\n The 3D volume\n affine: 4x4 ndarray\n The affine of the volume\n\n \"\"\"\n max_axis = 'xyz'.index(self.direction)\n\n if not self._plot_abs:\n # get the shape of the array we are projecting to\n new_shape = list(data.shape)\n del new_shape[max_axis]\n\n # generate a 3D indexing array that points to max abs value in the\n # current projection\n a1, a2 = np.indices(new_shape)\n inds = [a1, a2]\n inds.insert(max_axis, np.abs(data).argmax(axis=max_axis))\n\n # take the values where the absolute value of the projection\n # is the highest\n maximum_intensity_data = data[inds]\n else:\n maximum_intensity_data = np.abs(data).max(axis=max_axis)\n\n return np.rot90(maximum_intensity_data)\n\n def draw_position(self, size, bg_color, **kwargs):\n # It does not make sense to draw crosses for the position of\n # the cuts since we are taking the max along one axis\n pass\n\n def _add_markers(self, marker_coords, marker_color, marker_size, **kwargs):\n \"\"\"Plot markers\"\"\"\n marker_coords_2d = _coords_3d_to_2d(marker_coords, self.direction)\n\n xdata, ydata = marker_coords_2d.T\n\n defaults = {'marker': 'o',\n 'zorder': 1000}\n for k, v in defaults.items():\n kwargs.setdefault(k, v)\n\n self.ax.scatter(xdata, ydata, s=marker_size,\n c=marker_color, **kwargs)\n\n def _add_lines(self, line_coords, line_values, cmap,\n vmin=None, vmax=None, **kwargs):\n \"\"\"Plot lines\n\n Parameters\n ----------\n line_coords: list of numpy arrays of shape (2, 3)\n 3d coordinates of lines start points and end points.\n line_values: array_like\n values of the lines.\n cmap: colormap\n colormap used to map line_values to a color.\n vmin: float, optional, default: None\n vmax: float, optional, default: None\n If not None, either or both of these values will be used to\n as the minimum and maximum values to color lines. 
If None are\n supplied the maximum absolute value within the given threshold\n will be used as minimum (multiplied by -1) and maximum\n coloring levels.\n kwargs: dict\n additional arguments to pass to matplotlib Line2D.\n \"\"\"\n if vmin is None and vmax is None:\n abs_line_values_max = np.abs(line_values).max()\n vmin = -abs_line_values_max\n vmax = abs_line_values_max\n elif vmin is None:\n if vmax > 0:\n vmin = -vmax\n else:\n raise ValueError(\n \"If vmax is set to a non-positive number \"\n \"then vmin needs to be specified\"\n )\n elif vmax is None:\n if vmin < 0:\n vmin = -vmax\n else:\n raise ValueError(\n \"If vmin is set to a non-negative number \"\n \"then vmax needs to be specified\"\n )\n norm = colors.Normalize(vmin=vmin,\n vmax=vmax)\n abs_norm = colors.Normalize(vmin=0,\n vmax=vmax)\n value_to_color = plt.cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba\n\n for start_end_point_3d, line_value in zip(\n line_coords, line_values):\n start_end_point_2d = _coords_3d_to_2d(start_end_point_3d,\n self.direction)\n\n color = value_to_color(line_value)\n abs_line_value = abs(line_value)\n linewidth = 1 + 2 * abs_norm(abs_line_value)\n # Hacky way to put the strongest connections on top of the weakest\n # note sign does not matter hence using 'abs'\n zorder = 10 + 10 * abs_norm(abs_line_value)\n this_kwargs = {'color': color, 'linewidth': linewidth,\n 'zorder': zorder}\n # kwargs should have priority over this_kwargs so that the\n # user can override the default logic\n this_kwargs.update(kwargs)\n xdata, ydata = start_end_point_2d.T\n line = lines.Line2D(xdata, ydata, **this_kwargs)\n self.ax.add_line(line)\n\n\n###############################################################################\n# class BaseSlicer\n###############################################################################\n\nclass BaseSlicer(object):\n \"\"\" The main purpose of these class is to have auto adjust of axes size\n to the data with different layout of cuts.\n \"\"\"\n # This actually encodes the figsize for only one axe\n _default_figsize = [2.2, 2.6]\n _axes_class = CutAxes\n\n def __init__(self, cut_coords, axes=None, black_bg=False, **kwargs):\n \"\"\" Create 3 linked axes for plotting orthogonal cuts.\n\n Parameters\n ----------\n cut_coords: 3 tuple of ints\n The cut position, in world space.\n axes: matplotlib axes object, optional\n The axes that will be subdivided in 3.\n black_bg: boolean, optional\n If True, the background of the figure will be put to\n black. 
If you wish to save figures with a black background,\n you will need to pass \"facecolor='k', edgecolor='k'\"\n to matplotlib.pyplot.savefig.\n\n \"\"\"\n self.cut_coords = cut_coords\n if axes is None:\n axes = plt.axes((0., 0., 1., 1.))\n axes.axis('off')\n self.frame_axes = axes\n axes.set_zorder(1)\n bb = axes.get_position()\n self.rect = (bb.x0, bb.y0, bb.x1, bb.y1)\n self._black_bg = black_bg\n self._colorbar = False\n self._colorbar_width = 0.05 * bb.width\n self._colorbar_margin = dict(left=0.25 * bb.width,\n right=0.02 * bb.width,\n top=0.05 * bb.height,\n bottom=0.05 * bb.height)\n self._init_axes(**kwargs)\n\n @staticmethod\n def find_cut_coords(img=None, threshold=None, cut_coords=None):\n # Implement this as a staticmethod or a classmethod when\n # subclassing\n raise NotImplementedError\n\n @classmethod\n def init_with_figure(cls, img, threshold=None,\n cut_coords=None, figure=None, axes=None,\n black_bg=False, leave_space=False, colorbar=False,\n **kwargs):\n \"Initialize the slicer with an image\"\n # deal with \"fake\" 4D images\n if img is not None and img is not False:\n img = _utils.check_niimg_3d(img)\n\n cut_coords = cls.find_cut_coords(img, threshold, cut_coords)\n\n if isinstance(axes, plt.Axes) and figure is None:\n figure = axes.figure\n\n if not isinstance(figure, plt.Figure):\n # Make sure that we have a figure\n figsize = cls._default_figsize[:]\n\n # Adjust for the number of axes\n figsize[0] *= len(cut_coords)\n\n # Make space for the colorbar\n if colorbar:\n figsize[0] += .7\n\n facecolor = 'k' if black_bg else 'w'\n\n if leave_space:\n figsize[0] += 3.4\n figure = plt.figure(figure, figsize=figsize,\n facecolor=facecolor)\n if isinstance(axes, plt.Axes):\n assert axes.figure is figure, (\"The axes passed are not \"\n \"in the figure\")\n\n if axes is None:\n axes = [0., 0., 1., 1.]\n if leave_space:\n axes = [0.3, 0, .7, 1.]\n if isinstance(axes, collections.Sequence):\n axes = figure.add_axes(axes)\n # People forget to turn their axis off, or to set the zorder, and\n # then they cannot see their slicer\n axes.axis('off')\n return cls(cut_coords, axes, black_bg, **kwargs)\n\n def title(self, text, x=0.01, y=0.99, size=15, color=None, bgcolor=None,\n alpha=1, **kwargs):\n \"\"\" Write a title to the view.\n\n Parameters\n ----------\n text: string\n The text of the title\n x: float, optional\n The horizontal position of the title on the frame in\n fraction of the frame width.\n y: float, optional\n The vertical position of the title on the frame in\n fraction of the frame height.\n size: integer, optional\n The size of the title text.\n color: matplotlib color specifier, optional\n The color of the font of the title.\n bgcolor: matplotlib color specifier, optional\n The color of the background of the title.\n alpha: float, optional\n The alpha value for the background.\n kwargs:\n Extra keyword arguments are passed to matplotlib's text\n function.\n \"\"\"\n if color is None:\n color = 'k' if self._black_bg else 'w'\n if bgcolor is None:\n bgcolor = 'w' if self._black_bg else 'k'\n if hasattr(self, '_cut_displayed'):\n first_axe = self._cut_displayed[0]\n else:\n first_axe = self.cut_coords[0]\n ax = self.axes[first_axe].ax\n ax.text(x, y, text,\n transform=self.frame_axes.transAxes,\n horizontalalignment='left',\n verticalalignment='top',\n size=size, color=color,\n bbox=dict(boxstyle=\"square,pad=.3\",\n ec=bgcolor, fc=bgcolor, alpha=alpha),\n zorder=1000,\n **kwargs)\n ax.set_zorder(1000)\n\n def add_overlay(self, img, threshold=1e-6, colorbar=False, 
**kwargs):\n \"\"\" Plot a 3D map in all the views.\n\n Parameters\n -----------\n img: Niimg-like object\n See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.\n If it is a masked array, only the non-masked part will be\n plotted.\n threshold : a number, None\n If None is given, the maps are not thresholded.\n If a number is given, it is used to threshold the maps:\n values below the threshold (in absolute value) are\n plotted as transparent.\n colorbar: boolean, optional\n If True, display a colorbar on the right of the plots.\n kwargs:\n Extra keyword arguments are passed to imshow.\n \"\"\"\n if colorbar and self._colorbar:\n raise ValueError(\"This figure already has an overlay with a \"\n \"colorbar.\")\n else:\n self._colorbar = colorbar\n\n img = _utils.check_niimg_3d(img)\n\n # Make sure that add_overlay shows consistent default behavior\n # with plot_stat_map\n kwargs.setdefault('interpolation', 'nearest')\n ims = self._map_show(img, type='imshow', threshold=threshold, **kwargs)\n\n if colorbar:\n self._colorbar_show(ims[0], threshold)\n\n plt.draw_if_interactive()\n\n def add_contours(self, img, filled=False, **kwargs):\n \"\"\" Contour a 3D map in all the views.\n\n Parameters\n -----------\n img: Niimg-like object\n See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.\n Provides image to plot.\n filled: boolean, optional\n If filled=True, contours are displayed with color fillings.\n kwargs:\n Extra keyword arguments are passed to contour, see the\n documentation of pylab.contour\n Useful, arguments are typical \"levels\", which is a\n list of values to use for plotting a contour, and\n \"colors\", which is one color or a list of colors for\n these contours.\n \"\"\"\n self._map_show(img, type='contour', **kwargs)\n if filled:\n colors = kwargs['colors']\n levels = kwargs['levels']\n if len(levels) <= 1:\n # contour fillings levels should be given as (lower, upper).\n levels.append(np.inf)\n alpha = kwargs['alpha']\n self._map_show(img, type='contourf', levels=levels, alpha=alpha,\n colors=colors[:3])\n\n plt.draw_if_interactive()\n\n def _map_show(self, img, type='imshow',\n resampling_interpolation='continuous',\n threshold=None, **kwargs):\n img = reorder_img(img, resample=resampling_interpolation)\n threshold = float(threshold) if threshold is not None else None\n\n if threshold is not None:\n data = img.get_data()\n if threshold == 0:\n data = np.ma.masked_equal(data, 0, copy=False)\n else:\n data = np.ma.masked_inside(data, -threshold, threshold,\n copy=False)\n img = new_img_like(img, data, img.get_affine())\n\n affine = img.get_affine()\n data = img.get_data()\n data_bounds = get_bounds(data.shape, affine)\n (xmin, xmax), (ymin, ymax), (zmin, zmax) = data_bounds\n\n xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \\\n xmin, xmax, ymin, ymax, zmin, zmax\n\n if hasattr(data, 'mask') and isinstance(data.mask, np.ndarray):\n not_mask = np.logical_not(data.mask)\n xmin_, xmax_, ymin_, ymax_, zmin_, zmax_ = \\\n get_mask_bounds(new_img_like(img, not_mask, affine))\n\n data_2d_list = []\n for display_ax in self.axes.values():\n try:\n data_2d = display_ax.transform_to_2d(data, affine)\n except IndexError:\n # We are cutting outside the indices of the data\n data_2d = None\n\n data_2d_list.append(data_2d)\n\n if kwargs.get('vmin') is None:\n kwargs['vmin'] = np.ma.min([d.min() for d in data_2d_list\n if d is not None])\n if kwargs.get('vmax') is None:\n kwargs['vmax'] = np.ma.max([d.max() for d in data_2d_list\n if d 
is not None])\n\n bounding_box = (xmin_, xmax_), (ymin_, ymax_), (zmin_, zmax_)\n\n ims = []\n to_iterate_over = zip(self.axes.values(), data_2d_list)\n for display_ax, data_2d in to_iterate_over:\n if data_2d is not None:\n im = display_ax.draw_2d(data_2d, data_bounds, bounding_box,\n type=type, **kwargs)\n ims.append(im)\n return ims\n\n def _colorbar_show(self, im, threshold):\n if threshold is None:\n offset = 0\n else:\n offset = threshold\n if offset > im.norm.vmax:\n offset = im.norm.vmax\n\n # create new axis for the colorbar\n figure = self.frame_axes.figure\n _, y0, x1, y1 = self.rect\n height = y1 - y0\n x_adjusted_width = self._colorbar_width / len(self.axes)\n x_adjusted_margin = self._colorbar_margin['right'] / len(self.axes)\n lt_wid_top_ht = [x1 - (x_adjusted_width + x_adjusted_margin),\n y0 + self._colorbar_margin['top'],\n x_adjusted_width,\n height - (self._colorbar_margin['top'] +\n self._colorbar_margin['bottom'])]\n self._colorbar_ax = figure.add_axes(lt_wid_top_ht, axis_bgcolor='w')\n\n our_cmap = im.cmap\n # edge case where the data has a single value\n # yields a cryptic matplotlib error message\n # when trying to plot the color bar\n nb_ticks = 5 if im.norm.vmin != im.norm.vmax else 1\n ticks = np.linspace(im.norm.vmin, im.norm.vmax, nb_ticks)\n bounds = np.linspace(im.norm.vmin, im.norm.vmax, our_cmap.N)\n\n # some colormap hacking\n cmaplist = [our_cmap(i) for i in range(our_cmap.N)]\n istart = int(im.norm(-offset, clip=True) * (our_cmap.N - 1))\n istop = int(im.norm(offset, clip=True) * (our_cmap.N - 1))\n for i in range(istart, istop):\n cmaplist[i] = (0.5, 0.5, 0.5, 1.) # just an average gray color\n if im.norm.vmin == im.norm.vmax: # len(np.unique(data)) == 1 ?\n return\n else:\n our_cmap = our_cmap.from_list('Custom cmap', cmaplist, our_cmap.N)\n\n self._cbar = ColorbarBase(\n self._colorbar_ax, ticks=ticks, norm=im.norm,\n orientation='vertical', cmap=our_cmap, boundaries=bounds,\n spacing='proportional', format='%.2g')\n\n self._colorbar_ax.yaxis.tick_left()\n tick_color = 'w' if self._black_bg else 'k'\n for tick in self._colorbar_ax.yaxis.get_ticklabels():\n tick.set_color(tick_color)\n self._colorbar_ax.yaxis.set_tick_params(width=0)\n\n def add_edges(self, img, color='r'):\n \"\"\" Plot the edges of a 3D map in all the views.\n\n Parameters\n -----------\n map: 3D ndarray\n The 3D map to be plotted. 
If it is a masked array, only\n the non-masked part will be plotted.\n affine: 4x4 ndarray\n The affine matrix giving the transformation from voxel\n indices to world space.\n color: matplotlib color: string or (r, g, b) value\n The color used to display the edge map\n \"\"\"\n img = reorder_img(img, resample='continuous')\n data = img.get_data()\n affine = img.get_affine()\n single_color_cmap = colors.ListedColormap([color])\n data_bounds = get_bounds(data.shape, img.get_affine())\n\n # For each ax, cut the data and plot it\n for display_ax in self.axes.values():\n try:\n data_2d = display_ax.transform_to_2d(data, affine)\n edge_mask = _edge_map(data_2d)\n except IndexError:\n # We are cutting outside the indices of the data\n continue\n display_ax.draw_2d(edge_mask, data_bounds, data_bounds,\n type='imshow', cmap=single_color_cmap)\n\n plt.draw_if_interactive()\n\n def annotate(self, left_right=True, positions=True, size=12, **kwargs):\n \"\"\" Add annotations to the plot.\n\n Parameters\n ----------\n left_right: boolean, optional\n If left_right is True, annotations indicating which side\n is left and which side is right are drawn.\n positions: boolean, optional\n If positions is True, annotations indicating the\n positions of the cuts are drawn.\n size: integer, optional\n The size of the text used.\n kwargs:\n Extra keyword arguments are passed to matplotlib's text\n function.\n \"\"\"\n kwargs = kwargs.copy()\n if not 'color' in kwargs:\n if self._black_bg:\n kwargs['color'] = 'w'\n else:\n kwargs['color'] = 'k'\n\n bg_color = ('k' if self._black_bg else 'w')\n if left_right:\n for display_ax in self.axes.values():\n display_ax.draw_left_right(size=size, bg_color=bg_color,\n **kwargs)\n\n if positions:\n for display_ax in self.axes.values():\n display_ax.draw_position(size=size, bg_color=bg_color,\n **kwargs)\n\n def close(self):\n \"\"\" Close the figure. This is necessary to avoid leaking memory.\n \"\"\"\n plt.close(self.frame_axes.figure.number)\n\n def savefig(self, filename, dpi=None):\n \"\"\" Save the figure to a file\n\n Parameters\n ==========\n filename: string\n The file name to save to. 
It's extension determines the\n file type, typically '.png', '.svg' or '.pdf'.\n\n dpi: None or scalar\n The resolution in dots per inch.\n \"\"\"\n facecolor = edgecolor = 'k' if self._black_bg else 'w'\n self.frame_axes.figure.savefig(filename, dpi=dpi,\n facecolor=facecolor,\n edgecolor=edgecolor)\n\n\n###############################################################################\n# class OrthoSlicer\n###############################################################################\n\nclass OrthoSlicer(BaseSlicer):\n \"\"\" A class to create 3 linked axes for plotting orthogonal\n cuts of 3D maps.\n\n Attributes\n ----------\n\n axes: dictionnary of axes\n The 3 axes used to plot each view.\n frame_axes: axes\n The axes framing the whole set of views.\n\n Notes\n -----\n\n The extent of the different axes are adjusted to fit the data\n best in the viewing area.\n \"\"\"\n _cut_displayed = 'yxz'\n _axes_class = CutAxes\n\n @classmethod\n def find_cut_coords(self, img=None, threshold=None, cut_coords=None):\n \"Instanciate the slicer and find cut coordinates\"\n if cut_coords is None:\n if img is None or img is False:\n cut_coords = (0, 0, 0)\n else:\n cut_coords = find_xyz_cut_coords(\n img, activation_threshold=threshold)\n cut_coords = [cut_coords['xyz'.find(c)]\n for c in sorted(self._cut_displayed)]\n return cut_coords\n\n def _init_axes(self, **kwargs):\n cut_coords = self.cut_coords\n if len(cut_coords) != len(self._cut_displayed):\n raise ValueError('The number cut_coords passed does not'\n 'match the display_mode')\n x0, y0, x1, y1 = self.rect\n axisbg = 'k' if self._black_bg else 'w'\n # Create our axes:\n self.axes = dict()\n for index, direction in enumerate(self._cut_displayed):\n fh = self.frame_axes.get_figure()\n ax = fh.add_axes([0.3 * index * (x1 - x0) + x0, y0,\n .3 * (x1 - x0), y1 - y0],\n axisbg=axisbg, aspect='equal')\n ax.axis('off')\n coord = self.cut_coords[\n sorted(self._cut_displayed).index(direction)]\n display_ax = self._axes_class(ax, direction, coord, **kwargs)\n self.axes[direction] = display_ax\n ax.set_axes_locator(self._locator)\n\n if self._black_bg:\n for ax in self.axes.values():\n ax.ax.imshow(np.zeros((2, 2, 3)),\n extent=[-5000, 5000, -5000, 5000],\n zorder=-500, aspect='equal')\n\n # To have a black background in PDF, we need to create a\n # patch in black for the background\n self.frame_axes.imshow(np.zeros((2, 2, 3)),\n extent=[-5000, 5000, -5000, 5000],\n zorder=-500, aspect='auto')\n self.frame_axes.set_zorder(-1000)\n\n def _locator(self, axes, renderer):\n \"\"\" The locator function used by matplotlib to position axes.\n Here we put the logic used to adjust the size of the axes.\n \"\"\"\n x0, y0, x1, y1 = self.rect\n width_dict = dict()\n # A dummy axes, for the situation in which we are not plotting\n # all three (x, y, z) cuts\n dummy_ax = self._axes_class(None, None, None)\n width_dict[dummy_ax.ax] = 0\n display_ax_dict = self.axes\n\n if self._colorbar:\n adjusted_width = self._colorbar_width / len(self.axes)\n right_margin = self._colorbar_margin['right'] / len(self.axes)\n ticks_margin = self._colorbar_margin['left'] / len(self.axes)\n x1 = x1 - (adjusted_width + ticks_margin + right_margin)\n\n for display_ax in display_ax_dict.values():\n bounds = display_ax.get_object_bounds()\n if not bounds:\n # This happens if the call to _map_show was not\n # succesful. 
As it happens asyncroniously (during a\n # refresh of the figure) we capture the problem and\n # ignore it: it only adds a non informative traceback\n bounds = [0, 1, 0, 1]\n xmin, xmax, ymin, ymax = bounds\n width_dict[display_ax.ax] = (xmax - xmin)\n\n total_width = float(sum(width_dict.values()))\n for ax, width in width_dict.items():\n width_dict[ax] = width / total_width * (x1 - x0)\n x_ax = display_ax_dict.get('x', dummy_ax)\n y_ax = display_ax_dict.get('y', dummy_ax)\n z_ax = display_ax_dict.get('z', dummy_ax)\n left_dict = dict()\n left_dict[y_ax.ax] = x0\n left_dict[x_ax.ax] = x0 + width_dict[y_ax.ax]\n left_dict[z_ax.ax] = x0 + width_dict[x_ax.ax] + width_dict[y_ax.ax]\n\n return transforms.Bbox([[left_dict[axes], y0],\n [left_dict[axes] + width_dict[axes], y1]])\n\n def draw_cross(self, cut_coords=None, **kwargs):\n \"\"\" Draw a crossbar on the plot to show where the cut is\n performed.\n\n Parameters\n ----------\n cut_coords: 3-tuple of floats, optional\n The position of the cross to draw. If none is passed, the\n ortho_slicer's cut coordinates are used.\n kwargs:\n Extra keyword arguments are passed to axhline\n \"\"\"\n if cut_coords is None:\n cut_coords = self.cut_coords\n coords = dict()\n for direction in 'xyz':\n coord = None\n if direction in self._cut_displayed:\n coord = cut_coords[\n sorted(self._cut_displayed).index(direction)]\n coords[direction] = coord\n x, y, z = coords['x'], coords['y'], coords['z']\n\n kwargs = kwargs.copy()\n if not 'color' in kwargs:\n if self._black_bg:\n kwargs['color'] = '.8'\n else:\n kwargs['color'] = 'k'\n\n if 'y' in self.axes:\n ax = self.axes['y'].ax\n if x is not None:\n ax.axvline(x, ymin=.05, ymax=.95, **kwargs)\n if z is not None:\n ax.axhline(z, **kwargs)\n\n if 'x' in self.axes:\n ax = self.axes['x'].ax\n if y is not None:\n ax.axvline(y, ymin=.05, ymax=.95, **kwargs)\n if z is not None:\n ax.axhline(z, xmax=.95, **kwargs)\n\n if 'z' in self.axes:\n ax = self.axes['z'].ax\n if x is not None:\n ax.axvline(x, ymin=.05, ymax=.95, **kwargs)\n if y is not None:\n ax.axhline(y, **kwargs)\n\n\n###############################################################################\n# class BaseStackedSlicer\n###############################################################################\n\nclass BaseStackedSlicer(BaseSlicer):\n \"\"\" A class to create linked axes for plotting stacked\n cuts of 3D maps.\n\n Attributes\n ----------\n\n axes: dictionnary of axes\n The axes used to plot each view.\n frame_axes: axes\n The axes framing the whole set of views.\n\n Notes\n -----\n\n The extent of the different axes are adjusted to fit the data\n best in the viewing area.\n \"\"\"\n @classmethod\n def find_cut_coords(cls, img=None, threshold=None, cut_coords=None):\n \"Instanciate the slicer and find cut coordinates\"\n if cut_coords is None:\n cut_coords = 7\n\n if img is None or img is False:\n bounds = ((-40, 40), (-30, 30), (-30, 75))\n lower, upper = bounds['xyz'.index(cls._direction)]\n cut_coords = np.linspace(lower, upper, cut_coords).tolist()\n else:\n if (not isinstance(cut_coords, collections.Sequence) and\n isinstance(cut_coords, numbers.Number)):\n cut_coords = find_cut_slices(img,\n direction=cls._direction,\n n_cuts=cut_coords)\n\n return cut_coords\n\n def _init_axes(self, **kwargs):\n x0, y0, x1, y1 = self.rect\n # Create our axes:\n self.axes = dict()\n fraction = 1. 
/ len(self.cut_coords)\n for index, coord in enumerate(self.cut_coords):\n coord = float(coord)\n fh = self.frame_axes.get_figure()\n ax = fh.add_axes([fraction * index * (x1 - x0) + x0, y0,\n fraction * (x1 - x0), y1 - y0])\n ax.axis('off')\n display_ax = self._axes_class(ax, self._direction,\n coord, **kwargs)\n self.axes[coord] = display_ax\n ax.set_axes_locator(self._locator)\n\n if self._black_bg:\n for ax in self.axes.values():\n ax.ax.imshow(np.zeros((2, 2, 3)),\n extent=[-5000, 5000, -5000, 5000],\n zorder=-500, aspect='equal')\n\n # To have a black background in PDF, we need to create a\n # patch in black for the background\n self.frame_axes.imshow(np.zeros((2, 2, 3)),\n extent=[-5000, 5000, -5000, 5000],\n zorder=-500, aspect='auto')\n self.frame_axes.set_zorder(-1000)\n\n def _locator(self, axes, renderer):\n \"\"\" The locator function used by matplotlib to position axes.\n Here we put the logic used to adjust the size of the axes.\n \"\"\"\n x0, y0, x1, y1 = self.rect\n width_dict = dict()\n display_ax_dict = self.axes\n\n if self._colorbar:\n adjusted_width = self._colorbar_width / len(self.axes)\n right_margin = self._colorbar_margin['right'] / len(self.axes)\n ticks_margin = self._colorbar_margin['left'] / len(self.axes)\n x1 = x1 - (adjusted_width + right_margin + ticks_margin)\n\n for display_ax in display_ax_dict.values():\n bounds = display_ax.get_object_bounds()\n if not bounds:\n # This happens if the call to _map_show was not\n # succesful. As it happens asyncroniously (during a\n # refresh of the figure) we capture the problem and\n # ignore it: it only adds a non informative traceback\n bounds = [0, 1, 0, 1]\n xmin, xmax, ymin, ymax = bounds\n width_dict[display_ax.ax] = (xmax - xmin)\n total_width = float(sum(width_dict.values()))\n for ax, width in width_dict.items():\n width_dict[ax] = width / total_width * (x1 - x0)\n left_dict = dict()\n left = float(x0)\n for coord, display_ax in sorted(display_ax_dict.items()):\n left_dict[display_ax.ax] = left\n this_width = width_dict[display_ax.ax]\n left += this_width\n return transforms.Bbox([[left_dict[axes], y0],\n [left_dict[axes] + width_dict[axes], y1]])\n\n def draw_cross(self, cut_coords=None, **kwargs):\n \"\"\" Draw a crossbar on the plot to show where the cut is\n performed.\n\n Parameters\n ----------\n cut_coords: 3-tuple of floats, optional\n The position of the cross to draw. 
If none is passed, the\n ortho_slicer's cut coordinates are used.\n kwargs:\n Extra keyword arguments are passed to axhline\n \"\"\"\n return\n\n\nclass XSlicer(BaseStackedSlicer):\n _direction = 'x'\n _default_figsize = [2.6, 2.3]\n\n\nclass YSlicer(BaseStackedSlicer):\n _direction = 'y'\n _default_figsize = [2.2, 2.3]\n\n\nclass ZSlicer(BaseStackedSlicer):\n _direction = 'z'\n _default_figsize = [2.2, 2.3]\n\n\nclass XZSlicer(OrthoSlicer):\n _cut_displayed = 'xz'\n\n\nclass YXSlicer(OrthoSlicer):\n _cut_displayed = 'yx'\n\n\nclass YZSlicer(OrthoSlicer):\n _cut_displayed = 'yz'\n\n\nSLICERS = dict(ortho=OrthoSlicer,\n xz=XZSlicer,\n yz=YZSlicer,\n yx=YXSlicer,\n x=XSlicer,\n y=YSlicer,\n z=ZSlicer)\n\n\nclass OrthoProjector(OrthoSlicer):\n \"\"\"A class to create linked axes for plotting orthogonal projections\n of 3D maps.\n \"\"\"\n _axes_class = GlassBrainAxes\n\n @classmethod\n def find_cut_coords(cls, img=None, threshold=None, cut_coords=None):\n return (None, ) * len(cls._cut_displayed)\n\n def draw_cross(self, cut_coords=None, **kwargs):\n # It does not make sense to draw crosses for the position of\n # the cuts since we are taking the max along one axis\n pass\n\n def add_graph(self, adjacency_matrix, node_coords,\n node_color='auto', node_size=50,\n edge_cmap=cm.bwr,\n edge_vmin=None, edge_vmax=None,\n edge_threshold=None,\n edge_kwargs=None, node_kwargs=None):\n \"\"\"Plot undirected graph on each of the axes\n\n Parameters\n ----------\n adjacency_matrix: numpy array of shape (n, n)\n represents the edges strengths of the graph. Assumed to be\n a symmetric matrix.\n node_coords: numpy array_like of shape (n, 3)\n 3d coordinates of the graph nodes in world space.\n node_color: color or sequence of colors\n color(s) of the nodes.\n node_size: scalar or array_like\n size(s) of the nodes in points^2.\n edge_cmap: colormap\n colormap used for representing the strength of the edges.\n edge_vmin: float, optional, default: None\n edge_vmax: float, optional, default: None\n If not None, either or both of these values will be used to\n as the minimum and maximum values to color edges. If None are\n supplied the maximum absolute value within the given threshold\n will be used as minimum (multiplied by -1) and maximum\n coloring levels.\n edge_threshold: str or number\n If it is a number only the edges with a value greater than\n edge_threshold will be shown.\n If it is a string it must finish with a percent sign,\n e.g. 
\"25.3%\", and only the edges with a abs(value) above\n the given percentile will be shown.\n edge_kwargs: dict\n will be passed as kwargs for each edge matlotlib Line2D.\n node_kwargs: dict\n will be passed as kwargs to the plt.scatter call that plots all\n the nodes in one go.\n\n \"\"\"\n # set defaults\n if edge_kwargs is None:\n edge_kwargs = {}\n if node_kwargs is None:\n node_kwargs = {}\n if node_color == 'auto':\n nb_nodes = len(node_coords)\n node_color = mpl_cm.Set2(np.linspace(0, 1, nb_nodes))\n\n node_coords = np.asarray(node_coords)\n\n # decompress input matrix if sparse\n if sparse.issparse(adjacency_matrix):\n adjacency_matrix = adjacency_matrix.toarray()\n\n # make the lines below well-behaved\n adjacency_matrix = np.nan_to_num(adjacency_matrix)\n\n # safety checks\n if 's' in node_kwargs:\n raise ValueError(\"Please use 'node_size' and not 'node_kwargs' \"\n \"to specify node sizes\")\n if 'c' in node_kwargs:\n raise ValueError(\"Please use 'node_color' and not 'node_kwargs' \"\n \"to specify node colors\")\n\n adjacency_matrix_shape = adjacency_matrix.shape\n if (len(adjacency_matrix_shape) != 2 or\n adjacency_matrix_shape[0] != adjacency_matrix_shape[1]):\n raise ValueError(\n \"'adjacency_matrix' is supposed to have shape (n, n).\"\n ' Its shape was {0}'.format(adjacency_matrix_shape))\n\n node_coords_shape = node_coords.shape\n if len(node_coords_shape) != 2 or node_coords_shape[1] != 3:\n message = (\n \"Invalid shape for 'node_coords'. You passed an \"\n \"'adjacency_matrix' of shape {0} therefore \"\n \"'node_coords' should be a array with shape ({0[0]}, 3) \"\n 'while its shape was {1}').format(adjacency_matrix_shape,\n node_coords_shape)\n\n raise ValueError(message)\n\n if node_coords_shape[0] != adjacency_matrix_shape[0]:\n raise ValueError(\n \"Shape mismatch between 'adjacency_matrix' \"\n \"and 'node_coords'\"\n \"'adjacency_matrix' shape is {0}, 'node_coords' shape is {1}\"\n .format(adjacency_matrix_shape, node_coords_shape))\n\n if not np.allclose(adjacency_matrix, adjacency_matrix.T, rtol=1e-3):\n raise ValueError(\"'adjacency_matrix' should be symmetric\")\n\n # For a masked array, masked values are replaced with zeros\n if hasattr(adjacency_matrix, 'mask'):\n if not (adjacency_matrix.mask == adjacency_matrix.mask.T).all():\n raise ValueError(\n \"'adjacency_matrix' was masked with a non symmetric mask\")\n adjacency_matrix = adjacency_matrix.filled(0)\n\n if edge_threshold is not None:\n # Keep a percentile of edges with the highest absolute\n # values, so only need to look at the covariance\n # coefficients below the diagonal\n lower_diagonal_indices = np.tril_indices_from(adjacency_matrix,\n k=-1)\n lower_diagonal_values = adjacency_matrix[\n lower_diagonal_indices]\n edge_threshold = _utils.param_validation.check_threshold(\n edge_threshold, np.abs(lower_diagonal_values),\n stats.scoreatpercentile, 'edge_threshold')\n\n adjacency_matrix = adjacency_matrix.copy()\n threshold_mask = np.abs(adjacency_matrix) < edge_threshold\n adjacency_matrix[threshold_mask] = 0\n\n lower_triangular_adjacency_matrix = np.tril(adjacency_matrix, k=-1)\n non_zero_indices = lower_triangular_adjacency_matrix.nonzero()\n\n line_coords = [node_coords[list(index)]\n for index in zip(*non_zero_indices)]\n\n adjacency_matrix_values = adjacency_matrix[non_zero_indices]\n for ax in self.axes.values():\n ax._add_markers(node_coords, node_color, node_size, **node_kwargs)\n if line_coords:\n ax._add_lines(line_coords, adjacency_matrix_values, edge_cmap,\n vmin=edge_vmin, 
vmax=edge_vmax,\n **edge_kwargs)\n\n plt.draw_if_interactive()\n\n\nclass XProjector(OrthoProjector):\n _cut_displayed = 'x'\n _default_figsize = [2.6, 2.3]\n\n\nclass YProjector(OrthoProjector):\n _cut_displayed = 'y'\n _default_figsize = [2.2, 2.3]\n\n\nclass ZProjector(OrthoProjector):\n _cut_displayed = 'z'\n _default_figsize = [2.2, 2.3]\n\n\nclass XZProjector(OrthoProjector):\n _cut_displayed = 'xz'\n\n\nclass YXProjector(OrthoProjector):\n _cut_displayed = 'yx'\n\n\nclass YZProjector(OrthoProjector):\n _cut_displayed = 'yz'\n\n\nPROJECTORS = dict(ortho=OrthoProjector,\n xz=XZProjector,\n yz=YZProjector,\n yx=YXProjector,\n x=XProjector,\n y=YProjector,\n z=ZProjector)\n\n\ndef get_create_display_fun(display_mode, class_dict):\n try:\n return class_dict[display_mode].init_with_figure\n except KeyError:\n message = ('{0} is not a valid display_mode. '\n 'Valid options are {1}').format(\n display_mode, sorted(class_dict.keys()))\n raise ValueError(message)\n\n\ndef get_slicer(display_mode):\n \"Internal function to retrieve a slicer\"\n return get_create_display_fun(display_mode, SLICERS)\n\n\ndef get_projector(display_mode):\n \"Internal function to retrieve a projector\"\n return get_create_display_fun(display_mode, PROJECTORS)\n",
"\"\"\"\nTransformer used to apply basic transformations on MRI data.\n\"\"\"\n# Author: Gael Varoquaux, Alexandre Abraham\n# License: simplified BSD\n\nimport warnings\nimport abc\n\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.externals.joblib import Memory\n\nfrom .. import masking\nfrom .. import image\nfrom .. import signal\nfrom .. import _utils\nfrom .._utils.cache_mixin import CacheMixin, cache\nfrom .._utils.class_inspect import enclosing_scope_name\nfrom .._utils.compat import _basestring\n\n\ndef filter_and_extract(imgs, extraction_function,\n parameters,\n memory_level=0, memory=Memory(cachedir=None),\n verbose=0,\n confounds=None,\n copy=True):\n \"\"\"Extract representative time series using given function.\n\n Parameters\n ----------\n imgs: 3D/4D Niimg-like object\n Images to be masked. Can be 3-dimensional or 4-dimensional.\n\n extraction_function: function\n Function used to extract the time series from 4D data. This function\n should take images as argument and returns a tuple containing a 2D\n array with masked signals along with a auxiliary value used if\n returning a second value is needed.\n If any other parameter is needed, a functor or a partial\n function must be provided.\n\n For all other parameters refer to NiftiMasker documentation\n\n Returns\n -------\n signals: 2D numpy array\n Signals extracted using the extraction function. It is a scikit-learn\n friendly 2D array with shape n_samples x n_features.\n \"\"\"\n # Since the calling class can be any *Nifti*Masker, we look for exact type\n if verbose > 0:\n class_name = enclosing_scope_name(stack_level=10)\n\n # If we have a string (filename), we won't need to copy, as\n # there will be no side effect\n if isinstance(imgs, _basestring):\n copy = False\n\n if verbose > 0:\n print(\"[%s] Loading data from %s\" % (\n class_name,\n _utils._repr_niimgs(imgs)[:200]))\n imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4)\n\n sample_mask = parameters.get('sample_mask')\n if sample_mask is not None:\n imgs = image.index_img(imgs, sample_mask)\n\n target_shape = parameters.get('target_shape')\n target_affine = parameters.get('target_affine')\n if target_shape is not None or target_affine is not None:\n if verbose > 0:\n print(\"[%s] Resampling images\" % class_name)\n imgs = cache(\n image.resample_img, memory, func_memory_level=2,\n memory_level=memory_level, ignore=['copy'])(\n imgs, interpolation=\"continuous\",\n target_shape=target_shape,\n target_affine=target_affine,\n copy=copy)\n\n smoothing_fwhm = parameters.get('smoothing_fwhm')\n if smoothing_fwhm is not None:\n if verbose > 0:\n print(\"[%s] Smoothing images\" % class_name)\n imgs = cache(\n image.smooth_img, memory, func_memory_level=2,\n memory_level=memory_level)(\n imgs, parameters['smoothing_fwhm'])\n\n if verbose > 0:\n print(\"[%s] Extracting region signals\" % class_name)\n region_signals, aux = cache(extraction_function, memory,\n func_memory_level=2,\n memory_level=memory_level)(imgs)\n\n # Temporal\n # ========\n # Detrending (optional)\n # Filtering\n # Confounds removing (from csv file or numpy array)\n # Normalizing\n\n if verbose > 0:\n print(\"[%s] Cleaning extracted signals\" % class_name)\n sessions = parameters.get('sessions')\n region_signals = cache(\n signal.clean, memory=memory, func_memory_level=2,\n memory_level=memory_level)(\n region_signals,\n detrend=parameters['detrend'],\n standardize=parameters['standardize'],\n t_r=parameters['t_r'],\n 
low_pass=parameters['low_pass'],\n high_pass=parameters['high_pass'],\n confounds=confounds,\n sessions=sessions)\n\n return region_signals, aux\n\n\nclass BaseMasker(BaseEstimator, TransformerMixin, CacheMixin):\n \"\"\"Base class for NiftiMaskers\n \"\"\"\n\n @abc.abstractmethod\n def transform_single_imgs(self, imgs, confounds=None, copy=True):\n \"\"\"Extract signals from a single 4D niimg.\n\n Parameters\n ----------\n imgs: 3D/4D Niimg-like object\n See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.\n Images to process. It must boil down to a 4D image with scans\n number as last dimension.\n\n confounds: CSV file or array-like, optional\n This parameter is passed to signal.clean. Please see the related\n documentation for details.\n shape: (number of scans, number of confounds)\n\n Returns\n -------\n region_signals: 2D numpy.ndarray\n Signal for each element.\n shape: (number of scans, number of elements)\n \"\"\"\n raise NotImplementedError()\n\n def transform(self, imgs, confounds=None):\n \"\"\"Apply mask, spatial and temporal preprocessing\n\n Parameters\n ----------\n imgs: 3D/4D Niimg-like object\n See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.\n Images to process. It must boil down to a 4D image with scans\n number as last dimension.\n\n confounds: CSV file or array-like, optional\n This parameter is passed to signal.clean. Please see the related\n documentation for details.\n shape: (number of scans, number of confounds)\n\n Returns\n -------\n region_signals: 2D numpy.ndarray\n Signal for each element.\n shape: (number of scans, number of elements)\n \"\"\"\n self._check_fitted()\n\n return self.transform_single_imgs(imgs, confounds)\n\n def fit_transform(self, X, y=None, confounds=None, **fit_params):\n \"\"\"Fit to data, then transform it\n\n Parameters\n ----------\n X : Niimg-like object\n See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.\n\n y : numpy array of shape [n_samples]\n Target values.\n\n confounds: list of confounds, optional\n List of confounds (2D arrays or filenames pointing to CSV\n files). Must be of same length than imgs_list.\n\n Returns\n -------\n X_new : numpy array of shape [n_samples, n_features_new]\n Transformed array.\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n if y is None:\n # fit method of arity 1 (unsupervised transformation)\n if self.mask_img is None:\n return self.fit(X, **fit_params\n ).transform(X, confounds=confounds)\n else:\n return self.fit(**fit_params).transform(X, confounds=confounds)\n else:\n # fit method of arity 2 (supervised transformation)\n if self.mask_img is None:\n return self.fit(X, y, **fit_params\n ).transform(X, confounds=confounds)\n else:\n warnings.warn('[%s.fit] Generation of a mask has been'\n ' requested (y != None) while a mask has'\n ' been provided at masker creation. Given mask'\n ' will be used.' 
% self.__class__.__name__)\n return self.fit(**fit_params).transform(X, confounds=confounds)\n\n def inverse_transform(self, X):\n \"\"\" Transform the 2D data matrix back to an image in brain space.\n \"\"\"\n self._check_fitted()\n img = self._cache(masking.unmask)(X, self.mask_img_)\n # Be robust again memmapping that will create read-only arrays in\n # internal structures of the header: remove the memmaped array\n try:\n img._header._structarr = np.array(img._header._structarr).copy()\n except:\n pass\n return img\n\n def _check_fitted(self):\n if not hasattr(self, \"mask_img_\"):\n raise ValueError('It seems that %s has not been fitted. '\n 'You must call fit() before calling transform().'\n % self.__class__.__name__)\n"
] | [
[
"sklearn.externals.joblib.Memory"
],
[
"matplotlib.transforms.Bbox",
"numpy.linspace",
"numpy.asarray",
"numpy.nan_to_num",
"matplotlib.pyplot.axes",
"numpy.round",
"numpy.tril",
"scipy.sparse.issparse",
"numpy.allclose",
"matplotlib.colorbar.ColorbarBase",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.rot90",
"numpy.logical_not",
"numpy.linalg.inv",
"numpy.ma.masked_equal",
"matplotlib.colors.ListedColormap",
"numpy.array",
"matplotlib.pyplot.cm.ScalarMappable",
"numpy.ma.masked_inside",
"numpy.abs",
"numpy.tril_indices_from",
"matplotlib.lines.Line2D",
"numpy.indices",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.draw_if_interactive"
],
[
"numpy.array",
"sklearn.externals.joblib.Memory"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
m-novikov/hytra | [
"0dc28deaa2571fa8bea63ca178f0e53cc1cd7508",
"0dc28deaa2571fa8bea63ca178f0e53cc1cd7508",
"0dc28deaa2571fa8bea63ca178f0e53cc1cd7508",
"0dc28deaa2571fa8bea63ca178f0e53cc1cd7508",
"0dc28deaa2571fa8bea63ca178f0e53cc1cd7508",
"0dc28deaa2571fa8bea63ca178f0e53cc1cd7508"
] | [
"hytra/core/divisionfeatures.py",
"cvpr15_eval/plot_fmeasuer_vs_proposal.py",
"cvpr15_eval/plot_score_vs_loss.py",
"hytra/plugins/merger_resolver/gmm_merger_resolver.py",
"hytra/dvid/upload_dataset.py",
"hytra/core/hypothesesgraph.py"
] | [
"import numpy as np\nimport math\n\n\ndef dotproduct(v1, v2):\n return sum((a * b) for a, b in zip(v1, v2))\n\n\ndef length(v):\n return math.sqrt(dotproduct(v, v))\n\n\ndef angle(v1, v2):\n try:\n if length(v1) * length(v2) == 0:\n radians = 0\n else:\n radians = math.acos(dotproduct(v1, v2) / (length(v1) * length(v2)))\n except Exception as e:\n # print str(e), ': math.acos(', dotproduct(v1, v2) / (length(v1) * length(v2)), '), v1 =', v1, ', v2 =', v2\n radians = 0\n return (float(radians) * 180.0) / math.pi\n\n\n##### Feature base class #######\n\n\nclass Feature(object):\n name = \"Feature\"\n plugin = \"Tracking Features\"\n default_value = 0\n dimensionality = None\n\n def __init__(\n self,\n feats_name,\n default_value=None,\n delim=\"_\",\n scales=[1.0, 1.0, 1.0],\n ndim=2,\n feat_dim=1,\n ):\n self.name += str(delim) + str(feats_name)\n self.feats_name = feats_name\n if default_value != None:\n self.default_value = default_value\n self.scales = scales\n self.ndim = ndim\n self.feat_dim = feat_dim\n\n def compute(self, feats_cur, feats_next, **kwargs):\n raise NotImplementedError(\"Feature not fully implemented yet.\")\n\n def getName(self):\n return self.name\n\n def getPlugin(self):\n return self.plugin\n\n def dim(self):\n return self.dimensionality\n\n\nclass ParentChildrenRatio(Feature):\n name = \"ParentChildrenRatio\"\n dimensionality = 1\n\n def compute(self, feats_cur, feats_next, **kwargs):\n if len(feats_next) < 2:\n return np.array(len(feats_cur) * [self.default_value])\n result = np.array(feats_cur) / np.array(feats_next[0] + feats_next[1])\n for i in range(len(result)):\n if math.isnan(result[i]):\n result[i] = self.default_value\n return result\n\n def dim(self):\n return self.dimensionality * self.feat_dim\n\n\nclass ChildrenRatio(Feature):\n name = \"ChildrenRatio\"\n dimensionality = 1\n\n def compute(self, feats_cur, feats_next, **kwargs):\n if len(feats_next) < 2:\n return np.array(len(feats_cur) * [self.default_value])\n ratio = np.array(feats_next[0]) / np.array(feats_next[1])\n for i in range(len(ratio)):\n if math.isnan(ratio[i]):\n ratio[i] = self.default_value\n if ratio[i] > 1 and ratio[i] != 0:\n ratio[i] = 1.0 / ratio[i]\n return ratio\n\n def dim(self):\n return self.dimensionality * self.feat_dim\n\n\nclass SquaredDistances(Feature):\n name = \"SquaredDistances\"\n\n def compute(self, feats_cur, feats_next, **kwargs):\n return feats_cur\n\n def dim(self):\n return self.ndim\n\n\nclass ParentChildrenAngle(Feature):\n name = \"ParentChildrenAngle\"\n dimensionality = 1\n\n def compute(self, feats_cur, feats_next, **kwargs):\n angles = []\n for idx, com1 in enumerate(feats_next):\n v1 = (com1 - feats_cur) * self.scales[0 : com1.shape[0]]\n for com2 in feats_next[idx + 1 :]:\n v2 = (com2 - feats_cur) * self.scales[0 : com2.shape[0]]\n ang = angle(v1, v2)\n if ang > 180:\n assert ang <= 360.01, \"the angle must be smaller than 360 degrees\"\n ang = 360 - ang\n angles.append(ang)\n\n if len(angles) == 0:\n angles = [self.default_value]\n\n return max(angles)\n\n\nclass ParentIdentity(Feature):\n name = \"\"\n\n def compute(self, feats_cur, feats_next, **kwargs):\n return feats_cur\n\n\nclass FeatureManager(object):\n\n feature_mappings = {\n \"ParentIdentity\": ParentIdentity,\n \"SquaredDistances\": SquaredDistances,\n \"ChildrenRatio\": ChildrenRatio,\n \"ParentChildrenRatio\": ParentChildrenRatio,\n \"ParentChildrenAngle\": ParentChildrenAngle,\n }\n\n def __init__(\n self,\n scales=[1.0, 1.0, 1.0],\n n_best=3,\n com_name_cur=\"RegionCenter\",\n 
com_name_next=\"RegionCenter\",\n size_name=\"Count\",\n delim=\"_\",\n template_size=50,\n ndim=2,\n size_filter=4,\n squared_distance_default=9999,\n ):\n self.scales = scales[0:ndim]\n self.n_best = n_best\n self.com_name_cur = com_name_cur\n self.com_name_next = com_name_next\n self.size_name = size_name\n self.delim = delim\n self.template_size = template_size\n self.ndim = ndim\n self.size_filter = size_filter\n self.squared_distance_default = squared_distance_default\n\n def _getBestSquaredDistances(\n self, com_cur, coms_next, size_filter=None, sizes_next=[], default_value=9999\n ):\n \"\"\" returns the squared distances to the objects in the neighborhood of com_curr, optionally with size filter \"\"\"\n squaredDistances = []\n\n for label_next in coms_next.keys():\n assert label_next in sizes_next.keys()\n if size_filter != None and sizes_next[label_next] >= size_filter:\n dist = np.linalg.norm(coms_next[label_next] - com_cur * self.scales)\n squaredDistances.append([label_next, dist])\n\n squaredDistances = np.array(squaredDistances)\n # sort the array in the second column in ascending order\n squaredDistances = np.array(\n sorted(squaredDistances, key=lambda a_entry: a_entry[1])\n )\n\n # initialize with label -1 and default value\n result = np.array(\n [[-1, default_value] for x in range(self.n_best)], dtype=np.float32\n )\n if squaredDistances.shape[0] != 0:\n result[\n 0 : min(squaredDistances.shape[0], result.shape[0]), :\n ] = squaredDistances[0 : min(squaredDistances.shape[0], result.shape[0]), :]\n\n return result\n\n def computeFeatures_at(\n self, feats_cur, feats_next, img_next, feat_names, label_image_filename=None\n ):\n \"\"\"\n **Parameters:**\n \n * if `label_image_filename` is given, it is used to filter the objects from the feature dictionaries \n that belong to that label image only (in the JST setting) \n \"\"\"\n\n # n_labels = list(feats_cur.values())[0].shape[0]\n result = {}\n\n # find available features\n vigra_feat_names = set([self.com_name_cur, self.com_name_next, self.size_name])\n feat_classes = {}\n\n for name in feat_names:\n name_split = name.split(self.delim)\n if \"SquaredDistances\" in name_split:\n continue\n\n if len(name_split) != 2:\n raise ValueError(\n \"tracking features consist of an operator and a feature name only, given name={}\".format(\n name_split\n )\n )\n if len(feats_cur[name_split[1]].shape) > 1:\n feat_dim = feats_cur[name_split[1]].shape[1]\n else:\n feat_dim = 1\n feat_classes[name] = self.feature_mappings[name_split[0]](\n name_split[1], delim=self.delim, ndim=self.ndim, feat_dim=feat_dim\n )\n\n shape = (list(feats_cur.values())[0].shape[0], feat_classes[name].dim())\n result[name] = np.ones(shape) * feat_classes[name].default_value\n\n vigra_feat_names.add(name_split[1])\n\n # initialize squared distances\n for idx in range(self.n_best):\n name = \"SquaredDistances_\" + str(idx)\n result[name] = (\n np.ones((list(feats_cur.values())[0].shape[0], 1))\n * self.squared_distance_default\n )\n\n # construct mapping which we only need if label_image_filename was given and the features 'filename' and 'id' exist\n if (\n label_image_filename is not None\n and \"filename\" in feats_next\n and \"id\" in feats_next\n ):\n global_indices_current_label_image_only = [\n l\n for l, f in enumerate(feats_next[\"filename\"])\n if f == label_image_filename\n ]\n local_to_global_index_map = dict(\n [\n (feats_next[\"id\"][l], l)\n for l in global_indices_current_label_image_only\n ]\n )\n\n # for every object in this frame, check which 
objects are in the vicinity in the next frame\n valid_indices = [0]\n for label_cur, com_cur in enumerate(feats_cur[self.com_name_cur]):\n if (\n label_image_filename is not None\n and \"filename\" in feats_cur\n and feats_cur[\"filename\"][label_cur] != label_image_filename\n ):\n # in the JST context, only look at objects from a given segmentation hypotheses set\n continue\n if label_cur == 0:\n continue\n\n valid_indices.append(label_cur)\n feats_next_subset = {}\n for k in vigra_feat_names:\n feats_next_subset[k] = {}\n\n if feats_next is not None and img_next is not None:\n # find roi around the center of the current object\n idx_cur = [round(x) for x in com_cur]\n\n roi = []\n for idx, coord in enumerate(idx_cur):\n start = max(coord - self.template_size / 2, 0)\n stop = min(coord + self.template_size / 2, img_next.shape[idx])\n roi.append(slice(int(start), int(stop)))\n\n # find all coms in the neighborhood of com_cur by checking the next frame's labelimage in the roi\n subimg_next = img_next[roi]\n labels_next = np.unique(subimg_next).tolist()\n\n # if 'id' in features, map the labels first -- because labels_next refers image object ids,\n # whereas the features are the union of objects from several segmentations\n if \"id\" in feats_next:\n labels_next = [\n local_to_global_index_map[l] for l in labels_next if l != 0\n ]\n\n for l in labels_next:\n if l != 0:\n for n in vigra_feat_names:\n feats_next_subset[n][l] = np.array(\n [feats_next[n][l]]\n ).flatten()\n\n sq_dist_label = self._getBestSquaredDistances(\n com_cur,\n feats_next_subset[self.com_name_next],\n self.size_filter,\n feats_next_subset[self.size_name],\n default_value=self.squared_distance_default,\n )\n\n feats_next_subset_best = {}\n for n in vigra_feat_names:\n feats_next_subset_best[n] = []\n for idx, row in enumerate(sq_dist_label):\n l = row[0]\n if l != -1:\n feats_next_subset_best[n].append(feats_next_subset[n][l])\n\n # first add squared distances\n for idx in range(self.n_best):\n name = \"SquaredDistances_\" + str(idx)\n result[name][label_cur] = sq_dist_label[idx][1]\n\n # add all other features\n for name, feat_class in feat_classes.items():\n if feat_class.feats_name == \"SquaredDistances\":\n f_next = sq_dist_label[0:2, 1]\n f_cur = None\n else:\n f_cur = np.array(\n [feats_cur[feat_class.feats_name][label_cur]]\n ).flatten()\n f_next = np.array(\n [feats_next_subset_best[feat_class.feats_name]]\n ).reshape((-1, f_cur.shape[0]))\n result[name][label_cur] = feat_class.compute(f_cur, f_next)\n\n # return only valid labels\n for feature_name in result:\n result[feature_name] = result[feature_name][valid_indices]\n\n return result\n\n\nif __name__ == \"__main__\":\n import vigra\n import numpy as np\n\n img_cur = vigra.readImage(\"/home/mschiegg/tmp/segmentImage.tif\")\n img_next = img_cur\n\n labels_cur = vigra.analysis.labelImage(img_cur)\n feats_cur = vigra.analysis.extractRegionFeatures(\n labels_cur.astype(np.float32),\n labels_cur.astype(np.uint32),\n features=\"all\",\n ignoreLabel=0,\n )\n\n feat_names = [\n \"ParentChildrenRatio_Count\",\n \"ParentChildrenRatio_Mean\",\n \"ChildrenRatio_Count\",\n \"ChildrenRatio_Mean\",\n \"ParentChildrenAngle_RegionCenter\",\n \"ChildrenRatio_SquaredDistances\",\n ]\n fm = FeatureManager()\n res = fm.computeFeatures_at(feats_cur, feats_cur, img_cur, feat_names)\n",
"#!/usr/bin/env python\nimport os.path\nimport sys\n\nsys.path.append('.')\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../hytra/.')\n\nimport numpy as np\nimport os\nimport string\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_area_auto_adjustable\nimport argparse\nimport trackingfeatures\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Plot feature importance')\n\n # file paths\n parser.add_argument('--folders', required=True, type=str,nargs='+', dest='folders',\n help='File containing the learned re-ranker weights')\n parser.add_argument('--legend', type=str,nargs='+', dest='legend',\n help='File containing the legend of weight files',default=[])\n # parser.add_argument('--feature-names', type=str, dest='feature_names', default='',\n # help='Description for each feature')\n # parser.add_argument('--expansion-range', type=int, nargs='+', dest='expansion_range',\n # help='Lower and upper bound of track weight expansions (default 0, 1)')\n parser.add_argument('-o', required=True, type=str, dest='out_file',\n help='Name of the file the plot is saved to')\n # parser.add_argument('--non-zero', action='store_true', dest='non_zero', help='display only non zero feature importance')\n\n # parser.add_argument('--log', action='store_true', dest='logscale', help='use log scale on y axis')\n # parser.add_argument('--sort', action='store_true', dest='sort', help='sort features')\n # parser.add_argument('--limit', type=float, dest='limit',\n # help='threshold for featureweight if non zero is used',default=0)\n parser.add_argument('--fontsize', type=int, dest='fontsize', help='fontsize of ticks',default=15)\n parser.add_argument('--linewidth', type=int, dest='linewidth', help='linewidth of graph lines',default=8)\n parser.add_argument('--latex', action='store_true', dest='latex', help='export graph in latex format')\n\n\n\n options = parser.parse_args()\n\n #Direct input \n plt.rcParams['text.latex.preamble']=[r\"\\usepackage{lmodern}\"]\n #Options\n params = {'text.usetex' : True,\n 'font.size' : options.fontsize,\n 'font.family' : 'lmodern',\n 'text.latex.unicode': True,\n }\n plt.rcParams.update(params) \n\n colors = ['b', 'g', 'r', 'c', 'm', 'y','dodgerblue','orangered','cyan']\n\n data = []\n for folder in options.folders:\n label = folder\n data.append([])\n\n for subfolder in os.listdir(folder+\"/test/\"):\n if subfolder.startswith(\"iter_\"):\n # print folder+\"/\"+subfolder+\"/result.txt\"\n with open(str(folder)+\"/test/\"+str(subfolder)+\"/result.txt\") as f:\n # print f.read().splitlines()\n i = int(subfolder[5])\n precission, recall, fmeasure = string.split(f.read().splitlines()[0],\",\")[2:5]\n data[-1].append((i, precission, recall, fmeasure))\n\n # data[-1] = sorted(zip(*data[-1]), key=lambda pair: pair[0])\n # print data[-1]\n # print zip(*data[-1]) \n data[-1] = sorted(data[-1], key=lambda pair: pair[0])\n index, pre, rec, fme = zip(*data[-1]) \n # plt.plot(index, pre, 'r-',label=\"pre\")\n # plt.plot(index, rec, 'g-',label=\"rec\")\n if len(options.legend) > 0:\n plotname = options.legend[len(data)-1]\n else:\n plotname = folder\n plt.plot(index, fme, color=colors[len(data)-1],label=plotname,linewidth=options.linewidth)\n # plt.axis([0, 6, 0, 20])\n\n plt.title('F-Measure of M-th proposal')\n plt.xlabel('Proposal')\n plt.ylabel('F-Measure')\n plt.tight_layout(.5)\n plt.legend()\n\n if options.latex:\n plt.savefig(options.out_file, \n #This is 
a simple recommendation for publication plots\n            dpi=1000, \n            # Plot will occupy a maximum of available space\n            bbox_inches='tight', \n            )\n    else:\n        plt.savefig(options.out_file)\n",
"#!/usr/bin/env python\n\nimport os.path\nimport sys\nsys.path.append('.')\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../hytra/.')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n# need this for weighted loss loading!\n\nimport structsvm\n\n\ndef load_proposals(input_filenames):\n if input_filenames:\n return [structsvm.utils.load_proposal_labeling(p) for p in input_filenames]\n else:\n return []\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Scatter-Plot the loss between proposals and vs the loss w.r.t. ground truth')\n\n # file paths\n parser.add_argument('--proposals', type=str, nargs='+', dest='proposals',\n help='Proposal labeling files')\n parser.add_argument('--ground-truth', required=True, type=str, dest='ground_truth',\n help='Ground truth labeling file')\n parser.add_argument('--features', type=str, dest='features',\n help='file containing the features for the proposals')\n parser.add_argument('--reranker-weights', type=str, dest='reranker_weights',\n help='file containing the learned reranker weights')\n parser.add_argument('--loss-weights', required=True, type=float, nargs='+', dest='loss_weights',\n help='Loss weight vector indicating the loss of each class')\n parser.add_argument('-o', required=True, type=str, dest='out_file',\n help='Name of the file the plot is saved to')\n\n options = parser.parse_args()\n\n # get labelings\n ground_truth_label, ground_truth_label_class_multiplier_list, _ = \\\n structsvm.utils.load_labelfile_with_classes_and_multipliers(options.ground_truth)\n\n proposals = load_proposals(options.proposals)\n losses = []\n for i in range(len(proposals)):\n gt_loss = structsvm.utils.multiclass_weighted_hamming_loss(\n proposals[i], ground_truth_label, ground_truth_label_class_multiplier_list, options.loss_weights)\n losses.append(gt_loss)\n\n # compute scores\n feature_vectors = np.loadtxt(options.features)\n weights = np.loadtxt(options.reranker_weights)\n means = np.loadtxt(os.path.splitext(options.reranker_weights)[0] + '_means.txt')\n variances = np.loadtxt(os.path.splitext(options.reranker_weights)[0] + '_variances.txt')\n structsvm.utils.apply_feature_normalization(feature_vectors, means, variances)\n scores = np.dot(weights, feature_vectors)\n\n # print scores\n # print losses\n \n # plot\n plt.figure()\n plt.hold(True)\n plt.scatter(losses, scores)\n plt.xlabel(\"Loss\")\n plt.ylabel(\"Score\")\n \n plt.savefig(options.out_file)",
"from hytra.pluginsystem import merger_resolver_plugin\nimport numpy as np\n\nfrom sklearn import mixture\n\n\nclass GMMMergerResolver(merger_resolver_plugin.MergerResolverPlugin):\n \"\"\"\n Computes the subtraction of features in the feature vector\n \"\"\"\n\n def initGMM(self, mergerCount, object_init_list=None):\n gmm = mixture.GaussianMixture(n_components=mergerCount)\n if object_init_list is not None and len(object_init_list) > 0:\n gmm.weights_ = np.array([o[0] for o in object_init_list])\n gmm.covariances_ = np.array([o[1] for o in object_init_list])\n gmm.means_ = np.array([o[2] for o in object_init_list])\n # Needed since mandatory switch from mixture.GMM to\n # mixture.GaussianMixture in sklearn 0.20:\n gmm.precisions_cholesky_ = np.array([o[3] for o in object_init_list])\n return gmm\n\n def getObjectInitializationList(self, gmm):\n return zip(gmm.weights_, gmm.covariances_, gmm.means_, gmm.precisions_cholesky_)\n\n def resolveMergerForCoords(self, coordinates, mergerCount, initializations=None):\n \"\"\"\n Resolve the pixel coordinates belonging to an object ID, into `mergerCount`\n new segments by fitting some kind of model. The `initializations` provide fits\n in the preceding frame of all possible incomings (list may be empty, but could\n also be more than `mergerCount`).\n \n `coordinates` pixel coordinates that belong to a merger ID in labelImage\n \n `mergerCount` number of gaussians to fit\n \n **returns** a list of fitted objects\n \"\"\"\n\n # fit GMM to label image data\n gmm = self.initGMM(mergerCount, initializations)\n gmm.fit(coordinates)\n assert gmm.converged_\n\n return self.getObjectInitializationList(gmm)\n\n def resolveMerger(\n self, labelImage, objectId, nextId, mergerCount, initializations=None\n ):\n \"\"\"\n Resolve the object with the ID `objectId` in the `labelImage` into `mergerCount`\n new segments by fitting some kind of model. The `initializations` provide fits\n in the preceding frame of all possible incomings (list may be empty, but could\n also be more than `mergerCount`).\n \n `labelImage` is used read-only, use `updateLabelImage` to refine the segmentation\n \n **returns** a list of fitted objects\n \"\"\"\n\n # fit GMM to label image data\n coordinates = np.transpose(np.vstack(np.where(labelImage == objectId)))\n gmm = self.initGMM(mergerCount, initializations)\n gmm.fit(coordinates)\n assert gmm.converged_\n\n return self.getObjectInitializationList(gmm)\n\n def updateLabelImage(self, labelImage, objectId, fits, newIds, offset=None):\n \"\"\"\n Resolve the object with the ID `objectId` in the `labelImage` into the fitted models with the given new IDs.\n `labelImage` should be updated by replacing all pixels that were labelled with `objectId`\n to get a new Id depending on the fit.\n \"\"\"\n\n if len(fits) > 1:\n assert len(fits) == len(newIds)\n # edit labelimage in-place\n coordinates = np.transpose(np.vstack(np.where(labelImage == objectId)))\n if offset is not None:\n assert coordinates.shape[1] == len(offset)\n coordinates = coordinates + offset\n gmm = self.initGMM(len(fits), fits)\n responsibilities = gmm.predict(coordinates)\n newIds = np.array(newIds)\n newObjectIds = newIds[responsibilities]\n labelImage[labelImage == objectId] = newObjectIds\n",
"import logging\nfrom libdvid import DVIDNodeService, DVIDServerService\nimport numpy as np\nimport json_tricks.np as json\nfrom pluginsystem.plugin_manager import TrackingPluginManager\n\n\ndef dataToBlock(data, dtype=np.uint8, block_size=32):\n if len(data.shape) == 2:\n data = np.expand_dims(data, axis=2)\n elif len(data.shape) != 3:\n raise ValueError(\"Cannot prepare data of shape that is not a 2D or 3D frame\")\n\n old_shape = data.shape\n new_shape = []\n for d in old_shape:\n if d % block_size == 0:\n new_shape.append(d)\n else:\n new_shape.append(((d // block_size) + 1) * block_size)\n\n logging.debug(\"transformed data from {} to {}\".format(old_shape, new_shape))\n new_data = np.zeros(new_shape, dtype=dtype)\n new_data[0 : old_shape[0], 0 : old_shape[1], 0 : old_shape[2]] = data\n return new_data\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Upload raw data and segmentation of a dataset to dvid.\n\n Example: python dvid/upload_dataset.py --dvid-address 104.196.46.138:80 --label-image /Users/chaubold/hci/data/animal-tracking/FlyBowlTracking/FlyBowlTracking.ilp --raw /Users/chaubold/hci/data/animal-tracking/FlyBowlTracking/FlyBowlMovie.h5 --raw-path data --dataset-name flybowl-test-2016-04-07 --time-range 0 10\n \"\"\"\n import argparse\n\n parser = argparse.ArgumentParser(\n description=\"Upload raw data and segmentation to dvid\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--dataset-name\",\n required=True,\n type=str,\n dest=\"datasetName\",\n help=\"Datset name that will be seen in the DVID web interface\",\n )\n parser.add_argument(\n \"--dvid-address\",\n required=True,\n type=str,\n dest=\"dvidAddress\",\n help=\"<IP>:<Port> of the dvid server\",\n )\n parser.add_argument(\n \"--label-image\",\n required=True,\n type=str,\n dest=\"ilpFilename\",\n help=\"Filename of the HDF5 file containing the label images\",\n )\n parser.add_argument(\n \"--raw\",\n required=True,\n type=str,\n dest=\"rawFilename\",\n help=\"Filename of the hdf5 file containing the raw data\",\n )\n parser.add_argument(\n \"--raw-path\",\n required=True,\n type=str,\n dest=\"rawPath\",\n help=\"Path inside HDF5 file to raw volume\",\n )\n parser.add_argument(\n \"--label-image-path\",\n type=str,\n dest=\"labelImagePath\",\n help=\"Path inside ilastik project file to the label image\",\n default=\"/TrackingFeatureExtraction/LabelImage/0000/[[%d, 0, 0, 0, 0], [%d, %d, %d, %d, 1]]\",\n )\n parser.add_argument(\n \"--object-count-classifier-file\",\n type=str,\n dest=\"objectCountClassifierFile\",\n help=\"HDF5 file containing the object count classifier\",\n default=None,\n )\n parser.add_argument(\n \"--object-count-classifier-path\",\n type=str,\n dest=\"objectCountClassifierPath\",\n help=\"Path inside HDF5 file to the object count classifier\",\n default=\"/CountClassification\",\n )\n parser.add_argument(\n \"--rf-zero-padding\",\n type=int,\n dest=\"rfZeroPadding\",\n default=4,\n help=\"Number of digits per forest index inside the ClassifierForests HDF5 group\",\n )\n parser.add_argument(\n \"--time-range\",\n type=int,\n nargs=2,\n dest=\"timeRange\",\n help=\"Set time range to upload (inclusive!)\",\n )\n parser.add_argument(\n \"--verbose\", type=bool, dest=\"verbose\", default=False, help=\"verbose logs\"\n )\n\n args = parser.parse_args()\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n # initialize plugin manager\n plugin_manager = TrackingPluginManager(verbose=False)\n image_provider 
= plugin_manager.getImageProvider()\n\n # create dataset on server and get uuid\n server_address = args.dvidAddress\n server_service = DVIDServerService(server_address)\n uuid = server_service.create_new_repo(args.datasetName, \"description\")\n logging.info(\"UUID:\\n{}\".format(uuid))\n\n # get node service\n node_service = DVIDNodeService(server_address, uuid)\n\n # get dataset size and store in dvid\n shape = image_provider.getImageShape(args.ilpFilename, args.labelImagePath)\n time_range = image_provider.getTimeRange(args.ilpFilename, args.labelImagePath)\n if args.timeRange is not None:\n time_range = (\n max(time_range[0], args.timeRange[0]),\n min(time_range[1], args.timeRange[1]),\n )\n logging.info(\"Uploading time range {} to {}\".format(time_range, server_address))\n keyvalue_store = \"config\"\n node_service.create_keyvalue(keyvalue_store)\n settings = {\"shape\": shape, \"time_range\": time_range}\n node_service.put(keyvalue_store, \"imageInfo\", json.dumps(settings))\n\n # upload all frames\n for frame in range(time_range[0], time_range[1]):\n logging.info(\"Uploading frame {}\".format(frame))\n label_image = image_provider.getLabelImageForFrame(\n args.ilpFilename, args.labelImagePath, frame\n )\n raw_image = image_provider.getImageDataAtTimeFrame(\n args.rawFilename, args.rawPath, frame\n )\n\n raw_name = \"raw-{}\".format(frame)\n seg_name = \"seg-{}\".format(frame)\n node_service.create_grayscale8(raw_name)\n node_service.put_gray3D(\n raw_name, dataToBlock(raw_image, dtype=np.uint8), (0, 0, 0)\n )\n node_service.create_labelblk(seg_name)\n node_service.put_labels3D(\n seg_name, dataToBlock(label_image, dtype=np.uint64), (0, 0, 0)\n )\n\n # TODO: upload classifier\n",
"import logging\nimport copy\nimport networkx as nx\nimport numpy as np\nfrom sklearn.neighbors import KDTree\nimport hytra.core.jsongraph\nfrom hytra.core.jsongraph import negLog, listify\nfrom hytra.util.progressbar import DefaultProgressVisitor\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef getTraxelFeatureVector(traxel, featureName, maxNumDimensions=3):\n \"\"\"\n extract a feature vector from a traxel\n \"\"\"\n result = []\n for i in range(maxNumDimensions):\n try:\n result.append(traxel.get_feature_value(str(featureName), i))\n except:\n if i == 0:\n logger.error(\n f\"Error when accessing feature {featureName}[{i}] for \"\n f\"traxel (Id={traxel.Id},Timestep={traxel.Timestep})\"\n )\n logger.error(traxel.print_available_features())\n raise Exception\n else:\n logger.error(\n f\"Error: Classifier was trained with less merger than maxNumObjects {maxNumDimensions}.\"\n )\n raise Exception\n return result\n\n\nclass NodeMap:\n \"\"\"\n To access per node features of the hypotheses graph,\n this node map provides the same interface as pgmlink's NodeMaps\n \"\"\"\n\n def __init__(self, graph: nx.DiGraph, attributeName):\n assert isinstance(graph, nx.DiGraph), \"Expecting the graph to be directed\"\n self.__graph = graph\n self.__attributeName = attributeName\n\n def __getitem__(self, key):\n return self.__graph.nodes[key][self.__attributeName]\n\n\nclass HypothesesGraph:\n \"\"\"\n Replacement for pgmlink's hypotheses graph,\n with a similar API so it can be used as drop-in replacement.\n\n Internally it uses [networkx](http://networkx.github.io/) to construct the graph.\n\n Use the insertEnergies() method to populate the nodes and arcs with the energies for different\n configurations (according to DPCT's JSON style'), derived from given probability generation functions.\n\n **Notes:** `self._graph.node`'s are indexed by tuples (int(timestep), int(id)), and contain either a\n single `'traxel'` attribute, or a list of traxels in `'tracklet'`.\n Nodes also get a unique ID assigned once they are added to the graph.\n \"\"\"\n\n def __init__(self):\n self._graph = nx.DiGraph()\n self.withTracklets = False\n self.allowLengthOneTracks = True\n self._nextNodeUuid = 0\n self.progressVisitor = DefaultProgressVisitor()\n\n def nodeIterator(self):\n return self._graph.nodes()\n\n def arcIterator(self):\n return self._graph.edges()\n\n def countNodes(self):\n return self._graph.number_of_nodes()\n\n def countArcs(self):\n return self._graph.number_of_edges()\n\n def hasNode(self, node):\n return self._graph.has_node(node)\n\n def hasEdge(self, u, v):\n return self._graph.has_edge(u, v)\n\n @staticmethod\n def source(edge):\n return edge[0]\n\n @staticmethod\n def target(edge):\n return edge[1]\n\n def _findNearestNeighbors(\n self, kdtreeObjectPair, traxel, numNeighbors, maxNeighborDist\n ):\n \"\"\"\n Return a list of object IDs which are the 'numNeighbors' closest elements \n in the kdtree less than maxNeighborDist away of the traxel.\n \"\"\"\n kdtree, objectIdList = kdtreeObjectPair\n if len(objectIdList) <= numNeighbors:\n return objectIdList\n distances, neighbors = kdtree.query(\n [self._extractCenter(traxel)], k=numNeighbors, return_distance=True\n )\n return [\n objectIdList[index]\n for distance, index in zip(distances[0], neighbors[0])\n if distance < maxNeighborDist\n ]\n\n def _extractCenter(self, traxel):\n try:\n # python probabilityGenerator\n if \"com\" in traxel.Features:\n return traxel.Features[\"com\"]\n else:\n return traxel.Features[\"RegionCenter\"]\n except:\n # C++ 
pgmlink probabilityGenerator\n try:\n return getTraxelFeatureVector(traxel, \"com\")\n except:\n try:\n return getTraxelFeatureVector(traxel, \"RegionCenter\")\n except:\n raise ValueError(\n \"given traxel (t={},id={}) does not have \"\n '\"com\" or \"RegionCenter\"'.format(traxel.Timestep, traxel.Id)\n )\n\n def _traxelMightDivide(self, traxel, divisionThreshold):\n assert \"divProb\" in traxel.Features\n return traxel.Features[\"divProb\"][0] > divisionThreshold\n\n def _buildFrameKdTree(self, traxelDict):\n \"\"\"\n Collect the centers of all traxels and their ids of this frame's traxels.\n Then build a kdtree and return (kdtree, listOfObjectIdsInFrame), where the second argument\n is needed to decode the object id of the nearest neighbors in _findNearestNeighbors().\n \"\"\"\n objectIdList = []\n features = []\n for obj, traxel in traxelDict.items():\n if obj == 0:\n continue\n objectIdList.append(obj)\n features.append(list(self._extractCenter(traxel)))\n\n return (KDTree(features, metric=\"euclidean\"), objectIdList)\n\n def _addNodesForFrame(self, frame, traxelDict):\n \"\"\"\n Insert nodes for all objects in this frame, with the attribute \"traxel\"\n \"\"\"\n for obj, traxel in traxelDict.items():\n if obj == 0:\n continue\n self._graph.add_node((frame, obj), traxel=traxel, id=self._nextNodeUuid)\n self._nextNodeUuid += 1\n\n def addNodeFromTraxel(self, traxel, **kwargs):\n \"\"\"\n Insert a single node specified by a traxel.\n All keyword arguments are passed to the node as well.\n \"\"\"\n assert traxel is not None\n assert not self.withTracklets\n self._graph.add_node(\n (traxel.Timestep, traxel.Id), traxel=traxel, id=self._nextNodeUuid, **kwargs\n )\n self._nextNodeUuid += 1\n\n def buildFromProbabilityGenerator(\n self,\n probabilityGenerator,\n maxNeighborDist=200,\n numNearestNeighbors=1,\n forwardBackwardCheck=True,\n withDivisions=True,\n divisionThreshold=0.1,\n skipLinks=1,\n ):\n \"\"\"\n Takes a python probabilityGenerator containing traxel features and finds probable links between frames.\n Builds a kdTree with the 'numNearestneighbors' for each frame and adds the nodes. In the same iteration, it adds\n a number of 'skipLinks' between the nodes separated by 'skipLinks' frames.\n \"\"\"\n assert probabilityGenerator is not None\n assert len(probabilityGenerator.TraxelsPerFrame) > 0\n assert skipLinks > 0\n\n def checkNodeWhileAddingLinks(frame, obj):\n if (frame, obj) not in self._graph:\n logger.warning(\n \"Adding node ({}, {}) when setting up links\".format(frame, obj)\n )\n\n kdTreeFrames = [None] * (skipLinks + 1)\n # len(probabilityGenerator.TraxelsPerFrame.keys()) is NOT an indicator for the total number of frames,\n # because an empty frame does not create a key in the dictionary. E.g. for one frame in the middle of the\n # dataset, we won't access the last one.\n # Idea: take the max key in the dict. 
Remember, frame numbering starts with 0.\n frameMax = max(probabilityGenerator.TraxelsPerFrame.keys())\n frameMin = min(probabilityGenerator.TraxelsPerFrame.keys())\n numFrames = frameMax - frameMin + 1\n\n self.progressVisitor.showState(\"Probability Generator\")\n\n countFrames = 0\n for frame in range(numFrames):\n countFrames += 1\n self.progressVisitor.showProgress(countFrames / float(numFrames))\n if frame > 0:\n del kdTreeFrames[0] # this is the current frame\n if (\n frame + skipLinks < numFrames\n and frameMin + frame + skipLinks\n in probabilityGenerator.TraxelsPerFrame.keys()\n ):\n kdTreeFrames.append(\n self._buildFrameKdTree(\n probabilityGenerator.TraxelsPerFrame[\n frameMin + frame + skipLinks\n ]\n )\n )\n self._addNodesForFrame(\n frameMin + frame + skipLinks,\n probabilityGenerator.TraxelsPerFrame[\n frameMin + frame + skipLinks\n ],\n )\n else:\n for i in range(0, skipLinks + 1):\n if (\n frameMin + frame + i\n in probabilityGenerator.TraxelsPerFrame.keys()\n ): # empty frame\n kdTreeFrames[i] = self._buildFrameKdTree(\n probabilityGenerator.TraxelsPerFrame[frameMin + frame + i]\n )\n self._addNodesForFrame(\n frameMin + frame + i,\n probabilityGenerator.TraxelsPerFrame[frameMin + frame + i],\n )\n\n # find forward links\n if (\n frameMin + frame in probabilityGenerator.TraxelsPerFrame.keys()\n ): # 'frame' could be empty\n for obj, traxel in probabilityGenerator.TraxelsPerFrame[\n frameMin + frame\n ].items():\n divisionPreservingNumNearestNeighbors = numNearestNeighbors\n if (\n divisionPreservingNumNearestNeighbors < 2\n and withDivisions\n and self._traxelMightDivide(traxel, divisionThreshold)\n ):\n divisionPreservingNumNearestNeighbors = 2\n for i in range(1, skipLinks + 1):\n if (\n frame + i < numFrames\n and frameMin + frame + i\n in probabilityGenerator.TraxelsPerFrame.keys()\n ):\n neighbors = self._findNearestNeighbors(\n kdTreeFrames[i],\n traxel,\n divisionPreservingNumNearestNeighbors,\n maxNeighborDist,\n )\n # type(neighbors) is list\n for n in neighbors:\n edge_start = (frameMin + frame, obj)\n edge_end = (frameMin + frame + i, n)\n checkNodeWhileAddingLinks(*edge_start)\n checkNodeWhileAddingLinks(*edge_end)\n self._graph.add_edge(edge_start, edge_end)\n self._graph.edges[edge_start, edge_end][\n \"src\"\n ] = self._graph.nodes[edge_start][\"id\"]\n self._graph.edges[edge_start, edge_end][\n \"dest\"\n ] = self._graph.nodes[edge_end][\"id\"]\n\n # find backward links\n if forwardBackwardCheck:\n for i in range(1, skipLinks + 1):\n if frame + i < numFrames:\n if (\n frameMin + frame + i\n in probabilityGenerator.TraxelsPerFrame.keys()\n ): # empty frame\n for obj, traxel in probabilityGenerator.TraxelsPerFrame[\n frameMin + frame + i\n ].items():\n if kdTreeFrames[0] is not None:\n neighbors = self._findNearestNeighbors(\n kdTreeFrames[0],\n traxel,\n numNearestNeighbors,\n maxNeighborDist,\n )\n for n in neighbors:\n edge_start = (frameMin + frame, n)\n edge_end = (frameMin + frame + i, obj)\n checkNodeWhileAddingLinks(*edge_start)\n checkNodeWhileAddingLinks(*edge_end)\n self._graph.add_edge(edge_start, edge_end)\n self._graph.edges[edge_start, edge_end][\n \"src\"\n ] = self._graph.nodes[edge_start][\"id\"]\n self._graph.edges[edge_start, edge_end][\n \"dest\"\n ] = self._graph.nodes[edge_end][\"id\"]\n\n def generateTrackletGraph(self):\n \"\"\"\n **Return** a new hypotheses graph where chains of detections with only one possible \n incoming/outgoing transition are contracted into one node in the graph.\n The returned graph will have 
`withTracklets` set to `True`!\n\n The `'tracklet'` node map contains a list of traxels that each node represents.\n \"\"\"\n logger.info(\"generating tracklet graph...\")\n tracklet_graph = copy.copy(self)\n tracklet_graph._graph = tracklet_graph._graph.copy()\n tracklet_graph.withTracklets = True\n tracklet_graph.referenceTraxelGraph = self\n tracklet_graph.progressVisitor = self.progressVisitor\n\n self.progressVisitor.showState(\"Initializing Tracklet Graph\")\n # initialize tracklet map to contain a list of only one traxel per node\n countNodes = 0\n numNodes = tracklet_graph.countNodes()\n for node in tracklet_graph._graph.nodes():\n countNodes += 1\n self.progressVisitor.showProgress(countNodes / float(numNodes))\n tracklet_graph._graph.nodes[node][\"tracklet\"] = [\n tracklet_graph._graph.nodes[node][\"traxel\"]\n ]\n del tracklet_graph._graph.nodes[node][\"traxel\"]\n\n # set up a list of links that indicates whether the target's in- and source's out-degree\n # are one, meaning the edge can be contracted\n links_to_be_contracted = []\n node_remapping = {}\n self.progressVisitor.showState(\"Finding Tracklets in Graph\")\n countEdges = 0\n numEdges = tracklet_graph.countArcs()\n for edge in tracklet_graph._graph.edges():\n countEdges += 1\n self.progressVisitor.showProgress(countEdges / float(numEdges))\n if (\n tracklet_graph._graph.out_degree(edge[0]) == 1\n and tracklet_graph._graph.in_degree(edge[1]) == 1\n ):\n links_to_be_contracted.append(edge)\n for i in [0, 1]:\n node_remapping[edge[i]] = edge[i]\n\n # apply edge contraction\n self.progressVisitor.showState(\"Contracting Edges in Tracklet Graph\")\n countLinks = 0\n numLinks = len(links_to_be_contracted)\n for edge in links_to_be_contracted:\n countLinks += 1\n self.progressVisitor.showProgress(countLinks / float(numLinks))\n src = node_remapping[edge[0]]\n dest = node_remapping[edge[1]]\n if (\n tracklet_graph._graph.in_degree(src) == 0\n and tracklet_graph._graph.out_degree(dest) == 0\n ):\n # if this tracklet would contract to a single node without incoming or outgoing edges,\n # then do NOT contract, as our tracking cannot handle length-one-tracks\n continue\n\n tracklet_graph._graph.nodes[src][\"tracklet\"].extend(\n tracklet_graph._graph.nodes[dest][\"tracklet\"]\n )\n # duplicate out arcs with new source\n for out_edge in tracklet_graph._graph.out_edges(dest):\n tracklet_graph._graph.add_edge(src, out_edge[1])\n # adjust node remapping to point to new source for all contracted traxels\n for t in tracklet_graph._graph.nodes[dest][\"tracklet\"]:\n node_remapping[(t.Timestep, t.Id)] = src\n tracklet_graph._graph.remove_node(dest)\n\n logger.info(\n \"tracklet graph has {} nodes and {} edges (before {},{})\".format(\n tracklet_graph.countNodes(),\n tracklet_graph.countArcs(),\n self.countNodes(),\n self.countArcs(),\n )\n )\n\n return tracklet_graph\n\n def getNodeTraxelMap(self):\n return NodeMap(self._graph, \"traxel\")\n\n def getNodeTrackletMap(self):\n return NodeMap(self._graph, \"tracklet\")\n\n def insertEnergies(\n self,\n maxNumObjects,\n detectionProbabilityFunc,\n transitionProbabilityFunc,\n boundaryCostMultiplierFunc,\n divisionProbabilityFunc,\n skipLinksBias,\n ):\n \"\"\"\n Insert energies for detections, divisions and links into the hypotheses graph, \n by transforming the probabilities for certain\n events (given by the `*ProbabilityFunc`-functions per traxel) into energies. 
If the given graph\n contained tracklets (`self.withTracklets is True`), then also the probabilities over all contained traxels will be\n accumulated for those nodes in the graph.\n\n The energies are stored in the networkx graph under the following attribute names (to match the format for solvers):\n * detection energies: `self._graph.nodes[n]['features']`\n * division energies: `self._graph.nodes[n]['divisionFeatures']`\n * appearance energies: `self._graph.nodes[n]['appearanceFeatures']`\n * disappearance energies: `self._graph.nodes[n]['disappearanceFeatures']`\n * transition energies: `self._graph.edges[src][dest]['features']`\n * additionally we also store the timestep (range for traxels) per node as `timestep` attribute\n\n ** Parameters: **\n\n * `maxNumObjects`: the max number of objects per detections\n * `detectionProbabilityFunc`: should take a traxel and return its detection probabilities\n ([prob0objects, prob1object,...])\n * `transitionProbabilityFunc`: should take two traxels and return this link's probabilities\n ([prob0objectsInTransition, prob1objectsInTransition,...])\n * `boundaryCostMultiplierFunc`: should take a traxel and a boolean that is true if we are seeking for an appearance cost multiplier, \n false for disappearance, and return a scalar multiplier between 0 and 1 for the\n appearance/disappearance cost that depends on the traxel's distance to the spacial and time boundary\n * `divisionProbabilityFunc`: should take a traxel and return its division probabilities ([probNoDiv, probDiv])\n \"\"\"\n numElements = self._graph.number_of_nodes() + self._graph.number_of_edges()\n self.progressVisitor.showState(\"Inserting energies\")\n\n # insert detection probabilities for all detections (and some also get a div probability)\n countElements = 0\n for n in self._graph.nodes():\n countElements += 1\n if not self.withTracklets:\n # only one traxel, but make it a list so everything below works the same\n traxels = [self._graph.nodes[n][\"traxel\"]]\n else:\n traxels = self._graph.nodes[n][\"tracklet\"]\n\n # accumulate features over all contained traxels\n previousTraxel = None\n detectionFeatures = np.zeros(maxNumObjects + 1)\n for t in traxels:\n detectionFeatures += np.array(negLog(detectionProbabilityFunc(t)))\n if previousTraxel is not None:\n detectionFeatures += np.array(\n negLog(transitionProbabilityFunc(previousTraxel, t))\n )\n previousTraxel = t\n\n detectionFeatures = listify(list(detectionFeatures))\n\n # division only if probability is big enough\n divisionFeatures = divisionProbabilityFunc(traxels[-1])\n if divisionFeatures is not None:\n divisionFeatures = listify(negLog(divisionFeatures))\n\n # appearance/disappearance\n appearanceFeatures = listify(\n [0.0] + [boundaryCostMultiplierFunc(traxels[0], True)] * maxNumObjects\n )\n disappearanceFeatures = listify(\n [0.0] + [boundaryCostMultiplierFunc(traxels[-1], False)] * maxNumObjects\n )\n\n self._graph.nodes[n][\"features\"] = detectionFeatures\n if divisionFeatures is not None:\n self._graph.nodes[n][\"divisionFeatures\"] = divisionFeatures\n self._graph.nodes[n][\"appearanceFeatures\"] = appearanceFeatures\n self._graph.nodes[n][\"disappearanceFeatures\"] = disappearanceFeatures\n self._graph.nodes[n][\"timestep\"] = [\n traxels[0].Timestep,\n traxels[-1].Timestep,\n ]\n\n self.progressVisitor.showProgress(countElements / float(numElements))\n\n # insert transition probabilities for all links\n for a in self._graph.edges():\n countElements += 1\n self.progressVisitor.showProgress(countElements / 
float(numElements))\n\n if not self.withTracklets:\n srcTraxel = self._graph.nodes[self.source(a)][\"traxel\"]\n destTraxel = self._graph.nodes[self.target(a)][\"traxel\"]\n else:\n srcTraxel = self._graph.nodes[self.source(a)][\"tracklet\"][\n -1\n ] # src is last of the traxels in source tracklet\n destTraxel = self._graph.nodes[self.target(a)][\"tracklet\"][\n 0\n ] # dest is first of traxels in destination tracklet\n\n features = listify(negLog(transitionProbabilityFunc(srcTraxel, destTraxel)))\n\n # add feature for additional Frames. Since we do not want these edges to be primarily taken, we add a bias to the edge. Now: hard coded, future: parameter\n frame_gap = destTraxel.Timestep - srcTraxel.Timestep\n\n # 1. method\n if frame_gap > 1:\n features[1][0] = features[1][0] + skipLinksBias * frame_gap\n\n # # 2. method\n # # introduce a new energies like: [[6], [15]] -> [[6, 23], [15, 23]] for first links and\n # # [[6], [15]] -> [[23, 6], [23, 15]] for second links, and so on for 3rd order links\n # # !!! this will introduce a new weight in the weight.json file. For the 2nd link, comes in 2nd row and so on.\n # # drawback: did not manage to adjust parameter to get sensible results.\n # for feat in features:\n # for i in range(frame_gap):\n # feat.append(23)\n # if frame_gap > 1:\n # feat[frame_gap-1], feat[0] = feat[0], feat[frame_gap-1]\n\n self._graph.edges[a[0], a[1]][\"src\"] = self._graph.nodes[a[0]][\"id\"]\n self._graph.edges[a[0], a[1]][\"dest\"] = self._graph.nodes[a[1]][\"id\"]\n self._graph.edges[a[0], a[1]][\"features\"] = features\n\n def getMappingsBetweenUUIDsAndTraxels(self):\n \"\"\"\n Extract the mapping from UUID to traxel and vice versa from the networkx graph.\n\n ** Returns: a tuple of **\n\n * `traxelIdPerTimestepToUniqueIdMap`: a dictionary of the structure `{str(timestep):{str(labelimageId):int(uuid), \n str(labelimageId):int(uuid), ...}, str(nextTimestep):{}, ...}`\n * `uuidToTraxelMap`: a dictionary with keys = int(uuid), values = list(of timestep-Id-tuples (int(Timestep), int(Id)))\n \"\"\"\n\n uuidToTraxelMap = {}\n traxelIdPerTimestepToUniqueIdMap = {}\n\n for n in self._graph.nodes():\n uuid = self._graph.nodes[n][\"id\"]\n traxels = []\n if self.withTracklets:\n traxels = self._graph.nodes[n][\"tracklet\"]\n else:\n traxels = [self._graph.nodes[n][\"traxel\"]]\n uuidToTraxelMap[uuid] = [(t.Timestep, t.Id) for t in traxels]\n\n for t in uuidToTraxelMap[uuid]:\n traxelIdPerTimestepToUniqueIdMap.setdefault(str(t[0]), {})[\n str(t[1])\n ] = uuid\n\n # sort the list of traxels per UUID by their timesteps\n for v in uuidToTraxelMap.values():\n v.sort(key=lambda timestepIdTuple: timestepIdTuple[0])\n\n return traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap\n\n def toTrackingGraph(self, noFeatures=False):\n \"\"\"\n Create a dictionary representation of this graph which can be passed to the solvers directly.\n The resulting graph (=model) is wrapped within a `hytra.jsongraph.JsonTrackingGraph` structure for convenience.\n If `noFeatures` is `True`, then only the structure of the graph will be exported.\n \"\"\"\n requiredNodeAttribs = [\"id\"]\n requiredLinkAttribs = [\"src\", \"dest\"]\n\n if not noFeatures:\n requiredNodeAttribs.append(\"features\")\n requiredLinkAttribs.append(\"features\")\n\n def translateNodeToDict(n):\n result = {}\n attrs = self._graph.nodes[n]\n for k in [\n \"id\",\n \"features\",\n \"appearanceFeatures\",\n \"disappearanceFeatures\",\n \"divisionFeatures\",\n \"timestep\",\n ]:\n if k in attrs:\n result[k] = attrs[k]\n elif k in 
requiredNodeAttribs:\n raise ValueError(\n \"Cannot use graph nodes without assigned ID and features, run insertEnergies() first\"\n )\n return result\n\n def translateLinkToDict(l):\n result = {}\n attrs = self._graph.edges[l[0], l[1]]\n for k in [\"src\", \"dest\", \"features\"]:\n if k in attrs:\n result[k] = attrs[k]\n elif k in requiredLinkAttribs:\n raise ValueError(\n \"Cannot use graph links without source, target, and features, run insertEnergies() first\"\n )\n return result\n\n traxelIdPerTimestepToUniqueIdMap, _ = self.getMappingsBetweenUUIDsAndTraxels()\n model = {\n \"segmentationHypotheses\": [\n translateNodeToDict(n) for n in self._graph.nodes()\n ],\n \"linkingHypotheses\": [translateLinkToDict(e) for e in self._graph.edges()],\n \"divisionHypotheses\": [],\n \"traxelToUniqueId\": traxelIdPerTimestepToUniqueIdMap,\n \"settings\": {\n \"statesShareWeights\": True,\n \"allowPartialMergerAppearance\": False,\n \"requireSeparateChildrenOfDivision\": True,\n \"optimizerEpGap\": 0.01,\n \"optimizerVerbose\": True,\n \"optimizerNumThreads\": 1,\n },\n }\n\n # extract exclusion sets:\n exclusions = set([])\n for n in self._graph.nodes():\n if self.withTracklets:\n traxel = self._graph.nodes[n][\"tracklet\"][0]\n else:\n traxel = self._graph.nodes[n][\"traxel\"]\n\n if traxel.conflictingTraxelIds is not None:\n if self.withTracklets:\n logger.error(\n \"Exclusion constraints do not work with tracklets yet!\"\n )\n\n conflictingIds = [\n traxelIdPerTimestepToUniqueIdMap[str(traxel.Timestep)][str(i)]\n for i in traxel.conflictingTraxelIds\n ]\n myId = traxelIdPerTimestepToUniqueIdMap[str(traxel.Timestep)][\n str(traxel.Id)\n ]\n for ci in conflictingIds:\n # insert pairwise exclusion constraints only, and always put the lower id first\n if ci < myId:\n exclusions.add((ci, myId))\n else:\n exclusions.add((myId, ci))\n\n model[\"exclusions\"] = [list(t) for t in exclusions]\n\n # TODO: this recomputes the uuidToTraxelMap even though we have it already...\n trackingGraph = hytra.core.jsongraph.JsonTrackingGraph(\n model=model, progressVisitor=self.progressVisitor\n )\n return trackingGraph\n\n def insertSolution(self, resultDictionary):\n \"\"\"\n Add solution values to nodes and arcs from dictionary representation of solution.\n The resulting graph (=model) gets an additional property \"value\" that represents the number of objects inside a detection/arc\n Additionally a division indicator is saved in the node property \"divisionValue\".\n The link also gets a new attribute: the gap that is covered. E.g. 
1, if consecutive timeframes, 2 if link skipping one timeframe.\n \"\"\"\n assert isinstance(self._graph, nx.DiGraph), \"Expecting the graph to be directed\"\n _, uuidToTraxelMap = self.getMappingsBetweenUUIDsAndTraxels()\n\n if self.withTracklets:\n traxelgraph = self.referenceTraxelGraph\n else:\n traxelgraph = self\n\n # reset all values\n for n in traxelgraph._graph.nodes():\n traxelgraph._graph.nodes[n][\"value\"] = 0\n traxelgraph._graph.nodes[n][\"divisionValue\"] = False\n\n for e in traxelgraph._graph.edges():\n traxelgraph._graph.edges[e[0], e[1]][\"value\"] = 0\n\n # store values from dict\n for detection in resultDictionary[\"detectionResults\"]:\n traxels = uuidToTraxelMap[detection[\"id\"]]\n for traxel in traxels:\n traxelgraph._graph.nodes[traxel][\"value\"] = detection[\"value\"]\n for internal_edge in zip(traxels, traxels[1:]):\n traxelgraph._graph.edges[internal_edge[0], internal_edge[1]][\n \"value\"\n ] = detection[\"value\"]\n\n if (\n \"linkingResults\" in resultDictionary\n and resultDictionary[\"linkingResults\"] is not None\n ):\n for link in resultDictionary[\"linkingResults\"]:\n source, dest = (\n uuidToTraxelMap[link[\"src\"]][-1],\n uuidToTraxelMap[link[\"dest\"]][0],\n )\n if (source in traxelgraph._graph.predecessors(dest)) and (\n dest in traxelgraph._graph.neighbors(source)\n ):\n traxelgraph._graph.edges[source, dest][\"value\"] = link[\"value\"]\n traxelgraph._graph.edges[source, dest][\"gap\"] = dest[0] - source[0]\n\n if (\n \"divisionResults\" in resultDictionary\n and resultDictionary[\"divisionResults\"] is not None\n ):\n for division in resultDictionary[\"divisionResults\"]:\n traxelgraph._graph.nodes[uuidToTraxelMap[division[\"id\"]][-1]][\n \"divisionValue\"\n ] = division[\"value\"]\n\n def getSolutionDictionary(self):\n \"\"\"\n Return the solution encoded in the `value` and `divisionValue` attributes of nodes and edges\n as a python dictionary in the style that can be saved to JSON or sent to our solvers as ground truths.\n \"\"\"\n resultDictionary = {}\n\n if self.withTracklets:\n traxelgraph = self.referenceTraxelGraph\n else:\n traxelgraph = self\n\n detectionList = []\n divisionList = []\n linkList = []\n\n def checkAttributeValue(element, attribName, default):\n if attribName in element:\n return element[attribName]\n else:\n return default\n\n for n in traxelgraph._graph.nodes():\n newDetection = {}\n newDetection[\"id\"] = traxelgraph._graph.nodes[n][\"id\"]\n newDetection[\"value\"] = checkAttributeValue(\n traxelgraph._graph.nodes[n], \"value\", 0\n )\n detectionList.append(newDetection)\n if \"divisionValue\" in traxelgraph._graph.nodes[n]:\n newDivsion = {}\n newDivsion[\"id\"] = traxelgraph._graph.nodes[n][\"id\"]\n newDivsion[\"value\"] = checkAttributeValue(\n traxelgraph._graph.nodes[n], \"divisionValue\", False\n )\n divisionList.append(newDivsion)\n\n for a in traxelgraph.arcIterator():\n newLink = {}\n src = self.source(a)\n dest = self.target(a)\n newLink[\"src\"] = traxelgraph._graph.nodes[src][\"id\"]\n newLink[\"dest\"] = traxelgraph._graph.nodes[dest][\"id\"]\n newLink[\"value\"] = checkAttributeValue(\n traxelgraph._graph.edges[src, dest], \"value\", 0\n )\n newLink[\"gap\"] = checkAttributeValue(\n traxelgraph._graph.edges[src, dest], \"gap\", 1\n )\n\n linkList.append(newLink)\n\n resultDictionary[\"detectionResults\"] = detectionList\n resultDictionary[\"linkingResults\"] = linkList\n resultDictionary[\"divisionResults\"] = divisionList\n\n return resultDictionary\n\n def countIncomingObjects(self, node):\n 
\"\"\"\n Once a solution was written to the graph, this returns the number of\n incoming objects of a node, and the number of active incoming edges.\n If the latter is greater than 1, this shows that we have a merger.\n \"\"\"\n numberOfIncomingObject = 0\n numberOfIncomingEdges = 0\n for in_edge in self._graph.in_edges(node):\n if \"value\" in self._graph.edges[in_edge[0], node]:\n numberOfIncomingObject += self._graph.edges[in_edge[0], node][\"value\"]\n numberOfIncomingEdges += 1\n return numberOfIncomingObject, numberOfIncomingEdges\n\n def countOutgoingObjects(self, node):\n \"\"\"\n Once a solution was written to the graph, this returns the number of\n outgoing objects of a node, and the number of active outgoing edges.\n If the latter is greater than 1, this shows that we have a merger splitting up, or a division.\n \"\"\"\n numberOfOutgoingObject = 0\n numberOfOutgoingEdges = 0\n for out_edge in self._graph.out_edges(node):\n if (\n \"value\" in self._graph.edges[node, out_edge[1]]\n and self._graph.edges[node, out_edge[1]][\"value\"] > 0\n ):\n numberOfOutgoingObject += self._graph.edges[node, out_edge[1]][\"value\"]\n numberOfOutgoingEdges += 1\n return numberOfOutgoingObject, numberOfOutgoingEdges\n\n def computeLineage(self, firstTrackId=2, firstLineageId=2, skipLinks=1):\n \"\"\"\n computes lineage and track id for every node in the graph\n \"\"\"\n\n update_queue = []\n # start lineages / tracks at 2, because 0 means background=black, 1 means misdetection in ilastik\n max_lineage_id = firstLineageId\n max_track_id = firstTrackId\n\n if self.withTracklets:\n traxelgraph = self.referenceTraxelGraph\n else:\n traxelgraph = self\n\n self.progressVisitor.showState(\"Compute lineage\")\n\n # find start of lineages\n numElements = 2 * traxelgraph.countNodes()\n countElements = 0\n for n in traxelgraph.nodeIterator():\n countElements += 1\n self.progressVisitor.showProgress(countElements / float(numElements))\n\n if (\n traxelgraph.countIncomingObjects(n)[0] == 0\n and \"value\" in traxelgraph._graph.nodes[n]\n and traxelgraph._graph.nodes[n][\"value\"] > 0\n and (\n self.allowLengthOneTracks\n or traxelgraph.countOutgoingObjects(n)[0] > 0\n )\n ):\n # found start of a track\n update_queue.append((n, max_lineage_id, max_track_id))\n max_lineage_id += 1\n max_track_id += 1\n else:\n traxelgraph._graph.nodes[n][\"lineageId\"] = None\n traxelgraph._graph.nodes[n][\"trackId\"] = None\n\n while len(update_queue) > 0:\n countElements += 1\n current_node, lineage_id, track_id = update_queue.pop()\n self.progressVisitor.showProgress(countElements / float(numElements))\n\n # if we did not run merger resolving, it can happen that we reach a node several times,\n # and would propagate the new lineage+track IDs to all descendants again! 
We simply\n # stop propagating in that case and just use the lineageID that reached the node first.\n if (\n traxelgraph._graph.nodes[current_node].get(\"lineageId\", None)\n is not None\n and traxelgraph._graph.nodes[current_node].get(\"trackId\", None)\n is not None\n ):\n logger.debug(\"Several tracks are merging here, stopping a later one\")\n continue\n\n # set a new trackID\n traxelgraph._graph.nodes[current_node][\"lineageId\"] = lineage_id\n traxelgraph._graph.nodes[current_node][\"trackId\"] = track_id\n\n numberOfOutgoingObject, numberOfOutgoingEdges = traxelgraph.countOutgoingObjects(\n current_node\n )\n\n if numberOfOutgoingObject != numberOfOutgoingEdges:\n logger.warning(\n \"running lineage computation on unresolved graphs depends on a race condition\"\n )\n\n if (\n \"divisionValue\" in traxelgraph._graph.nodes[current_node]\n and traxelgraph._graph.nodes[current_node][\"divisionValue\"]\n ):\n assert traxelgraph.countOutgoingObjects(current_node)[1] == 2\n traxelgraph._graph.nodes[current_node][\"children\"] = []\n for a in traxelgraph._graph.out_edges(current_node):\n\n if (\n \"value\" in traxelgraph._graph.edges[current_node, a[1]]\n and traxelgraph._graph.edges[current_node, a[1]][\"value\"] > 0\n ):\n traxelgraph._graph.nodes[a[1]][\"gap\"] = skipLinks\n traxelgraph._graph.nodes[current_node][\"children\"].append(a[1])\n traxelgraph._graph.nodes[a[1]][\"parent\"] = current_node\n update_queue.append(\n (traxelgraph.target(a), lineage_id, max_track_id)\n )\n max_track_id += 1\n else:\n if traxelgraph.countOutgoingObjects(current_node)[1] > 1:\n logger.debug(\n \"Found merger splitting into several objects, propagating lineage and track to all descendants!\"\n )\n\n for a in traxelgraph._graph.out_edges(current_node):\n if (\n \"value\" in traxelgraph._graph.edges[current_node, a[1]]\n and traxelgraph._graph.edges[current_node, a[1]][\"value\"] > 0\n ):\n if (\n \"gap\" in traxelgraph._graph.edges[current_node, a[1]]\n and traxelgraph._graph.edges[current_node, a[1]][\"gap\"] == 1\n ) or \"gap\" not in traxelgraph._graph.edges[current_node, a[1]]:\n traxelgraph._graph.nodes[a[1]][\"gap\"] = 1\n update_queue.append(\n (traxelgraph.target(a), lineage_id, track_id)\n )\n if (\n \"gap\" in traxelgraph._graph.edges[current_node, a[1]]\n and traxelgraph._graph.edges[current_node, a[1]][\"gap\"] > 1\n ):\n traxelgraph._graph.nodes[a[1]][\"gap\"] = skipLinks\n traxelgraph._graph.nodes[a[1]][\"gap_parent\"] = current_node\n update_queue.append(\n (traxelgraph.target(a), lineage_id, max_track_id)\n )\n max_track_id += 1\n\n def pruneGraphToSolution(self, distanceToSolution=0):\n \"\"\"\n creates a new pruned HypothesesGraph that around the result. 
Assumes that value==0 corresponds\n to unlabeled parts of the graph.\n distanceToSolution determines how many negative examples are included\n distanceToSolution = 0: only include negative edges that connect used objects\n distanceToSolution = 1: additionally include edges that connect used objects with unlabeled objects\n \"\"\"\n prunedGraph = HypothesesGraph()\n for n in self.nodeIterator():\n if \"value\" in self._graph.nodes[n] and self._graph.nodes[n][\"value\"] > 0:\n prunedGraph._graph.add_node(n, **self._graph.nodes[n])\n\n for e in self.arcIterator():\n src = self.source(e)\n dest = self.target(e)\n if distanceToSolution == 0:\n if src in prunedGraph._graph and dest in prunedGraph._graph:\n prunedGraph._graph.add_edge(\n src, dest, **self._graph.edges[src, dest]\n )\n\n # TODO: can be optimized by looping over the pruned graph nodes(might sacrifice readability)\n for distance in range(1, distanceToSolution + 1):\n for e in self.arcIterator():\n src = self.source(e)\n dest = self.target(e)\n if src in prunedGraph._graph or dest in prunedGraph._graph:\n prunedGraph._graph.add_node(src, **self._graph.nodes[src])\n prunedGraph._graph.add_node(dest, **self._graph.nodes[dest])\n prunedGraph._graph.add_edge(\n src, dest, **self._graph.edges[src, dest]\n )\n\n # in case a node is NOT an appearance and\n # has all the incoming edges with value 0, we remove all these incoming edges\n #\n # in case a node is NOT a disappearance and\n # has all the outgoing edges with value 0, we remove all these outgoing edges\n withAppearanceFeatures = True\n withDisappearanceFeatures = True\n withFeatures = True\n correctAppearanceFeatureLength = True\n correctDisappearanceFeatureLength = True\n correctFeatureLength = True\n maxNumObjects = None\n maxNumObjectsAppearance = None\n maxNumObjectsDisappearance = None\n for n in self.nodeIterator():\n try:\n maxNumObjectsApp = len(self._graph.nodes[n][\"appearanceFeatures\"]) - 1\n if maxNumObjectsAppearance is None:\n maxNumObjectsAppearance = maxNumObjectsApp\n elif not maxNumObjectsApp == maxNumObjectsAppearance:\n correctAppearanceFeatureLength = False\n logger.info(\n \"Appearance/disappearance features have different lengths!\"\n )\n except:\n withAppearanceFeatures = False\n logger.info(\"There are no appearance features in node properties!\")\n break\n\n try:\n maxNumObjectsDis = (\n len(self._graph.nodes[n][\"disappearanceFeatures\"]) - 1\n )\n if maxNumObjectsDisappearance is None:\n maxNumObjectsDisappearance = maxNumObjectsDis\n elif not maxNumObjectsDis == maxNumObjectsDisappearance:\n correctDisappearanceFeatureLength = False\n logger.info(\"Disappearance features have different lengths!\")\n except:\n withDisappearanceFeatures = False\n logger.info(\"There are no disappearance features in node properties!\")\n break\n\n if withAppearanceFeatures and withDisappearanceFeatures:\n if (\n correctAppearanceFeatureLength\n and correctDisappearanceFeatureLength\n and maxNumObjectsAppearance == maxNumObjectsDisappearance\n ):\n maxNumObjects = maxNumObjectsAppearance\n else:\n correctFeatureLength = False\n logger.info(\n \"Appearance and disappearance features have different lengths!\"\n )\n else:\n withFeatures = False\n\n if withFeatures and correctFeatureLength:\n for n in self.nodeIterator():\n if not (\n \"appearance\" in self._graph.nodes[n].keys()\n and self._graph.nodes[n][\"appearance\"]\n ):\n allArcsWithValueZero = True\n in_edges = self._graph.in_edges(n)\n for edge in list(in_edges):\n if (\n \"value\" in 
self._graph.edges[edge[0]][edge[1]].keys()\n and not self._graph.edges[edge[0]][edge[1]][\"value\"] == 0\n ):\n allArcsWithValueZero = False\n break\n\n self._graph.nodes[n][\"appearanceFeatures\"] = listify(\n [0.0] + [0.0] * maxNumObjects\n )\n if allArcsWithValueZero:\n if not in_edges == []:\n self._graph.remove_edges_from(in_edges)\n\n if not (\n \"disappearance\" in self._graph.nodes[n].keys()\n and self._graph.nodes[n][\"disappearance\"]\n ):\n allArcsWithValueZero = True\n out_edges = self._graph.out_edges(n)\n for edge in list(out_edges):\n if (\n \"value\" in self._graph.edges[edge[0]][edge[1]].keys()\n and not self._graph.edges[edge[0]][edge[1]][\"value\"] == 0\n ):\n allArcsWithValueZero = False\n break\n\n self._graph.nodes[n][\"disappearanceFeatures\"] = listify(\n [0.0] + [0.0] * maxNumObjects\n )\n if allArcsWithValueZero:\n if not out_edges == []:\n self._graph.remove_edges_from(out_edges)\n\n return prunedGraph\n\n def _getNodeAttribute(self, timestep, objectId, attribute):\n \"\"\"\n return some attribute of a certain node specified by timestep and objectId\n \"\"\"\n try:\n return self._graph.nodes[(int(timestep), int(objectId))][attribute]\n except KeyError:\n logger.error(\n attribute\n + \" not found in graph node properties, call computeLineage() first!\"\n )\n raise\n\n def getLineageId(self, timestep, objectId):\n \"\"\"\n return the lineage Id of a certain node specified by timestep and objectId\n \"\"\"\n if self.withTracklets:\n traxelgraph = self.referenceTraxelGraph\n else:\n traxelgraph = self\n return traxelgraph._getNodeAttribute(timestep, objectId, \"lineageId\")\n\n def getTrackId(self, timestep, objectId):\n \"\"\"\n return the track Id of a certain node specified by timestep and objectId\n \"\"\"\n if self.withTracklets:\n traxelgraph = self.referenceTraxelGraph\n else:\n traxelgraph = self\n return traxelgraph._getNodeAttribute(timestep, objectId, \"trackId\")\n"
] | [
[
"numpy.unique",
"numpy.array",
"numpy.linalg.norm",
"numpy.ones"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.ylabel"
],
[
"numpy.dot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.hold",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
],
[
"sklearn.mixture.GaussianMixture",
"numpy.array",
"numpy.where"
],
[
"numpy.expand_dims",
"numpy.zeros"
],
[
"sklearn.neighbors.KDTree",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
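The code column of the row above ends with HypothesesGraph routines that push lineage and track IDs through a tracking solution and then prune the graph around it. Below is a minimal, self-contained sketch of that propagation idea on a plain networkx DiGraph; the "value > 0 means active edge" convention, the stop-on-revisit rule, and the division handling are taken from the snippet above, while the graph shape, node names, and start node are hypothetical.

from collections import deque

import networkx as nx


def propagate_ids(graph, start_node, lineage_id, first_track_id):
    """Sketch of lineage/track propagation: follow edges with value > 0,
    keep the lineage ID, and start fresh track IDs after a division."""
    next_track_id = first_track_id + 1
    queue = deque([(start_node, first_track_id)])
    while queue:
        node, track_id = queue.popleft()
        if "lineageId" in graph.nodes[node]:
            continue  # another track reached this node first; stop propagating
        graph.nodes[node]["lineageId"] = lineage_id
        graph.nodes[node]["trackId"] = track_id
        children = [v for _, v, d in graph.out_edges(node, data=True)
                    if d.get("value", 0) > 0]
        if len(children) >= 2:          # division: one new track per child
            for child in children:
                queue.append((child, next_track_id))
                next_track_id += 1
        elif children:                  # plain link: the same track continues
            queue.append((children[0], track_id))
    return next_track_id


g = nx.DiGraph()
g.add_edge("a", "b", value=1)
g.add_edge("b", "c", value=1)   # "b" divides into "c" and "d"
g.add_edge("b", "d", value=1)
g.add_edge("b", "e", value=0)   # inactive edge, not part of the solution
propagate_ids(g, "a", lineage_id=1, first_track_id=1)
print({n: (d["lineageId"], d["trackId"])
       for n, d in g.nodes(data=True) if "lineageId" in d})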
mrsempress/transfer_learning | [
"217622ca3052b6c79a07792e627394c08440ec84"
] | [
"codes/JDA/JDA.py"
] | [
"# encoding=utf-8\n\"\"\"\n Created on 9:38 2019/07/16\n @author: Chenxi Huang\n It implements \"Transfer Feature Learning with Joint Distribution Adaptation\"\n Refer to Long Mingsheng's(the writer) code in Matlab\n\"\"\"\nimport numpy as np\nimport os\nimport scipy.io\nimport scipy.linalg\nimport sklearn.metrics\nfrom sklearn import preprocessing\nfrom sklearn.neighbors import KNeighborsClassifier\nimport Network\nimport Log\n\n\ndef work(source, target, gpu, _k=100, _lambd=1.0, _ker='primal', _gamma=1.0):\n # set log information\n log = Log.Log()\n log.set_dir('JDA', source, target)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu\n # domains = ['caltech.mat', 'amazon.mat', 'webcam.mat', 'dslr.mat'] # databases: Office-31\n srcStr = ['Caltech10', 'Caltech10', 'Caltech10', 'amazon', 'amazon', 'amazon', 'webcam', 'webcam', 'webcam', 'dslr',\n 'dslr', 'dslr']\n tgtStr = ['amazon', 'webcam', 'dslr', 'Caltech10', 'webcam', 'dslr', 'Caltech10', 'amazon', 'dslr', 'Caltech10',\n 'amazon', 'webcam']\n # result = []\n # for i in range(12):\n # src, tar = '../data/JDA/' + srcStr[i] + '_SURF_L10.mat', '../data/JDA/' + tgtStr[i] + '_SURF_L10.mat'\n src, tar = 'data/JDA/' + source + '_SURF_L10.mat', 'data/JDA/' + target + '_SURF_L10.mat'\n print(\"src is \" + src + \", tar is \" + tar)\n # load algorithm options\n src_domain, tar_domain = scipy.io.loadmat(src), scipy.io.loadmat(tar)\n # print(src_domain['fts'])\n # print(np.size(src_domain['fts'], 0)) # 1123\n # print(np.size(src_domain['fts'], 1)) # 800\n # print(src_domain['fts'].sum(0))\n # print(np.size(src_domain['fts'].sum(0), 0)) # 800\n # print(len(src_domain['fts'])) # 1123\n Xs = src_domain['fts'] / np.tile(src_domain['fts'].sum(0), 1)\n scale1 = preprocessing.minmax_scale(Xs, feature_range=(0, 1), axis=0, copy=True)\n # print(src_domain['labels'])\n Ys = src_domain['labels']\n\n Xt = tar_domain['fts'] / np.tile(tar_domain['fts'].sum(0), 1)\n scale2 = preprocessing.minmax_scale(Xs, feature_range=(0, 1), axis=0, copy=True)\n Yt = tar_domain['labels']\n\n # 1NN evaluation\n clf = KNeighborsClassifier(n_neighbors=1)\n clf.fit(Xs, Ys.ravel())\n Y_pred = clf.predict(Xt)\n acc = sklearn.metrics.accuracy_score(Yt, Y_pred)\n print('NN = ', acc)\n\n # JDA evaluation\n # because in office-31 all are objects, so lambda = 1\n k, lambd, ker, gamma = _k, _lambd, _ker, _gamma # 'primal' | 'linear' | 'rbf'\n T = 10\n Cls = []\n Acc = []\n for t in range(T):\n print('==============================Iteration [' + str(t) + ']==============================')\n jda = Network.JDA_LMS(kernel_type=ker, dim=30, lamb=lambd, gamma=gamma)\n Z, A = jda.fit_predict(Xs, Ys, Xt, Yt)\n Z /= np.linalg.norm(Z, axis=0)\n Xs_new, Xt_new = Z[:, :len(Xs)].T, Z[:, len(Xs):].T\n\n clf = KNeighborsClassifier(n_neighbors=1)\n clf.fit(Xs_new, Ys.ravel())\n Y_pred = clf.predict(Xt_new)\n acc = sklearn.metrics.accuracy_score(Yt, Y_pred)\n Acc.append(acc)\n print('JDA iteration [{}/{}]: Acc: {:.4f}'.format(t + 1, T, acc))\n # add log\n log.add_log(t, '*', '*', acc)\n # result.append(Acc[-1])\n\n # save log\n log.save_log()\n\n\nif __name__ == '__main__':\n work('amazon', 'webcam', '3')\n"
] | [
[
"sklearn.preprocessing.minmax_scale",
"numpy.linalg.norm",
"sklearn.neighbors.KNeighborsClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
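JDA.py in the row above first measures a 1-nearest-neighbour baseline from source to target before running the iterative JDA refinement. Below is a minimal sketch of that baseline with synthetic features standing in for the SURF .mat files; the data shapes, the random features, and the exact preprocessing order are assumptions, and the Network.JDA_LMS step is omitted.

import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import minmax_scale

rng = np.random.default_rng(0)
Xs = rng.random((100, 20))                       # stand-in source features
Ys = rng.integers(0, 3, size=100)                # source labels
Xt = Xs + rng.normal(scale=0.05, size=Xs.shape)  # shifted target domain
Yt = Ys.copy()

# Column-normalise and rescale to [0, 1], roughly as the script prepares the data.
Xs = minmax_scale(Xs / Xs.sum(0), feature_range=(0, 1), axis=0)
Xt = minmax_scale(Xt / Xt.sum(0), feature_range=(0, 1), axis=0)

clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs, Ys)
print("NN accuracy:", accuracy_score(Yt, clf.predict(Xt)))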
zhengjian2322/soln-ml | [
"ceff6c432921df9809f9552aaa1b0bd5a1b7e8d2",
"ceff6c432921df9809f9552aaa1b0bd5a1b7e8d2",
"ceff6c432921df9809f9552aaa1b0bd5a1b7e8d2",
"ceff6c432921df9809f9552aaa1b0bd5a1b7e8d2"
] | [
"solnml/components/models/object_detection/nn_utils/retinanet.py",
"test/exp_scripts/exp_sys_small.py",
"examples/ci_examples/example_meta.py",
"solnml/components/feature_engineering/transformations/generator/random_trees_embedding.py"
] | [
"\"\"\"\nRetinaNet code borrowed from\nhttps://github.com/yhenon/pytorch-retinanet/blob/master/retinanet/model.py\n\"\"\"\n\nimport torch.nn as nn\nimport torch\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom torchvision.ops import nms\nfrom .retinanet_utils import BasicBlock, Bottleneck, BBoxTransform, ClipBoxes, Anchors, FocalLoss\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\nclass PyramidFeatures(nn.Module):\n def __init__(self, C3_size, C4_size, C5_size, feature_size=256):\n super(PyramidFeatures, self).__init__()\n\n # upsample C5 to get P5 from the FPN paper\n self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')\n self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n # add P5 elementwise to C4\n self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')\n self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n # add P4 elementwise to C3\n self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)\n self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n\n # \"P6 is obtained via a 3x3 stride-2 conv on C5\"\n self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)\n\n # \"P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6\"\n self.P7_1 = nn.ReLU()\n self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)\n\n def forward(self, inputs):\n C3, C4, C5 = inputs\n\n P5_x = self.P5_1(C5)\n P5_upsampled_x = self.P5_upsampled(P5_x)\n P5_x = self.P5_2(P5_x)\n\n P4_x = self.P4_1(C4)\n P4_x = P5_upsampled_x + P4_x\n P4_upsampled_x = self.P4_upsampled(P4_x)\n P4_x = self.P4_2(P4_x)\n\n P3_x = self.P3_1(C3)\n P3_x = P3_x + P4_upsampled_x\n P3_x = self.P3_2(P3_x)\n\n P6_x = self.P6(C5)\n\n P7_x = self.P7_1(P6_x)\n P7_x = self.P7_2(P7_x)\n\n return [P3_x, P4_x, P5_x, P6_x, P7_x]\n\n\nclass RegressionModel(nn.Module):\n def __init__(self, num_features_in, num_anchors=9, feature_size=256):\n super(RegressionModel, self).__init__()\n\n self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)\n self.act1 = nn.ReLU()\n\n self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act2 = nn.ReLU()\n\n self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act3 = nn.ReLU()\n\n self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act4 = nn.ReLU()\n\n self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=3, padding=1)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.act1(out)\n\n out = self.conv2(out)\n out = self.act2(out)\n\n out = self.conv3(out)\n out = self.act3(out)\n\n out = self.conv4(out)\n out = self.act4(out)\n\n out = self.output(out)\n\n # out is B x C x W x H, with C = 4*num_anchors\n out = out.permute(0, 2, 3, 1)\n\n return out.contiguous().view(out.shape[0], -1, 4)\n\n\nclass ClassificationModel(nn.Module):\n def __init__(self, 
num_features_in, num_anchors=9, num_classes=80, prior=0.01, feature_size=256):\n super(ClassificationModel, self).__init__()\n\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n\n self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)\n self.act1 = nn.ReLU()\n\n self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act2 = nn.ReLU()\n\n self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act3 = nn.ReLU()\n\n self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)\n self.act4 = nn.ReLU()\n\n self.output = nn.Conv2d(feature_size, num_anchors * num_classes, kernel_size=3, padding=1)\n self.output_act = nn.Sigmoid()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.act1(out)\n\n out = self.conv2(out)\n out = self.act2(out)\n\n out = self.conv3(out)\n out = self.act3(out)\n\n out = self.conv4(out)\n out = self.act4(out)\n\n out = self.output(out)\n out = self.output_act(out)\n\n # out is B x C x W x H, with C = n_classes + n_anchors\n out1 = out.permute(0, 2, 3, 1)\n\n batch_size, width, height, channels = out1.shape\n\n out2 = out1.view(batch_size, width, height, self.num_anchors, self.num_classes)\n\n return out2.contiguous().view(x.shape[0], -1, self.num_classes)\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, num_classes, block, layers):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n if block == BasicBlock:\n fpn_sizes = [self.layer2[layers[1] - 1].conv2.out_channels, self.layer3[layers[2] - 1].conv2.out_channels,\n self.layer4[layers[3] - 1].conv2.out_channels]\n elif block == Bottleneck:\n fpn_sizes = [self.layer2[layers[1] - 1].conv3.out_channels, self.layer3[layers[2] - 1].conv3.out_channels,\n self.layer4[layers[3] - 1].conv3.out_channels]\n else:\n raise ValueError(f\"Block type {block} not understood\")\n\n self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2])\n\n self.regressionModel = RegressionModel(256)\n self.classificationModel = ClassificationModel(256, num_classes=num_classes)\n\n self.anchors = Anchors()\n\n self.regressBoxes = BBoxTransform()\n\n self.clipBoxes = ClipBoxes()\n\n self.focalLoss = FocalLoss()\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n prior = 0.01\n\n self.classificationModel.output.weight.data.fill_(0)\n self.classificationModel.output.bias.data.fill_(-math.log((1.0 - prior) / prior))\n\n self.regressionModel.output.weight.data.fill_(0)\n self.regressionModel.output.bias.data.fill_(0)\n\n self.freeze_bn()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = [block(self.inplanes, planes, stride, downsample)]\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def freeze_bn(self):\n '''Freeze BatchNorm layers.'''\n for layer in self.modules():\n if isinstance(layer, nn.BatchNorm2d):\n layer.eval()\n\n def forward(self, inputs):\n\n if self.training:\n img_batch, annotations = inputs\n else:\n img_batch = inputs\n\n x = self.conv1(img_batch)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x1 = self.layer1(x)\n x2 = self.layer2(x1)\n x3 = self.layer3(x2)\n x4 = self.layer4(x3)\n\n features = self.fpn([x2, x3, x4])\n\n regression = torch.cat([self.regressionModel(feature) for feature in features], dim=1)\n\n classification = torch.cat([self.classificationModel(feature) for feature in features], dim=1)\n\n anchors = self.anchors(img_batch)\n\n if self.training:\n return self.focalLoss(classification, regression, anchors, annotations)\n else:\n transformed_anchors = self.regressBoxes(anchors, regression)\n transformed_anchors = self.clipBoxes(transformed_anchors, img_batch)\n\n scores = torch.max(classification, dim=2, keepdim=True)[0]\n\n scores_over_thresh = (scores > 0.05)[0, :, 0]\n\n if scores_over_thresh.sum() == 0:\n # no boxes to NMS, just return\n return [torch.zeros(0), torch.zeros(0), torch.zeros(0, 4)]\n\n classification = classification[:, scores_over_thresh, :]\n transformed_anchors = transformed_anchors[:, scores_over_thresh, :]\n scores = scores[:, scores_over_thresh, :]\n\n anchors_nms_idx = nms(transformed_anchors[0, :, :], scores[0, :, 0], 0.5)\n\n nms_scores, nms_class = classification[0, anchors_nms_idx, :].max(dim=1)\n\n return [nms_scores, nms_class, transformed_anchors[0, anchors_nms_idx, :]]\n\n\ndef resnet18(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model\n\n\ndef resnet34(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model\n\n\ndef resnet50(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n 
model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)\n return model\n\n\ndef resnet101(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False)\n return model\n\n\ndef resnet152(num_classes, pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False)\n return model\n",
"\"\"\"\n This script is used to compare the strategies/algorithms in the FE-HPO selection\n problem and Bayesian optimization based solution (Auto-scikitlearn)\n\"\"\"\nimport os\nimport sys\nimport shutil\nimport time\nimport pickle\nimport argparse\nimport tabulate\nimport numpy as np\nfrom sklearn.metrics import balanced_accuracy_score, mean_squared_error\n\nsys.path.append(os.getcwd())\nimport autosklearn.classification\nimport autosklearn.regression\n\nfrom solnml.datasets.utils import load_train_test_data\nfrom solnml.components.utils.constants import CATEGORICAL, MULTICLASS_CLS, REGRESSION\n\nparser = argparse.ArgumentParser()\ndataset_set = 'diabetes,spectf,credit,ionosphere,lymphography,pc4,vehicle,yeast,' \\\n 'messidor_features,winequality_red,winequality_white,splice,spambase,amazon_employee'\nparser.add_argument('--datasets', type=str, default=dataset_set)\nparser.add_argument('--task_type', type=str, default='cls', choices=['cls', 'rgs'])\nparser.add_argument('--mode', type=str, default='alter_hpo')\nparser.add_argument('--cv', type=str, choices=['cv', 'holdout', 'partial'], default='holdout')\nparser.add_argument('--ens', type=str, default='None')\nparser.add_argument('--enable_meta', type=str, default='false', choices=['true', 'false'])\nparser.add_argument('--time_cost', type=int, default=600)\nparser.add_argument('--start_id', type=int, default=0)\nparser.add_argument('--rep_num', type=int, default=5)\n# choices=['rb', 'alter_hpo', 'fixed', 'plot', 'all', 'ausk', 'combined']\nproject_dir = './'\nsave_folder = project_dir + 'data/exp_sys/'\nif not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n\ndef evaluate_sys(run_id, task_type, mth, dataset, ens_method, enable_meta,\n eval_type='holdout', time_limit=1200, seed=1):\n _task_type = MULTICLASS_CLS if task_type == 'cls' else REGRESSION\n train_data, test_data = load_train_test_data(dataset, task_type=_task_type)\n _enable_meta = True if enable_meta == 'true' else False\n if task_type == 'cls':\n from solnml.estimators import Classifier\n estimator = Classifier(time_limit=time_limit,\n per_run_time_limit=300,\n output_dir=save_folder,\n ensemble_method=ens_method,\n enable_meta_algorithm_selection=_enable_meta,\n evaluation=eval_type,\n metric='bal_acc',\n include_algorithms=['liblinear_svc', 'random_forest', 'adaboost'],\n include_preprocessors=['extra_trees_based_selector',\n 'generic_univariate_selector',\n 'liblinear_based_selector',\n 'percentile_selector'],\n n_jobs=1)\n else:\n from solnml.estimators import Regressor\n estimator = Regressor(time_limit=time_limit,\n per_run_time_limit=300,\n output_dir=save_folder,\n ensemble_method=ens_method,\n enable_meta_algorithm_selection=_enable_meta,\n evaluation=eval_type,\n metric='mse',\n include_algorithms=['liblinear_svr', 'random_forest', 'adaboost'],\n include_preprocessors=['extra_trees_based_selector_regression',\n 'generic_univariate_selector',\n 'liblinear_based_selector',\n 'percentile_selector_regression'],\n n_jobs=1)\n\n start_time = time.time()\n estimator.fit(train_data, opt_strategy=mth, dataset_id=dataset)\n pred = estimator.predict(test_data)\n if task_type == 'cls':\n test_score = balanced_accuracy_score(test_data.data[1], pred)\n else:\n test_score = mean_squared_error(test_data.data[1], pred)\n validation_score = estimator._ml_engine.solver.incumbent_perf\n eval_dict = estimator._ml_engine.solver.get_eval_dict()\n print('Run ID : %d' % run_id)\n print('Dataset : %s' % dataset)\n print('Val/Test score : %f - %f' % (validation_score, 
test_score))\n\n save_path = save_folder + 'small_%s_%s_%s_%s_%d_%d_%d.pkl' % (\n task_type, mth, dataset, enable_meta, time_limit, (ens_method is None), run_id)\n with open(save_path, 'wb') as f:\n pickle.dump([dataset, validation_score, test_score, start_time, eval_dict], f)\n\n # Delete output dir\n shutil.rmtree(os.path.join(estimator.get_output_dir()))\n\n\ndef evaluate_ausk(run_id, task_type, mth, dataset, ens_method, enable_meta,\n eval_type='holdout', time_limit=1200, seed=1):\n tmp_dir = 'data/exp_sys/ausk_tmp_%s_%s_%s_%d_%d' % (task_type, mth, dataset, time_limit, run_id)\n output_dir = 'data/exp_sys/ausk_output_%s_%s_%s_%d_%d' % (task_type, mth, dataset, time_limit, run_id)\n initial_configs = 25 if enable_meta == 'true' else 0\n if os.path.exists(tmp_dir):\n try:\n shutil.rmtree(tmp_dir)\n shutil.rmtree(output_dir)\n except:\n pass\n\n if task_type == 'cls':\n automl = autosklearn.classification.AutoSklearnClassifier(\n time_left_for_this_task=int(time_limit),\n per_run_time_limit=300,\n n_jobs=1,\n include_estimators=['liblinear_svc', 'random_forest', 'adaboost'],\n include_preprocessors=['extra_trees_preproc_for_classification',\n 'liblinear_svc_preprocessor',\n 'select_percentile_classification',\n 'select_rates'],\n ensemble_memory_limit=16384,\n ml_memory_limit=16384,\n ensemble_size=1 if ens_method is None else 50,\n initial_configurations_via_metalearning=initial_configs,\n tmp_folder=tmp_dir,\n output_folder=output_dir,\n delete_tmp_folder_after_terminate=False,\n delete_output_folder_after_terminate=False,\n seed=int(seed),\n resampling_strategy='holdout',\n resampling_strategy_arguments={'train_size': 0.67}\n )\n else:\n automl = autosklearn.regression.AutoSklearnRegressor(\n time_left_for_this_task=int(time_limit),\n per_run_time_limit=300,\n n_jobs=1,\n include_estimators=['liblinear_svr', 'random_forest', 'adaboost'],\n include_preprocessors=['extra_trees_preproc_for_regression',\n 'select_percentile_regression',\n 'select_rates'],\n ensemble_memory_limit=16384,\n ml_memory_limit=16384,\n ensemble_size=1 if ens_method is None else 50,\n initial_configurations_via_metalearning=initial_configs,\n tmp_folder=tmp_dir,\n output_folder=output_dir,\n delete_tmp_folder_after_terminate=False,\n delete_output_folder_after_terminate=False,\n seed=int(seed),\n resampling_strategy='holdout',\n resampling_strategy_arguments={'train_size': 0.67}\n )\n\n print(automl)\n _task_type = MULTICLASS_CLS if task_type == 'cls' else REGRESSION\n train_data, test_data = load_train_test_data(dataset, task_type=_task_type)\n X, y = train_data.data\n X_test, y_test = test_data.data\n feat_type = ['Categorical' if _type == CATEGORICAL else 'Numerical'\n for _type in train_data.feature_types]\n from autosklearn.metrics import make_scorer\n if task_type == 'cls':\n scorer = make_scorer(name='balanced_accuracy', score_func=balanced_accuracy_score)\n score_func = balanced_accuracy_score\n else:\n scorer = make_scorer(name='mean_squared_error', score_func=mean_squared_error, greater_is_better=False)\n score_func = mean_squared_error\n start_time = time.time()\n automl.fit(X.copy(), y.copy(), feat_type=feat_type,\n metric=scorer)\n valid_results = automl.cv_results_['mean_test_score']\n if task_type == 'cls':\n validation_score = np.max(valid_results)\n else:\n valid_results = [ele - valid_results[-1] for ele in valid_results[:-1]]\n validation_score = np.min(valid_results)\n # automl.refit(X.copy(), y.copy())\n predictions = automl.predict(X_test)\n test_score = score_func(y_test, predictions)\n 
model_desc = automl.show_models()\n str_stats = automl.sprint_statistics()\n result_score = automl.cv_results_['mean_test_score']\n result_time = automl.cv_results_['mean_fit_time']\n\n print('=' * 10)\n # print(model_desc)\n print(str_stats)\n print('=' * 10)\n\n print('Validation score', validation_score)\n print('Test score', test_score)\n # print(automl.show_models())\n save_path = save_folder + 'small_%s_%s_%s_%s_%d_%d_%d.pkl' % (\n task_type, mth, dataset, enable_meta, time_limit, (ens_method is None), run_id)\n with open(save_path, 'wb') as f:\n pickle.dump([dataset, validation_score, test_score, start_time, result_score, result_time], f)\n\n shutil.rmtree(output_dir)\n shutil.rmtree(os.path.join(tmp_dir, '.auto-sklearn'))\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n dataset_str = args.datasets\n time_cost = args.time_cost\n mode = args.mode\n task_type = args.task_type\n ens_method = args.ens\n if ens_method == 'None':\n ens_method = None\n cv = args.cv\n np.random.seed(1)\n rep = args.rep_num\n start_id = args.start_id\n enable_meta = args.enable_meta\n seeds = np.random.randint(low=1, high=10000, size=start_id + rep)\n dataset_list = dataset_str.split(',')\n\n if not mode.startswith('plot'):\n if mode == 'all':\n methods = ['rb', 'fixed', 'alter_hpo']\n else:\n methods = [mode]\n\n for dataset in dataset_list:\n\n for method in methods:\n for _id in range(start_id, start_id + rep):\n seed = seeds[_id]\n print('Running %s with %d-th seed' % (dataset, _id + 1))\n if method in ['rb', 'fixed', 'alter_hpo', 'combined', 'rb_hpo']:\n evaluate_sys(_id, task_type, method, dataset, ens_method, enable_meta,\n eval_type=cv, time_limit=time_cost, seed=seed)\n elif method in ['ausk']:\n evaluate_ausk(_id, task_type, method, dataset, ens_method, enable_meta,\n eval_type=cv, time_limit=time_cost, seed=seed)\n else:\n raise ValueError('Invalid mode: %s!' 
% method)\n\n else:\n headers = ['dataset']\n # method_ids = ['fixed', 'alter_hpo', 'rb', 'ausk']\n method_ids = mode.split(',')[1:]\n if len(method_ids) == 0:\n method_ids = ['alter_hpo', 'combined', 'ausk', 'tpot']\n for mth in method_ids:\n headers.extend(['val-%s' % mth, 'test-%s' % mth])\n tbl_data = list()\n for dataset in dataset_list:\n row_data = [dataset]\n for mth in method_ids:\n results = list()\n for run_id in range(rep):\n if mth == 'tpot':\n _ens_method = None\n else:\n _ens_method = ens_method\n file_path = save_folder + 'small_%s_%s_%s_%s_%d_%d_%d.pkl' % (\n task_type, mth, dataset, enable_meta, time_cost, (_ens_method is None), run_id)\n if not os.path.exists(file_path):\n continue\n with open(file_path, 'rb') as f:\n data = pickle.load(f)\n if mth == 'ausk' and task_type == 'rgs':\n test_acc = data[2]\n val_acc = min([ele - 2 for ele in data[4] if ele != 2])\n elif task_type == 'rgs':\n val_acc, test_acc = -data[1], data[2]\n if isinstance(test_acc, list):\n test_acc = test_acc[-1]\n else:\n val_acc, test_acc = data[1], data[2]\n if isinstance(test_acc, list):\n test_acc = test_acc[-1]\n results.append([val_acc, test_acc])\n print(mth, results)\n if len(results) == rep:\n results = np.array(results)\n # print(mth, results)\n stats_ = zip(np.mean(results, axis=0), np.std(results, axis=0))\n string = ''\n for mean_t, std_t in stats_:\n string += u'%.3f\\u00B1%.3f |' % (mean_t, std_t)\n print(dataset, mth, '=' * 30)\n print('%s-%s: mean\\u00B1std' % (dataset, mth), string)\n print('%s-%s: median' % (dataset, mth), np.median(results, axis=0))\n\n for idx in range(results.shape[1]):\n vals = results[:, idx]\n median = np.median(vals)\n if median == 0.:\n row_data.append('-')\n else:\n row_data.append(u'%.4f' % median)\n else:\n row_data.extend(['-'] * 2)\n\n tbl_data.append(row_data)\n print(tabulate.tabulate(tbl_data, headers, tablefmt='github'))\n",
"import os\nimport shutil\nfrom sklearn.datasets import load_iris, load_boston\nfrom sklearn.metrics import accuracy_score, mean_squared_error\nfrom sklearn.model_selection import train_test_split\n\nfrom solnml.utils.data_manager import DataManager\nfrom solnml.estimators import Classifier, Regressor\n\n\ndef test_cls():\n save_dir = './data/eval_exps/soln-ml'\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n time_limit = 60\n print('==> Start to evaluate with Budget %d' % time_limit)\n ensemble_method = 'ensemble_selection'\n eval_type = 'holdout'\n\n iris = load_iris()\n X, y = iris.data, iris.target\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1, stratify=y)\n dm = DataManager(X_train, y_train)\n train_data = dm.get_data_node(X_train, y_train)\n test_data = dm.get_data_node(X_test, y_test)\n\n clf = Classifier(time_limit=time_limit,\n output_dir=save_dir,\n ensemble_method=ensemble_method,\n enable_meta_algorithm_selection=True,\n ensemble_size=10,\n evaluation=eval_type,\n metric='bal_acc')\n clf.fit(train_data)\n print(clf.summary())\n\n pred = clf.predict(test_data)\n print(accuracy_score(test_data.data[1], pred))\n\n shutil.rmtree(save_dir)\n\n\ndef test_rgs():\n time_limit = 60\n print('==> Start to evaluate with Budget %d' % time_limit)\n ensemble_method = 'ensemble_selection'\n eval_type = 'holdout'\n\n boston = load_boston()\n X, y = boston.data, boston.target\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)\n dm = DataManager(X_train, y_train)\n train_data = dm.get_data_node(X_train, y_train)\n test_data = dm.get_data_node(X_test, y_test)\n\n save_dir = './data/eval_exps/soln-ml'\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n rgs = Regressor(metric='mse',\n ensemble_method=ensemble_method,\n enable_meta_algorithm_selection=True,\n evaluation=eval_type,\n time_limit=time_limit,\n output_dir=save_dir)\n\n rgs.fit(train_data)\n print(rgs.summary())\n\n pred = rgs.predict(test_data)\n print(mean_squared_error(test_data.data[1], pred))\n\n shutil.rmtree(save_dir)\n\n\nif __name__ == '__main__':\n test_cls()\n test_rgs()\n",
"from ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import UniformIntegerHyperparameter, \\\n UnParametrizedHyperparameter, Constant, CategoricalHyperparameter\nfrom solnml.components.feature_engineering.transformations.base_transformer import *\nfrom solnml.components.utils.configspace_utils import check_none, check_for_bool\n\n\nclass RandomTreesEmbeddingTransformation(Transformer):\n type = 18\n\n def __init__(self, n_estimators=10, max_depth=5, min_samples_split=2,\n min_samples_leaf=1, min_weight_fraction_leaf=1.0, max_leaf_nodes='None',\n sparse_output=True, bootstrap='False', n_jobs=-1, random_state=1):\n super().__init__(\"random_trees_embedding\")\n self.input_type = [NUMERICAL, DISCRETE, CATEGORICAL]\n self.compound_mode = 'only_new'\n self.output_type = CATEGORICAL\n\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.max_leaf_nodes = max_leaf_nodes\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.bootstrap = bootstrap\n self.sparse_output = sparse_output\n self.n_jobs = n_jobs\n self.random_state = random_state\n\n @ease_trans\n def operate(self, input_datanode: DataNode, target_fields=None):\n from sklearn.ensemble import RandomTreesEmbedding\n\n X, y = input_datanode.data\n if target_fields is None:\n target_fields = collect_fields(input_datanode.feature_types, self.input_type)\n X_new = X[:, target_fields]\n if not self.model:\n self.n_estimators = int(self.n_estimators)\n if check_none(self.max_depth):\n self.max_depth = None\n else:\n self.max_depth = int(self.max_depth)\n\n # Skip heavy computation. max depth is set to 6.\n if X.shape[0] > 5000:\n self.max_depth = min(6, self.max_depth)\n\n self.min_samples_split = int(self.min_samples_split)\n self.min_samples_leaf = int(self.min_samples_leaf)\n if check_none(self.max_leaf_nodes):\n self.max_leaf_nodes = None\n else:\n self.max_leaf_nodes = int(self.max_leaf_nodes)\n self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)\n self.bootstrap = check_for_bool(self.bootstrap)\n\n self.model = RandomTreesEmbedding(\n n_estimators=self.n_estimators,\n max_depth=self.max_depth,\n min_samples_split=self.min_samples_split,\n min_samples_leaf=self.min_samples_leaf,\n max_leaf_nodes=self.max_leaf_nodes,\n sparse_output=self.sparse_output,\n n_jobs=self.n_jobs,\n random_state=self.random_state\n )\n\n self.model.fit(X_new)\n\n _X = self.model.transform(X_new).toarray()\n\n return _X\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):\n n_estimators = UniformIntegerHyperparameter(name=\"n_estimators\",\n lower=10, upper=100,\n default_value=10)\n max_depth = UniformIntegerHyperparameter(name=\"max_depth\",\n lower=2, upper=10,\n default_value=5)\n min_samples_split = UniformIntegerHyperparameter(name=\"min_samples_split\",\n lower=2, upper=20,\n default_value=2)\n min_samples_leaf = UniformIntegerHyperparameter(name=\"min_samples_leaf\",\n lower=1, upper=20,\n default_value=1)\n min_weight_fraction_leaf = Constant('min_weight_fraction_leaf', 1.0)\n max_leaf_nodes = UnParametrizedHyperparameter(name=\"max_leaf_nodes\",\n value=\"None\")\n bootstrap = CategoricalHyperparameter('bootstrap', ['True', 'False'])\n cs = ConfigurationSpace()\n cs.add_hyperparameters([n_estimators, max_depth, min_samples_split,\n min_samples_leaf, min_weight_fraction_leaf,\n max_leaf_nodes, bootstrap])\n return cs\n"
] | [
[
"torch.nn.Sequential",
"torch.max",
"torch.zeros",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
],
[
"numpy.random.seed",
"numpy.min",
"sklearn.metrics.balanced_accuracy_score",
"numpy.median",
"sklearn.metrics.mean_squared_error",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.random.randint"
],
[
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"sklearn.datasets.load_boston",
"sklearn.metrics.accuracy_score"
],
[
"sklearn.ensemble.RandomTreesEmbedding"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
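The last file of the row above, random_trees_embedding.py, wraps sklearn's RandomTreesEmbedding as a feature generator that replaces the selected columns with one-hot leaf indicators. A minimal sketch of just that transformation follows; the DataNode plumbing and the hyperparameter search space are omitted, and the input matrix is hypothetical.

import numpy as np
from sklearn.ensemble import RandomTreesEmbedding

X = np.random.RandomState(1).rand(200, 4)   # stand-in numerical features

embedder = RandomTreesEmbedding(
    n_estimators=10,        # defaults mirrored from the transformer above
    max_depth=5,
    min_samples_split=2,
    min_samples_leaf=1,
    sparse_output=True,
    n_jobs=-1,
    random_state=1,
)
embedder.fit(X)
X_new = embedder.transform(X).toarray()     # one binary column per leaf
print(X.shape, "->", X_new.shape)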
cassinius/right-to-forget-data | [
"5aa3a480d93e66065118866f294f06e6cfd5d3a1"
] | [
"src/multi_class/gradient_boosting.py"
] | [
"from sklearn import ensemble\nfrom src.multi_class import input_preproc\nfrom src.multi_class import calculate_metrics\n\n\ndef runClassifier(X_train, X_test, y_train, y_test):\n # GRADIENT BOOSTING\n cls = ensemble.GradientBoostingClassifier(\n n_estimators=100,\n learning_rate=0.1,\n max_depth=5,\n verbose=0\n )\n\n predictions = cls.fit(X_train, y_train).predict(X_test)\n\n # Metrics...\n precision, recall, f1, accuracy = calculate_metrics.calculateMetrics(predictions, y_test)\n print( \"intermediary results (precision | recall | F1 Score | Accuracy):\" )\n print( \"%.6f %.6f %.6f %.6f\" % (precision, recall, f1, accuracy) )\n return precision, recall, f1, accuracy\n\n\nif __name__ == \"__main__\":\n X_train, X_test, y_train, y_test = input_preproc.readIris()\n precision, recall, f1, accuracy = runClassifier(X_train, X_test, y_train, y_test)\n print( \"\\n================================\" )\n print( \"Precision | Recall | F1 Score | Accuracy: \" )\n print( \"%.6f %.6f %.6f %.6f\" % (precision, recall, f1, accuracy) )\n"
] | [
[
"sklearn.ensemble.GradientBoostingClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
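gradient_boosting.py in the row above reports precision, recall, F1 and accuracy through project helpers (input_preproc, calculate_metrics) that are not included in this row. Below is a minimal sketch of the same evaluation using only sklearn; the weighted averaging mode is an assumption, since the helper's implementation is not visible here.

from sklearn import ensemble
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)

cls = ensemble.GradientBoostingClassifier(
    n_estimators=100, learning_rate=0.1, max_depth=5, verbose=0)
predictions = cls.fit(X_train, y_train).predict(X_test)

precision, recall, f1, _ = precision_recall_fscore_support(
    y_test, predictions, average="weighted")   # average mode is an assumption
accuracy = accuracy_score(y_test, predictions)
print("%.6f %.6f %.6f %.6f" % (precision, recall, f1, accuracy))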
zhezherun/pandas | [
"1f02bf240c3d0d3da338af868d056bfc169b28c2",
"36c1104b7ad9761e020f7e8198eb60da4045d169",
"36c1104b7ad9761e020f7e8198eb60da4045d169",
"36c1104b7ad9761e020f7e8198eb60da4045d169"
] | [
"pandas/tests/indexes/datetimes/test_indexing.py",
"pandas/io/parsers.py",
"pandas/core/window.py",
"pandas/tests/scalar/interval/test_interval.py"
] | [
"from datetime import datetime, time, timedelta\n\nimport numpy as np\nimport pytest\nimport pytz\n\nimport pandas.compat as compat\n\nimport pandas as pd\nfrom pandas import DatetimeIndex, Index, Timestamp, date_range, notna\nimport pandas.util.testing as tm\n\nfrom pandas.tseries.offsets import BDay, CDay\n\nSTART, END = datetime(2009, 1, 1), datetime(2010, 1, 1)\n\n\nclass TestGetItem(object):\n def test_getitem(self):\n idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')\n idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',\n tz='Asia/Tokyo', name='idx')\n\n for idx in [idx1, idx2]:\n result = idx[0]\n assert result == Timestamp('2011-01-01', tz=idx.tz)\n\n result = idx[0:5]\n expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[0:10:2]\n expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[-20:-5:3]\n expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[4::-1]\n expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',\n '2011-01-02', '2011-01-01'],\n freq='-1D', tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n def test_dti_business_getitem(self):\n rng = pd.bdate_range(START, END)\n smaller = rng[:5]\n exp = DatetimeIndex(rng.view(np.ndarray)[:5])\n tm.assert_index_equal(smaller, exp)\n\n assert smaller.freq == rng.freq\n\n sliced = rng[::5]\n assert sliced.freq == BDay() * 5\n\n fancy_indexed = rng[[4, 3, 2, 1, 0]]\n assert len(fancy_indexed) == 5\n assert isinstance(fancy_indexed, DatetimeIndex)\n assert fancy_indexed.freq is None\n\n # 32-bit vs. 64-bit platforms\n assert rng[4] == rng[np.int_(4)]\n\n def test_dti_business_getitem_matplotlib_hackaround(self):\n rng = pd.bdate_range(START, END)\n values = rng[:, None]\n expected = rng.values[:, None]\n tm.assert_numpy_array_equal(values, expected)\n\n def test_dti_custom_getitem(self):\n rng = pd.bdate_range(START, END, freq='C')\n smaller = rng[:5]\n exp = DatetimeIndex(rng.view(np.ndarray)[:5])\n tm.assert_index_equal(smaller, exp)\n assert smaller.freq == rng.freq\n\n sliced = rng[::5]\n assert sliced.freq == CDay() * 5\n\n fancy_indexed = rng[[4, 3, 2, 1, 0]]\n assert len(fancy_indexed) == 5\n assert isinstance(fancy_indexed, DatetimeIndex)\n assert fancy_indexed.freq is None\n\n # 32-bit vs. 
64-bit platforms\n assert rng[4] == rng[np.int_(4)]\n\n def test_dti_custom_getitem_matplotlib_hackaround(self):\n rng = pd.bdate_range(START, END, freq='C')\n values = rng[:, None]\n expected = rng.values[:, None]\n tm.assert_numpy_array_equal(values, expected)\n\n\nclass TestWhere(object):\n def test_where_other(self):\n # other is ndarray or Index\n i = pd.date_range('20130101', periods=3, tz='US/Eastern')\n\n for arr in [np.nan, pd.NaT]:\n result = i.where(notna(i), other=np.nan)\n expected = i\n tm.assert_index_equal(result, expected)\n\n i2 = i.copy()\n i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())\n result = i.where(notna(i2), i2)\n tm.assert_index_equal(result, i2)\n\n i2 = i.copy()\n i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())\n result = i.where(notna(i2), i2.values)\n tm.assert_index_equal(result, i2)\n\n def test_where_tz(self):\n i = pd.date_range('20130101', periods=3, tz='US/Eastern')\n result = i.where(notna(i))\n expected = i\n tm.assert_index_equal(result, expected)\n\n i2 = i.copy()\n i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())\n result = i.where(notna(i2))\n expected = i2\n tm.assert_index_equal(result, expected)\n\n\nclass TestTake(object):\n def test_take(self):\n # GH#10295\n idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')\n idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',\n tz='Asia/Tokyo', name='idx')\n\n for idx in [idx1, idx2]:\n result = idx.take([0])\n assert result == Timestamp('2011-01-01', tz=idx.tz)\n\n result = idx.take([0, 1, 2])\n expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([0, 2, 4])\n expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([7, 4, 1])\n expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',\n tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([3, 2, 5])\n expected = DatetimeIndex(['2011-01-04', '2011-01-03',\n '2011-01-06'],\n freq=None, tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq is None\n\n result = idx.take([-3, 2, 5])\n expected = DatetimeIndex(['2011-01-29', '2011-01-03',\n '2011-01-06'],\n freq=None, tz=idx.tz, name='idx')\n tm.assert_index_equal(result, expected)\n assert result.freq is None\n\n def test_take_invalid_kwargs(self):\n idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')\n indices = [1, 6, 5, 9, 10, 13, 15, 3]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n with pytest.raises(TypeError, match=msg):\n idx.take(indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, mode='clip')\n\n # TODO: This method came from test_datetime; de-dup with version above\n @pytest.mark.parametrize('tz', [None, 'US/Eastern', 'Asia/Tokyo'])\n def test_take2(self, tz):\n dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15),\n datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)]\n\n idx = DatetimeIndex(start='2010-01-01 09:00',\n end='2010-02-01 09:00', freq='H', tz=tz,\n name='idx')\n expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz)\n\n taken1 = 
idx.take([5, 6, 8, 12])\n taken2 = idx[[5, 6, 8, 12]]\n\n for taken in [taken1, taken2]:\n tm.assert_index_equal(taken, expected)\n assert isinstance(taken, DatetimeIndex)\n assert taken.freq is None\n assert taken.tz == expected.tz\n assert taken.name == expected.name\n\n def test_take_fill_value(self):\n # GH#12631\n idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],\n name='xxx')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n def test_take_fill_value_with_timezone(self):\n idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],\n name='xxx', tz='US/Eastern')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx', tz='US/Eastern')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],\n name='xxx', tz='US/Eastern')\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx', tz='US/Eastern')\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n\nclass TestDatetimeIndex(object):\n @pytest.mark.parametrize('null', [None, np.nan, pd.NaT])\n @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern'])\n def test_insert_nat(self, tz, null):\n # GH#16537, GH#18295 (test missing)\n idx = pd.DatetimeIndex(['2017-01-01'], tz=tz)\n expected = pd.DatetimeIndex(['NaT', '2017-01-01'], tz=tz)\n res = idx.insert(0, null)\n tm.assert_index_equal(res, expected)\n\n def test_insert(self):\n idx = DatetimeIndex(\n ['2000-01-04', '2000-01-01', '2000-01-02'], name='idx')\n\n result = idx.insert(2, datetime(2000, 1, 5))\n exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',\n '2000-01-02'], name='idx')\n tm.assert_index_equal(result, exp)\n\n # insertion of non-datetime should coerce to object index\n result = idx.insert(1, 'inserted')\n expected = Index([datetime(2000, 1, 4), 'inserted',\n datetime(2000, 1, 1),\n datetime(2000, 1, 2)], name='idx')\n assert not isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n\n 
idx = date_range('1/1/2000', periods=3, freq='M', name='idx')\n\n # preserve freq\n expected_0 = DatetimeIndex(['1999-12-31', '2000-01-31', '2000-02-29',\n '2000-03-31'], name='idx', freq='M')\n expected_3 = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31',\n '2000-04-30'], name='idx', freq='M')\n\n # reset freq to None\n expected_1_nofreq = DatetimeIndex(['2000-01-31', '2000-01-31',\n '2000-02-29',\n '2000-03-31'], name='idx',\n freq=None)\n expected_3_nofreq = DatetimeIndex(['2000-01-31', '2000-02-29',\n '2000-03-31',\n '2000-01-02'], name='idx',\n freq=None)\n\n cases = [(0, datetime(1999, 12, 31), expected_0),\n (-3, datetime(1999, 12, 31), expected_0),\n (3, datetime(2000, 4, 30), expected_3),\n (1, datetime(2000, 1, 31), expected_1_nofreq),\n (3, datetime(2000, 1, 2), expected_3_nofreq)]\n\n for n, d, expected in cases:\n result = idx.insert(n, d)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n # reset freq to None\n result = idx.insert(3, datetime(2000, 1, 2))\n expected = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31',\n '2000-01-02'], name='idx', freq=None)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq is None\n\n # see gh-7299\n idx = date_range('1/1/2000', periods=3, freq='D', tz='Asia/Tokyo',\n name='idx')\n with pytest.raises(ValueError):\n idx.insert(3, pd.Timestamp('2000-01-04'))\n with pytest.raises(ValueError):\n idx.insert(3, datetime(2000, 1, 4))\n with pytest.raises(ValueError):\n idx.insert(3, pd.Timestamp('2000-01-04', tz='US/Eastern'))\n with pytest.raises(ValueError):\n idx.insert(3, datetime(2000, 1, 4,\n tzinfo=pytz.timezone('US/Eastern')))\n\n for tz in ['US/Pacific', 'Asia/Singapore']:\n idx = date_range('1/1/2000 09:00', periods=6, freq='H', tz=tz,\n name='idx')\n # preserve freq\n expected = date_range('1/1/2000 09:00', periods=7, freq='H', tz=tz,\n name='idx')\n for d in [pd.Timestamp('2000-01-01 15:00', tz=tz),\n pytz.timezone(tz).localize(datetime(2000, 1, 1, 15))]:\n\n result = idx.insert(6, d)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n assert result.tz == expected.tz\n\n expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 10:00',\n '2000-01-01 11:00',\n '2000-01-01 12:00', '2000-01-01 13:00',\n '2000-01-01 14:00',\n '2000-01-01 10:00'], name='idx',\n tz=tz, freq=None)\n # reset freq to None\n for d in [pd.Timestamp('2000-01-01 10:00', tz=tz),\n pytz.timezone(tz).localize(datetime(2000, 1, 1, 10))]:\n result = idx.insert(6, d)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.tz == expected.tz\n assert result.freq is None\n\n def test_delete(self):\n idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx')\n\n # prserve freq\n expected_0 = date_range(start='2000-02-01', periods=4, freq='M',\n name='idx')\n expected_4 = date_range(start='2000-01-01', periods=4, freq='M',\n name='idx')\n\n # reset freq to None\n expected_1 = DatetimeIndex(['2000-01-31', '2000-03-31', '2000-04-30',\n '2000-05-31'], freq=None, name='idx')\n\n cases = {0: expected_0,\n -5: expected_0,\n -1: expected_4,\n 4: expected_4,\n 1: expected_1}\n for n, expected in compat.iteritems(cases):\n result = idx.delete(n)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n with pytest.raises((IndexError, ValueError)):\n # either depending on 
numpy version\n idx.delete(5)\n\n for tz in [None, 'Asia/Tokyo', 'US/Pacific']:\n idx = date_range(start='2000-01-01 09:00', periods=10, freq='H',\n name='idx', tz=tz)\n\n expected = date_range(start='2000-01-01 10:00', periods=9,\n freq='H', name='idx', tz=tz)\n result = idx.delete(0)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freqstr == 'H'\n assert result.tz == expected.tz\n\n expected = date_range(start='2000-01-01 09:00', periods=9,\n freq='H', name='idx', tz=tz)\n result = idx.delete(-1)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freqstr == 'H'\n assert result.tz == expected.tz\n\n def test_delete_slice(self):\n idx = date_range(start='2000-01-01', periods=10, freq='D', name='idx')\n\n # prserve freq\n expected_0_2 = date_range(start='2000-01-04', periods=7, freq='D',\n name='idx')\n expected_7_9 = date_range(start='2000-01-01', periods=7, freq='D',\n name='idx')\n\n # reset freq to None\n expected_3_5 = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03',\n '2000-01-07', '2000-01-08', '2000-01-09',\n '2000-01-10'], freq=None, name='idx')\n\n cases = {(0, 1, 2): expected_0_2,\n (7, 8, 9): expected_7_9,\n (3, 4, 5): expected_3_5}\n for n, expected in compat.iteritems(cases):\n result = idx.delete(n)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n result = idx.delete(slice(n[0], n[-1] + 1))\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n for tz in [None, 'Asia/Tokyo', 'US/Pacific']:\n ts = pd.Series(1, index=pd.date_range(\n '2000-01-01 09:00', periods=10, freq='H', name='idx', tz=tz))\n # preserve freq\n result = ts.drop(ts.index[:5]).index\n expected = pd.date_range('2000-01-01 14:00', periods=5, freq='H',\n name='idx', tz=tz)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n assert result.tz == expected.tz\n\n # reset freq to None\n result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index\n expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 11:00',\n '2000-01-01 13:00',\n '2000-01-01 15:00', '2000-01-01 17:00'],\n freq=None, name='idx', tz=tz)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n assert result.tz == expected.tz\n\n def test_get_loc(self):\n idx = pd.date_range('2000-01-01', periods=3)\n\n for method in [None, 'pad', 'backfill', 'nearest']:\n assert idx.get_loc(idx[1], method) == 1\n assert idx.get_loc(idx[1].to_pydatetime(), method) == 1\n assert idx.get_loc(str(idx[1]), method) == 1\n\n if method is not None:\n assert idx.get_loc(idx[1], method,\n tolerance=pd.Timedelta('0 days')) == 1\n\n assert idx.get_loc('2000-01-01', method='nearest') == 0\n assert idx.get_loc('2000-01-01T12', method='nearest') == 1\n\n assert idx.get_loc('2000-01-01T12', method='nearest',\n tolerance='1 day') == 1\n assert idx.get_loc('2000-01-01T12', method='nearest',\n tolerance=pd.Timedelta('1D')) == 1\n assert idx.get_loc('2000-01-01T12', method='nearest',\n tolerance=np.timedelta64(1, 'D')) == 1\n assert idx.get_loc('2000-01-01T12', method='nearest',\n tolerance=timedelta(1)) == 1\n with pytest.raises(ValueError, match='unit abbreviation w/o a number'):\n idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')\n with pytest.raises(KeyError):\n idx.get_loc('2000-01-01T03', method='nearest', 
tolerance='2 hours')\n with pytest.raises(\n ValueError,\n match='tolerance size must match target index size'):\n idx.get_loc('2000-01-01', method='nearest',\n tolerance=[pd.Timedelta('1day').to_timedelta64(),\n pd.Timedelta('1day').to_timedelta64()])\n\n assert idx.get_loc('2000', method='nearest') == slice(0, 3)\n assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)\n\n assert idx.get_loc('1999', method='nearest') == 0\n assert idx.get_loc('2001', method='nearest') == 2\n\n with pytest.raises(KeyError):\n idx.get_loc('1999', method='pad')\n with pytest.raises(KeyError):\n idx.get_loc('2001', method='backfill')\n\n with pytest.raises(KeyError):\n idx.get_loc('foobar')\n with pytest.raises(TypeError):\n idx.get_loc(slice(2))\n\n idx = pd.to_datetime(['2000-01-01', '2000-01-04'])\n assert idx.get_loc('2000-01-02', method='nearest') == 0\n assert idx.get_loc('2000-01-03', method='nearest') == 1\n assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)\n\n # time indexing\n idx = pd.date_range('2000-01-01', periods=24, freq='H')\n tm.assert_numpy_array_equal(idx.get_loc(time(12)),\n np.array([12]), check_dtype=False)\n tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),\n np.array([]), check_dtype=False)\n with pytest.raises(NotImplementedError):\n idx.get_loc(time(12, 30), method='pad')\n\n def test_get_indexer(self):\n idx = pd.date_range('2000-01-01', periods=3)\n exp = np.array([0, 1, 2], dtype=np.intp)\n tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)\n\n target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',\n '1 day 1 hour'])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),\n np.array([-1, 0, 1], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),\n np.array([0, 1, 2], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),\n np.array([0, 1, 1], dtype=np.intp))\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, 'nearest',\n tolerance=pd.Timedelta('1 hour')),\n np.array([0, -1, 1], dtype=np.intp))\n tol_raw = [pd.Timedelta('1 hour'),\n pd.Timedelta('1 hour'),\n pd.Timedelta('1 hour').to_timedelta64(), ]\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, 'nearest',\n tolerance=[np.timedelta64(x) for x in tol_raw]),\n np.array([0, -1, 1], dtype=np.intp))\n tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),\n pd.Timedelta('1 hour').to_timedelta64(),\n 'foo', ]\n with pytest.raises(\n ValueError, match='abbreviation w/o a number'):\n idx.get_indexer(target, 'nearest', tolerance=tol_bad)\n with pytest.raises(ValueError):\n idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')\n\n def test_reasonable_key_error(self):\n # GH#1062\n index = DatetimeIndex(['1/3/2000'])\n with pytest.raises(KeyError, match='2000'):\n index.get_loc('1/1/2000')\n\n @pytest.mark.parametrize('key', [pd.Timedelta(0),\n pd.Timedelta(1),\n timedelta(0)])\n def test_timedelta_invalid_key(self, key):\n # GH#20464\n dti = pd.date_range('1970-01-01', periods=10)\n with pytest.raises(TypeError):\n dti.get_loc(key)\n\n def test_get_loc_nat(self):\n # GH#20464\n index = DatetimeIndex(['1/3/2000', 'NaT'])\n assert index.get_loc(pd.NaT) == 1\n",
"\"\"\"\nModule contains tools for processing files into DataFrames or other objects\n\"\"\"\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport csv\nimport datetime\nimport re\nimport sys\nfrom textwrap import fill\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.lib as lib\nimport pandas._libs.ops as libops\nimport pandas._libs.parsers as parsers\nfrom pandas._libs.tslibs import parsing\nimport pandas.compat as compat\nfrom pandas.compat import (\n PY3, StringIO, lrange, lzip, map, range, string_types, u, zip)\nfrom pandas.errors import (\n AbstractMethodError, EmptyDataError, ParserError, ParserWarning)\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.cast import astype_nansafe\nfrom pandas.core.dtypes.common import (\n ensure_object, is_categorical_dtype, is_dtype_equal, is_float, is_integer,\n is_integer_dtype, is_list_like, is_object_dtype, is_scalar,\n is_string_dtype)\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import algorithms\nfrom pandas.core.arrays import Categorical\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.index import (\n Index, MultiIndex, RangeIndex, ensure_index_from_sequences)\nfrom pandas.core.series import Series\nfrom pandas.core.tools import datetimes as tools\n\nfrom pandas.io.common import (\n _NA_VALUES, BaseIterator, UnicodeReader, UTF8Recoder, _get_handle,\n _infer_compression, _validate_header_arg, get_filepath_or_buffer,\n is_file_like)\nfrom pandas.io.date_converters import generic_parser\n\n# BOM character (byte order mark)\n# This exists at the beginning of a file to indicate endianness\n# of a file (stream). Unfortunately, this marker screws up parsing,\n# so we need to remove it if we see it.\n_BOM = u('\\ufeff')\n\n_parser_params = r\"\"\"Also supports optionally iterating or breaking of the file\ninto chunks.\n\nAdditional help can be found in the `online docs for IO Tools\n<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.\n\nParameters\n----------\nfilepath_or_buffer : str, path object, or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be: file://localhost/path/to/table.csv.\n\n If you want to pass in a path object, pandas accepts either\n ``pathlib.Path`` or ``py._path.local.LocalPath``.\n\n By file-like object, we refer to objects with a ``read()`` method, such as\n a file handler (e.g. via builtin ``open`` function) or ``StringIO``.\n%s\ndelim_whitespace : boolean, default False\n Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be\n used as the sep. Equivalent to setting ``sep='\\s+'``. If this option\n is set to True, nothing should be passed in for the ``delimiter``\n parameter.\n\n .. versionadded:: 0.18.1 support for the Python parser.\n\nheader : int or list of ints, default 'infer'\n Row number(s) to use as the column names, and the start of the\n data. Default behavior is to infer the column names: if no names\n are passed the behavior is identical to ``header=0`` and column\n names are inferred from the first line of the file, if column\n names are passed explicitly then the behavior is identical to\n ``header=None``. Explicitly pass ``header=0`` to be able to\n replace existing names. The header can be a list of integers that\n specify row locations for a multi-index on the columns\n e.g. [0,1,3]. 
Intervening rows that are not specified will be\n skipped (e.g. 2 in this example is skipped). Note that this\n parameter ignores commented lines and empty lines if\n ``skip_blank_lines=True``, so header=0 denotes the first line of\n data rather than the first line of the file.\nnames : array-like, default None\n List of column names to use. If file contains no header row, then you\n should explicitly pass header=None. Duplicates in this list will cause\n a ``UserWarning`` to be issued.\nindex_col : int or sequence or False, default None\n Column to use as the row labels of the DataFrame. If a sequence is given, a\n MultiIndex is used. If you have a malformed file with delimiters at the end\n of each line, you might consider index_col=False to force pandas to _not_\n use the first column as the index (row names)\nusecols : list-like or callable, default None\n Return a subset of the columns. If list-like, all elements must either\n be positional (i.e. integer indices into the document columns) or strings\n that correspond to column names provided either by the user in `names` or\n inferred from the document header row(s). For example, a valid list-like\n `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Element\n order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.\n To instantiate a DataFrame from ``data`` with element order preserved use\n ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns\n in ``['foo', 'bar']`` order or\n ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``\n for ``['bar', 'foo']`` order.\n\n If callable, the callable function will be evaluated against the column\n names, returning names where the callable function evaluates to True. An\n example of a valid callable argument would be ``lambda x: x.upper() in\n ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster\n parsing time and lower memory usage.\nsqueeze : boolean, default False\n If the parsed data only contains one column then return a Series\nprefix : str, default None\n Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...\nmangle_dupe_cols : boolean, default True\n Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than\n 'X'...'X'. Passing in False will cause data to be overwritten if there\n are duplicate names in the columns.\ndtype : Type name or dict of column -> type, default None\n Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}\n Use `str` or `object` together with suitable `na_values` settings\n to preserve and not interpret dtype.\n If converters are specified, they will be applied INSTEAD\n of dtype conversion.\n%s\nconverters : dict, default None\n Dict of functions for converting values in certain columns. 
Keys can either\n be integers or column labels\ntrue_values : list, default None\n Values to consider as True\nfalse_values : list, default None\n Values to consider as False\nskipinitialspace : boolean, default False\n Skip spaces after delimiter.\nskiprows : list-like or integer or callable, default None\n Line numbers to skip (0-indexed) or number of lines to skip (int)\n at the start of the file.\n\n If callable, the callable function will be evaluated against the row\n indices, returning True if the row should be skipped and False otherwise.\n An example of a valid callable argument would be ``lambda x: x in [0, 2]``.\nskipfooter : int, default 0\n Number of lines at bottom of file to skip (Unsupported with engine='c')\nnrows : int, default None\n Number of rows of file to read. Useful for reading pieces of large files\nna_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN. If dict passed, specific\n per-column NA values. By default the following values are interpreted as\n NaN: '\"\"\" + fill(\"', '\".join(sorted(_NA_VALUES)),\n 70, subsequent_indent=\" \") + \"\"\"'.\nkeep_default_na : bool, default True\n Whether or not to include the default NaN values when parsing the data.\n Depending on whether `na_values` is passed in, the behavior is as follows:\n\n * If `keep_default_na` is True, and `na_values` are specified, `na_values`\n is appended to the default NaN values used for parsing.\n * If `keep_default_na` is True, and `na_values` are not specified, only\n the default NaN values are used for parsing.\n * If `keep_default_na` is False, and `na_values` are specified, only\n the NaN values specified `na_values` are used for parsing.\n * If `keep_default_na` is False, and `na_values` are not specified, no\n strings will be parsed as NaN.\n\n Note that if `na_filter` is passed in as False, the `keep_default_na` and\n `na_values` parameters will be ignored.\nna_filter : boolean, default True\n Detect missing value markers (empty strings and the value of na_values). In\n data without any NAs, passing na_filter=False can improve the performance\n of reading a large file\nverbose : boolean, default False\n Indicate number of NA values placed in non-numeric columns\nskip_blank_lines : boolean, default True\n If True, skip over blank lines rather than interpreting as NaN values\nparse_dates : boolean or list of ints or names or list of lists or dict, \\\ndefault False\n\n * boolean. If True -> try parsing the index.\n * list of ints or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n each as a separate date column.\n * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as\n a single date column.\n * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result\n 'foo'\n\n If a column or index contains an unparseable date, the entire column or\n index will be returned unaltered as an object data type. For non-standard\n datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``\n\n Note: A fast-path exists for iso8601-formatted dates.\ninfer_datetime_format : boolean, default False\n If True and `parse_dates` is enabled, pandas will attempt to infer the\n format of the datetime strings in the columns, and if it can be inferred,\n switch to a faster method of parsing them. 
In some cases this can increase\n the parsing speed by 5-10x.\nkeep_date_col : boolean, default False\n If True and `parse_dates` specifies combining multiple columns then\n keep the original columns.\ndate_parser : function, default None\n Function to use for converting a sequence of string columns to an array of\n datetime instances. The default uses ``dateutil.parser.parser`` to do the\n conversion. Pandas will try to call `date_parser` in three different ways,\n advancing to the next if an exception occurs: 1) Pass one or more arrays\n (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the\n string values from the columns defined by `parse_dates` into a single array\n and pass that; and 3) call `date_parser` once for each row using one or\n more strings (corresponding to the columns defined by `parse_dates`) as\n arguments.\ndayfirst : boolean, default False\n DD/MM format dates, international and European format\niterator : boolean, default False\n Return TextFileReader object for iteration or getting chunks with\n ``get_chunk()``.\nchunksize : int, default None\n Return TextFileReader object for iteration.\n See the `IO Tools docs\n <http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_\n for more information on ``iterator`` and ``chunksize``.\ncompression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'\n For on-the-fly decompression of on-disk data. If 'infer' and\n `filepath_or_buffer` is path-like, then detect compression from the\n following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n decompression). If using 'zip', the ZIP file must contain only one data\n file to be read in. Set to None for no decompression.\n\n .. versionadded:: 0.18.1 support for 'zip' and 'xz' compression.\n\nthousands : str, default None\n Thousands separator\ndecimal : str, default '.'\n Character to recognize as decimal point (e.g. use ',' for European data).\nfloat_precision : string, default None\n Specifies which converter the C engine should use for floating-point\n values. The options are `None` for the ordinary converter,\n `high` for the high-precision converter, and `round_trip` for the\n round-trip converter.\nlineterminator : str (length 1), default None\n Character to break file into lines. Only valid with C parser.\nquotechar : str (length 1), optional\n The character used to denote the start and end of a quoted item. Quoted\n items can include the delimiter and it will be ignored.\nquoting : int or csv.QUOTE_* instance, default 0\n Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of\n QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).\ndoublequote : boolean, default ``True``\n When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate\n whether or not to interpret two consecutive quotechar elements INSIDE a\n field as a single ``quotechar`` element.\nescapechar : str (length 1), default None\n One-character string used to escape delimiter when quoting is QUOTE_NONE.\ncomment : str, default None\n Indicates remainder of line should not be parsed. If found at the beginning\n of a line, the line will be ignored altogether. This parameter must be a\n single character. Like empty lines (as long as ``skip_blank_lines=True``),\n fully commented lines are ignored by the parameter `header` but not by\n `skiprows`. 
For example, if ``comment='#'``, parsing\n ``#empty\\\\na,b,c\\\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being\n treated as the header.\nencoding : str, default None\n Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python\n standard encodings\n <https://docs.python.org/3/library/codecs.html#standard-encodings>`_\ndialect : str or csv.Dialect instance, default None\n If provided, this parameter will override values (default or not) for the\n following parameters: `delimiter`, `doublequote`, `escapechar`,\n `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to\n override values, a ParserWarning will be issued. See csv.Dialect\n documentation for more details.\ntupleize_cols : boolean, default False\n .. deprecated:: 0.21.0\n This argument will be removed and will always convert to MultiIndex\n\n Leave a list of tuples on columns as is (default is to convert to\n a MultiIndex on the columns)\nerror_bad_lines : boolean, default True\n Lines with too many fields (e.g. a csv line with too many commas) will by\n default cause an exception to be raised, and no DataFrame will be returned.\n If False, then these \"bad lines\" will dropped from the DataFrame that is\n returned.\nwarn_bad_lines : boolean, default True\n If error_bad_lines is False, and warn_bad_lines is True, a warning for each\n \"bad line\" will be output.\nlow_memory : boolean, default True\n Internally process the file in chunks, resulting in lower memory use\n while parsing, but possibly mixed type inference. To ensure no mixed\n types either set False, or specify the type with the `dtype` parameter.\n Note that the entire file is read into a single DataFrame regardless,\n use the `chunksize` or `iterator` parameter to return the data in chunks.\n (Only valid with C parser)\nmemory_map : boolean, default False\n If a filepath is provided for `filepath_or_buffer`, map the file object\n directly onto memory and access the data directly from there. Using this\n option can improve performance because there is no longer any I/O overhead.\n\nReturns\n-------\nresult : DataFrame or TextParser\n\"\"\"\n\n# engine is not used in read_fwf() so is factored out of the shared docstring\n_engine_doc = \"\"\"engine : {'c', 'python'}, optional\n Parser engine to use. The C engine is faster while the python engine is\n currently more feature-complete.\"\"\"\n\n_sep_doc = r\"\"\"sep : str, default {default}\n Delimiter to use. If sep is None, the C engine cannot automatically detect\n the separator, but the Python parsing engine can, meaning the latter will\n be used and automatically detect the separator by Python's builtin sniffer\n tool, ``csv.Sniffer``. In addition, separators longer than 1 character and\n different from ``'\\s+'`` will be interpreted as regular expressions and\n will also force the use of the Python parsing engine. Note that regex\n delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``\ndelimiter : str, default ``None``\n Alternative argument name for sep.\"\"\"\n\n_read_csv_doc = \"\"\"\nRead CSV (comma-separated) file into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_sep_doc.format(default=\"','\"), _engine_doc))\n\n_read_table_doc = \"\"\"\n\n.. deprecated:: 0.24.0\n Use :func:`pandas.read_csv` instead, passing ``sep='\\t'`` if necessary.\n\nRead general delimited file into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_sep_doc.format(default=\"\\\\t (tab-stop)\"),\n _engine_doc))\n\n_fwf_widths = \"\"\"\\\ncolspecs : list of pairs (int, int) or 'infer'. 
optional\n A list of pairs (tuples) giving the extents of the fixed-width\n fields of each line as half-open intervals (i.e., [from, to[ ).\n String value 'infer' can be used to instruct the parser to try\n detecting the column specifications from the first 100 rows of\n the data which are not being skipped via skiprows (default='infer').\nwidths : list of ints. optional\n A list of field widths which can be used instead of 'colspecs' if\n the intervals are contiguous.\ndelimiter : str, default ``'\\t' + ' '``\n Characters to consider as filler characters in the fixed-width file.\n Can be used to specify the filler character of the fields\n if it is not spaces (e.g., '~').\n\"\"\"\n\n_read_fwf_doc = \"\"\"\nRead a table of fixed-width formatted lines into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_fwf_widths, ''))\n\n\ndef _validate_integer(name, val, min_val=0):\n \"\"\"\n Checks whether the 'name' parameter for parsing is either\n an integer OR float that can SAFELY be cast to an integer\n without losing accuracy. Raises a ValueError if that is\n not the case.\n\n Parameters\n ----------\n name : string\n Parameter name (used for error reporting)\n val : int or float\n The value to check\n min_val : int\n Minimum allowed value (val < min_val will result in a ValueError)\n \"\"\"\n msg = \"'{name:s}' must be an integer >={min_val:d}\".format(name=name,\n min_val=min_val)\n\n if val is not None:\n if is_float(val):\n if int(val) != val:\n raise ValueError(msg)\n val = int(val)\n elif not (is_integer(val) and val >= min_val):\n raise ValueError(msg)\n\n return val\n\n\ndef _validate_names(names):\n \"\"\"\n Check if the `names` parameter contains duplicates.\n\n If duplicates are found, we issue a warning before returning.\n\n Parameters\n ----------\n names : array-like or None\n An array containing a list of the names used for the output DataFrame.\n\n Returns\n -------\n names : array-like or None\n The original `names` parameter.\n \"\"\"\n\n if names is not None:\n if len(names) != len(set(names)):\n msg = (\"Duplicate names specified. 
This \"\n \"will raise an error in the future.\")\n warnings.warn(msg, UserWarning, stacklevel=3)\n\n return names\n\n\ndef _read(filepath_or_buffer, kwds):\n \"\"\"Generic reader of line files.\"\"\"\n encoding = kwds.get('encoding', None)\n if encoding is not None:\n encoding = re.sub('_', '-', encoding).lower()\n kwds['encoding'] = encoding\n\n compression = kwds.get('compression')\n compression = _infer_compression(filepath_or_buffer, compression)\n filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(\n filepath_or_buffer, encoding, compression)\n kwds['compression'] = compression\n\n if kwds.get('date_parser', None) is not None:\n if isinstance(kwds['parse_dates'], bool):\n kwds['parse_dates'] = True\n\n # Extract some of the arguments (pass chunksize on).\n iterator = kwds.get('iterator', False)\n chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)\n nrows = kwds.get('nrows', None)\n\n # Check for duplicates in names.\n _validate_names(kwds.get(\"names\", None))\n\n # Create the parser.\n parser = TextFileReader(filepath_or_buffer, **kwds)\n\n if chunksize or iterator:\n return parser\n\n try:\n data = parser.read(nrows)\n finally:\n parser.close()\n\n if should_close:\n try:\n filepath_or_buffer.close()\n except ValueError:\n pass\n\n return data\n\n\n_parser_defaults = {\n 'delimiter': None,\n\n 'doublequote': True,\n 'escapechar': None,\n 'quotechar': '\"',\n 'quoting': csv.QUOTE_MINIMAL,\n 'skipinitialspace': False,\n 'lineterminator': None,\n\n 'header': 'infer',\n 'index_col': None,\n 'names': None,\n 'prefix': None,\n 'skiprows': None,\n 'na_values': None,\n 'true_values': None,\n 'false_values': None,\n 'converters': None,\n 'dtype': None,\n 'skipfooter': 0,\n\n 'keep_default_na': True,\n 'thousands': None,\n 'comment': None,\n 'decimal': b'.',\n\n # 'engine': 'c',\n 'parse_dates': False,\n 'keep_date_col': False,\n 'dayfirst': False,\n 'date_parser': None,\n\n 'usecols': None,\n\n 'nrows': None,\n # 'iterator': False,\n 'chunksize': None,\n 'verbose': False,\n 'encoding': None,\n 'squeeze': False,\n 'compression': None,\n 'mangle_dupe_cols': True,\n 'tupleize_cols': False,\n 'infer_datetime_format': False,\n 'skip_blank_lines': True\n}\n\n\n_c_parser_defaults = {\n 'delim_whitespace': False,\n 'na_filter': True,\n 'low_memory': True,\n 'memory_map': False,\n 'error_bad_lines': True,\n 'warn_bad_lines': True,\n 'tupleize_cols': False,\n 'float_precision': None\n}\n\n_fwf_defaults = {\n 'colspecs': 'infer',\n 'widths': None,\n}\n\n_c_unsupported = {'skipfooter'}\n_python_unsupported = {\n 'low_memory',\n 'float_precision',\n}\n\n_deprecated_defaults = {\n 'tupleize_cols': None\n}\n_deprecated_args = {\n 'tupleize_cols',\n}\n\n\ndef _make_parser_function(name, default_sep=','):\n\n # prepare read_table deprecation\n if name == \"read_table\":\n sep = False\n else:\n sep = default_sep\n\n def parser_f(filepath_or_buffer,\n sep=sep,\n delimiter=None,\n\n # Column and Index Locations and Names\n header='infer',\n names=None,\n index_col=None,\n usecols=None,\n squeeze=False,\n prefix=None,\n mangle_dupe_cols=True,\n\n # General Parsing Configuration\n dtype=None,\n engine=None,\n converters=None,\n true_values=None,\n false_values=None,\n skipinitialspace=False,\n skiprows=None,\n nrows=None,\n\n # NA and Missing Data Handling\n na_values=None,\n keep_default_na=True,\n na_filter=True,\n verbose=False,\n skip_blank_lines=True,\n\n # Datetime Handling\n parse_dates=False,\n infer_datetime_format=False,\n keep_date_col=False,\n 
date_parser=None,\n dayfirst=False,\n\n # Iteration\n iterator=False,\n chunksize=None,\n\n # Quoting, Compression, and File Format\n compression='infer',\n thousands=None,\n decimal=b'.',\n lineterminator=None,\n quotechar='\"',\n quoting=csv.QUOTE_MINIMAL,\n escapechar=None,\n comment=None,\n encoding=None,\n dialect=None,\n tupleize_cols=None,\n\n # Error Handling\n error_bad_lines=True,\n warn_bad_lines=True,\n\n skipfooter=0,\n\n # Internal\n doublequote=True,\n delim_whitespace=False,\n low_memory=_c_parser_defaults['low_memory'],\n memory_map=False,\n float_precision=None):\n\n # deprecate read_table GH21948\n if name == \"read_table\":\n if sep is False and delimiter is None:\n warnings.warn(\"read_table is deprecated, use read_csv \"\n \"instead, passing sep='\\\\t'.\",\n FutureWarning, stacklevel=2)\n else:\n warnings.warn(\"read_table is deprecated, use read_csv \"\n \"instead.\",\n FutureWarning, stacklevel=2)\n if sep is False:\n sep = default_sep\n\n # Alias sep -> delimiter.\n if delimiter is None:\n delimiter = sep\n\n if delim_whitespace and delimiter != default_sep:\n raise ValueError(\"Specified a delimiter with both sep and\"\n \" delim_whitespace=True; you can only\"\n \" specify one.\")\n\n if engine is not None:\n engine_specified = True\n else:\n engine = 'c'\n engine_specified = False\n\n kwds = dict(delimiter=delimiter,\n engine=engine,\n dialect=dialect,\n compression=compression,\n engine_specified=engine_specified,\n\n doublequote=doublequote,\n escapechar=escapechar,\n quotechar=quotechar,\n quoting=quoting,\n skipinitialspace=skipinitialspace,\n lineterminator=lineterminator,\n\n header=header,\n index_col=index_col,\n names=names,\n prefix=prefix,\n skiprows=skiprows,\n na_values=na_values,\n true_values=true_values,\n false_values=false_values,\n keep_default_na=keep_default_na,\n thousands=thousands,\n comment=comment,\n decimal=decimal,\n\n parse_dates=parse_dates,\n keep_date_col=keep_date_col,\n dayfirst=dayfirst,\n date_parser=date_parser,\n\n nrows=nrows,\n iterator=iterator,\n chunksize=chunksize,\n skipfooter=skipfooter,\n converters=converters,\n dtype=dtype,\n usecols=usecols,\n verbose=verbose,\n encoding=encoding,\n squeeze=squeeze,\n memory_map=memory_map,\n float_precision=float_precision,\n\n na_filter=na_filter,\n delim_whitespace=delim_whitespace,\n warn_bad_lines=warn_bad_lines,\n error_bad_lines=error_bad_lines,\n low_memory=low_memory,\n mangle_dupe_cols=mangle_dupe_cols,\n tupleize_cols=tupleize_cols,\n infer_datetime_format=infer_datetime_format,\n skip_blank_lines=skip_blank_lines)\n\n return _read(filepath_or_buffer, kwds)\n\n parser_f.__name__ = name\n\n return parser_f\n\n\nread_csv = _make_parser_function('read_csv', default_sep=',')\nread_csv = Appender(_read_csv_doc)(read_csv)\n\nread_table = _make_parser_function('read_table', default_sep='\\t')\nread_table = Appender(_read_table_doc)(read_table)\n\n\n@Appender(_read_fwf_doc)\ndef read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds):\n # Check input arguments.\n if colspecs is None and widths is None:\n raise ValueError(\"Must specify either colspecs or widths\")\n elif colspecs not in (None, 'infer') and widths is not None:\n raise ValueError(\"You must specify only one of 'widths' and \"\n \"'colspecs'\")\n\n # Compute 'colspecs' from 'widths', if specified.\n if widths is not None:\n colspecs, col = [], 0\n for w in widths:\n colspecs.append((col, col + w))\n col += w\n\n kwds['colspecs'] = colspecs\n kwds['engine'] = 'python-fwf'\n return 
_read(filepath_or_buffer, kwds)\n\n\nclass TextFileReader(BaseIterator):\n \"\"\"\n\n Passed dialect overrides any of the related parser options\n\n \"\"\"\n\n def __init__(self, f, engine=None, **kwds):\n\n self.f = f\n\n if engine is not None:\n engine_specified = True\n else:\n engine = 'python'\n engine_specified = False\n\n self._engine_specified = kwds.get('engine_specified', engine_specified)\n\n if kwds.get('dialect') is not None:\n dialect = kwds['dialect']\n if dialect in csv.list_dialects():\n dialect = csv.get_dialect(dialect)\n\n # Any valid dialect should have these attributes.\n # If any are missing, we will raise automatically.\n for param in ('delimiter', 'doublequote', 'escapechar',\n 'skipinitialspace', 'quotechar', 'quoting'):\n try:\n dialect_val = getattr(dialect, param)\n except AttributeError:\n raise ValueError(\"Invalid dialect '{dialect}' provided\"\n .format(dialect=kwds['dialect']))\n provided = kwds.get(param, _parser_defaults[param])\n\n # Messages for conflicting values between the dialect instance\n # and the actual parameters provided.\n conflict_msgs = []\n\n if dialect_val != provided:\n conflict_msgs.append((\n \"Conflicting values for '{param}': '{val}' was \"\n \"provided, but the dialect specifies '{diaval}'. \"\n \"Using the dialect-specified value.\".format(\n param=param, val=provided, diaval=dialect_val)))\n\n if conflict_msgs:\n warnings.warn('\\n\\n'.join(conflict_msgs), ParserWarning,\n stacklevel=2)\n kwds[param] = dialect_val\n\n if kwds.get(\"skipfooter\"):\n if kwds.get(\"iterator\") or kwds.get(\"chunksize\"):\n raise ValueError(\"'skipfooter' not supported for 'iteration'\")\n if kwds.get(\"nrows\"):\n raise ValueError(\"'skipfooter' not supported with 'nrows'\")\n\n if kwds.get('header', 'infer') == 'infer':\n kwds['header'] = 0 if kwds.get('names') is None else None\n\n self.orig_options = kwds\n\n # miscellanea\n self.engine = engine\n self._engine = None\n self._currow = 0\n\n options = self._get_options_with_defaults(engine)\n\n self.chunksize = options.pop('chunksize', None)\n self.nrows = options.pop('nrows', None)\n self.squeeze = options.pop('squeeze', False)\n\n # might mutate self.engine\n self.engine = self._check_file_or_buffer(f, engine)\n self.options, self.engine = self._clean_options(options, engine)\n\n if 'has_index_names' in kwds:\n self.options['has_index_names'] = kwds['has_index_names']\n\n self._make_engine(self.engine)\n\n def close(self):\n self._engine.close()\n\n def _get_options_with_defaults(self, engine):\n kwds = self.orig_options\n\n options = {}\n\n for argname, default in compat.iteritems(_parser_defaults):\n value = kwds.get(argname, default)\n\n # see gh-12935\n if argname == 'mangle_dupe_cols' and not value:\n raise ValueError('Setting mangle_dupe_cols=False is '\n 'not supported yet')\n else:\n options[argname] = value\n\n for argname, default in compat.iteritems(_c_parser_defaults):\n if argname in kwds:\n value = kwds[argname]\n\n if engine != 'c' and value != default:\n if ('python' in engine and\n argname not in _python_unsupported):\n pass\n elif value == _deprecated_defaults.get(argname, default):\n pass\n else:\n raise ValueError(\n 'The %r option is not supported with the'\n ' %r engine' % (argname, engine))\n else:\n value = _deprecated_defaults.get(argname, default)\n options[argname] = value\n\n if engine == 'python-fwf':\n for argname, default in compat.iteritems(_fwf_defaults):\n options[argname] = kwds.get(argname, default)\n\n return options\n\n def _check_file_or_buffer(self, f, 
engine):\n # see gh-16530\n if is_file_like(f):\n next_attr = \"__next__\" if PY3 else \"next\"\n\n # The C engine doesn't need the file-like to have the \"next\" or\n # \"__next__\" attribute. However, the Python engine explicitly calls\n # \"next(...)\" when iterating through such an object, meaning it\n # needs to have that attribute (\"next\" for Python 2.x, \"__next__\"\n # for Python 3.x)\n if engine != \"c\" and not hasattr(f, next_attr):\n msg = (\"The 'python' engine cannot iterate \"\n \"through this file buffer.\")\n raise ValueError(msg)\n\n return engine\n\n def _clean_options(self, options, engine):\n result = options.copy()\n\n engine_specified = self._engine_specified\n fallback_reason = None\n\n sep = options['delimiter']\n delim_whitespace = options['delim_whitespace']\n\n # C engine not supported yet\n if engine == 'c':\n if options['skipfooter'] > 0:\n fallback_reason = (\"the 'c' engine does not support\"\n \" skipfooter\")\n engine = 'python'\n\n encoding = sys.getfilesystemencoding() or 'utf-8'\n if sep is None and not delim_whitespace:\n if engine == 'c':\n fallback_reason = (\"the 'c' engine does not support\"\n \" sep=None with delim_whitespace=False\")\n engine = 'python'\n elif sep is not None and len(sep) > 1:\n if engine == 'c' and sep == r'\\s+':\n result['delim_whitespace'] = True\n del result['delimiter']\n elif engine not in ('python', 'python-fwf'):\n # wait until regex engine integrated\n fallback_reason = (\"the 'c' engine does not support\"\n \" regex separators (separators > 1 char and\"\n r\" different from '\\s+' are\"\n \" interpreted as regex)\")\n engine = 'python'\n elif delim_whitespace:\n if 'python' in engine:\n result['delimiter'] = r'\\s+'\n elif sep is not None:\n encodeable = True\n try:\n if len(sep.encode(encoding)) > 1:\n encodeable = False\n except UnicodeDecodeError:\n encodeable = False\n if not encodeable and engine not in ('python', 'python-fwf'):\n fallback_reason = (\"the separator encoded in {encoding}\"\n \" is > 1 char long, and the 'c' engine\"\n \" does not support such separators\"\n .format(encoding=encoding))\n engine = 'python'\n\n quotechar = options['quotechar']\n if (quotechar is not None and\n isinstance(quotechar, (str, compat.text_type, bytes))):\n if (len(quotechar) == 1 and ord(quotechar) > 127 and\n engine not in ('python', 'python-fwf')):\n fallback_reason = (\"ord(quotechar) > 127, meaning the \"\n \"quotechar is larger than one byte, \"\n \"and the 'c' engine does not support \"\n \"such quotechars\")\n engine = 'python'\n\n if fallback_reason and engine_specified:\n raise ValueError(fallback_reason)\n\n if engine == 'c':\n for arg in _c_unsupported:\n del result[arg]\n\n if 'python' in engine:\n for arg in _python_unsupported:\n if fallback_reason and result[arg] != _c_parser_defaults[arg]:\n msg = (\"Falling back to the 'python' engine because\"\n \" {reason}, but this causes {option!r} to be\"\n \" ignored as it is not supported by the 'python'\"\n \" engine.\").format(reason=fallback_reason,\n option=arg)\n raise ValueError(msg)\n del result[arg]\n\n if fallback_reason:\n warnings.warn((\"Falling back to the 'python' engine because\"\n \" {0}; you can avoid this warning by specifying\"\n \" engine='python'.\").format(fallback_reason),\n ParserWarning, stacklevel=5)\n\n index_col = options['index_col']\n names = options['names']\n converters = options['converters']\n na_values = options['na_values']\n skiprows = options['skiprows']\n\n _validate_header_arg(options['header'])\n\n depr_warning = ''\n\n for 
arg in _deprecated_args:\n parser_default = _c_parser_defaults[arg]\n depr_default = _deprecated_defaults[arg]\n\n msg = (\"The '{arg}' argument has been deprecated \"\n \"and will be removed in a future version.\"\n .format(arg=arg))\n\n if arg == 'tupleize_cols':\n msg += (' Column tuples will then '\n 'always be converted to MultiIndex.')\n\n if result.get(arg, depr_default) != depr_default:\n # raise Exception(result.get(arg, depr_default), depr_default)\n depr_warning += msg + '\\n\\n'\n else:\n result[arg] = parser_default\n\n if depr_warning != '':\n warnings.warn(depr_warning, FutureWarning, stacklevel=2)\n\n if index_col is True:\n raise ValueError(\"The value of index_col couldn't be 'True'\")\n if _is_index_col(index_col):\n if not isinstance(index_col, (list, tuple, np.ndarray)):\n index_col = [index_col]\n result['index_col'] = index_col\n\n names = list(names) if names is not None else names\n\n # type conversion-related\n if converters is not None:\n if not isinstance(converters, dict):\n raise TypeError('Type converters must be a dict or'\n ' subclass, input was '\n 'a {0!r}'.format(type(converters).__name__))\n else:\n converters = {}\n\n # Converting values to NA\n keep_default_na = options['keep_default_na']\n na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)\n\n # handle skiprows; this is internally handled by the\n # c-engine, so only need for python parsers\n if engine != 'c':\n if is_integer(skiprows):\n skiprows = lrange(skiprows)\n if skiprows is None:\n skiprows = set()\n elif not callable(skiprows):\n skiprows = set(skiprows)\n\n # put stuff back\n result['names'] = names\n result['converters'] = converters\n result['na_values'] = na_values\n result['na_fvalues'] = na_fvalues\n result['skiprows'] = skiprows\n\n return result, engine\n\n def __next__(self):\n try:\n return self.get_chunk()\n except StopIteration:\n self.close()\n raise\n\n def _make_engine(self, engine='c'):\n if engine == 'c':\n self._engine = CParserWrapper(self.f, **self.options)\n else:\n if engine == 'python':\n klass = PythonParser\n elif engine == 'python-fwf':\n klass = FixedWidthFieldParser\n else:\n raise ValueError('Unknown engine: {engine} (valid options are'\n ' \"c\", \"python\", or' ' \"python-fwf\")'.format(\n engine=engine))\n self._engine = klass(self.f, **self.options)\n\n def _failover_to_python(self):\n raise AbstractMethodError(self)\n\n def read(self, nrows=None):\n nrows = _validate_integer('nrows', nrows)\n ret = self._engine.read(nrows)\n\n # May alter columns / col_dict\n index, columns, col_dict = self._create_index(ret)\n\n if index is None:\n if col_dict:\n # Any column is actually fine:\n new_rows = len(compat.next(compat.itervalues(col_dict)))\n index = RangeIndex(self._currow, self._currow + new_rows)\n else:\n new_rows = 0\n else:\n new_rows = len(index)\n\n df = DataFrame(col_dict, columns=columns, index=index)\n\n self._currow += new_rows\n\n if self.squeeze and len(df.columns) == 1:\n return df[df.columns[0]].copy()\n return df\n\n def _create_index(self, ret):\n index, columns, col_dict = ret\n return index, columns, col_dict\n\n def get_chunk(self, size=None):\n if size is None:\n size = self.chunksize\n if self.nrows is not None:\n if self._currow >= self.nrows:\n raise StopIteration\n size = min(size, self.nrows - self._currow)\n return self.read(nrows=size)\n\n\ndef _is_index_col(col):\n return col is not None and col is not False\n\n\ndef _is_potential_multi_index(columns):\n \"\"\"\n Check whether or not the `columns` parameter\n could 
be converted into a MultiIndex.\n\n Parameters\n ----------\n columns : array-like\n Object which may or may not be convertible into a MultiIndex\n\n Returns\n -------\n boolean : Whether or not columns could become a MultiIndex\n \"\"\"\n return (len(columns) and not isinstance(columns, MultiIndex) and\n all(isinstance(c, tuple) for c in columns))\n\n\ndef _evaluate_usecols(usecols, names):\n \"\"\"\n Check whether or not the 'usecols' parameter\n is a callable. If so, enumerates the 'names'\n parameter and returns a set of indices for\n each entry in 'names' that evaluates to True.\n If not a callable, returns 'usecols'.\n \"\"\"\n if callable(usecols):\n return {i for i, name in enumerate(names) if usecols(name)}\n return usecols\n\n\ndef _validate_usecols_names(usecols, names):\n \"\"\"\n Validates that all usecols are present in a given\n list of names. If not, raise a ValueError that\n shows what usecols are missing.\n\n Parameters\n ----------\n usecols : iterable of usecols\n The columns to validate are present in names.\n names : iterable of names\n The column names to check against.\n\n Returns\n -------\n usecols : iterable of usecols\n The `usecols` parameter if the validation succeeds.\n\n Raises\n ------\n ValueError : Columns were missing. Error message will list them.\n \"\"\"\n missing = [c for c in usecols if c not in names]\n if len(missing) > 0:\n raise ValueError(\n \"Usecols do not match columns, \"\n \"columns expected but not found: {missing}\".format(missing=missing)\n )\n\n return usecols\n\n\ndef _validate_skipfooter_arg(skipfooter):\n \"\"\"\n Validate the 'skipfooter' parameter.\n\n Checks whether 'skipfooter' is a non-negative integer.\n Raises a ValueError if that is not the case.\n\n Parameters\n ----------\n skipfooter : non-negative integer\n The number of rows to skip at the end of the file.\n\n Returns\n -------\n validated_skipfooter : non-negative integer\n The original input if the validation succeeds.\n\n Raises\n ------\n ValueError : 'skipfooter' was not a non-negative integer.\n \"\"\"\n\n if not is_integer(skipfooter):\n raise ValueError(\"skipfooter must be an integer\")\n\n if skipfooter < 0:\n raise ValueError(\"skipfooter cannot be negative\")\n\n return skipfooter\n\n\ndef _validate_usecols_arg(usecols):\n \"\"\"\n Validate the 'usecols' parameter.\n\n Checks whether or not the 'usecols' parameter contains all integers\n (column selection by index), strings (column by name) or is a callable.\n Raises a ValueError if that is not the case.\n\n Parameters\n ----------\n usecols : list-like, callable, or None\n List of columns to use when parsing or a callable that can be used\n to filter a list of table columns.\n\n Returns\n -------\n usecols_tuple : tuple\n A tuple of (verified_usecols, usecols_dtype).\n\n 'verified_usecols' is either a set if an array-like is passed in or\n 'usecols' if a callable or None is passed in.\n\n 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like\n is passed in or None if a callable or None is passed in.\n \"\"\"\n msg = (\"'usecols' must either be list-like of all strings, all unicode, \"\n \"all integers or a callable.\")\n if usecols is not None:\n if callable(usecols):\n return usecols, None\n # GH20529, ensure is iterable container but not string.\n elif not is_list_like(usecols):\n raise ValueError(msg)\n else:\n usecols_dtype = lib.infer_dtype(usecols)\n if usecols_dtype not in ('empty', 'integer',\n 'string', 'unicode'):\n raise ValueError(msg)\n return set(usecols), usecols_dtype\n return 
usecols, None\n\n\ndef _validate_parse_dates_arg(parse_dates):\n \"\"\"\n Check whether or not the 'parse_dates' parameter\n is a non-boolean scalar. Raises a ValueError if\n that is the case.\n \"\"\"\n msg = (\"Only booleans, lists, and \"\n \"dictionaries are accepted \"\n \"for the 'parse_dates' parameter\")\n\n if parse_dates is not None:\n if is_scalar(parse_dates):\n if not lib.is_bool(parse_dates):\n raise TypeError(msg)\n\n elif not isinstance(parse_dates, (list, dict)):\n raise TypeError(msg)\n\n return parse_dates\n\n\nclass ParserBase(object):\n\n def __init__(self, kwds):\n self.names = kwds.get('names')\n self.orig_names = None\n self.prefix = kwds.pop('prefix', None)\n\n self.index_col = kwds.get('index_col', None)\n self.unnamed_cols = set()\n self.index_names = None\n self.col_names = None\n\n self.parse_dates = _validate_parse_dates_arg(\n kwds.pop('parse_dates', False))\n self.date_parser = kwds.pop('date_parser', None)\n self.dayfirst = kwds.pop('dayfirst', False)\n self.keep_date_col = kwds.pop('keep_date_col', False)\n\n self.na_values = kwds.get('na_values')\n self.na_fvalues = kwds.get('na_fvalues')\n self.na_filter = kwds.get('na_filter', False)\n self.keep_default_na = kwds.get('keep_default_na', True)\n\n self.true_values = kwds.get('true_values')\n self.false_values = kwds.get('false_values')\n self.tupleize_cols = kwds.get('tupleize_cols', False)\n self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True)\n self.infer_datetime_format = kwds.pop('infer_datetime_format', False)\n\n self._date_conv = _make_date_converter(\n date_parser=self.date_parser,\n dayfirst=self.dayfirst,\n infer_datetime_format=self.infer_datetime_format\n )\n\n # validate header options for mi\n self.header = kwds.get('header')\n if isinstance(self.header, (list, tuple, np.ndarray)):\n if not all(map(is_integer, self.header)):\n raise ValueError(\"header must be integer or list of integers\")\n if kwds.get('usecols'):\n raise ValueError(\"cannot specify usecols when \"\n \"specifying a multi-index header\")\n if kwds.get('names'):\n raise ValueError(\"cannot specify names when \"\n \"specifying a multi-index header\")\n\n # validate index_col that only contains integers\n if self.index_col is not None:\n is_sequence = isinstance(self.index_col, (list, tuple,\n np.ndarray))\n if not (is_sequence and\n all(map(is_integer, self.index_col)) or\n is_integer(self.index_col)):\n raise ValueError(\"index_col must only contain row numbers \"\n \"when specifying a multi-index header\")\n\n # GH 16338\n elif self.header is not None and not is_integer(self.header):\n raise ValueError(\"header must be integer or list of integers\")\n\n self._name_processed = False\n\n self._first_chunk = True\n\n # GH 13932\n # keep references to file handles opened by the parser itself\n self.handles = []\n\n def close(self):\n for f in self.handles:\n f.close()\n\n @property\n def _has_complex_date_col(self):\n return (isinstance(self.parse_dates, dict) or\n (isinstance(self.parse_dates, list) and\n len(self.parse_dates) > 0 and\n isinstance(self.parse_dates[0], list)))\n\n def _should_parse_dates(self, i):\n if isinstance(self.parse_dates, bool):\n return self.parse_dates\n else:\n if self.index_names is not None:\n name = self.index_names[i]\n else:\n name = None\n j = self.index_col[i]\n\n if is_scalar(self.parse_dates):\n return ((j == self.parse_dates) or\n (name is not None and name == self.parse_dates))\n else:\n return ((j in self.parse_dates) or\n (name is not None and name in self.parse_dates))\n\n def 
_extract_multi_indexer_columns(self, header, index_names, col_names,\n passed_names=False):\n \"\"\" extract and return the names, index_names, col_names\n header is a list-of-lists returned from the parsers \"\"\"\n if len(header) < 2:\n return header[0], index_names, col_names, passed_names\n\n # the names are the tuples of the header that are not the index cols\n # 0 is the name of the index, assuming index_col is a list of column\n # numbers\n ic = self.index_col\n if ic is None:\n ic = []\n\n if not isinstance(ic, (list, tuple, np.ndarray)):\n ic = [ic]\n sic = set(ic)\n\n # clean the index_names\n index_names = header.pop(-1)\n index_names, names, index_col = _clean_index_names(index_names,\n self.index_col,\n self.unnamed_cols)\n\n # extract the columns\n field_count = len(header[0])\n\n def extract(r):\n return tuple(r[i] for i in range(field_count) if i not in sic)\n\n columns = lzip(*[extract(r) for r in header])\n names = ic + columns\n\n # If we find unnamed columns all in a single\n # level, then our header was too long.\n for n in range(len(columns[0])):\n if all(compat.to_str(c[n]) in self.unnamed_cols for c in columns):\n raise ParserError(\n \"Passed header=[%s] are too many rows for this \"\n \"multi_index of columns\"\n % ','.join(str(x) for x in self.header)\n )\n\n # Clean the column names (if we have an index_col).\n if len(ic):\n col_names = [r[0] if (len(r[0]) and\n r[0] not in self.unnamed_cols) else None\n for r in header]\n else:\n col_names = [None] * len(header)\n\n passed_names = True\n\n return names, index_names, col_names, passed_names\n\n def _maybe_dedup_names(self, names):\n # see gh-7160 and gh-9424: this helps to provide\n # immediate alleviation of the duplicate names\n # issue and appears to be satisfactory to users,\n # but ultimately, not needing to butcher the names\n # would be nice!\n if self.mangle_dupe_cols:\n names = list(names) # so we can index\n counts = defaultdict(int)\n is_potential_mi = _is_potential_multi_index(names)\n\n for i, col in enumerate(names):\n cur_count = counts[col]\n\n while cur_count > 0:\n counts[col] = cur_count + 1\n\n if is_potential_mi:\n col = col[:-1] + ('%s.%d' % (col[-1], cur_count),)\n else:\n col = '%s.%d' % (col, cur_count)\n cur_count = counts[col]\n\n names[i] = col\n counts[col] = cur_count + 1\n\n return names\n\n def _maybe_make_multi_index_columns(self, columns, col_names=None):\n # possibly create a column mi here\n if _is_potential_multi_index(columns):\n columns = MultiIndex.from_tuples(columns, names=col_names)\n return columns\n\n def _make_index(self, data, alldata, columns, indexnamerow=False):\n if not _is_index_col(self.index_col) or not self.index_col:\n index = None\n\n elif not self._has_complex_date_col:\n index = self._get_simple_index(alldata, columns)\n index = self._agg_index(index)\n elif self._has_complex_date_col:\n if not self._name_processed:\n (self.index_names, _,\n self.index_col) = _clean_index_names(list(columns),\n self.index_col,\n self.unnamed_cols)\n self._name_processed = True\n index = self._get_complex_date_index(data, columns)\n index = self._agg_index(index, try_parse_dates=False)\n\n # add names for the index\n if indexnamerow:\n coffset = len(indexnamerow) - len(columns)\n index = index.set_names(indexnamerow[:coffset])\n\n # maybe create a mi on the columns\n columns = self._maybe_make_multi_index_columns(columns, self.col_names)\n\n return index, columns\n\n _implicit_index = False\n\n def _get_simple_index(self, data, columns):\n def ix(col):\n if not 
isinstance(col, compat.string_types):\n return col\n raise ValueError('Index %s invalid' % col)\n\n to_remove = []\n index = []\n for idx in self.index_col:\n i = ix(idx)\n to_remove.append(i)\n index.append(data[i])\n\n # remove index items from content and columns, don't pop in\n # loop\n for i in reversed(sorted(to_remove)):\n data.pop(i)\n if not self._implicit_index:\n columns.pop(i)\n\n return index\n\n def _get_complex_date_index(self, data, col_names):\n def _get_name(icol):\n if isinstance(icol, compat.string_types):\n return icol\n\n if col_names is None:\n raise ValueError(('Must supply column order to use %s as '\n 'index') % str(icol))\n\n for i, c in enumerate(col_names):\n if i == icol:\n return c\n\n to_remove = []\n index = []\n for idx in self.index_col:\n name = _get_name(idx)\n to_remove.append(name)\n index.append(data[name])\n\n # remove index items from content and columns, don't pop in\n # loop\n for c in reversed(sorted(to_remove)):\n data.pop(c)\n col_names.remove(c)\n\n return index\n\n def _agg_index(self, index, try_parse_dates=True):\n arrays = []\n\n for i, arr in enumerate(index):\n\n if try_parse_dates and self._should_parse_dates(i):\n arr = self._date_conv(arr)\n\n if self.na_filter:\n col_na_values = self.na_values\n col_na_fvalues = self.na_fvalues\n else:\n col_na_values = set()\n col_na_fvalues = set()\n\n if isinstance(self.na_values, dict):\n col_name = self.index_names[i]\n if col_name is not None:\n col_na_values, col_na_fvalues = _get_na_values(\n col_name, self.na_values, self.na_fvalues,\n self.keep_default_na)\n\n arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)\n arrays.append(arr)\n\n names = self.index_names\n index = ensure_index_from_sequences(arrays, names)\n\n return index\n\n def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,\n converters=None, dtypes=None):\n result = {}\n for c, values in compat.iteritems(dct):\n conv_f = None if converters is None else converters.get(c, None)\n if isinstance(dtypes, dict):\n cast_type = dtypes.get(c, None)\n else:\n # single dtype or None\n cast_type = dtypes\n\n if self.na_filter:\n col_na_values, col_na_fvalues = _get_na_values(\n c, na_values, na_fvalues, self.keep_default_na)\n else:\n col_na_values, col_na_fvalues = set(), set()\n\n if conv_f is not None:\n # conv_f applied to data before inference\n if cast_type is not None:\n warnings.warn((\"Both a converter and dtype were specified \"\n \"for column {0} - only the converter will \"\n \"be used\").format(c), ParserWarning,\n stacklevel=7)\n\n try:\n values = lib.map_infer(values, conv_f)\n except ValueError:\n mask = algorithms.isin(\n values, list(na_values)).view(np.uint8)\n values = lib.map_infer_mask(values, conv_f, mask)\n\n cvals, na_count = self._infer_types(\n values, set(col_na_values) | col_na_fvalues,\n try_num_bool=False)\n else:\n # skip inference if specified dtype is object\n try_num_bool = not (cast_type and is_string_dtype(cast_type))\n\n # general type inference and conversion\n cvals, na_count = self._infer_types(\n values, set(col_na_values) | col_na_fvalues,\n try_num_bool)\n\n # type specified in dtype param\n if cast_type and not is_dtype_equal(cvals, cast_type):\n cvals = self._cast_types(cvals, cast_type, c)\n\n result[c] = cvals\n if verbose and na_count:\n print('Filled %d NA values in column %s' % (na_count, str(c)))\n return result\n\n def _infer_types(self, values, na_values, try_num_bool=True):\n \"\"\"\n Infer types of values, possibly casting\n\n Parameters\n ----------\n 
values : ndarray\n na_values : set\n try_num_bool : bool, default try\n try to cast values to numeric (first preference) or boolean\n\n Returns:\n --------\n converted : ndarray\n na_count : int\n \"\"\"\n na_count = 0\n if issubclass(values.dtype.type, (np.number, np.bool_)):\n mask = algorithms.isin(values, list(na_values))\n na_count = mask.sum()\n if na_count > 0:\n if is_integer_dtype(values):\n values = values.astype(np.float64)\n np.putmask(values, mask, np.nan)\n return values, na_count\n\n if try_num_bool:\n try:\n result = lib.maybe_convert_numeric(values, na_values, False)\n na_count = isna(result).sum()\n except Exception:\n result = values\n if values.dtype == np.object_:\n na_count = parsers.sanitize_objects(result,\n na_values, False)\n else:\n result = values\n if values.dtype == np.object_:\n na_count = parsers.sanitize_objects(values, na_values, False)\n\n if result.dtype == np.object_ and try_num_bool:\n result = libops.maybe_convert_bool(np.asarray(values),\n true_values=self.true_values,\n false_values=self.false_values)\n\n return result, na_count\n\n def _cast_types(self, values, cast_type, column):\n \"\"\"\n Cast values to specified type\n\n Parameters\n ----------\n values : ndarray\n cast_type : string or np.dtype\n dtype to cast values to\n column : string\n column name - used only for error reporting\n\n Returns\n -------\n converted : ndarray\n \"\"\"\n\n if is_categorical_dtype(cast_type):\n known_cats = (isinstance(cast_type, CategoricalDtype) and\n cast_type.categories is not None)\n\n if not is_object_dtype(values) and not known_cats:\n # XXX this is for consistency with\n # c-parser which parses all categories\n # as strings\n values = astype_nansafe(values, str)\n\n cats = Index(values).unique().dropna()\n values = Categorical._from_inferred_categories(\n cats, cats.get_indexer(values), cast_type\n )\n\n else:\n try:\n values = astype_nansafe(values, cast_type,\n copy=True, skipna=True)\n except ValueError:\n raise ValueError(\"Unable to convert column %s to \"\n \"type %s\" % (column, cast_type))\n return values\n\n def _do_date_conversions(self, names, data):\n # returns data, columns\n\n if self.parse_dates is not None:\n data, names = _process_date_conversion(\n data, self._date_conv, self.parse_dates, self.index_col,\n self.index_names, names, keep_date_col=self.keep_date_col)\n\n return names, data\n\n\nclass CParserWrapper(ParserBase):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, src, **kwds):\n self.kwds = kwds\n kwds = kwds.copy()\n\n ParserBase.__init__(self, kwds)\n\n if (kwds.get('compression') is None\n and 'utf-16' in (kwds.get('encoding') or '')):\n # if source is utf-16 plain text, convert source to utf-8\n if isinstance(src, compat.string_types):\n src = open(src, 'rb')\n self.handles.append(src)\n src = UTF8Recoder(src, kwds['encoding'])\n kwds['encoding'] = 'utf-8'\n\n # #2442\n kwds['allow_leading_cols'] = self.index_col is not False\n\n # GH20529, validate usecol arg before TextReader\n self.usecols, self.usecols_dtype = _validate_usecols_arg(\n kwds['usecols'])\n kwds['usecols'] = self.usecols\n\n self._reader = parsers.TextReader(src, **kwds)\n self.unnamed_cols = self._reader.unnamed_cols\n\n passed_names = self.names is None\n\n if self._reader.header is None:\n self.names = None\n else:\n if len(self._reader.header) > 1:\n # we have a multi index in the columns\n self.names, self.index_names, self.col_names, passed_names = (\n self._extract_multi_indexer_columns(\n self._reader.header, self.index_names, self.col_names,\n 
passed_names\n )\n )\n else:\n self.names = list(self._reader.header[0])\n\n if self.names is None:\n if self.prefix:\n self.names = ['%s%d' % (self.prefix, i)\n for i in range(self._reader.table_width)]\n else:\n self.names = lrange(self._reader.table_width)\n\n # gh-9755\n #\n # need to set orig_names here first\n # so that proper indexing can be done\n # with _set_noconvert_columns\n #\n # once names has been filtered, we will\n # then set orig_names again to names\n self.orig_names = self.names[:]\n\n if self.usecols:\n usecols = _evaluate_usecols(self.usecols, self.orig_names)\n\n # GH 14671\n if (self.usecols_dtype == 'string' and\n not set(usecols).issubset(self.orig_names)):\n _validate_usecols_names(usecols, self.orig_names)\n\n if len(self.names) > len(usecols):\n self.names = [n for i, n in enumerate(self.names)\n if (i in usecols or n in usecols)]\n\n if len(self.names) < len(usecols):\n _validate_usecols_names(usecols, self.names)\n\n self._set_noconvert_columns()\n\n self.orig_names = self.names\n\n if not self._has_complex_date_col:\n if (self._reader.leading_cols == 0 and\n _is_index_col(self.index_col)):\n\n self._name_processed = True\n (index_names, self.names,\n self.index_col) = _clean_index_names(self.names,\n self.index_col,\n self.unnamed_cols)\n\n if self.index_names is None:\n self.index_names = index_names\n\n if self._reader.header is None and not passed_names:\n self.index_names = [None] * len(self.index_names)\n\n self._implicit_index = self._reader.leading_cols > 0\n\n def close(self):\n for f in self.handles:\n f.close()\n\n # close additional handles opened by C parser (for compression)\n try:\n self._reader.close()\n except ValueError:\n pass\n\n def _set_noconvert_columns(self):\n \"\"\"\n Set the columns that should not undergo dtype conversions.\n\n Currently, any column that is involved with date parsing will not\n undergo such conversions.\n \"\"\"\n names = self.orig_names\n if self.usecols_dtype == 'integer':\n # A set of integers will be converted to a list in\n # the correct order every single time.\n usecols = list(self.usecols)\n usecols.sort()\n elif (callable(self.usecols) or\n self.usecols_dtype not in ('empty', None)):\n # The names attribute should have the correct columns\n # in the proper order for indexing with parse_dates.\n usecols = self.names[:]\n else:\n # Usecols is empty.\n usecols = None\n\n def _set(x):\n if usecols is not None and is_integer(x):\n x = usecols[x]\n\n if not is_integer(x):\n x = names.index(x)\n\n self._reader.set_noconvert(x)\n\n if isinstance(self.parse_dates, list):\n for val in self.parse_dates:\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif isinstance(self.parse_dates, dict):\n for val in self.parse_dates.values():\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif self.parse_dates:\n if isinstance(self.index_col, list):\n for k in self.index_col:\n _set(k)\n elif self.index_col is not None:\n _set(self.index_col)\n\n def set_error_bad_lines(self, status):\n self._reader.set_error_bad_lines(int(status))\n\n def read(self, nrows=None):\n try:\n data = self._reader.read(nrows)\n except StopIteration:\n if self._first_chunk:\n self._first_chunk = False\n names = self._maybe_dedup_names(self.orig_names)\n index, columns, col_dict = _get_empty_meta(\n names, self.index_col, self.index_names,\n dtype=self.kwds.get('dtype'))\n columns = self._maybe_make_multi_index_columns(\n columns, self.col_names)\n\n if self.usecols is not None:\n columns = 
self._filter_usecols(columns)\n\n col_dict = dict(filter(lambda item: item[0] in columns,\n col_dict.items()))\n\n return index, columns, col_dict\n\n else:\n raise\n\n # Done with first read, next time raise StopIteration\n self._first_chunk = False\n\n names = self.names\n\n if self._reader.leading_cols:\n if self._has_complex_date_col:\n raise NotImplementedError('file structure not yet supported')\n\n # implicit index, no index names\n arrays = []\n\n for i in range(self._reader.leading_cols):\n if self.index_col is None:\n values = data.pop(i)\n else:\n values = data.pop(self.index_col[i])\n\n values = self._maybe_parse_dates(values, i,\n try_parse_dates=True)\n arrays.append(values)\n\n index = ensure_index_from_sequences(arrays)\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n names = self._maybe_dedup_names(names)\n\n # rename dict keys\n data = sorted(data.items())\n data = {k: v for k, (i, v) in zip(names, data)}\n\n names, data = self._do_date_conversions(names, data)\n\n else:\n # rename dict keys\n data = sorted(data.items())\n\n # ugh, mutation\n names = list(self.orig_names)\n names = self._maybe_dedup_names(names)\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n # columns as list\n alldata = [x[1] for x in data]\n\n data = {k: v for k, (i, v) in zip(names, data)}\n\n names, data = self._do_date_conversions(names, data)\n index, names = self._make_index(data, alldata, names)\n\n # maybe create a mi on the columns\n names = self._maybe_make_multi_index_columns(names, self.col_names)\n\n return index, names, data\n\n def _filter_usecols(self, names):\n # hackish\n usecols = _evaluate_usecols(self.usecols, names)\n if usecols is not None and len(names) != len(usecols):\n names = [name for i, name in enumerate(names)\n if i in usecols or name in usecols]\n return names\n\n def _get_index_names(self):\n names = list(self._reader.header[0])\n idx_names = None\n\n if self._reader.leading_cols == 0 and self.index_col is not None:\n (idx_names, names,\n self.index_col) = _clean_index_names(names, self.index_col,\n self.unnamed_cols)\n\n return names, idx_names\n\n def _maybe_parse_dates(self, values, index, try_parse_dates=True):\n if try_parse_dates and self._should_parse_dates(index):\n values = self._date_conv(values)\n return values\n\n\ndef TextParser(*args, **kwds):\n \"\"\"\n Converts lists of lists/tuples into DataFrames with proper type inference\n and optional (e.g. string to datetime) conversion. Also enables iterating\n lazily over chunks of large files\n\n Parameters\n ----------\n data : file-like object or list\n delimiter : separator character to use\n dialect : str or csv.Dialect instance, default None\n Ignored if delimiter is longer than 1 character\n names : sequence, default\n header : int, default 0\n Row to use to parse column labels. Defaults to the first row. 
Prior\n rows will be discarded\n index_col : int or list, default None\n Column or columns to use as the (possibly hierarchical) index\n has_index_names: boolean, default False\n True if the cols defined in index_col have an index name and are\n not in the header\n na_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN.\n keep_default_na : bool, default True\n thousands : str, default None\n Thousands separator\n comment : str, default None\n Comment out remainder of line\n parse_dates : boolean, default False\n keep_date_col : boolean, default False\n date_parser : function, default None\n skiprows : list of integers\n Row numbers to skip\n skipfooter : int\n Number of line at bottom of file to skip\n converters : dict, default None\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n encoding : string, default None\n Encoding to use for UTF when reading/writing (ex. 'utf-8')\n squeeze : boolean, default False\n returns Series if only one column\n infer_datetime_format: boolean, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. If the format\n can be inferred, there often will be a large parsing speed-up.\n float_precision : string, default None\n Specifies which converter the C engine should use for floating-point\n values. The options are None for the ordinary converter,\n 'high' for the high-precision converter, and 'round_trip' for the\n round-trip converter.\n \"\"\"\n kwds['engine'] = 'python'\n return TextFileReader(*args, **kwds)\n\n\ndef count_empty_vals(vals):\n return sum(1 for v in vals if v == '' or v is None)\n\n\nclass PythonParser(ParserBase):\n\n def __init__(self, f, **kwds):\n \"\"\"\n Workhorse function for processing nested list into DataFrame\n\n Should be replaced by np.genfromtxt eventually?\n \"\"\"\n ParserBase.__init__(self, kwds)\n\n self.data = None\n self.buf = []\n self.pos = 0\n self.line_pos = 0\n\n self.encoding = kwds['encoding']\n self.compression = kwds['compression']\n self.memory_map = kwds['memory_map']\n self.skiprows = kwds['skiprows']\n\n if callable(self.skiprows):\n self.skipfunc = self.skiprows\n else:\n self.skipfunc = lambda x: x in self.skiprows\n\n self.skipfooter = _validate_skipfooter_arg(kwds['skipfooter'])\n self.delimiter = kwds['delimiter']\n\n self.quotechar = kwds['quotechar']\n if isinstance(self.quotechar, compat.text_type):\n self.quotechar = str(self.quotechar)\n\n self.escapechar = kwds['escapechar']\n self.doublequote = kwds['doublequote']\n self.skipinitialspace = kwds['skipinitialspace']\n self.lineterminator = kwds['lineterminator']\n self.quoting = kwds['quoting']\n self.usecols, _ = _validate_usecols_arg(kwds['usecols'])\n self.skip_blank_lines = kwds['skip_blank_lines']\n\n self.warn_bad_lines = kwds['warn_bad_lines']\n self.error_bad_lines = kwds['error_bad_lines']\n\n self.names_passed = kwds['names'] or None\n\n self.has_index_names = False\n if 'has_index_names' in kwds:\n self.has_index_names = kwds['has_index_names']\n\n self.verbose = kwds['verbose']\n self.converters = kwds['converters']\n self.dtype = kwds['dtype']\n\n self.thousands = kwds['thousands']\n self.decimal = kwds['decimal']\n\n self.comment = kwds['comment']\n self._comment_lines = []\n\n mode = 'r' if PY3 else 'rb'\n f, handles = 
_get_handle(f, mode, encoding=self.encoding,\n compression=self.compression,\n memory_map=self.memory_map)\n self.handles.extend(handles)\n\n # Set self.data to something that can read lines.\n if hasattr(f, 'readline'):\n self._make_reader(f)\n else:\n self.data = f\n\n # Get columns in two steps: infer from data, then\n # infer column indices from self.usecols if it is specified.\n self._col_indices = None\n (self.columns, self.num_original_columns,\n self.unnamed_cols) = self._infer_columns()\n\n # Now self.columns has the set of columns that we will process.\n # The original set is stored in self.original_columns.\n if len(self.columns) > 1:\n # we are processing a multi index column\n self.columns, self.index_names, self.col_names, _ = (\n self._extract_multi_indexer_columns(\n self.columns, self.index_names, self.col_names\n )\n )\n # Update list of original names to include all indices.\n self.num_original_columns = len(self.columns)\n else:\n self.columns = self.columns[0]\n\n # get popped off for index\n self.orig_names = list(self.columns)\n\n # needs to be cleaned/refactored\n # multiple date column thing turning into a real spaghetti factory\n\n if not self._has_complex_date_col:\n (index_names, self.orig_names, self.columns) = (\n self._get_index_name(self.columns))\n self._name_processed = True\n if self.index_names is None:\n self.index_names = index_names\n\n if self.parse_dates:\n self._no_thousands_columns = self._set_no_thousands_columns()\n else:\n self._no_thousands_columns = None\n\n if len(self.decimal) != 1:\n raise ValueError('Only length-1 decimal markers supported')\n\n if self.thousands is None:\n self.nonnum = re.compile('[^-^0-9^%s]+' % self.decimal)\n else:\n self.nonnum = re.compile('[^-^0-9^%s^%s]+' % (self.thousands,\n self.decimal))\n\n def _set_no_thousands_columns(self):\n # Create a set of column ids that are not to be stripped of thousands\n # operators.\n noconvert_columns = set()\n\n def _set(x):\n if is_integer(x):\n noconvert_columns.add(x)\n else:\n noconvert_columns.add(self.columns.index(x))\n\n if isinstance(self.parse_dates, list):\n for val in self.parse_dates:\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif isinstance(self.parse_dates, dict):\n for val in self.parse_dates.values():\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif self.parse_dates:\n if isinstance(self.index_col, list):\n for k in self.index_col:\n _set(k)\n elif self.index_col is not None:\n _set(self.index_col)\n\n return noconvert_columns\n\n def _make_reader(self, f):\n sep = self.delimiter\n\n if sep is None or len(sep) == 1:\n if self.lineterminator:\n raise ValueError('Custom line terminators not supported in '\n 'python parser (yet)')\n\n class MyDialect(csv.Dialect):\n delimiter = self.delimiter\n quotechar = self.quotechar\n escapechar = self.escapechar\n doublequote = self.doublequote\n skipinitialspace = self.skipinitialspace\n quoting = self.quoting\n lineterminator = '\\n'\n\n dia = MyDialect\n\n sniff_sep = True\n\n if sep is not None:\n sniff_sep = False\n dia.delimiter = sep\n # attempt to sniff the delimiter\n if sniff_sep:\n line = f.readline()\n while self.skipfunc(self.pos):\n self.pos += 1\n line = f.readline()\n\n line = self._check_comments([line])[0]\n\n self.pos += 1\n self.line_pos += 1\n sniffed = csv.Sniffer().sniff(line)\n dia.delimiter = sniffed.delimiter\n if self.encoding is not None:\n self.buf.extend(list(\n UnicodeReader(StringIO(line),\n dialect=dia,\n 
encoding=self.encoding)))\n else:\n self.buf.extend(list(csv.reader(StringIO(line),\n dialect=dia)))\n\n if self.encoding is not None:\n reader = UnicodeReader(f, dialect=dia,\n encoding=self.encoding,\n strict=True)\n else:\n reader = csv.reader(f, dialect=dia,\n strict=True)\n\n else:\n def _read():\n line = f.readline()\n\n if compat.PY2 and self.encoding:\n line = line.decode(self.encoding)\n\n pat = re.compile(sep)\n yield pat.split(line.strip())\n for line in f:\n yield pat.split(line.strip())\n reader = _read()\n\n self.data = reader\n\n def read(self, rows=None):\n try:\n content = self._get_lines(rows)\n except StopIteration:\n if self._first_chunk:\n content = []\n else:\n raise\n\n # done with first read, next time raise StopIteration\n self._first_chunk = False\n\n columns = list(self.orig_names)\n if not len(content): # pragma: no cover\n # DataFrame with the right metadata, even though it's length 0\n names = self._maybe_dedup_names(self.orig_names)\n index, columns, col_dict = _get_empty_meta(\n names, self.index_col, self.index_names, self.dtype)\n columns = self._maybe_make_multi_index_columns(\n columns, self.col_names)\n return index, columns, col_dict\n\n # handle new style for names in index\n count_empty_content_vals = count_empty_vals(content[0])\n indexnamerow = None\n if self.has_index_names and count_empty_content_vals == len(columns):\n indexnamerow = content[0]\n content = content[1:]\n\n alldata = self._rows_to_cols(content)\n data = self._exclude_implicit_index(alldata)\n\n columns = self._maybe_dedup_names(self.columns)\n columns, data = self._do_date_conversions(columns, data)\n\n data = self._convert_data(data)\n index, columns = self._make_index(data, alldata, columns, indexnamerow)\n\n return index, columns, data\n\n def _exclude_implicit_index(self, alldata):\n names = self._maybe_dedup_names(self.orig_names)\n\n if self._implicit_index:\n excl_indices = self.index_col\n\n data = {}\n offset = 0\n for i, col in enumerate(names):\n while i + offset in excl_indices:\n offset += 1\n data[col] = alldata[i + offset]\n else:\n data = {k: v for k, v in zip(names, alldata)}\n\n return data\n\n # legacy\n def get_chunk(self, size=None):\n if size is None:\n size = self.chunksize\n return self.read(rows=size)\n\n def _convert_data(self, data):\n # apply converters\n def _clean_mapping(mapping):\n \"converts col numbers to names\"\n clean = {}\n for col, v in compat.iteritems(mapping):\n if isinstance(col, int) and col not in self.orig_names:\n col = self.orig_names[col]\n clean[col] = v\n return clean\n\n clean_conv = _clean_mapping(self.converters)\n if not isinstance(self.dtype, dict):\n # handles single dtype applied to all columns\n clean_dtypes = self.dtype\n else:\n clean_dtypes = _clean_mapping(self.dtype)\n\n # Apply NA values.\n clean_na_values = {}\n clean_na_fvalues = {}\n\n if isinstance(self.na_values, dict):\n for col in self.na_values:\n na_value = self.na_values[col]\n na_fvalue = self.na_fvalues[col]\n\n if isinstance(col, int) and col not in self.orig_names:\n col = self.orig_names[col]\n\n clean_na_values[col] = na_value\n clean_na_fvalues[col] = na_fvalue\n else:\n clean_na_values = self.na_values\n clean_na_fvalues = self.na_fvalues\n\n return self._convert_to_ndarrays(data, clean_na_values,\n clean_na_fvalues, self.verbose,\n clean_conv, clean_dtypes)\n\n def _infer_columns(self):\n names = self.names\n num_original_columns = 0\n clear_buffer = True\n unnamed_cols = set()\n\n if self.header is not None:\n header = self.header\n\n if 
isinstance(header, (list, tuple, np.ndarray)):\n have_mi_columns = len(header) > 1\n # we have a mi columns, so read an extra line\n if have_mi_columns:\n header = list(header) + [header[-1] + 1]\n else:\n have_mi_columns = False\n header = [header]\n\n columns = []\n for level, hr in enumerate(header):\n try:\n line = self._buffered_line()\n\n while self.line_pos <= hr:\n line = self._next_line()\n\n except StopIteration:\n if self.line_pos < hr:\n raise ValueError(\n 'Passed header=%s but only %d lines in file'\n % (hr, self.line_pos + 1))\n\n # We have an empty file, so check\n # if columns are provided. That will\n # serve as the 'line' for parsing\n if have_mi_columns and hr > 0:\n if clear_buffer:\n self._clear_buffer()\n columns.append([None] * len(columns[-1]))\n return columns, num_original_columns, unnamed_cols\n\n if not self.names:\n raise EmptyDataError(\n \"No columns to parse from file\")\n\n line = self.names[:]\n\n this_columns = []\n this_unnamed_cols = []\n\n for i, c in enumerate(line):\n if c == '':\n if have_mi_columns:\n col_name = (\"Unnamed: {i}_level_{level}\"\n .format(i=i, level=level))\n else:\n col_name = \"Unnamed: {i}\".format(i=i)\n\n this_unnamed_cols.append(i)\n this_columns.append(col_name)\n else:\n this_columns.append(c)\n\n if not have_mi_columns and self.mangle_dupe_cols:\n counts = defaultdict(int)\n\n for i, col in enumerate(this_columns):\n cur_count = counts[col]\n\n while cur_count > 0:\n counts[col] = cur_count + 1\n col = \"%s.%d\" % (col, cur_count)\n cur_count = counts[col]\n\n this_columns[i] = col\n counts[col] = cur_count + 1\n elif have_mi_columns:\n\n # if we have grabbed an extra line, but its not in our\n # format so save in the buffer, and create an blank extra\n # line for the rest of the parsing code\n if hr == header[-1]:\n lc = len(this_columns)\n ic = (len(self.index_col)\n if self.index_col is not None else 0)\n unnamed_count = len(this_unnamed_cols)\n\n if lc != unnamed_count and lc - ic > unnamed_count:\n clear_buffer = False\n this_columns = [None] * lc\n self.buf = [self.buf[-1]]\n\n columns.append(this_columns)\n unnamed_cols.update({this_columns[i]\n for i in this_unnamed_cols})\n\n if len(columns) == 1:\n num_original_columns = len(this_columns)\n\n if clear_buffer:\n self._clear_buffer()\n\n if names is not None:\n if ((self.usecols is not None and\n len(names) != len(self.usecols)) or\n (self.usecols is None and\n len(names) != len(columns[0]))):\n raise ValueError('Number of passed names did not match '\n 'number of header fields in the file')\n if len(columns) > 1:\n raise TypeError('Cannot pass names with multi-index '\n 'columns')\n\n if self.usecols is not None:\n # Set _use_cols. 
We don't store columns because they are\n # overwritten.\n self._handle_usecols(columns, names)\n else:\n self._col_indices = None\n num_original_columns = len(names)\n columns = [names]\n else:\n columns = self._handle_usecols(columns, columns[0])\n else:\n try:\n line = self._buffered_line()\n\n except StopIteration:\n if not names:\n raise EmptyDataError(\n \"No columns to parse from file\")\n\n line = names[:]\n\n ncols = len(line)\n num_original_columns = ncols\n\n if not names:\n if self.prefix:\n columns = [['%s%d' % (self.prefix, i)\n for i in range(ncols)]]\n else:\n columns = [lrange(ncols)]\n columns = self._handle_usecols(columns, columns[0])\n else:\n if self.usecols is None or len(names) >= num_original_columns:\n columns = self._handle_usecols([names], names)\n num_original_columns = len(names)\n else:\n if (not callable(self.usecols) and\n len(names) != len(self.usecols)):\n raise ValueError(\n 'Number of passed names did not match number of '\n 'header fields in the file'\n )\n # Ignore output but set used columns.\n self._handle_usecols([names], names)\n columns = [names]\n num_original_columns = ncols\n\n return columns, num_original_columns, unnamed_cols\n\n def _handle_usecols(self, columns, usecols_key):\n \"\"\"\n Sets self._col_indices\n\n usecols_key is used if there are string usecols.\n \"\"\"\n if self.usecols is not None:\n if callable(self.usecols):\n col_indices = _evaluate_usecols(self.usecols, usecols_key)\n elif any(isinstance(u, string_types) for u in self.usecols):\n if len(columns) > 1:\n raise ValueError(\"If using multiple headers, usecols must \"\n \"be integers.\")\n col_indices = []\n\n for col in self.usecols:\n if isinstance(col, string_types):\n try:\n col_indices.append(usecols_key.index(col))\n except ValueError:\n _validate_usecols_names(self.usecols, usecols_key)\n else:\n col_indices.append(col)\n else:\n col_indices = self.usecols\n\n columns = [[n for i, n in enumerate(column) if i in col_indices]\n for column in columns]\n self._col_indices = col_indices\n return columns\n\n def _buffered_line(self):\n \"\"\"\n Return a line from buffer, filling buffer if required.\n \"\"\"\n if len(self.buf) > 0:\n return self.buf[0]\n else:\n return self._next_line()\n\n def _check_for_bom(self, first_row):\n \"\"\"\n Checks whether the file begins with the BOM character.\n If it does, remove it. In addition, if there is quoting\n in the field subsequent to the BOM, remove it as well\n because it technically takes place at the beginning of\n the name, not the middle of it.\n \"\"\"\n # first_row will be a list, so we need to check\n # that that list is not empty before proceeding.\n if not first_row:\n return first_row\n\n # The first element of this row is the one that could have the\n # BOM that we want to remove. 
Check that the first element is a\n # string before proceeding.\n if not isinstance(first_row[0], compat.string_types):\n return first_row\n\n # Check that the string is not empty, as that would\n # obviously not have a BOM at the start of it.\n if not first_row[0]:\n return first_row\n\n # Since the string is non-empty, check that it does\n # in fact begin with a BOM.\n first_elt = first_row[0][0]\n\n # This is to avoid warnings we get in Python 2.x if\n # we find ourselves comparing with non-Unicode\n if compat.PY2 and not isinstance(first_elt, unicode): # noqa\n try:\n first_elt = u(first_elt)\n except UnicodeDecodeError:\n return first_row\n\n if first_elt != _BOM:\n return first_row\n\n first_row = first_row[0]\n\n if len(first_row) > 1 and first_row[1] == self.quotechar:\n start = 2\n quote = first_row[1]\n end = first_row[2:].index(quote) + 2\n\n # Extract the data between the quotation marks\n new_row = first_row[start:end]\n\n # Extract any remaining data after the second\n # quotation mark.\n if len(first_row) > end + 1:\n new_row += first_row[end + 1:]\n return [new_row]\n elif len(first_row) > 1:\n return [first_row[1:]]\n else:\n # First row is just the BOM, so we\n # return an empty string.\n return [\"\"]\n\n def _is_line_empty(self, line):\n \"\"\"\n Check if a line is empty or not.\n\n Parameters\n ----------\n line : str, array-like\n The line of data to check.\n\n Returns\n -------\n boolean : Whether or not the line is empty.\n \"\"\"\n return not line or all(not x for x in line)\n\n def _next_line(self):\n if isinstance(self.data, list):\n while self.skipfunc(self.pos):\n self.pos += 1\n\n while True:\n try:\n line = self._check_comments([self.data[self.pos]])[0]\n self.pos += 1\n # either uncommented or blank to begin with\n if (not self.skip_blank_lines and\n (self._is_line_empty(\n self.data[self.pos - 1]) or line)):\n break\n elif self.skip_blank_lines:\n ret = self._remove_empty_lines([line])\n if ret:\n line = ret[0]\n break\n except IndexError:\n raise StopIteration\n else:\n while self.skipfunc(self.pos):\n self.pos += 1\n next(self.data)\n\n while True:\n orig_line = self._next_iter_line(row_num=self.pos + 1)\n self.pos += 1\n\n if orig_line is not None:\n line = self._check_comments([orig_line])[0]\n\n if self.skip_blank_lines:\n ret = self._remove_empty_lines([line])\n\n if ret:\n line = ret[0]\n break\n elif self._is_line_empty(orig_line) or line:\n break\n\n # This was the first line of the file,\n # which could contain the BOM at the\n # beginning of it.\n if self.pos == 1:\n line = self._check_for_bom(line)\n\n self.line_pos += 1\n self.buf.append(line)\n return line\n\n def _alert_malformed(self, msg, row_num):\n \"\"\"\n Alert a user about a malformed row.\n\n If `self.error_bad_lines` is True, the alert will be `ParserError`.\n If `self.warn_bad_lines` is True, the alert will be printed out.\n\n Parameters\n ----------\n msg : The error message to display.\n row_num : The row number where the parsing error occurred.\n Because this row number is displayed, we 1-index,\n even though we 0-index internally.\n \"\"\"\n\n if self.error_bad_lines:\n raise ParserError(msg)\n elif self.warn_bad_lines:\n base = 'Skipping line {row_num}: '.format(row_num=row_num)\n sys.stderr.write(base + msg + '\\n')\n\n def _next_iter_line(self, row_num):\n \"\"\"\n Wrapper around iterating through `self.data` (CSV source).\n\n When a CSV error is raised, we check for specific\n error messages that allow us to customize the\n error message displayed to the user.\n\n 
Parameters\n ----------\n row_num : The row number of the line being parsed.\n \"\"\"\n\n try:\n return next(self.data)\n except csv.Error as e:\n if self.warn_bad_lines or self.error_bad_lines:\n msg = str(e)\n\n if 'NULL byte' in msg:\n msg = ('NULL byte detected. This byte '\n 'cannot be processed in Python\\'s '\n 'native csv library at the moment, '\n 'so please pass in engine=\\'c\\' instead')\n\n if self.skipfooter > 0:\n reason = ('Error could possibly be due to '\n 'parsing errors in the skipped footer rows '\n '(the skipfooter keyword is only applied '\n 'after Python\\'s csv library has parsed '\n 'all rows).')\n msg += '. ' + reason\n\n self._alert_malformed(msg, row_num)\n return None\n\n def _check_comments(self, lines):\n if self.comment is None:\n return lines\n ret = []\n for l in lines:\n rl = []\n for x in l:\n if (not isinstance(x, compat.string_types) or\n self.comment not in x):\n rl.append(x)\n else:\n x = x[:x.find(self.comment)]\n if len(x) > 0:\n rl.append(x)\n break\n ret.append(rl)\n return ret\n\n def _remove_empty_lines(self, lines):\n \"\"\"\n Iterate through the lines and remove any that are\n either empty or contain only one whitespace value\n\n Parameters\n ----------\n lines : array-like\n The array of lines that we are to filter.\n\n Returns\n -------\n filtered_lines : array-like\n The same array of lines with the \"empty\" ones removed.\n \"\"\"\n\n ret = []\n for l in lines:\n # Remove empty lines and lines with only one whitespace value\n if (len(l) > 1 or len(l) == 1 and\n (not isinstance(l[0], compat.string_types) or\n l[0].strip())):\n ret.append(l)\n return ret\n\n def _check_thousands(self, lines):\n if self.thousands is None:\n return lines\n\n return self._search_replace_num_columns(lines=lines,\n search=self.thousands,\n replace='')\n\n def _search_replace_num_columns(self, lines, search, replace):\n ret = []\n for l in lines:\n rl = []\n for i, x in enumerate(l):\n if (not isinstance(x, compat.string_types) or\n search not in x or\n (self._no_thousands_columns and\n i in self._no_thousands_columns) or\n self.nonnum.search(x.strip())):\n rl.append(x)\n else:\n rl.append(x.replace(search, replace))\n ret.append(rl)\n return ret\n\n def _check_decimal(self, lines):\n if self.decimal == _parser_defaults['decimal']:\n return lines\n\n return self._search_replace_num_columns(lines=lines,\n search=self.decimal,\n replace='.')\n\n def _clear_buffer(self):\n self.buf = []\n\n _implicit_index = False\n\n def _get_index_name(self, columns):\n \"\"\"\n Try several cases to get lines:\n\n 0) There are headers on row 0 and row 1 and their\n total summed lengths equals the length of the next line.\n Treat row 0 as columns and row 1 as indices\n 1) Look for implicit index: there are more columns\n on row 1 than row 0. 
If this is true, assume that row\n 1 lists index columns and row 0 lists normal columns.\n 2) Get index from the columns if it was listed.\n \"\"\"\n orig_names = list(columns)\n columns = list(columns)\n\n try:\n line = self._next_line()\n except StopIteration:\n line = None\n\n try:\n next_line = self._next_line()\n except StopIteration:\n next_line = None\n\n # implicitly index_col=0 b/c 1 fewer column names\n implicit_first_cols = 0\n if line is not None:\n # leave it 0, #2442\n # Case 1\n if self.index_col is not False:\n implicit_first_cols = len(line) - self.num_original_columns\n\n # Case 0\n if next_line is not None:\n if len(next_line) == len(line) + self.num_original_columns:\n # column and index names on diff rows\n self.index_col = lrange(len(line))\n self.buf = self.buf[1:]\n\n for c in reversed(line):\n columns.insert(0, c)\n\n # Update list of original names to include all indices.\n orig_names = list(columns)\n self.num_original_columns = len(columns)\n return line, orig_names, columns\n\n if implicit_first_cols > 0:\n # Case 1\n self._implicit_index = True\n if self.index_col is None:\n self.index_col = lrange(implicit_first_cols)\n\n index_name = None\n\n else:\n # Case 2\n (index_name, columns_,\n self.index_col) = _clean_index_names(columns, self.index_col,\n self.unnamed_cols)\n\n return index_name, orig_names, columns\n\n def _rows_to_cols(self, content):\n col_len = self.num_original_columns\n\n if self._implicit_index:\n col_len += len(self.index_col)\n\n max_len = max(len(row) for row in content)\n\n # Check that there are no rows with too many\n # elements in their row (rows with too few\n # elements are padded with NaN).\n if (max_len > col_len and\n self.index_col is not False and\n self.usecols is None):\n\n footers = self.skipfooter if self.skipfooter else 0\n bad_lines = []\n\n iter_content = enumerate(content)\n content_len = len(content)\n content = []\n\n for (i, l) in iter_content:\n actual_len = len(l)\n\n if actual_len > col_len:\n if self.error_bad_lines or self.warn_bad_lines:\n row_num = self.pos - (content_len - i + footers)\n bad_lines.append((row_num, actual_len))\n\n if self.error_bad_lines:\n break\n else:\n content.append(l)\n\n for row_num, actual_len in bad_lines:\n msg = ('Expected %d fields in line %d, saw %d' %\n (col_len, row_num + 1, actual_len))\n if (self.delimiter and\n len(self.delimiter) > 1 and\n self.quoting != csv.QUOTE_NONE):\n # see gh-13374\n reason = ('Error could possibly be due to quotes being '\n 'ignored when a multi-char delimiter is used.')\n msg += '. 
' + reason\n\n self._alert_malformed(msg, row_num + 1)\n\n # see gh-13320\n zipped_content = list(lib.to_object_array(\n content, min_width=col_len).T)\n\n if self.usecols:\n if self._implicit_index:\n zipped_content = [\n a for i, a in enumerate(zipped_content)\n if (i < len(self.index_col) or\n i - len(self.index_col) in self._col_indices)]\n else:\n zipped_content = [a for i, a in enumerate(zipped_content)\n if i in self._col_indices]\n return zipped_content\n\n def _get_lines(self, rows=None):\n lines = self.buf\n new_rows = None\n\n # already fetched some number\n if rows is not None:\n # we already have the lines in the buffer\n if len(self.buf) >= rows:\n new_rows, self.buf = self.buf[:rows], self.buf[rows:]\n\n # need some lines\n else:\n rows -= len(self.buf)\n\n if new_rows is None:\n if isinstance(self.data, list):\n if self.pos > len(self.data):\n raise StopIteration\n if rows is None:\n new_rows = self.data[self.pos:]\n new_pos = len(self.data)\n else:\n new_rows = self.data[self.pos:self.pos + rows]\n new_pos = self.pos + rows\n\n # Check for stop rows. n.b.: self.skiprows is a set.\n if self.skiprows:\n new_rows = [row for i, row in enumerate(new_rows)\n if not self.skipfunc(i + self.pos)]\n\n lines.extend(new_rows)\n self.pos = new_pos\n\n else:\n new_rows = []\n try:\n if rows is not None:\n for _ in range(rows):\n new_rows.append(next(self.data))\n lines.extend(new_rows)\n else:\n rows = 0\n\n while True:\n new_row = self._next_iter_line(\n row_num=self.pos + rows + 1)\n rows += 1\n\n if new_row is not None:\n new_rows.append(new_row)\n\n except StopIteration:\n if self.skiprows:\n new_rows = [row for i, row in enumerate(new_rows)\n if not self.skipfunc(i + self.pos)]\n lines.extend(new_rows)\n if len(lines) == 0:\n raise\n self.pos += len(new_rows)\n\n self.buf = []\n else:\n lines = new_rows\n\n if self.skipfooter:\n lines = lines[:-self.skipfooter]\n\n lines = self._check_comments(lines)\n if self.skip_blank_lines:\n lines = self._remove_empty_lines(lines)\n lines = self._check_thousands(lines)\n return self._check_decimal(lines)\n\n\ndef _make_date_converter(date_parser=None, dayfirst=False,\n infer_datetime_format=False):\n def converter(*date_cols):\n if date_parser is None:\n strs = _concat_date_cols(date_cols)\n\n try:\n return tools.to_datetime(\n ensure_object(strs),\n utc=None,\n box=False,\n dayfirst=dayfirst,\n errors='ignore',\n infer_datetime_format=infer_datetime_format\n )\n except ValueError:\n return tools.to_datetime(\n parsing.try_parse_dates(strs, dayfirst=dayfirst))\n else:\n try:\n result = tools.to_datetime(\n date_parser(*date_cols), errors='ignore')\n if isinstance(result, datetime.datetime):\n raise Exception('scalar parser')\n return result\n except Exception:\n try:\n return tools.to_datetime(\n parsing.try_parse_dates(_concat_date_cols(date_cols),\n parser=date_parser,\n dayfirst=dayfirst),\n errors='ignore')\n except Exception:\n return generic_parser(date_parser, *date_cols)\n\n return converter\n\n\ndef _process_date_conversion(data_dict, converter, parse_spec,\n index_col, index_names, columns,\n keep_date_col=False):\n def _isindex(colspec):\n return ((isinstance(index_col, list) and\n colspec in index_col) or\n (isinstance(index_names, list) and\n colspec in index_names))\n\n new_cols = []\n new_data = {}\n\n orig_names = columns\n columns = list(columns)\n\n date_cols = set()\n\n if parse_spec is None or isinstance(parse_spec, bool):\n return data_dict, columns\n\n if isinstance(parse_spec, list):\n # list of column lists\n for 
colspec in parse_spec:\n if is_scalar(colspec):\n if isinstance(colspec, int) and colspec not in data_dict:\n colspec = orig_names[colspec]\n if _isindex(colspec):\n continue\n data_dict[colspec] = converter(data_dict[colspec])\n else:\n new_name, col, old_names = _try_convert_dates(\n converter, colspec, data_dict, orig_names)\n if new_name in data_dict:\n raise ValueError('New date column already in dict %s' %\n new_name)\n new_data[new_name] = col\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n elif isinstance(parse_spec, dict):\n # dict of new name to column list\n for new_name, colspec in compat.iteritems(parse_spec):\n if new_name in data_dict:\n raise ValueError('Date column %s already in dict' %\n new_name)\n\n _, col, old_names = _try_convert_dates(converter, colspec,\n data_dict, orig_names)\n\n new_data[new_name] = col\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n data_dict.update(new_data)\n new_cols.extend(columns)\n\n if not keep_date_col:\n for c in list(date_cols):\n data_dict.pop(c)\n new_cols.remove(c)\n\n return data_dict, new_cols\n\n\ndef _try_convert_dates(parser, colspec, data_dict, columns):\n colset = set(columns)\n colnames = []\n\n for c in colspec:\n if c in colset:\n colnames.append(c)\n elif isinstance(c, int) and c not in columns:\n colnames.append(columns[c])\n else:\n colnames.append(c)\n\n new_name = '_'.join(str(x) for x in colnames)\n to_parse = [data_dict[c] for c in colnames if c in data_dict]\n\n new_col = parser(*to_parse)\n return new_name, new_col, colnames\n\n\ndef _clean_na_values(na_values, keep_default_na=True):\n\n if na_values is None:\n if keep_default_na:\n na_values = _NA_VALUES\n else:\n na_values = set()\n na_fvalues = set()\n elif isinstance(na_values, dict):\n old_na_values = na_values.copy()\n na_values = {} # Prevent aliasing.\n\n # Convert the values in the na_values dictionary\n # into array-likes for further use. 
This is also\n # where we append the default NaN values, provided\n # that `keep_default_na=True`.\n for k, v in compat.iteritems(old_na_values):\n if not is_list_like(v):\n v = [v]\n\n if keep_default_na:\n v = set(v) | _NA_VALUES\n\n na_values[k] = v\n na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}\n else:\n if not is_list_like(na_values):\n na_values = [na_values]\n na_values = _stringify_na_values(na_values)\n if keep_default_na:\n na_values = na_values | _NA_VALUES\n\n na_fvalues = _floatify_na_values(na_values)\n\n return na_values, na_fvalues\n\n\ndef _clean_index_names(columns, index_col, unnamed_cols):\n if not _is_index_col(index_col):\n return None, columns, index_col\n\n columns = list(columns)\n\n cp_cols = list(columns)\n index_names = []\n\n # don't mutate\n index_col = list(index_col)\n\n for i, c in enumerate(index_col):\n if isinstance(c, compat.string_types):\n index_names.append(c)\n for j, name in enumerate(cp_cols):\n if name == c:\n index_col[i] = j\n columns.remove(name)\n break\n else:\n name = cp_cols[c]\n columns.remove(name)\n index_names.append(name)\n\n # Only clean index names that were placeholders.\n for i, name in enumerate(index_names):\n if isinstance(name, compat.string_types) and name in unnamed_cols:\n index_names[i] = None\n\n return index_names, columns, index_col\n\n\ndef _get_empty_meta(columns, index_col, index_names, dtype=None):\n columns = list(columns)\n\n # Convert `dtype` to a defaultdict of some kind.\n # This will enable us to write `dtype[col_name]`\n # without worrying about KeyError issues later on.\n if not isinstance(dtype, dict):\n # if dtype == None, default will be np.object.\n default_dtype = dtype or np.object\n dtype = defaultdict(lambda: default_dtype)\n else:\n # Save a copy of the dictionary.\n _dtype = dtype.copy()\n dtype = defaultdict(lambda: np.object)\n\n # Convert column indexes to column names.\n for k, v in compat.iteritems(_dtype):\n col = columns[k] if is_integer(k) else k\n dtype[col] = v\n\n # Even though we have no data, the \"index\" of the empty DataFrame\n # could for example still be an empty MultiIndex. Thus, we need to\n # check whether we have any index columns specified, via either:\n #\n # 1) index_col (column indices)\n # 2) index_names (column names)\n #\n # Both must be non-null to ensure a successful construction. 
Otherwise,\n # we have to create a generic emtpy Index.\n if (index_col is None or index_col is False) or index_names is None:\n index = Index([])\n else:\n data = [Series([], dtype=dtype[name]) for name in index_names]\n index = ensure_index_from_sequences(data, names=index_names)\n index_col.sort()\n\n for i, n in enumerate(index_col):\n columns.pop(n - i)\n\n col_dict = {col_name: Series([], dtype=dtype[col_name])\n for col_name in columns}\n\n return index, columns, col_dict\n\n\ndef _floatify_na_values(na_values):\n # create float versions of the na_values\n result = set()\n for v in na_values:\n try:\n v = float(v)\n if not np.isnan(v):\n result.add(v)\n except (TypeError, ValueError, OverflowError):\n pass\n return result\n\n\ndef _stringify_na_values(na_values):\n \"\"\" return a stringified and numeric for these values \"\"\"\n result = []\n for x in na_values:\n result.append(str(x))\n result.append(x)\n try:\n v = float(x)\n\n # we are like 999 here\n if v == int(v):\n v = int(v)\n result.append(\"%s.0\" % v)\n result.append(str(v))\n\n result.append(v)\n except (TypeError, ValueError, OverflowError):\n pass\n try:\n result.append(int(x))\n except (TypeError, ValueError, OverflowError):\n pass\n return set(result)\n\n\ndef _get_na_values(col, na_values, na_fvalues, keep_default_na):\n \"\"\"\n Get the NaN values for a given column.\n\n Parameters\n ----------\n col : str\n The name of the column.\n na_values : array-like, dict\n The object listing the NaN values as strings.\n na_fvalues : array-like, dict\n The object listing the NaN values as floats.\n keep_default_na : bool\n If `na_values` is a dict, and the column is not mapped in the\n dictionary, whether to return the default NaN values or the empty set.\n\n Returns\n -------\n nan_tuple : A length-two tuple composed of\n\n 1) na_values : the string NaN values for that column.\n 2) na_fvalues : the float NaN values for that column.\n \"\"\"\n\n if isinstance(na_values, dict):\n if col in na_values:\n return na_values[col], na_fvalues[col]\n else:\n if keep_default_na:\n return _NA_VALUES, set()\n\n return set(), set()\n else:\n return na_values, na_fvalues\n\n\ndef _get_col_names(colspec, columns):\n colset = set(columns)\n colnames = []\n for c in colspec:\n if c in colset:\n colnames.append(c)\n elif isinstance(c, int):\n colnames.append(columns[c])\n return colnames\n\n\ndef _concat_date_cols(date_cols):\n if len(date_cols) == 1:\n if compat.PY3:\n return np.array([compat.text_type(x) for x in date_cols[0]],\n dtype=object)\n else:\n return np.array([\n str(x) if not isinstance(x, compat.string_types) else x\n for x in date_cols[0]\n ], dtype=object)\n\n rs = np.array([' '.join(compat.text_type(y) for y in x)\n for x in zip(*date_cols)], dtype=object)\n return rs\n\n\nclass FixedWidthReader(BaseIterator):\n \"\"\"\n A reader of fixed-width lines.\n \"\"\"\n\n def __init__(self, f, colspecs, delimiter, comment, skiprows=None):\n self.f = f\n self.buffer = None\n self.delimiter = '\\r\\n' + delimiter if delimiter else '\\n\\r\\t '\n self.comment = comment\n if colspecs == 'infer':\n self.colspecs = self.detect_colspecs(skiprows=skiprows)\n else:\n self.colspecs = colspecs\n\n if not isinstance(self.colspecs, (tuple, list)):\n raise TypeError(\"column specifications must be a list or tuple, \"\n \"input was a %r\" % type(colspecs).__name__)\n\n for colspec in self.colspecs:\n if not (isinstance(colspec, (tuple, list)) and\n len(colspec) == 2 and\n isinstance(colspec[0], (int, np.integer, type(None))) and\n 
isinstance(colspec[1], (int, np.integer, type(None)))):\n raise TypeError('Each column specification must be '\n '2 element tuple or list of integers')\n\n def get_rows(self, n, skiprows=None):\n \"\"\"\n Read rows from self.f, skipping as specified.\n\n We distinguish buffer_rows (the first <= n lines)\n from the rows returned to detect_colspecs because\n it's simpler to leave the other locations with\n skiprows logic alone than to modify them to deal\n with the fact we skipped some rows here as well.\n\n Parameters\n ----------\n n : int\n Number of rows to read from self.f, not counting\n rows that are skipped.\n skiprows: set, optional\n Indices of rows to skip.\n\n Returns\n -------\n detect_rows : list of str\n A list containing the rows to read.\n\n \"\"\"\n if skiprows is None:\n skiprows = set()\n buffer_rows = []\n detect_rows = []\n for i, row in enumerate(self.f):\n if i not in skiprows:\n detect_rows.append(row)\n buffer_rows.append(row)\n if len(detect_rows) >= n:\n break\n self.buffer = iter(buffer_rows)\n return detect_rows\n\n def detect_colspecs(self, n=100, skiprows=None):\n # Regex escape the delimiters\n delimiters = ''.join(r'\\%s' % x for x in self.delimiter)\n pattern = re.compile('([^%s]+)' % delimiters)\n rows = self.get_rows(n, skiprows)\n if not rows:\n raise EmptyDataError(\"No rows from which to infer column width\")\n max_len = max(map(len, rows))\n mask = np.zeros(max_len + 1, dtype=int)\n if self.comment is not None:\n rows = [row.partition(self.comment)[0] for row in rows]\n for row in rows:\n for m in pattern.finditer(row):\n mask[m.start():m.end()] = 1\n shifted = np.roll(mask, 1)\n shifted[0] = 0\n edges = np.where((mask ^ shifted) == 1)[0]\n edge_pairs = list(zip(edges[::2], edges[1::2]))\n return edge_pairs\n\n def __next__(self):\n if self.buffer is not None:\n try:\n line = next(self.buffer)\n except StopIteration:\n self.buffer = None\n line = next(self.f)\n else:\n line = next(self.f)\n # Note: 'colspecs' is a sequence of half-open intervals.\n return [line[fromm:to].strip(self.delimiter)\n for (fromm, to) in self.colspecs]\n\n\nclass FixedWidthFieldParser(PythonParser):\n \"\"\"\n Specialization that Converts fixed-width fields into DataFrames.\n See PythonParser for details.\n \"\"\"\n\n def __init__(self, f, **kwds):\n # Support iterators, convert to a list.\n self.colspecs = kwds.pop('colspecs')\n PythonParser.__init__(self, f, **kwds)\n\n def _make_reader(self, f):\n self.data = FixedWidthReader(f, self.colspecs, self.delimiter,\n self.comment, self.skiprows)\n",
"\"\"\"\n\nprovide a generic structure to support window functions,\nsimilar to how we have a Groupby object\n\n\n\"\"\"\nfrom __future__ import division\n\nimport warnings\nimport numpy as np\nfrom collections import defaultdict\nfrom datetime import timedelta\n\nfrom pandas.core.dtypes.generic import (\n ABCSeries,\n ABCDataFrame,\n ABCDatetimeIndex,\n ABCTimedeltaIndex,\n ABCPeriodIndex,\n ABCDateOffset)\nfrom pandas.core.dtypes.common import (\n is_integer,\n is_bool,\n is_float_dtype,\n is_integer_dtype,\n needs_i8_conversion,\n is_timedelta64_dtype,\n is_list_like,\n ensure_float64,\n is_scalar)\n\nfrom pandas.core.base import PandasObject, SelectionMixin\nfrom pandas.core.groupby.base import GroupByMixin\nimport pandas.core.common as com\nimport pandas._libs.window as _window\n\nfrom pandas import compat\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import (Substitution, Appender,\n cache_readonly)\nfrom pandas.core.generic import _shared_docs\nfrom textwrap import dedent\n\n\n_shared_docs = dict(**_shared_docs)\n_doc_template = \"\"\"\n\nReturns\n-------\nsame type as input\n\nSee Also\n--------\npandas.Series.%(name)s\npandas.DataFrame.%(name)s\n\"\"\"\n\n\nclass _Window(PandasObject, SelectionMixin):\n _attributes = ['window', 'min_periods', 'center', 'win_type',\n 'axis', 'on', 'closed']\n exclusions = set()\n\n def __init__(self, obj, window=None, min_periods=None,\n center=False, win_type=None, axis=0, on=None, closed=None,\n **kwargs):\n\n self.__dict__.update(kwargs)\n self.blocks = []\n self.obj = obj\n self.on = on\n self.closed = closed\n self.window = window\n self.min_periods = min_periods\n self.center = center\n self.win_type = win_type\n self.win_freq = None\n self.axis = obj._get_axis_number(axis) if axis is not None else None\n self.validate()\n\n @property\n def _constructor(self):\n return Window\n\n @property\n def is_datetimelike(self):\n return None\n\n @property\n def _on(self):\n return None\n\n @property\n def is_freq_type(self):\n return self.win_type == 'freq'\n\n def validate(self):\n if self.center is not None and not is_bool(self.center):\n raise ValueError(\"center must be a boolean\")\n if (self.min_periods is not None and\n not is_integer(self.min_periods)):\n raise ValueError(\"min_periods must be an integer\")\n if (self.closed is not None and\n self.closed not in ['right', 'both', 'left', 'neither']):\n raise ValueError(\"closed must be 'right', 'left', 'both' or \"\n \"'neither'\")\n\n def _convert_freq(self):\n \"\"\" resample according to the how, return a new object \"\"\"\n\n obj = self._selected_obj\n index = None\n return obj, index\n\n def _create_blocks(self):\n \"\"\" split data into blocks & return conformed data \"\"\"\n\n obj, index = self._convert_freq()\n if index is not None:\n index = self._on\n\n # filter out the on from the object\n if self.on is not None:\n if obj.ndim == 2:\n obj = obj.reindex(columns=obj.columns.difference([self.on]),\n copy=False)\n blocks = obj._to_dict_of_blocks(copy=False).values()\n\n return blocks, obj, index\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : str / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n\n # create a new object to prevent aliasing\n if subset is None:\n subset = self.obj\n self = self._shallow_copy(subset)\n self._reset_cache()\n if subset.ndim == 2:\n if is_scalar(key) and key in 
subset or is_list_like(key):\n self._selection = key\n return self\n\n def __getattr__(self, attr):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n\n raise AttributeError(\"%r object has no attribute %r\" %\n (type(self).__name__, attr))\n\n def _dir_additions(self):\n return self.obj._dir_additions()\n\n def _get_window(self, other=None):\n return self.window\n\n @property\n def _window_type(self):\n return self.__class__.__name__\n\n def __unicode__(self):\n \"\"\" provide a nice str repr of our rolling object \"\"\"\n\n attrs = [\"{k}={v}\".format(k=k, v=getattr(self, k))\n for k in self._attributes\n if getattr(self, k, None) is not None]\n return \"{klass} [{attrs}]\".format(klass=self._window_type,\n attrs=','.join(attrs))\n\n def __iter__(self):\n url = 'https://github.com/pandas-dev/pandas/issues/11704'\n raise NotImplementedError('See issue #11704 {url}'.format(url=url))\n\n def _get_index(self, index=None):\n \"\"\"\n Return index as ndarrays\n\n Returns\n -------\n tuple of (index, index_as_ndarray)\n \"\"\"\n\n if self.is_freq_type:\n if index is None:\n index = self._on\n return index, index.asi8\n return index, index\n\n def _prep_values(self, values=None, kill_inf=True):\n\n if values is None:\n values = getattr(self._selected_obj, 'values', self._selected_obj)\n\n # GH #12373 : rolling functions error on float32 data\n # make sure the data is coerced to float64\n if is_float_dtype(values.dtype):\n values = ensure_float64(values)\n elif is_integer_dtype(values.dtype):\n values = ensure_float64(values)\n elif needs_i8_conversion(values.dtype):\n raise NotImplementedError(\"ops for {action} for this \"\n \"dtype {dtype} are not \"\n \"implemented\".format(\n action=self._window_type,\n dtype=values.dtype))\n else:\n try:\n values = ensure_float64(values)\n except (ValueError, TypeError):\n raise TypeError(\"cannot handle this type -> {0}\"\n \"\".format(values.dtype))\n\n if kill_inf:\n values = values.copy()\n values[np.isinf(values)] = np.NaN\n\n return values\n\n def _wrap_result(self, result, block=None, obj=None):\n \"\"\" wrap a single result \"\"\"\n\n if obj is None:\n obj = self._selected_obj\n index = obj.index\n\n if isinstance(result, np.ndarray):\n\n # coerce if necessary\n if block is not None:\n if is_timedelta64_dtype(block.values.dtype):\n from pandas import to_timedelta\n result = to_timedelta(\n result.ravel(), unit='ns').values.reshape(result.shape)\n\n if result.ndim == 1:\n from pandas import Series\n return Series(result, index, name=obj.name)\n\n return type(obj)(result, index=index, columns=block.columns)\n return result\n\n def _wrap_results(self, results, blocks, obj):\n \"\"\"\n wrap the results\n\n Parameters\n ----------\n results : list of ndarrays\n blocks : list of blocks\n obj : conformed data (may be resampled)\n \"\"\"\n\n from pandas import Series, concat\n from pandas.core.index import ensure_index\n\n final = []\n for result, block in zip(results, blocks):\n\n result = self._wrap_result(result, block=block, obj=obj)\n if result.ndim == 1:\n return result\n final.append(result)\n\n # if we have an 'on' column\n # we want to put it back into the results\n # in the same location\n columns = self._selected_obj.columns\n if self.on is not None and not self._on.equals(obj.index):\n\n name = self._on.name\n final.append(Series(self._on, index=obj.index, name=name))\n\n if self._selection is not None:\n\n selection = ensure_index(self._selection)\n\n # need to 
reorder to include original location of\n # the on column (if its not already there)\n if name not in selection:\n columns = self.obj.columns\n indexer = columns.get_indexer(selection.tolist() + [name])\n columns = columns.take(sorted(indexer))\n\n if not len(final):\n return obj.astype('float64')\n return concat(final, axis=1).reindex(columns=columns, copy=False)\n\n def _center_window(self, result, window):\n \"\"\" center the result in the window \"\"\"\n if self.axis > result.ndim - 1:\n raise ValueError(\"Requested axis is larger then no. of argument \"\n \"dimensions\")\n\n offset = _offset(window, True)\n if offset > 0:\n if isinstance(result, (ABCSeries, ABCDataFrame)):\n result = result.slice_shift(-offset, axis=self.axis)\n else:\n lead_indexer = [slice(None)] * result.ndim\n lead_indexer[self.axis] = slice(offset, None)\n result = np.copy(result[tuple(lead_indexer)])\n return result\n\n def aggregate(self, arg, *args, **kwargs):\n result, how = self._aggregate(arg, *args, **kwargs)\n if result is None:\n return self.apply(arg, raw=False, args=args, kwargs=kwargs)\n return result\n\n agg = aggregate\n\n _shared_docs['sum'] = dedent(\"\"\"\n Calculate %(name)s sum of given DataFrame or Series.\n\n Parameters\n ----------\n *args, **kwargs\n For compatibility with other %(name)s methods. Has no effect\n on the computed value.\n\n Returns\n -------\n Series or DataFrame\n Same type as the input, with the same index, containing the\n %(name)s sum.\n\n See Also\n --------\n Series.sum : Reducing sum for Series.\n DataFrame.sum : Reducing sum for DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.rolling(3).sum()\n 0 NaN\n 1 NaN\n 2 6.0\n 3 9.0\n 4 12.0\n dtype: float64\n\n >>> s.expanding(3).sum()\n 0 NaN\n 1 NaN\n 2 6.0\n 3 10.0\n 4 15.0\n dtype: float64\n\n >>> s.rolling(3, center=True).sum()\n 0 NaN\n 1 6.0\n 2 9.0\n 3 12.0\n 4 NaN\n dtype: float64\n\n For DataFrame, each %(name)s sum is computed column-wise.\n\n >>> df = pd.DataFrame({\"A\": s, \"B\": s ** 2})\n >>> df\n A B\n 0 1 1\n 1 2 4\n 2 3 9\n 3 4 16\n 4 5 25\n\n >>> df.rolling(3).sum()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 6.0 14.0\n 3 9.0 29.0\n 4 12.0 50.0\n \"\"\")\n\n _shared_docs['mean'] = dedent(\"\"\"\n Calculate the %(name)s mean of the values.\n\n Parameters\n ----------\n *args\n Under Review.\n **kwargs\n Under Review.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.mean : Equivalent method for Series.\n DataFrame.mean : Equivalent method for DataFrame.\n\n Examples\n --------\n The below examples will show rolling mean calculations with window sizes of\n two and three, respectively.\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).mean()\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n\n >>> s.rolling(3).mean()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 3.0\n dtype: float64\n \"\"\")\n\n\nclass Window(_Window):\n \"\"\"\n Provides rolling window calculations.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n window : int, or offset\n Size of the moving window. This is the number of observations used for\n calculating the statistic. Each window will be a fixed size.\n\n If its an offset then this will be the time period of each window. 
Each\n window will be a variable sized based on the observations included in\n the time-period. This is only valid for datetimelike indexes. This is\n new in 0.19.0\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). For a window that is specified by an offset,\n `min_periods` will default to 1. Otherwise, `min_periods` will default\n to the size of the window.\n center : bool, default False\n Set the labels at the center of the window.\n win_type : str, default None\n Provide a window type. If ``None``, all points are evenly weighted.\n See the notes below for further information.\n on : str, optional\n For a DataFrame, column on which to calculate\n the rolling window, rather than the index\n axis : int or str, default 0\n closed : str, default None\n Make the interval closed on the 'right', 'left', 'both' or\n 'neither' endpoints.\n For offset-based windows, it defaults to 'right'.\n For fixed windows, defaults to 'both'. Remaining cases not implemented\n for fixed windows.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n a Window or Rolling sub-classed for the particular operation\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n Rolling sum with a window length of 2, using the 'triang'\n window type.\n\n >>> df.rolling(2, win_type='triang').sum()\n B\n 0 NaN\n 1 1.0\n 2 2.5\n 3 NaN\n 4 NaN\n\n Rolling sum with a window length of 2, min_periods defaults\n to the window length.\n\n >>> df.rolling(2).sum()\n B\n 0 NaN\n 1 1.0\n 2 3.0\n 3 NaN\n 4 NaN\n\n Same as above, but explicitly set the min_periods\n\n >>> df.rolling(2, min_periods=1).sum()\n B\n 0 0.0\n 1 1.0\n 2 3.0\n 3 2.0\n 4 4.0\n\n A ragged (meaning not-a-regular frequency), time-indexed DataFrame\n\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},\n ... index = [pd.Timestamp('20130101 09:00:00'),\n ... pd.Timestamp('20130101 09:00:02'),\n ... pd.Timestamp('20130101 09:00:03'),\n ... pd.Timestamp('20130101 09:00:05'),\n ... pd.Timestamp('20130101 09:00:06')])\n\n >>> df\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 2.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n\n Contrasting to an integer rolling window, this will roll a variable\n length window corresponding to the time period.\n The default for min_periods is 1.\n\n >>> df.rolling('2s').sum()\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 3.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n\n Notes\n -----\n By default, the result is set to the right edge of the window. This can be\n changed to the center of the window by setting ``center=True``.\n\n To learn more about the offsets & frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n The recognized win_types are:\n\n * ``boxcar``\n * ``triang``\n * ``blackman``\n * ``hamming``\n * ``bartlett``\n * ``parzen``\n * ``bohman``\n * ``blackmanharris``\n * ``nuttall``\n * ``barthann``\n * ``kaiser`` (needs beta)\n * ``gaussian`` (needs std)\n * ``general_gaussian`` (needs power, width)\n * ``slepian`` (needs width).\n\n If ``win_type=None`` all points are evenly weighted. 
To learn more about\n different window types see `scipy.signal window functions\n <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.\n\n See Also\n --------\n expanding : Provides expanding transformations.\n ewm : Provides exponential weighted functions.\n \"\"\"\n\n def validate(self):\n super(Window, self).validate()\n\n window = self.window\n if isinstance(window, (list, tuple, np.ndarray)):\n pass\n elif is_integer(window):\n if window <= 0:\n raise ValueError(\"window must be > 0 \")\n try:\n import scipy.signal as sig\n except ImportError:\n raise ImportError('Please install scipy to generate window '\n 'weight')\n\n if not isinstance(self.win_type, compat.string_types):\n raise ValueError('Invalid win_type {0}'.format(self.win_type))\n if getattr(sig, self.win_type, None) is None:\n raise ValueError('Invalid win_type {0}'.format(self.win_type))\n else:\n raise ValueError('Invalid window {0}'.format(window))\n\n def _prep_window(self, **kwargs):\n \"\"\"\n provide validation for our window type, return the window\n we have already been validated\n \"\"\"\n\n window = self._get_window()\n if isinstance(window, (list, tuple, np.ndarray)):\n return com.asarray_tuplesafe(window).astype(float)\n elif is_integer(window):\n import scipy.signal as sig\n\n # the below may pop from kwargs\n def _validate_win_type(win_type, kwargs):\n arg_map = {'kaiser': ['beta'],\n 'gaussian': ['std'],\n 'general_gaussian': ['power', 'width'],\n 'slepian': ['width']}\n if win_type in arg_map:\n return tuple([win_type] + _pop_args(win_type,\n arg_map[win_type],\n kwargs))\n return win_type\n\n def _pop_args(win_type, arg_names, kwargs):\n msg = '%s window requires %%s' % win_type\n all_args = []\n for n in arg_names:\n if n not in kwargs:\n raise ValueError(msg % n)\n all_args.append(kwargs.pop(n))\n return all_args\n\n win_type = _validate_win_type(self.win_type, kwargs)\n # GH #15662. 
`False` makes symmetric window, rather than periodic.\n return sig.get_window(win_type, window, False).astype(float)\n\n def _apply_window(self, mean=True, **kwargs):\n \"\"\"\n Applies a moving window of type ``window_type`` on the data.\n\n Parameters\n ----------\n mean : bool, default True\n If True computes weighted mean, else weighted sum\n\n Returns\n -------\n y : same type as input argument\n\n \"\"\"\n window = self._prep_window(**kwargs)\n center = self.center\n\n blocks, obj, index = self._create_blocks()\n results = []\n for b in blocks:\n try:\n values = self._prep_values(b.values)\n except TypeError:\n results.append(b.values.copy())\n continue\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n offset = _offset(window, center)\n additional_nans = np.array([np.NaN] * offset)\n\n def f(arg, *args, **kwargs):\n minp = _use_window(self.min_periods, len(window))\n return _window.roll_window(np.concatenate((arg,\n additional_nans))\n if center else arg, window, minp,\n avg=mean)\n\n result = np.apply_along_axis(f, self.axis, values)\n\n if center:\n result = self._center_window(result, window)\n results.append(result)\n\n return self._wrap_results(results, blocks, obj)\n\n _agg_doc = dedent(\"\"\"\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.004295 0.905829 -0.954544\n 2 0.735167 -0.165272 -1.619346\n 3 -0.702657 -1.340923 -0.706334\n 4 -0.246845 0.211596 -0.901819\n 5 2.463718 3.157577 -1.380906\n 6 -1.142255 2.340594 -0.039875\n 7 1.396598 -1.647453 1.677227\n 8 -0.543425 1.761277 -0.220481\n 9 -0.640505 0.289374 -1.550670\n\n >>> df.rolling(3, win_type='boxcar').agg('mean')\n A B C\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 -0.885035 0.212600 -0.711689\n 3 -0.323928 -0.200122 -1.093408\n 4 -0.071445 -0.431533 -1.075833\n 5 0.504739 0.676083 -0.996353\n 6 0.358206 1.903256 -0.774200\n 7 0.906020 1.283573 0.085482\n 8 -0.096361 0.818139 0.472290\n 9 0.070889 0.134399 -0.031308\n\n See Also\n --------\n pandas.DataFrame.rolling.aggregate\n pandas.DataFrame.aggregate\n\n \"\"\")\n\n @Appender(_agg_doc)\n @Appender(_shared_docs['aggregate'] % dict(\n versionadded='',\n klass='Series/DataFrame',\n axis=''))\n def aggregate(self, arg, *args, **kwargs):\n result, how = self._aggregate(arg, *args, **kwargs)\n if result is None:\n\n # these must apply directly\n result = arg(self)\n\n return result\n\n agg = aggregate\n\n @Substitution(name='window')\n @Appender(_shared_docs['sum'])\n def sum(self, *args, **kwargs):\n nv.validate_window_func('sum', args, kwargs)\n return self._apply_window(mean=False, **kwargs)\n\n @Substitution(name='window')\n @Appender(_shared_docs['mean'])\n def mean(self, *args, **kwargs):\n nv.validate_window_func('mean', args, kwargs)\n return self._apply_window(mean=True, **kwargs)\n\n\nclass _GroupByMixin(GroupByMixin):\n \"\"\" provide the groupby facilities \"\"\"\n\n def __init__(self, obj, *args, **kwargs):\n parent = kwargs.pop('parent', None) # noqa\n groupby = kwargs.pop('groupby', None)\n if groupby is None:\n groupby, obj = obj, obj.obj\n self._groupby = groupby\n self._groupby.mutated = True\n self._groupby.grouper.mutated = True\n super(GroupByMixin, self).__init__(obj, *args, **kwargs)\n\n count = GroupByMixin._dispatch('count')\n corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)\n cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)\n\n def _apply(self, func, name, window=None, center=None,\n 
check_minp=None, **kwargs):\n \"\"\"\n dispatch to apply; we are stripping all of the _apply kwargs and\n performing the original function call on the grouped object\n \"\"\"\n\n def f(x, name=name, *args):\n x = self._shallow_copy(x)\n\n if isinstance(name, compat.string_types):\n return getattr(x, name)(*args, **kwargs)\n\n return x.apply(name, *args, **kwargs)\n\n return self._groupby.apply(f)\n\n\nclass _Rolling(_Window):\n\n @property\n def _constructor(self):\n return Rolling\n\n def _apply(self, func, name=None, window=None, center=None,\n check_minp=None, **kwargs):\n \"\"\"\n Rolling statistical measure using supplied function. Designed to be\n used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : str/callable to apply\n name : str, optional\n name of this function\n window : int/array, default to _get_window()\n center : bool, default to self.center\n check_minp : function, default to _use_window\n\n Returns\n -------\n y : type of input\n \"\"\"\n if center is None:\n center = self.center\n if window is None:\n window = self._get_window()\n\n if check_minp is None:\n check_minp = _use_window\n\n blocks, obj, index = self._create_blocks()\n index, indexi = self._get_index(index=index)\n results = []\n for b in blocks:\n values = self._prep_values(b.values)\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n # if we have a string function name, wrap it\n if isinstance(func, compat.string_types):\n cfunc = getattr(_window, func, None)\n if cfunc is None:\n raise ValueError(\"we do not support this function \"\n \"in _window.{0}\".format(func))\n\n def func(arg, window, min_periods=None, closed=None):\n minp = check_minp(min_periods, window)\n # ensure we are only rolling on floats\n arg = ensure_float64(arg)\n return cfunc(arg,\n window, minp, indexi, closed, **kwargs)\n\n # calculation function\n if center:\n offset = _offset(window, center)\n additional_nans = np.array([np.NaN] * offset)\n\n def calc(x):\n return func(np.concatenate((x, additional_nans)),\n window, min_periods=self.min_periods,\n closed=self.closed)\n else:\n\n def calc(x):\n return func(x, window, min_periods=self.min_periods,\n closed=self.closed)\n\n with np.errstate(all='ignore'):\n if values.ndim > 1:\n result = np.apply_along_axis(calc, self.axis, values)\n else:\n result = calc(values)\n\n if center:\n result = self._center_window(result, window)\n\n results.append(result)\n\n return self._wrap_results(results, blocks, obj)\n\n\nclass _Rolling_and_Expanding(_Rolling):\n\n _shared_docs['count'] = dedent(r\"\"\"\n The %(name)s count of any non-NaN observations inside the window.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with Series data.\n pandas.DataFrame.%(name)s : Calling object with DataFrames.\n pandas.DataFrame.count : Count of the full DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([2, 3, np.nan, 10])\n >>> s.rolling(2).count()\n 0 1.0\n 1 2.0\n 2 1.0\n 3 1.0\n dtype: float64\n >>> s.rolling(3).count()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 2.0\n dtype: float64\n >>> s.rolling(4).count()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n dtype: float64\n \"\"\")\n\n def count(self):\n\n blocks, obj, index = self._create_blocks()\n # Validate the index\n self._get_index(index=index)\n\n window = self._get_window()\n window = min(window, len(obj)) if not self.center else window\n\n results = []\n for b in blocks:\n result = 
b.notna().astype(int)\n result = self._constructor(result, window=window, min_periods=0,\n center=self.center,\n closed=self.closed).sum()\n results.append(result)\n\n return self._wrap_results(results, blocks, obj)\n\n _shared_docs['apply'] = dedent(r\"\"\"\n %(name)s function apply\n\n Parameters\n ----------\n func : function\n Must produce a single value from an ndarray input if ``raw=True``\n or a Series if ``raw=False``\n raw : bool, default None\n * ``False`` : passes each row or column as a Series to the\n function.\n * ``True`` or ``None`` : the passed function will receive ndarray\n objects instead.\n If you are just applying a NumPy reduction function this will\n achieve much better performance.\n\n The `raw` parameter is required and will show a FutureWarning if\n not passed. In the future `raw` will default to False.\n\n .. versionadded:: 0.23.0\n\n \\*args and \\*\\*kwargs are passed to the function\"\"\")\n\n def apply(self, func, raw=None, args=(), kwargs={}):\n from pandas import Series\n\n # TODO: _level is unused?\n _level = kwargs.pop('_level', None) # noqa\n window = self._get_window()\n offset = _offset(window, self.center)\n index, indexi = self._get_index()\n\n # TODO: default is for backward compat\n # change to False in the future\n if raw is None:\n warnings.warn(\n \"Currently, 'apply' passes the values as ndarrays to the \"\n \"applied function. In the future, this will change to passing \"\n \"it as Series objects. You need to specify 'raw=True' to keep \"\n \"the current behaviour, and you can pass 'raw=False' to \"\n \"silence this warning\", FutureWarning, stacklevel=3)\n raw = True\n\n def f(arg, window, min_periods, closed):\n minp = _use_window(min_periods, window)\n if not raw:\n arg = Series(arg, index=self.obj.index)\n return _window.roll_generic(\n arg, window, minp, indexi,\n closed, offset, func, raw, args, kwargs)\n\n return self._apply(f, func, args=args, kwargs=kwargs,\n center=False, raw=raw)\n\n def sum(self, *args, **kwargs):\n nv.validate_window_func('sum', args, kwargs)\n return self._apply('roll_sum', 'sum', **kwargs)\n\n _shared_docs['max'] = dedent(\"\"\"\n %(name)s maximum\n \"\"\")\n\n def max(self, *args, **kwargs):\n nv.validate_window_func('max', args, kwargs)\n return self._apply('roll_max', 'max', **kwargs)\n\n _shared_docs['min'] = dedent(\"\"\"\n Calculate the %(name)s minimum.\n\n Parameters\n ----------\n **kwargs\n Under Review.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with a Series.\n DataFrame.%(name)s : Calling object with a DataFrame.\n Series.min : Similar method for Series.\n DataFrame.min : Similar method for DataFrame.\n\n Examples\n --------\n Performing a rolling minimum with a window size of 3.\n\n >>> s = pd.Series([4, 3, 5, 2, 6])\n >>> s.rolling(3).min()\n 0 NaN\n 1 NaN\n 2 3.0\n 3 2.0\n 4 2.0\n dtype: float64\n \"\"\")\n\n def min(self, *args, **kwargs):\n nv.validate_window_func('min', args, kwargs)\n return self._apply('roll_min', 'min', **kwargs)\n\n def mean(self, *args, **kwargs):\n nv.validate_window_func('mean', args, kwargs)\n return self._apply('roll_mean', 'mean', **kwargs)\n\n _shared_docs['median'] = dedent(\"\"\"\n Calculate the %(name)s median.\n\n Parameters\n ----------\n **kwargs\n For compatibility with other %(name)s methods. 
Has no effect\n on the computed median.\n\n Returns\n -------\n Series or DataFrame\n Returned type is the same as the original object.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.median : Equivalent method for Series.\n DataFrame.median : Equivalent method for DataFrame.\n\n Examples\n --------\n Compute the rolling median of a series with a window size of 3.\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.rolling(3).median()\n 0 NaN\n 1 NaN\n 2 1.0\n 3 2.0\n 4 3.0\n dtype: float64\n \"\"\")\n\n def median(self, **kwargs):\n return self._apply('roll_median_c', 'median', **kwargs)\n\n _shared_docs['std'] = dedent(\"\"\"\n Calculate %(name)s standard deviation.\n\n Normalized by N-1 by default. This can be changed using the `ddof`\n argument.\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n *args, **kwargs\n For NumPy compatibility. No additional arguments are used.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller of the %(name)s calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.std : Equivalent method for Series.\n DataFrame.std : Equivalent method for DataFrame.\n numpy.std : Equivalent method for Numpy array.\n\n Notes\n -----\n The default `ddof` of 1 used in Series.std is different than the default\n `ddof` of 0 in numpy.std.\n\n A minimum of one period is required for the rolling calculation.\n\n Examples\n --------\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 1.000000\n 4 1.000000\n 5 1.154701\n 6 0.000000\n dtype: float64\n\n >>> s.expanding(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 0.957427\n 4 0.894427\n 5 0.836660\n 6 0.786796\n dtype: float64\n \"\"\")\n\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_window_func('std', args, kwargs)\n window = self._get_window()\n index, indexi = self._get_index()\n\n def f(arg, *args, **kwargs):\n minp = _require_min_periods(1)(self.min_periods, window)\n return _zsqrt(_window.roll_var(arg, window, minp, indexi,\n self.closed, ddof))\n\n return self._apply(f, 'std', check_minp=_require_min_periods(1),\n ddof=ddof, **kwargs)\n\n _shared_docs['var'] = dedent(\"\"\"\n Calculate unbiased %(name)s variance.\n\n Normalized by N-1 by default. This can be changed using the `ddof`\n argument.\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n *args, **kwargs\n For NumPy compatibility. 
No additional arguments are used.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller of the %(name)s calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.var : Equivalent method for Series.\n DataFrame.var : Equivalent method for DataFrame.\n numpy.var : Equivalent method for Numpy array.\n\n Notes\n -----\n The default `ddof` of 1 used in :meth:`Series.var` is different than the\n default `ddof` of 0 in :func:`numpy.var`.\n\n A minimum of 1 period is required for the rolling calculation.\n\n Examples\n --------\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 1.000000\n 4 1.000000\n 5 1.333333\n 6 0.000000\n dtype: float64\n\n >>> s.expanding(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 0.916667\n 4 0.800000\n 5 0.700000\n 6 0.619048\n dtype: float64\n \"\"\")\n\n def var(self, ddof=1, *args, **kwargs):\n nv.validate_window_func('var', args, kwargs)\n return self._apply('roll_var', 'var',\n check_minp=_require_min_periods(1), ddof=ddof,\n **kwargs)\n\n _shared_docs['skew'] = \"\"\"Unbiased %(name)s skewness\"\"\"\n\n def skew(self, **kwargs):\n return self._apply('roll_skew', 'skew',\n check_minp=_require_min_periods(3), **kwargs)\n\n _shared_docs['kurt'] = dedent(\"\"\"\n Calculate unbiased %(name)s kurtosis.\n\n This function uses Fisher's definition of kurtosis without bias.\n\n Parameters\n ----------\n **kwargs\n Under Review.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.kurt : Equivalent method for Series.\n DataFrame.kurt : Equivalent method for DataFrame.\n scipy.stats.skew : Third moment of a probability density.\n scipy.stats.kurtosis : Reference SciPy method.\n\n Notes\n -----\n A minimum of 4 periods is required for the %(name)s calculation.\n \"\"\")\n\n def kurt(self, **kwargs):\n return self._apply('roll_kurt', 'kurt',\n check_minp=_require_min_periods(4), **kwargs)\n\n _shared_docs['quantile'] = dedent(\"\"\"\n %(name)s quantile.\n\n Parameters\n ----------\n quantile : float\n Quantile to compute. 0 <= quantile <= 1.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n .. versionadded:: 0.23.0\n\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n **kwargs:\n For compatibility with other %(name)s methods. 
Has no effect on\n the result.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).quantile(.4, interpolation='lower')\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n >>> s.rolling(2).quantile(.4, interpolation='midpoint')\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n\n See Also\n --------\n pandas.Series.quantile : Computes value at the given quantile over all data\n in Series.\n pandas.DataFrame.quantile : Computes values at the given quantile over\n requested axis in DataFrame.\n \"\"\")\n\n def quantile(self, quantile, interpolation='linear', **kwargs):\n window = self._get_window()\n index, indexi = self._get_index()\n\n def f(arg, *args, **kwargs):\n minp = _use_window(self.min_periods, window)\n if quantile == 1.0:\n return _window.roll_max(arg, window, minp, indexi,\n self.closed)\n elif quantile == 0.0:\n return _window.roll_min(arg, window, minp, indexi,\n self.closed)\n else:\n return _window.roll_quantile(arg, window, minp, indexi,\n self.closed, quantile,\n interpolation)\n\n return self._apply(f, 'quantile', quantile=quantile,\n **kwargs)\n\n _shared_docs['cov'] = dedent(\"\"\"\n %(name)s sample covariance\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n if not supplied then will default to self and produce pairwise output\n pairwise : bool, default None\n If False then only matching columns between self and other will be used\n and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\"\"\")\n\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n if other is None:\n other = self._selected_obj\n # only default unset\n pairwise = True if pairwise is None else pairwise\n other = self._shallow_copy(other)\n\n # GH 16058: offset window\n if self.is_freq_type:\n window = self.win_freq\n else:\n window = self._get_window(other)\n\n def _get_cov(X, Y):\n # GH #12373 : rolling functions error on float32 data\n # to avoid potential overflow, cast the data to float64\n X = X.astype('float64')\n Y = Y.astype('float64')\n mean = lambda x: x.rolling(window, self.min_periods,\n center=self.center).mean(**kwargs)\n count = (X + Y).rolling(window=window,\n center=self.center).count(**kwargs)\n bias_adj = count / (count - ddof)\n return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj\n\n return _flex_binary_moment(self._selected_obj, other._selected_obj,\n _get_cov, pairwise=bool(pairwise))\n\n _shared_docs['corr'] = dedent(\"\"\"\n Calculate %(name)s correlation.\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n If not supplied then will default to self.\n pairwise : bool, default None\n Calculate pairwise combinations of columns within a\n DataFrame. 
If `other` is not specified, defaults to `True`,\n otherwise defaults to `False`.\n Not relevant for :class:`~pandas.Series`.\n **kwargs\n Unused.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the\n %(name)s calculation.\n\n See Also\n --------\n Series.%(name)s : Calling object with Series data.\n DataFrame.%(name)s : Calling object with DataFrames.\n Series.corr : Equivalent method for Series.\n DataFrame.corr : Equivalent method for DataFrame.\n %(name)s.cov : Similar method to calculate covariance.\n numpy.corrcoef : NumPy Pearson's correlation calculation.\n\n Notes\n -----\n This function uses Pearson's definition of correlation\n (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).\n\n When `other` is not specified, the output will be self correlation (e.g.\n all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`\n set to `True`.\n\n Function will return ``NaN`` for correlations of equal valued sequences;\n this is the result of a 0/0 division error.\n\n When `pairwise` is set to `False`, only matching columns between `self` and\n `other` will be used.\n\n When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame\n with the original index on the first level, and the `other` DataFrame\n columns on the second level.\n\n In the case of missing elements, only complete pairwise observations\n will be used.\n\n Examples\n --------\n The below example shows a rolling calculation with a window size of\n four matching the equivalent function call using :meth:`numpy.corrcoef`.\n\n >>> v1 = [3, 3, 3, 5, 8]\n >>> v2 = [3, 4, 4, 4, 8]\n >>> fmt = \"{0:.6f}\" # limit the printed precision to 6 digits\n >>> # numpy returns a 2X2 array, the correlation coefficient\n >>> # is the number at entry [0][1]\n >>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))\n 0.333333\n >>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))\n 0.916949\n >>> s1 = pd.Series(v1)\n >>> s2 = pd.Series(v2)\n >>> s1.rolling(4).corr(s2)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 0.333333\n 4 0.916949\n dtype: float64\n\n The below example shows a similar rolling calculation on a\n DataFrame using the pairwise option.\n\n >>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\\\n [46., 31.], [50., 36.]])\n >>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))\n [[1. 0.6263001]\n [0.6263001 1. ]]\n >>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))\n [[1. 0.5553681]\n [0.5553681 1. 
]]\n >>> df = pd.DataFrame(matrix, columns=['X','Y'])\n >>> df\n X Y\n 0 51.0 35.0\n 1 49.0 30.0\n 2 47.0 32.0\n 3 46.0 31.0\n 4 50.0 36.0\n >>> df.rolling(4).corr(pairwise=True)\n X Y\n 0 X NaN NaN\n Y NaN NaN\n 1 X NaN NaN\n Y NaN NaN\n 2 X NaN NaN\n Y NaN NaN\n 3 X 1.000000 0.626300\n Y 0.626300 1.000000\n 4 X 1.000000 0.555368\n Y 0.555368 1.000000\n\"\"\")\n\n def corr(self, other=None, pairwise=None, **kwargs):\n if other is None:\n other = self._selected_obj\n # only default unset\n pairwise = True if pairwise is None else pairwise\n other = self._shallow_copy(other)\n window = self._get_window(other)\n\n def _get_corr(a, b):\n a = a.rolling(window=window, min_periods=self.min_periods,\n center=self.center)\n b = b.rolling(window=window, min_periods=self.min_periods,\n center=self.center)\n\n return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))\n\n return _flex_binary_moment(self._selected_obj, other._selected_obj,\n _get_corr, pairwise=bool(pairwise))\n\n\nclass Rolling(_Rolling_and_Expanding):\n\n @cache_readonly\n def is_datetimelike(self):\n return isinstance(self._on,\n (ABCDatetimeIndex,\n ABCTimedeltaIndex,\n ABCPeriodIndex))\n\n @cache_readonly\n def _on(self):\n\n if self.on is None:\n return self.obj.index\n elif (isinstance(self.obj, ABCDataFrame) and\n self.on in self.obj.columns):\n from pandas import Index\n return Index(self.obj[self.on])\n else:\n raise ValueError(\"invalid on specified as {0}, \"\n \"must be a column (if DataFrame) \"\n \"or None\".format(self.on))\n\n def validate(self):\n super(Rolling, self).validate()\n\n # we allow rolling on a datetimelike index\n if ((self.obj.empty or self.is_datetimelike) and\n isinstance(self.window, (compat.string_types, ABCDateOffset,\n timedelta))):\n\n self._validate_monotonic()\n freq = self._validate_freq()\n\n # we don't allow center\n if self.center:\n raise NotImplementedError(\"center is not implemented \"\n \"for datetimelike and offset \"\n \"based windows\")\n\n # this will raise ValueError on non-fixed freqs\n self.win_freq = self.window\n self.window = freq.nanos\n self.win_type = 'freq'\n\n # min_periods must be an integer\n if self.min_periods is None:\n self.min_periods = 1\n\n elif not is_integer(self.window):\n raise ValueError(\"window must be an integer\")\n elif self.window < 0:\n raise ValueError(\"window must be non-negative\")\n\n if not self.is_datetimelike and self.closed is not None:\n raise ValueError(\"closed only implemented for datetimelike \"\n \"and offset based windows\")\n\n def _validate_monotonic(self):\n \"\"\" validate on is monotonic \"\"\"\n if not self._on.is_monotonic:\n formatted = self.on or 'index'\n raise ValueError(\"{0} must be \"\n \"monotonic\".format(formatted))\n\n def _validate_freq(self):\n \"\"\" validate & return window frequency \"\"\"\n from pandas.tseries.frequencies import to_offset\n try:\n return to_offset(self.window)\n except (TypeError, ValueError):\n raise ValueError(\"passed window {0} is not \"\n \"compatible with a datetimelike \"\n \"index\".format(self.window))\n\n _agg_doc = dedent(\"\"\"\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.004295 0.905829 -0.954544\n 2 0.735167 -0.165272 -1.619346\n 3 -0.702657 -1.340923 -0.706334\n 4 -0.246845 0.211596 -0.901819\n 5 2.463718 3.157577 -1.380906\n 6 -1.142255 2.340594 -0.039875\n 7 1.396598 -1.647453 1.677227\n 8 -0.543425 1.761277 -0.220481\n 9 -0.640505 0.289374 -1.550670\n\n >>> 
df.rolling(3).sum()\n A B C\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 -2.655105 0.637799 -2.135068\n 3 -0.971785 -0.600366 -3.280224\n 4 -0.214334 -1.294599 -3.227500\n 5 1.514216 2.028250 -2.989060\n 6 1.074618 5.709767 -2.322600\n 7 2.718061 3.850718 0.256446\n 8 -0.289082 2.454418 1.416871\n 9 0.212668 0.403198 -0.093924\n\n\n >>> df.rolling(3).agg({'A':'sum', 'B':'min'})\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 -2.655105 -0.165272\n 3 -0.971785 -1.340923\n 4 -0.214334 -1.340923\n 5 1.514216 -1.340923\n 6 1.074618 0.211596\n 7 2.718061 -1.647453\n 8 -0.289082 -1.647453\n 9 0.212668 -1.647453\n\n See Also\n --------\n pandas.Series.rolling\n pandas.DataFrame.rolling\n\n \"\"\")\n\n @Appender(_agg_doc)\n @Appender(_shared_docs['aggregate'] % dict(\n versionadded='',\n klass='Series/DataFrame',\n axis=''))\n def aggregate(self, arg, *args, **kwargs):\n return super(Rolling, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n @Substitution(name='rolling')\n @Appender(_shared_docs['count'])\n def count(self):\n\n # different impl for freq counting\n if self.is_freq_type:\n return self._apply('roll_count', 'count')\n\n return super(Rolling, self).count()\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['apply'])\n def apply(self, func, raw=None, args=(), kwargs={}):\n return super(Rolling, self).apply(\n func, raw=raw, args=args, kwargs=kwargs)\n\n @Substitution(name='rolling')\n @Appender(_shared_docs['sum'])\n def sum(self, *args, **kwargs):\n nv.validate_rolling_func('sum', args, kwargs)\n return super(Rolling, self).sum(*args, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['max'])\n def max(self, *args, **kwargs):\n nv.validate_rolling_func('max', args, kwargs)\n return super(Rolling, self).max(*args, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_shared_docs['min'])\n def min(self, *args, **kwargs):\n nv.validate_rolling_func('min', args, kwargs)\n return super(Rolling, self).min(*args, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_shared_docs['mean'])\n def mean(self, *args, **kwargs):\n nv.validate_rolling_func('mean', args, kwargs)\n return super(Rolling, self).mean(*args, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_shared_docs['median'])\n def median(self, **kwargs):\n return super(Rolling, self).median(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_shared_docs['std'])\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_rolling_func('std', args, kwargs)\n return super(Rolling, self).std(ddof=ddof, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_shared_docs['var'])\n def var(self, ddof=1, *args, **kwargs):\n nv.validate_rolling_func('var', args, kwargs)\n return super(Rolling, self).var(ddof=ddof, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['skew'])\n def skew(self, **kwargs):\n return super(Rolling, self).skew(**kwargs)\n\n _agg_doc = dedent(\"\"\"\n Examples\n --------\n\n The example below will show a rolling calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> fmt = \"{0:.6f}\" # limit the printed precision to 6 digits\n >>> import scipy.stats\n >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))\n -1.200000\n >>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))\n 3.999946\n >>> s = pd.Series(arr)\n >>> s.rolling(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 3.999946\n dtype: 
float64\n \"\"\")\n\n @Appender(_agg_doc)\n @Substitution(name='rolling')\n @Appender(_shared_docs['kurt'])\n def kurt(self, **kwargs):\n return super(Rolling, self).kurt(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_shared_docs['quantile'])\n def quantile(self, quantile, interpolation='linear', **kwargs):\n return super(Rolling, self).quantile(quantile=quantile,\n interpolation=interpolation,\n **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['cov'])\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n return super(Rolling, self).cov(other=other, pairwise=pairwise,\n ddof=ddof, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_shared_docs['corr'])\n def corr(self, other=None, pairwise=None, **kwargs):\n return super(Rolling, self).corr(other=other, pairwise=pairwise,\n **kwargs)\n\n\nclass RollingGroupby(_GroupByMixin, Rolling):\n \"\"\"\n Provides a rolling groupby implementation\n\n .. versionadded:: 0.18.1\n\n \"\"\"\n @property\n def _constructor(self):\n return Rolling\n\n def _gotitem(self, key, ndim, subset=None):\n\n # we are setting the index on the actual object\n # here so our index is carried thru to the selected obj\n # when we do the splitting for the groupby\n if self.on is not None:\n self._groupby.obj = self._groupby.obj.set_index(self._on)\n self.on = None\n return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)\n\n def _validate_monotonic(self):\n \"\"\"\n validate that on is monotonic;\n we don't care for groupby.rolling\n because we have already validated at a higher\n level\n \"\"\"\n pass\n\n\nclass Expanding(_Rolling_and_Expanding):\n \"\"\"\n Provides expanding transformations.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n min_periods : int, default 1\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n center : bool, default False\n Set the labels at the center of the window.\n axis : int or str, default 0\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n >>> df.expanding(2).sum()\n B\n 0 NaN\n 1 1.0\n 2 3.0\n 3 3.0\n 4 7.0\n\n Notes\n -----\n By default, the result is set to the right edge of the window. 
This can be\n changed to the center of the window by setting ``center=True``.\n\n See Also\n --------\n rolling : Provides rolling window calculations.\n ewm : Provides exponential weighted functions.\n \"\"\"\n\n _attributes = ['min_periods', 'center', 'axis']\n\n def __init__(self, obj, min_periods=1, center=False, axis=0,\n **kwargs):\n super(Expanding, self).__init__(obj=obj, min_periods=min_periods,\n center=center, axis=axis)\n\n @property\n def _constructor(self):\n return Expanding\n\n def _get_window(self, other=None):\n \"\"\"\n Get the window length over which to perform some operation.\n\n Parameters\n ----------\n other : object, default None\n The other object that is involved in the operation.\n Such an object is involved for operations like covariance.\n\n Returns\n -------\n window : int\n The window length.\n \"\"\"\n axis = self.obj._get_axis(self.axis)\n length = len(axis) + (other is not None) * len(axis)\n\n other = self.min_periods or -1\n return max(length, other)\n\n _agg_doc = dedent(\"\"\"\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.004295 0.905829 -0.954544\n 2 0.735167 -0.165272 -1.619346\n 3 -0.702657 -1.340923 -0.706334\n 4 -0.246845 0.211596 -0.901819\n 5 2.463718 3.157577 -1.380906\n 6 -1.142255 2.340594 -0.039875\n 7 1.396598 -1.647453 1.677227\n 8 -0.543425 1.761277 -0.220481\n 9 -0.640505 0.289374 -1.550670\n\n >>> df.ewm(alpha=0.5).mean()\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.464856 0.569633 -0.490089\n 2 -0.207700 0.149687 -1.135379\n 3 -0.471677 -0.645305 -0.906555\n 4 -0.355635 -0.203033 -0.904111\n 5 1.076417 1.503943 -1.146293\n 6 -0.041654 1.925562 -0.588728\n 7 0.680292 0.132049 0.548693\n 8 0.067236 0.948257 0.163353\n 9 -0.286980 0.618493 -0.694496\n\n See Also\n --------\n pandas.DataFrame.expanding.aggregate\n pandas.DataFrame.rolling.aggregate\n pandas.DataFrame.aggregate\n\n \"\"\")\n\n @Appender(_agg_doc)\n @Appender(_shared_docs['aggregate'] % dict(\n versionadded='',\n klass='Series/DataFrame',\n axis=''))\n def aggregate(self, arg, *args, **kwargs):\n return super(Expanding, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n @Substitution(name='expanding')\n @Appender(_shared_docs['count'])\n def count(self, **kwargs):\n return super(Expanding, self).count(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['apply'])\n def apply(self, func, raw=None, args=(), kwargs={}):\n return super(Expanding, self).apply(\n func, raw=raw, args=args, kwargs=kwargs)\n\n @Substitution(name='expanding')\n @Appender(_shared_docs['sum'])\n def sum(self, *args, **kwargs):\n nv.validate_expanding_func('sum', args, kwargs)\n return super(Expanding, self).sum(*args, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['max'])\n def max(self, *args, **kwargs):\n nv.validate_expanding_func('max', args, kwargs)\n return super(Expanding, self).max(*args, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_shared_docs['min'])\n def min(self, *args, **kwargs):\n nv.validate_expanding_func('min', args, kwargs)\n return super(Expanding, self).min(*args, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_shared_docs['mean'])\n def mean(self, *args, **kwargs):\n nv.validate_expanding_func('mean', args, kwargs)\n return super(Expanding, self).mean(*args, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_shared_docs['median'])\n 
def median(self, **kwargs):\n return super(Expanding, self).median(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_shared_docs['std'])\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_expanding_func('std', args, kwargs)\n return super(Expanding, self).std(ddof=ddof, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_shared_docs['var'])\n def var(self, ddof=1, *args, **kwargs):\n nv.validate_expanding_func('var', args, kwargs)\n return super(Expanding, self).var(ddof=ddof, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['skew'])\n def skew(self, **kwargs):\n return super(Expanding, self).skew(**kwargs)\n\n _agg_doc = dedent(\"\"\"\n Examples\n --------\n\n The example below will show an expanding calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> import scipy.stats\n >>> fmt = \"{0:.6f}\" # limit the printed precision to 6 digits\n >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))\n -1.200000\n >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))\n 4.999874\n >>> s = pd.Series(arr)\n >>> s.expanding(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 4.999874\n dtype: float64\n \"\"\")\n\n @Appender(_agg_doc)\n @Substitution(name='expanding')\n @Appender(_shared_docs['kurt'])\n def kurt(self, **kwargs):\n return super(Expanding, self).kurt(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_shared_docs['quantile'])\n def quantile(self, quantile, interpolation='linear', **kwargs):\n return super(Expanding, self).quantile(quantile=quantile,\n interpolation=interpolation,\n **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['cov'])\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n return super(Expanding, self).cov(other=other, pairwise=pairwise,\n ddof=ddof, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_shared_docs['corr'])\n def corr(self, other=None, pairwise=None, **kwargs):\n return super(Expanding, self).corr(other=other, pairwise=pairwise,\n **kwargs)\n\n\nclass ExpandingGroupby(_GroupByMixin, Expanding):\n \"\"\"\n Provides a expanding groupby implementation\n\n .. versionadded:: 0.18.1\n\n \"\"\"\n @property\n def _constructor(self):\n return Expanding\n\n\n_bias_template = \"\"\"\n\nParameters\n----------\nbias : bool, default False\n Use a standard estimation bias correction\n\"\"\"\n\n_pairwise_template = \"\"\"\n\nParameters\n----------\nother : Series, DataFrame, or ndarray, optional\n if not supplied then will default to self and produce pairwise output\npairwise : bool, default None\n If False then only matching columns between self and other will be used and\n the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the output\n will be a MultiIndex DataFrame in the case of DataFrame inputs.\n In the case of missing elements, only complete pairwise observations will\n be used.\nbias : bool, default False\n Use a standard estimation bias correction\n\"\"\"\n\n\nclass EWM(_Rolling):\n r\"\"\"\n Provides exponential weighted functions\n\n .. 
versionadded:: 0.18.0\n\n Parameters\n ----------\n com : float, optional\n Specify decay in terms of center of mass,\n :math:`\\alpha = 1 / (1 + com),\\text{ for } com \\geq 0`\n span : float, optional\n Specify decay in terms of span,\n :math:`\\alpha = 2 / (span + 1),\\text{ for } span \\geq 1`\n halflife : float, optional\n Specify decay in terms of half-life,\n :math:`\\alpha = 1 - exp(log(0.5) / halflife),\\text{ for } halflife > 0`\n alpha : float, optional\n Specify smoothing factor :math:`\\alpha` directly,\n :math:`0 < \\alpha \\leq 1`\n\n .. versionadded:: 0.18.0\n\n min_periods : int, default 0\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n adjust : bool, default True\n Divide by decaying adjustment factor in beginning periods to account\n for imbalance in relative weightings (viewing EWMA as a moving average)\n ignore_na : bool, default False\n Ignore missing values when calculating weights;\n specify True to reproduce pre-0.15.0 behavior\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n >>> df.ewm(com=0.5).mean()\n B\n 0 0.000000\n 1 0.750000\n 2 1.615385\n 3 1.615385\n 4 3.670213\n\n Notes\n -----\n Exactly one of center of mass, span, half-life, and alpha must be provided.\n Allowed values and relationship between the parameters are specified in the\n parameter descriptions above; see the link at the end of this section for\n a detailed explanation.\n\n When adjust is True (default), weighted averages are calculated using\n weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.\n\n When adjust is False, weighted averages are calculated recursively as:\n weighted_average[0] = arg[0];\n weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].\n\n When ignore_na is False (default), weights are based on absolute positions.\n For example, the weights of x and y used in calculating the final weighted\n average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and\n (1-alpha)**2 and alpha (if adjust is False).\n\n When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based\n on relative positions. 
For example, the weights of x and y used in\n calculating the final weighted average of [x, None, y] are 1-alpha and 1\n (if adjust is True), and 1-alpha and alpha (if adjust is False).\n\n More details can be found at\n http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows\n\n See Also\n --------\n rolling : Provides rolling window calculations.\n expanding : Provides expanding transformations.\n \"\"\"\n _attributes = ['com', 'min_periods', 'adjust', 'ignore_na', 'axis']\n\n def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,\n min_periods=0, adjust=True, ignore_na=False,\n axis=0):\n self.obj = obj\n self.com = _get_center_of_mass(com, span, halflife, alpha)\n self.min_periods = min_periods\n self.adjust = adjust\n self.ignore_na = ignore_na\n self.axis = axis\n self.on = None\n\n @property\n def _constructor(self):\n return EWM\n\n _agg_doc = dedent(\"\"\"\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.004295 0.905829 -0.954544\n 2 0.735167 -0.165272 -1.619346\n 3 -0.702657 -1.340923 -0.706334\n 4 -0.246845 0.211596 -0.901819\n 5 2.463718 3.157577 -1.380906\n 6 -1.142255 2.340594 -0.039875\n 7 1.396598 -1.647453 1.677227\n 8 -0.543425 1.761277 -0.220481\n 9 -0.640505 0.289374 -1.550670\n\n >>> df.ewm(alpha=0.5).mean()\n A B C\n 0 -2.385977 -0.102758 0.438822\n 1 -1.464856 0.569633 -0.490089\n 2 -0.207700 0.149687 -1.135379\n 3 -0.471677 -0.645305 -0.906555\n 4 -0.355635 -0.203033 -0.904111\n 5 1.076417 1.503943 -1.146293\n 6 -0.041654 1.925562 -0.588728\n 7 0.680292 0.132049 0.548693\n 8 0.067236 0.948257 0.163353\n 9 -0.286980 0.618493 -0.694496\n\n See Also\n --------\n pandas.DataFrame.rolling.aggregate\n\n \"\"\")\n\n @Appender(_agg_doc)\n @Appender(_shared_docs['aggregate'] % dict(\n versionadded='',\n klass='Series/DataFrame',\n axis=''))\n def aggregate(self, arg, *args, **kwargs):\n return super(EWM, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n def _apply(self, func, **kwargs):\n \"\"\"Rolling statistical measure using supplied function. 
Designed to be\n used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : str/callable to apply\n\n Returns\n -------\n y : same type as input argument\n\n \"\"\"\n blocks, obj, index = self._create_blocks()\n results = []\n for b in blocks:\n try:\n values = self._prep_values(b.values)\n except TypeError:\n results.append(b.values.copy())\n continue\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n # if we have a string function name, wrap it\n if isinstance(func, compat.string_types):\n cfunc = getattr(_window, func, None)\n if cfunc is None:\n raise ValueError(\"we do not support this function \"\n \"in _window.{0}\".format(func))\n\n def func(arg):\n return cfunc(arg, self.com, int(self.adjust),\n int(self.ignore_na), int(self.min_periods))\n\n results.append(np.apply_along_axis(func, self.axis, values))\n\n return self._wrap_results(results, blocks, obj)\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n def mean(self, *args, **kwargs):\n \"\"\"exponential weighted moving average\"\"\"\n nv.validate_window_func('mean', args, kwargs)\n return self._apply('ewma', **kwargs)\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_bias_template)\n def std(self, bias=False, *args, **kwargs):\n \"\"\"exponential weighted moving stddev\"\"\"\n nv.validate_window_func('std', args, kwargs)\n return _zsqrt(self.var(bias=bias, **kwargs))\n\n vol = std\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_bias_template)\n def var(self, bias=False, *args, **kwargs):\n \"\"\"exponential weighted moving variance\"\"\"\n nv.validate_window_func('var', args, kwargs)\n\n def f(arg):\n return _window.ewmcov(arg, arg, self.com, int(self.adjust),\n int(self.ignore_na), int(self.min_periods),\n int(bias))\n\n return self._apply(f, **kwargs)\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_pairwise_template)\n def cov(self, other=None, pairwise=None, bias=False, **kwargs):\n \"\"\"exponential weighted sample covariance\"\"\"\n if other is None:\n other = self._selected_obj\n # only default unset\n pairwise = True if pairwise is None else pairwise\n other = self._shallow_copy(other)\n\n def _get_cov(X, Y):\n X = self._shallow_copy(X)\n Y = self._shallow_copy(Y)\n cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,\n int(self.adjust), int(self.ignore_na),\n int(self.min_periods), int(bias))\n return X._wrap_result(cov)\n\n return _flex_binary_moment(self._selected_obj, other._selected_obj,\n _get_cov, pairwise=bool(pairwise))\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_pairwise_template)\n def corr(self, other=None, pairwise=None, **kwargs):\n \"\"\"exponential weighted sample correlation\"\"\"\n if other is None:\n other = self._selected_obj\n # only default unset\n pairwise = True if pairwise is None else pairwise\n other = self._shallow_copy(other)\n\n def _get_corr(X, Y):\n X = self._shallow_copy(X)\n Y = self._shallow_copy(Y)\n\n def _cov(x, y):\n return _window.ewmcov(x, y, self.com, int(self.adjust),\n int(self.ignore_na),\n int(self.min_periods),\n 1)\n\n x_values = X._prep_values()\n y_values = Y._prep_values()\n with np.errstate(all='ignore'):\n cov = _cov(x_values, y_values)\n x_var = _cov(x_values, x_values)\n y_var = _cov(y_values, y_values)\n corr = cov / _zsqrt(x_var * y_var)\n return X._wrap_result(corr)\n\n return _flex_binary_moment(self._selected_obj, other._selected_obj,\n _get_corr, pairwise=bool(pairwise))\n\n# Helper Funcs\n\n\ndef 
_flex_binary_moment(arg1, arg2, f, pairwise=False):\n\n if not (isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) and\n isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))):\n raise TypeError(\"arguments to moment function must be of type \"\n \"np.ndarray/Series/DataFrame\")\n\n if (isinstance(arg1, (np.ndarray, ABCSeries)) and\n isinstance(arg2, (np.ndarray, ABCSeries))):\n X, Y = _prep_binary(arg1, arg2)\n return f(X, Y)\n\n elif isinstance(arg1, ABCDataFrame):\n from pandas import DataFrame\n\n def dataframe_from_int_dict(data, frame_template):\n result = DataFrame(data, index=frame_template.index)\n if len(result.columns) > 0:\n result.columns = frame_template.columns[result.columns]\n return result\n\n results = {}\n if isinstance(arg2, ABCDataFrame):\n if pairwise is False:\n if arg1 is arg2:\n # special case in order to handle duplicate column names\n for i, col in enumerate(arg1.columns):\n results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])\n return dataframe_from_int_dict(results, arg1)\n else:\n if not arg1.columns.is_unique:\n raise ValueError(\"'arg1' columns are not unique\")\n if not arg2.columns.is_unique:\n raise ValueError(\"'arg2' columns are not unique\")\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n X, Y = arg1.align(arg2, join='outer')\n X = X + 0 * Y\n Y = Y + 0 * X\n\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n res_columns = arg1.columns.union(arg2.columns)\n for col in res_columns:\n if col in X and col in Y:\n results[col] = f(X[col], Y[col])\n return DataFrame(results, index=X.index,\n columns=res_columns)\n elif pairwise is True:\n results = defaultdict(dict)\n for i, k1 in enumerate(arg1.columns):\n for j, k2 in enumerate(arg2.columns):\n if j < i and arg2 is arg1:\n # Symmetric case\n results[i][j] = results[j][i]\n else:\n results[i][j] = f(*_prep_binary(arg1.iloc[:, i],\n arg2.iloc[:, j]))\n\n from pandas import MultiIndex, concat\n\n result_index = arg1.index.union(arg2.index)\n if len(result_index):\n\n # construct result frame\n result = concat(\n [concat([results[i][j]\n for j, c in enumerate(arg2.columns)],\n ignore_index=True)\n for i, c in enumerate(arg1.columns)],\n ignore_index=True,\n axis=1)\n result.columns = arg1.columns\n\n # set the index and reorder\n if arg2.columns.nlevels > 1:\n result.index = MultiIndex.from_product(\n arg2.columns.levels + [result_index])\n result = result.reorder_levels([2, 0, 1]).sort_index()\n else:\n result.index = MultiIndex.from_product(\n [range(len(arg2.columns)),\n range(len(result_index))])\n result = result.swaplevel(1, 0).sort_index()\n result.index = MultiIndex.from_product(\n [result_index] + [arg2.columns])\n else:\n\n # empty result\n result = DataFrame(\n index=MultiIndex(levels=[arg1.index, arg2.columns],\n labels=[[], []]),\n columns=arg2.columns,\n dtype='float64')\n\n # reset our index names to arg1 names\n # reset our column names to arg2 names\n # careful not to mutate the original names\n result.columns = result.columns.set_names(\n arg1.columns.names)\n result.index = result.index.set_names(\n result_index.names + arg2.columns.names)\n\n return result\n\n else:\n raise ValueError(\"'pairwise' is not True/False\")\n else:\n results = {}\n for i, col in enumerate(arg1.columns):\n results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))\n return dataframe_from_int_dict(results, arg1)\n\n else:\n return _flex_binary_moment(arg2, arg1, f)\n\n\ndef _get_center_of_mass(comass, span, halflife, 
alpha):\n valid_count = com.count_not_none(comass, span, halflife, alpha)\n if valid_count > 1:\n raise ValueError(\"comass, span, halflife, and alpha \"\n \"are mutually exclusive\")\n\n # Convert to center of mass; domain checks ensure 0 < alpha <= 1\n if comass is not None:\n if comass < 0:\n raise ValueError(\"comass must satisfy: comass >= 0\")\n elif span is not None:\n if span < 1:\n raise ValueError(\"span must satisfy: span >= 1\")\n comass = (span - 1) / 2.\n elif halflife is not None:\n if halflife <= 0:\n raise ValueError(\"halflife must satisfy: halflife > 0\")\n decay = 1 - np.exp(np.log(0.5) / halflife)\n comass = 1 / decay - 1\n elif alpha is not None:\n if alpha <= 0 or alpha > 1:\n raise ValueError(\"alpha must satisfy: 0 < alpha <= 1\")\n comass = (1.0 - alpha) / alpha\n else:\n raise ValueError(\"Must pass one of comass, span, halflife, or alpha\")\n\n return float(comass)\n\n\ndef _offset(window, center):\n if not is_integer(window):\n window = len(window)\n offset = (window - 1) / 2. if center else 0\n try:\n return int(offset)\n except TypeError:\n return offset.astype(int)\n\n\ndef _require_min_periods(p):\n def _check_func(minp, window):\n if minp is None:\n return window\n else:\n return max(p, minp)\n\n return _check_func\n\n\ndef _use_window(minp, window):\n if minp is None:\n return window\n else:\n return minp\n\n\ndef _zsqrt(x):\n with np.errstate(all='ignore'):\n result = np.sqrt(x)\n mask = x < 0\n\n if isinstance(x, ABCDataFrame):\n if mask.values.any():\n result[mask] = 0\n else:\n if mask.any():\n result[mask] = 0\n\n return result\n\n\ndef _prep_binary(arg1, arg2):\n if not isinstance(arg2, type(arg1)):\n raise Exception('Input arrays must be of the same type!')\n\n # mask out values, this also makes a common index...\n X = arg1 + 0 * arg2\n Y = arg2 + 0 * arg1\n\n return X, Y\n\n\n# Top-level exports\n\n\ndef rolling(obj, win_type=None, **kwds):\n if not isinstance(obj, (ABCSeries, ABCDataFrame)):\n raise TypeError('invalid type: %s' % type(obj))\n\n if win_type is not None:\n return Window(obj, win_type=win_type, **kwds)\n\n return Rolling(obj, **kwds)\n\n\nrolling.__doc__ = Window.__doc__\n\n\ndef expanding(obj, **kwds):\n if not isinstance(obj, (ABCSeries, ABCDataFrame)):\n raise TypeError('invalid type: %s' % type(obj))\n\n return Expanding(obj, **kwds)\n\n\nexpanding.__doc__ = Expanding.__doc__\n\n\ndef ewm(obj, **kwds):\n if not isinstance(obj, (ABCSeries, ABCDataFrame)):\n raise TypeError('invalid type: %s' % type(obj))\n\n return EWM(obj, **kwds)\n\n\newm.__doc__ = EWM.__doc__\n",
"from __future__ import division\n\nimport numpy as np\nfrom pandas import Interval, Timestamp, Timedelta\nimport pandas.core.common as com\n\nimport pytest\n\n\[email protected]\ndef interval():\n return Interval(0, 1)\n\n\nclass TestInterval(object):\n\n def test_properties(self, interval):\n assert interval.closed == 'right'\n assert interval.left == 0\n assert interval.right == 1\n assert interval.mid == 0.5\n\n def test_repr(self, interval):\n assert repr(interval) == \"Interval(0, 1, closed='right')\"\n assert str(interval) == \"(0, 1]\"\n\n interval_left = Interval(0, 1, closed='left')\n assert repr(interval_left) == \"Interval(0, 1, closed='left')\"\n assert str(interval_left) == \"[0, 1)\"\n\n def test_contains(self, interval):\n assert 0.5 in interval\n assert 1 in interval\n assert 0 not in interval\n\n msg = \"__contains__ not defined for two intervals\"\n with pytest.raises(TypeError, match=msg):\n interval in interval\n\n interval_both = Interval(0, 1, closed='both')\n assert 0 in interval_both\n assert 1 in interval_both\n\n interval_neither = Interval(0, 1, closed='neither')\n assert 0 not in interval_neither\n assert 0.5 in interval_neither\n assert 1 not in interval_neither\n\n def test_equal(self):\n assert Interval(0, 1) == Interval(0, 1, closed='right')\n assert Interval(0, 1) != Interval(0, 1, closed='left')\n assert Interval(0, 1) != 0\n\n def test_comparison(self):\n with pytest.raises(TypeError, match='unorderable types'):\n Interval(0, 1) < 2\n\n assert Interval(0, 1) < Interval(1, 2)\n assert Interval(0, 1) < Interval(0, 2)\n assert Interval(0, 1) < Interval(0.5, 1.5)\n assert Interval(0, 1) <= Interval(0, 1)\n assert Interval(0, 1) > Interval(-1, 2)\n assert Interval(0, 1) >= Interval(0, 1)\n\n def test_hash(self, interval):\n # should not raise\n hash(interval)\n\n @pytest.mark.parametrize('left, right, expected', [\n (0, 5, 5),\n (-2, 5.5, 7.5),\n (10, 10, 0),\n (10, np.inf, np.inf),\n (-np.inf, -5, np.inf),\n (-np.inf, np.inf, np.inf),\n (Timedelta('0 days'), Timedelta('5 days'), Timedelta('5 days')),\n (Timedelta('10 days'), Timedelta('10 days'), Timedelta('0 days')),\n (Timedelta('1H10M'), Timedelta('5H5M'), Timedelta('3H55M')),\n (Timedelta('5S'), Timedelta('1H'), Timedelta('59M55S'))])\n def test_length(self, left, right, expected):\n # GH 18789\n iv = Interval(left, right)\n result = iv.length\n assert result == expected\n\n @pytest.mark.parametrize('left, right, expected', [\n ('2017-01-01', '2017-01-06', '5 days'),\n ('2017-01-01', '2017-01-01 12:00:00', '12 hours'),\n ('2017-01-01 12:00', '2017-01-01 12:00:00', '0 days'),\n ('2017-01-01 12:01', '2017-01-05 17:31:00', '4 days 5 hours 30 min')])\n @pytest.mark.parametrize('tz', (None, 'UTC', 'CET', 'US/Eastern'))\n def test_length_timestamp(self, tz, left, right, expected):\n # GH 18789\n iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz))\n result = iv.length\n expected = Timedelta(expected)\n assert result == expected\n\n @pytest.mark.parametrize('left, right', [\n ('a', 'z'),\n (('a', 'b'), ('c', 'd')),\n (list('AB'), list('ab')),\n (Interval(0, 1), Interval(1, 2))])\n def test_length_errors(self, left, right):\n # GH 18789\n iv = Interval(left, right)\n msg = 'cannot compute length between .* and .*'\n with pytest.raises(TypeError, match=msg):\n iv.length\n\n def test_math_add(self, closed):\n interval = Interval(0, 1, closed=closed)\n expected = Interval(1, 2, closed=closed)\n\n result = interval + 1\n assert result == expected\n\n result = 1 + interval\n assert result == expected\n\n 
result = interval\n result += 1\n assert result == expected\n\n msg = r\"unsupported operand type\\(s\\) for \\+\"\n with pytest.raises(TypeError, match=msg):\n interval + interval\n\n with pytest.raises(TypeError, match=msg):\n interval + 'foo'\n\n def test_math_sub(self, closed):\n interval = Interval(0, 1, closed=closed)\n expected = Interval(-1, 0, closed=closed)\n\n result = interval - 1\n assert result == expected\n\n result = interval\n result -= 1\n assert result == expected\n\n msg = r\"unsupported operand type\\(s\\) for -\"\n with pytest.raises(TypeError, match=msg):\n interval - interval\n\n with pytest.raises(TypeError, match=msg):\n interval - 'foo'\n\n def test_math_mult(self, closed):\n interval = Interval(0, 1, closed=closed)\n expected = Interval(0, 2, closed=closed)\n\n result = interval * 2\n assert result == expected\n\n result = 2 * interval\n assert result == expected\n\n result = interval\n result *= 2\n assert result == expected\n\n msg = r\"unsupported operand type\\(s\\) for \\*\"\n with pytest.raises(TypeError, match=msg):\n interval * interval\n\n msg = r\"can\\'t multiply sequence by non-int\"\n with pytest.raises(TypeError, match=msg):\n interval * 'foo'\n\n def test_math_div(self, closed):\n interval = Interval(0, 1, closed=closed)\n expected = Interval(0, 0.5, closed=closed)\n\n result = interval / 2.0\n assert result == expected\n\n result = interval\n result /= 2.0\n assert result == expected\n\n msg = r\"unsupported operand type\\(s\\) for /\"\n with pytest.raises(TypeError, match=msg):\n interval / interval\n\n with pytest.raises(TypeError, match=msg):\n interval / 'foo'\n\n def test_math_floordiv(self, closed):\n interval = Interval(1, 2, closed=closed)\n expected = Interval(0, 1, closed=closed)\n\n result = interval // 2\n assert result == expected\n\n result = interval\n result //= 2\n assert result == expected\n\n msg = r\"unsupported operand type\\(s\\) for //\"\n with pytest.raises(TypeError, match=msg):\n interval // interval\n\n with pytest.raises(TypeError, match=msg):\n interval // 'foo'\n\n def test_constructor_errors(self):\n msg = \"invalid option for 'closed': foo\"\n with pytest.raises(ValueError, match=msg):\n Interval(0, 1, closed='foo')\n\n msg = 'left side of interval must be <= right side'\n with pytest.raises(ValueError, match=msg):\n Interval(1, 0)\n\n @pytest.mark.parametrize('tz_left, tz_right', [\n (None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])\n def test_constructor_errors_tz(self, tz_left, tz_right):\n # GH 18538\n left = Timestamp('2017-01-01', tz=tz_left)\n right = Timestamp('2017-01-02', tz=tz_right)\n error = TypeError if com._any_none(tz_left, tz_right) else ValueError\n with pytest.raises(error):\n Interval(left, right)\n"
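The interval test module above pins down the core Interval semantics: the default closed side, endpoint membership, arithmetic shifting, and length. A short sketch restating those behaviours, using only values that appear in the tests above:

from pandas import Interval, Timestamp, Timedelta

iv = Interval(0, 1)                          # closed='right' by default
assert iv.closed == 'right' and iv.mid == 0.5
assert 1 in iv and 0 not in iv               # right endpoint included, left excluded
assert iv + 1 == Interval(1, 2)              # shifting preserves the closed side
assert Interval(0, 5).length == 5
assert Interval(Timestamp('2017-01-01'),
                Timestamp('2017-01-06')).length == Timedelta('5 days')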
] | [
[
"pandas.notna",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.to_datetime",
"pandas.bdate_range",
"pandas.Timestamp",
"pandas.DatetimeIndex",
"pandas.Timedelta",
"pandas.tseries.offsets.CDay",
"numpy.int_",
"pandas.util.testing.assert_index_equal",
"numpy.timedelta64",
"pandas.compat.iteritems",
"pandas.date_range",
"pandas.to_timedelta",
"numpy.array",
"pandas.tseries.offsets.BDay"
],
[
"pandas.core.index.ensure_index_from_sequences",
"pandas._libs.parsers.sanitize_objects",
"pandas.errors.EmptyDataError",
"pandas._libs.lib.maybe_convert_numeric",
"numpy.asarray",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.errors.ParserError",
"pandas.compat.range",
"pandas.io.common._infer_compression",
"pandas.compat.map",
"pandas.compat.iteritems",
"pandas.io.common.is_file_like",
"pandas.core.dtypes.common.ensure_object",
"pandas.core.frame.DataFrame",
"numpy.where",
"numpy.roll",
"pandas.core.series.Series",
"pandas.errors.AbstractMethodError",
"pandas.compat.text_type",
"pandas._libs.lib.map_infer_mask",
"pandas.compat.StringIO",
"pandas.core.index.MultiIndex.from_tuples",
"pandas._libs.lib.map_infer",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_string_dtype",
"pandas.core.index.RangeIndex",
"numpy.zeros",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Appender",
"pandas.compat.u",
"numpy.putmask",
"pandas.io.common.UnicodeReader",
"numpy.isnan",
"pandas._libs.tslibs.parsing.try_parse_dates",
"pandas.io.date_converters.generic_parser",
"pandas._libs.lib.to_object_array",
"pandas.compat.itervalues",
"pandas.compat.to_str",
"pandas.io.common._validate_header_arg",
"pandas.core.dtypes.cast.astype_nansafe",
"pandas.io.common._get_handle",
"pandas.io.common.UTF8Recoder",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas._libs.parsers.TextReader",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.io.common.get_filepath_or_buffer",
"pandas.compat.zip",
"pandas._libs.lib.is_bool",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.infer_dtype",
"pandas.compat.lrange",
"pandas.core.index.Index"
],
[
"pandas.tseries.frequencies.to_offset",
"numpy.sqrt",
"pandas.Series",
"scipy.signal.get_window",
"pandas._libs.window.roll_max",
"pandas._libs.window.roll_min",
"pandas.DataFrame",
"numpy.concatenate",
"pandas.compat.numpy.function.validate_rolling_func",
"pandas.util._decorators.Substitution",
"pandas.core.dtypes.common.ensure_float64",
"pandas.compat.numpy.function.validate_window_func",
"pandas.core.common.asarray_tuplesafe",
"pandas.compat.numpy.function.validate_expanding_func",
"pandas.Index",
"pandas.core.index.ensure_index",
"numpy.apply_along_axis",
"pandas.core.dtypes.common.is_float_dtype",
"pandas._libs.window.roll_var",
"pandas._libs.window.roll_generic",
"pandas._libs.window.roll_quantile",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.concat",
"numpy.log",
"pandas.MultiIndex",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.MultiIndex.from_product",
"numpy.errstate",
"numpy.array",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.dtypes.common.is_bool",
"pandas.core.common.count_not_none",
"pandas.core.groupby.base.GroupByMixin._dispatch",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"numpy.isinf"
],
[
"pandas.Timedelta",
"pandas.Timestamp",
"pandas.core.common._any_none",
"pandas.Interval"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zmlabe/ExtremeEvents | [
"701c274c074dd2c4ae7c7294ec20f35c64e6ea2b",
"701c274c074dd2c4ae7c7294ec20f35c64e6ea2b",
"701c274c074dd2c4ae7c7294ec20f35c64e6ea2b",
"701c274c074dd2c4ae7c7294ec20f35c64e6ea2b",
"701c274c074dd2c4ae7c7294ec20f35c64e6ea2b"
] | [
"Scripts/calc_LRP.py",
"Scripts/plot_Histograms_RawLENS.py",
"Scripts/calc_Accuracy_ClassMultiDecade_ANN_v1.py",
"Scripts/calc_dataFunctions.py",
"Scripts/plot_Composites_LENS-Monthly.py"
] | [
"\"\"\"\nFunctions are useful untilities for interpretation of ANN\n \nNotes\n-----\n Author : Zachary Labe\n Date : 22 July 2020\n \nUsage\n-----\n [1] deepTaylorAnalysis(model,XXt,YYt,biasBool,annType,classChunk,startYear)\n [2] def _gradient_descent_for_bwo(cnn_model_object, loss_tensor,\n init_function_or_matrices,\n num_iterations,learning_rate):\n [3] bwo_for_class(cnn_model_object,target_class,init_function_or_matrices,\n num_iterations=DEFAULT_NUM_BWO_ITERATIONS,\n learning_rate=DEFAULT_BWO_LEARNING_RATE)\n [4] optimal_input(model,input_img,target_class,num_iterations=200,\n learning_rate = 0.01)\n\"\"\"\n\n###############################################################################\n###############################################################################\n###############################################################################\n\ndef deepTaylorAnalysis(model,XXt,YYt,biasBool,annType,classChunk,startYear):\n \"\"\"\n Calculate Deep Taylor for LRP\n \"\"\"\n print('<<<< Started deepTaylorAnalysis() >>>>')\n \n ### Import modules\n import numpy as np \n import innvestigate\n import calc_Stats as SSS\n \n ### Define useful functions\n def invert_year_output(ypred,startYear):\n inverted_years = SSS.convert_fuzzyDecade_toYear(ypred,startYear,\n classChunk)\n \n return inverted_years\n \n ### Define prediction error\n yearsUnique = np.unique(YYt)\n percCutoff = 90\n withinYearInc = 2.\n errTolerance = withinYearInc \n if(annType=='class'):\n err = YYt[:,0] - invert_year_output(model.predict(XXt),\n startYear)\n \n ###########################################################################\n ###########################################################################\n ###########################################################################\n ### Create the innvestigate analyzer instance for each sample\n if(annType=='class'):\n model_nosoftmax = innvestigate.utils.model_wo_softmax(model)\n # analyzer = innvestigate.analyzer.relevance_based.relevance_analyzer.LRPAlphaBeta(\n # model_nosoftmax,alpha=1,beta=0,bias=biasBool)\n analyzer = innvestigate.analyzer.relevance_based.relevance_analyzer.LRPZ(model_nosoftmax)\n # analyzer = innvestigate.analyzer.relevance_based.relevance_analyzer.LRPEpsilon(model_nosoftmax, \n # epsilon=1e-07, bias=biasBool)\n\n deepTaylorMaps = np.empty(np.shape(XXt))\n deepTaylorMaps[:] = np.nan\n\n # analyze each input via the analyzer\n for i in np.arange(0,np.shape(XXt)[0]):\n\n # ensure error is small, i.e. 
model was correct\n if(np.abs(err[i])<=errTolerance):\n sample = XXt[i]\n analyzer_output = analyzer.analyze(sample[np.newaxis,...])\n deepTaylorMaps[i] = analyzer_output/np.sum(analyzer_output.flatten())\n\n print('done with Deep Taylor analyzer normalization') \n \n ###########################################################################\n ###########################################################################\n ###########################################################################\n ### Compute the frequency of data at each point and the average relevance \n ### normalized by the sum over the area and the frequency above the 90th \n ### percentile of the map\n yearsUnique = np.unique(YYt)\n summaryDT = np.zeros((len(yearsUnique),np.shape(deepTaylorMaps)[1]))\n summaryDTFreq = np.zeros((len(yearsUnique),np.shape(deepTaylorMaps)[1]))\n summaryNanCount = np.zeros((len(yearsUnique),1))\n\n for i, year in enumerate(yearsUnique):\n ### Years within N years of each year\n j = np.where(np.abs(YYt-year)<=withinYearInc)[0] \n\n ### Average relevance\n a = np.nanmean(deepTaylorMaps[j,:],axis=0)\n summaryDT[i,:] = a[np.newaxis,...]\n\n ### Frequency of non-nans\n nancount = np.count_nonzero(~np.isnan(deepTaylorMaps[j,1]))\n summaryNanCount[i] = nancount\n\n ### Frequency above percentile cutoff\n count = 0\n for k in j:\n b = deepTaylorMaps[k,:]\n if(~np.isnan(b[0])):\n count = count + 1\n pVal = np.percentile(b,percCutoff)\n summaryDTFreq[i,:] = summaryDTFreq[i,:]+np.where(b>=pVal,1,0)\n if(count==0):\n summaryDTFreq[i,:] = 0\n else:\n summaryDTFreq[i,:] = summaryDTFreq[i,:]/count \n \n print('<<<< Completed deepTaylorAnalysis() >>>>') \n return(summaryDT,summaryDTFreq,summaryNanCount)\n\n###############################################################################\n###############################################################################\n###############################################################################\n\ndef _gradient_descent_for_bwo(\n cnn_model_object, loss_tensor, init_function_or_matrices,\n num_iterations, learning_rate):\n \"\"\"\n Does gradient descent (the nitty-gritty part) for backwards optimization.\n :param cnn_model_object: Trained instance of `keras.models.Model`.\n :param loss_tensor: Keras tensor, defining the loss function to be\n minimized.\n :param init_function_or_matrices: Either a function or list of numpy arrays.\n If function, will be used to initialize input matrices. See\n `create_gaussian_initializer` for an example.\n If list of numpy arrays, these are the input matrices themselves. Matrices\n should be processed in the exact same way that training data were processed\n (e.g., normalization method). Matrices must also be in the same order as\n training matrices, and the [q]th matrix in this list must have the same\n shape as the [q]th training matrix.\n :param num_iterations: Number of gradient-descent iterations (number of\n times that the input matrices are adjusted).\n :param learning_rate: Learning rate. 
At each iteration, each input value x\n will be decremented by `learning_rate * gradient`, where `gradient` is\n the gradient of the loss function with respect to x.\n :return: list_of_optimized_input_matrices: length-T list of optimized input\n matrices (numpy arrays), where T = number of input tensors to the model.\n If the input arg `init_function_or_matrices` is a list of numpy arrays\n (rather than a function), `list_of_optimized_input_matrices` will have\n the exact same shape, just with different values.\n \"\"\"\n ### Import modules\n import numpy as np\n import keras.backend as K\n import copy\n\n if isinstance(cnn_model_object.input, list):\n list_of_input_tensors = cnn_model_object.input\n else:\n list_of_input_tensors = [cnn_model_object.input]\n\n num_input_tensors = len(list_of_input_tensors)\n list_of_gradient_tensors = K.gradients(loss_tensor, list_of_input_tensors)\n\n for i in range(num_input_tensors):\n list_of_gradient_tensors[i] /= K.maximum(\n K.sqrt(K.mean(list_of_gradient_tensors[i] ** 2)),\n K.epsilon()\n )\n\n inputs_to_loss_and_gradients = K.function(\n list_of_input_tensors + [K.learning_phase()],\n ([loss_tensor] + list_of_gradient_tensors)\n )\n\n if isinstance(init_function_or_matrices, list):\n list_of_optimized_input_matrices = copy.deepcopy(\n init_function_or_matrices)\n else:\n list_of_optimized_input_matrices = [None] * num_input_tensors\n\n for i in range(num_input_tensors):\n these_dimensions = np.array(\n [1] + list_of_input_tensors[i].get_shape().as_list()[1:],\n dtype=int\n )\n\n list_of_optimized_input_matrices[i] = init_function_or_matrices(\n these_dimensions)\n\n for j in range(num_iterations):\n these_outputs = inputs_to_loss_and_gradients(\n list_of_optimized_input_matrices + [0]\n )\n\n if np.mod(j, 100) == 0:\n print('Loss after {0:d} of {1:d} iterations: {2:.2e}'.format(\n j, num_iterations, these_outputs[0]\n ))\n\n for i in range(num_input_tensors):\n list_of_optimized_input_matrices[i] -= (\n these_outputs[i + 1] * learning_rate\n )\n\n print('Loss after {0:d} iterations: {1:.2e}'.format(\n num_iterations, these_outputs[0]\n ))\n\n return list_of_optimized_input_matrices\n\n###############################################################################\n###############################################################################\n###############################################################################\n\ndef bwo_for_class(\n cnn_model_object, target_class, init_function_or_matrices,\n num_iterations,learning_rate):\n \"\"\"\n Does backwards optimization to maximize probability of target class.\n :param cnn_model_object: Trained instance of `keras.models.Model`.\n :param target_class: Synthetic input data will be created to maximize\n probability of this class.\n :param init_function_or_matrices: See doc for `_gradient_descent_for_bwo`.\n :param num_iterations: Same.\n :param learning_rate: Same.\n :return: list_of_optimized_input_matrices: Same.\n \"\"\"\n ### Import modules\n import numpy as np\n import keras.backend as K\n\n target_class = int(np.round(target_class))\n num_iterations = int(np.round(num_iterations))\n\n assert target_class >= 0\n assert num_iterations > 0\n assert learning_rate > 0.\n assert learning_rate < 1.\n\n num_output_neurons = (\n cnn_model_object.layers[-1].output.get_shape().as_list()[-1]\n )\n\n if num_output_neurons == 1:\n assert target_class <= 1\n\n if target_class == 1:\n loss_tensor = K.mean(\n (cnn_model_object.layers[-1].output[..., 0] - 1) ** 2\n )\n else:\n loss_tensor = K.mean(\n 
cnn_model_object.layers[-1].output[..., 0] ** 2\n )\n else:\n assert target_class < num_output_neurons\n\n loss_tensor = K.mean(\n (cnn_model_object.layers[-1].output[..., target_class] - 1) ** 2\n )\n\n return _gradient_descent_for_bwo(\n cnn_model_object=cnn_model_object, loss_tensor=loss_tensor,\n init_function_or_matrices=init_function_or_matrices,\n num_iterations=num_iterations, learning_rate=learning_rate)\n\n###############################################################################\n###############################################################################\n###############################################################################\n\ndef optimal_input(model,input_img,target_class,num_iterations=200,learning_rate = 0.01):\n \"\"\" \n OI\n \"\"\"\n ### Define modules\n import numpy as np\n import keras.backend as K\n \n ### Need to change the out_loss calculation to use your loss equation\n ### Need to use the target_output variable\n # out_loss = - K.sum(target_output * K.log(model.layers[-1].output))\n out_loss = K.mean(\n (model.layers[-1].output[..., int(target_class)] - 1) ** 2\n )\n\n ### Calculate the gradients at the input layer WRT your output loss\n grad = K.gradients(out_loss, [model.input])[0]\n\n ### Create a function to iterate the loss and gradient\n ### Inputs are an image and the learning phase (0 for false)\n ### Outputs are the loss for the output and gradients WRT input layer\n iterate_fcn = K.function([model.input, K.learning_phase()], \n [out_loss, grad])\n\n for iterVal in np.arange(0,num_iterations):\n\n ### Calculate the loss and the gradients at the input layer based on the \n ### current stage of the input image\n out_loss, out_grad = iterate_fcn([input_img, 0])\n\n ### Take a step along gradient WRT input -- \n ### updates the input slightly towards its optimal input\n input_img -= out_grad*learning_rate\n \n return input_img\n\n###############################################################################\n###############################################################################\n###############################################################################\n",
"\"\"\"\nScipt plots histograms of data with mean removed over 4 time periods\n\nAuthor : Zachary M. Labe\nDate : 7 January 2021\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport scipy.stats as sts\nimport matplotlib.pyplot as plt\nimport calc_Utilities as UT\nimport calc_dataFunctions as df\nimport palettable.wesanderson as ww\nimport calc_Stats as dSS\n\n### Set preliminaries\ndirectoryfigure = '/Users/zlabe/Desktop/ExtremeEvents_v1/Composites/LENS/'\nreg_name = 'Globe'\ndataset = 'lens'\nrm_ensemble_mean = True\nvariq = ['T2M']\nmonthlychoice = 'annual'\nyeartype = ['1920-1964','1965-2009','2010-2054','2055-2099']\n\n###############################################################################\n###############################################################################\n###############################################################################\ndef read_primary_dataset(variq,dataset,lat_bounds,lon_bounds):\n data,lats,lons = df.readFiles(variq,dataset,monthlychoice)\n datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)\n print('\\nOur dataset: ',dataset,' is shaped',data.shape)\n return datar,lats,lons\n###############################################################################\n###############################################################################\n###############################################################################\n### Call functions\nfor i in range(len(variq)):\n ### Read in data for selected region \n lat_bounds,lon_bounds = UT.regions(reg_name)\n dataall,lats,lons = read_primary_dataset(variq[i],dataset,\n lat_bounds,lon_bounds)\n \n ### Remove ensemble mean\n if rm_ensemble_mean == True:\n data= dSS.remove_ensemble_mean(dataall)\n print('*Removed ensemble mean*')\n elif rm_ensemble_mean == False:\n data = dataall\n \n ### Composite over selected period (x2)\n if monthlychoice == 'DJF':\n years = np.arange(dataall.shape[1]) + 1921\n else:\n years = np.arange(dataall.shape[1]) + 1920\n \nmeancomp = np.empty((len(years)//40,data.shape[0],data.shape[2],data.shape[3]))\nfor count,i in enumerate(range(0,len(years)-45,45)):\n meancomp[count,:,:,:,] = np.nanmean(data[:,i:i+45,:,:],axis=1)\n# meancomp = meancomp[:,:35,:,:]\n \n### Mesh latxlon\nlon2,lat2 = np.meshgrid(lons,lats)\n\n### Set up different regions\n# Globe\nlat_globe = lats.copy()\nlon_globe = lons.copy()\nglobe = meancomp.copy()\nlatmin_globe = -90.\nlatmax_globe = 90.\nlonmin_globe = 0.\nlonmax_globe = 360.\nname_globe = 'Globe'\n\n# Tropics\nlat_trop = lats.copy()\nlon_trop = lons.copy()\ntrop = meancomp.copy()\nlatmin_trop = -30.\nlatmax_trop = 30.\nlonmin_trop = 0.\nlonmax_trop = 360.\nname_trop = 'Tropics'\n\n# Northern Hemisphere\nlat_nh = lats.copy()\nlon_nh = lons.copy()\nnh = meancomp.copy()\nlatmin_nh = 0.\nlatmax_nh = 90.\nlonmin_nh = 0.\nlonmax_nh = 360.\nname_nh = 'Northern Hemisphere'\n\n# Southern Hemisphere\nlat_sh = lats.copy()\nlon_sh = lons.copy()\nsh = meancomp.copy()\nlatmin_sh = -90.\nlatmax_sh = 0.\nlonmin_sh = 0.\nlonmax_sh = 360.\nname_sh = 'Southern Hemisphere'\n\n# Indian Ocean\nlat_io = lats.copy()\nlon_io = lons.copy()\nio = meancomp.copy()\nlatmin_io = -10.\nlatmax_io = 10.\nlonmin_io = 50.\nlonmax_io = 110.\nname_io = 'Indian Ocean'\n\n# ENSO region\nlat_enso = lats.copy()\nlon_enso = lons.copy()\nenso = meancomp.copy()\nlatmin_enso = -5.\nlatmax_enso = 5.\nlonmin_enso = 160.\nlonmax_enso = 280.\nname_enso = 'ENSO'\n\n# North Atlantic\nlat_na = lats.copy()\nlon_na = lons.copy()\nna = meancomp.copy()\nlatmin_na = 50.\nlatmax_na = 
60.\nlonmin_na = 315.\nlonmax_na = 340.\nname_na = 'North Atlantic'\n\n# Arctic\nlat_a = lats.copy()\nlon_a = lons.copy()\na = meancomp.copy()\nlatmin_a = 67.\nlatmax_a = 90.\nlonmin_a= 0.\nlonmax_a = 360.\nname_a = 'Arctic'\n\n# Central Africa\nlat_africa = lats.copy()\nlon_africa = lons.copy()\nafrica = meancomp.copy()\nlatmin_africa = 0.\nlatmax_africa = 15.\nlonmin_africa = 0.\nlonmax_africa = 15.\nname_africa = 'Central Africa'\n\n# Southern Ocean\nlat_so = lats.copy()\nlon_so = lons.copy()\nso = meancomp.copy()\nlatmin_so = -66.\nlatmax_so = 40.\nlonmin_so = 5.\nlonmax_so = 70.\nname_so = 'Southern Ocean'\n\n# Create lists\nnames = [name_globe,name_trop,name_nh,name_sh,name_io,\n name_enso,name_na,name_a,name_africa,name_so]\n\nlattall = [lat_globe,lat_trop,lat_nh,lat_sh,lat_io,\n lat_enso,lat_na,lat_a,lat_africa,lat_so]\nlatallmin = [latmin_globe,latmin_trop,latmin_nh,latmin_sh,latmin_io,\n latmin_enso,latmin_na,latmin_a,latmin_africa,latmin_so]\nlatallmax = [latmax_globe,latmax_trop,latmax_nh,latmax_sh,latmax_io,\n latmax_enso,latmax_na,latmax_a,latmax_africa,latmax_so]\n\nlonnall = [lon_globe,lon_trop,lon_nh,lon_sh,lon_io,\n lon_enso,lon_na,lon_a,lon_africa,lon_so]\nlonallmin = [lonmin_globe,lonmin_trop,lonmin_nh,lonmin_sh,lonmin_io,\n lonmin_enso,lonmin_na,lonmin_a,lonmin_africa,lonmin_so]\nlonallmax = [lonmax_globe,lonmax_trop,lonmax_nh,lonmax_sh,lonmax_io,\n lonmax_enso,lonmax_na,lonmax_a,lonmax_africa,lonmax_so]\n\nregionsall = [globe,trop,nh,sh,io,enso,na,a,africa,so]\n\n### Calculate regional averages for histograms\nregions_average = []\nfor i in range(len(regionsall)):\n latq = np.where((lats >= latallmin[i]) & (lats <= latallmax[i]))[0]\n lonq = np.where((lons >= lonallmin[i]) & (lons <= lonallmax[i]))[0]\n latnew = lattall[i][latq]\n lonnew = lonnall[i][lonq]\n lonnew2,latnew2 = np.meshgrid(lonnew,latnew)\n \n regiongrid1 = regionsall[i][:,:,latq,:]\n regiongrid = regiongrid1[:,:,:,lonq]\n \n ave = UT.calc_weightedAve(regiongrid,latnew2)\n regions_average.append(ave)\n \n### Calculate PDFs\nnum_bins = np.arange(-0.4,0.401,0.005)\npdfregions = np.empty((len(regions_average),meancomp.shape[0],len(num_bins)))\nfor rrr in range(len(regions_average)):\n for hist in range(meancomp.shape[0]):\n m,s = sts.norm.fit(regions_average[rrr][hist].ravel())\n pdfregions[rrr,hist,:] = sts.norm.pdf(num_bins,m,s)\n\n###############################################################################\n###############################################################################\n############################################################################### \n### Create graph \nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([])\n \n### Begin each histogram set\ncolor=ww.Chevalier_4.mpl_colormap(np.linspace(0,1,meancomp.shape[0]))\npp = np.empty((pdfregions.shape[0]))\nfor rrrr in range(pdfregions.shape[0]):\n \n fig = plt.figure()\n ax = plt.subplot(111)\n adjust_spines(ax, ['left','bottom']) \n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none') \n ax.spines['bottom'].set_color('dimgrey')\n ax.spines['left'].set_color('dimgrey')\n ax.spines['bottom'].set_linewidth(2)\n 
ax.spines['left'].set_linewidth(2) \n ax.tick_params('both',length=5.5,width=2,which='major',color='dimgrey',\n labelsize=6) \n ax.yaxis.grid(zorder=1,color='dimgrey',alpha=0.35)\n \n ### Calculate statistical difference\n t,p = sts.ks_2samp(pdfregions[rrrr][0,:],pdfregions[rrrr][-1,:])\n pp[rrrr] = p\n \n for i,c in zip(range(pdfregions.shape[1]),color): \n data = pdfregions[rrrr,i,:]\n \n plt.plot(num_bins,data,color=c,linewidth=2,label=r'\\textbf{%s}' % yeartype[i],\n clip_on=False)\n \n plt.xticks(np.arange(-0.4,0.41,0.1),map(str,np.round(np.arange(-0.4,0.41,0.1),2)))\n plt.yticks(np.arange(0,21,2),map(str,np.arange(0,21,2)))\n plt.xlim([-0.4,0.4])\n plt.ylim([0,12])\n \n l = plt.legend(shadow=False,fontsize=7,loc='upper center',\n fancybox=True,frameon=False,ncol=4,bbox_to_anchor=(0.5,1.1),\n labelspacing=0.2,columnspacing=1,handletextpad=0.4)\n for text in l.get_texts():\n text.set_color('k')\n \n plt.text(-0.4,10.9,r'\\textbf{%s}' % names[rrrr],fontsize=20,\n color='dimgrey',ha='left',va='center')\n\n if p < 0.0001:\n plt.text(0.4,10.9,r'\\textbf{\\textit{p} $\\bf{<<}$ 0.01}',fontsize=7,\n color='k',ha='right',va='center')\n elif p < 0.01:\n plt.text(0.4,10.9,r'\\textbf{\\textit{p} $\\bf{<}$ 0.01}',fontsize=7,\n color='k',ha='right',va='center')\n elif p < 0.05:\n plt.text(0.4,10.9,r'\\textbf{\\textit{p} $\\bf{<}$ 0.05}',fontsize=7,\n color='k',ha='right',va='center')\n \n plt.savefig(directoryfigure + 'PDFs_%s_PeriodsInternal.png' % names[rrrr],\n dpi=300)\n\n###############################################################################\n###############################################################################\n############################################################################### \n### Begin each histogram set\nc2=ww.FantasticFox2_5.mpl_colormap\npp = np.empty((pdfregions.shape[0]))\nfor rrrr in range(pdfregions.shape[0]):\n \n fig = plt.figure()\n ax = plt.subplot(111)\n adjust_spines(ax, ['left','bottom']) \n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none') \n ax.spines['bottom'].set_color('dimgrey')\n ax.spines['left'].set_color('dimgrey')\n ax.spines['bottom'].set_linewidth(2)\n ax.spines['left'].set_linewidth(2) \n ax.tick_params('both',length=5.5,width=2,which='major',color='dimgrey',\n labelsize=6) \n ax.yaxis.grid(zorder=1,color='dimgrey',alpha=0.35)\n \n ### Calculate statistical difference\n datafirst = regions_average[rrrr][0,:]\n datalasts = regions_average[rrrr][-1,:]\n \n n_lensf, bins_lensf, patches_lensf = plt.hist(datafirst,\n bins=np.arange(-0.4,0.41,0.02),density=False,color=c2(0.1),\n label=r'\\textbf{1920-1964}',alpha=0.75,clip_on=False)\n for i in range(len(patches_lensf)):\n patches_lensf[i].set_facecolor(c2(0.1))\n patches_lensf[i].set_edgecolor('white')\n patches_lensf[i].set_linewidth(0.5)\n \n n_lensl, bins_lensl, patches_lensl = plt.hist(datalasts,\n bins=np.arange(-0.4,0.41,0.02),density=False,color=c2(0.6),\n label=r'\\textbf{2055-2099}',alpha=0.75,clip_on=False)\n for i in range(len(patches_lensl)):\n patches_lensl[i].set_facecolor(c2(0.6))\n patches_lensl[i].set_edgecolor('white')\n patches_lensl[i].set_linewidth(0.5)\n \n plt.xticks(np.arange(-0.4,0.41,0.1),map(str,np.round(np.arange(-0.4,0.41,0.1),2)))\n plt.yticks(np.arange(0,21,2),map(str,np.arange(0,21,2)))\n plt.xlim([-0.4,0.4])\n plt.ylim([0,14])\n \n l = plt.legend(shadow=False,fontsize=7,loc='upper center',\n fancybox=True,frameon=False,ncol=4,bbox_to_anchor=(0.5,1.1),\n labelspacing=0.2,columnspacing=1,handletextpad=0.4)\n for text in 
l.get_texts():\n text.set_color('k')\n \n plt.text(-0.4,12.8,r'\\textbf{%s}' % names[rrrr],fontsize=20,\n color='dimgrey',ha='left',va='center')\n \n plt.savefig(directoryfigure + 'Histogram_%s_PeriodsInternal.png' % names[rrrr],\n dpi=300)\n ",
"\"\"\"\nScript calculates accuracy of multi-decadal ANNv1\n\nAuthor : Zachary M. Labe\nDate : 19 January 2021\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport scipy.stats as sts\nimport matplotlib.pyplot as plt\nimport calc_Utilities as UT\nimport calc_dataFunctions as df\nimport palettable.wesanderson as ww\nimport calc_Stats as dSS\nfrom sklearn.metrics import accuracy_score\n\n### Set preliminaries\ndirectoryfigure = '/Users/zlabe/Desktop/ExtremeEvents_NewANN_v1/LENS/'\ndirectorydata = '/Users/zlabe/Documents/Research/ExtremeEvents/Data/'\nreg_name = 'Globe'\ndataset = 'lens'\ndataset_obs = '20CRv3'\nrm_ensemble_mean = True\nvariq = ['T2M']\nseasons = ['annual']\nland_only = False\nocean_only = False\nrm_merid_mean = False\nrm_annual_mean = False\nrm_ensemble_mean = True\nensnum = 40\nnum_of_class = 3\niterations = 100\n\n### Create sample class labels for 1920-2099\nif num_of_class == 3:\n yearlabels = np.arange(1920,2099+1,1)\n years = np.arange(1920,2099+1,1)\n lengthlabels = yearlabels.shape[0]//num_of_class\n array1 = np.asarray([0]*lengthlabels)\n array2 = np.asarray([1]*lengthlabels)\n array3 = np.asarray([2]*lengthlabels)\n classesl = np.concatenate([array1,array2,array3],axis=None)\nelif num_of_class == 2:\n yearlabels = np.arange(1920,2099+1,1)\n lengthlabels = yearlabels.shape[0]//num_of_class\n array1 = np.asarray([0]*lengthlabels)\n array2 = np.asarray([1]*lengthlabels)\n classesl = np.concatenate([array1,array2],axis=None)\n \n### Read in data\ntrainq = np.genfromtxt(directorydata + 'training_%s_%s_%s_%s_iterations%s_v2.txt' % (variq[0],seasons[0],reg_name,dataset,iterations))\ntestq = np.genfromtxt(directorydata + 'testing_%s_%s_%s_%s_iterations%s_v2.txt' % (variq[0],seasons[0],reg_name,dataset,iterations))\nobsq = np.genfromtxt(directorydata + 'obsout_%s_%s_%s_%s_iterations%s_v2.txt' % (variq[0],seasons[0],reg_name,dataset_obs,iterations))\n\n### Reshape\ntrain = np.reshape(trainq,(trainq.shape[0]//yearlabels.shape[0],yearlabels.shape[0],trainq.shape[1]))\ntest = np.reshape(testq,(testq.shape[0]//yearlabels.shape[0],yearlabels.shape[0],testq.shape[1]))\nobs = obsq\n\n### Combination of data\ntotal = np.append(train,test,axis=0)\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Calculate accuracy\n\n### Argmax\ndef truelabel(data):\n \"\"\"\n Calculate argmax\n \"\"\"\n maxindexdata = np.empty((data.shape[0],data.shape[1]))\n for i in range(data.shape[0]):\n maxindexdata[i,:] = np.argmax(data[i,:,:],axis=1) \n meanmaxindexdata= np.nanmean(maxindexdata,axis=0)\n \n return maxindexdata,meanmaxindexdata\n\ndef accuracyTotalTime(data_pred,data_true):\n \"\"\"\n Compute accuracy for the entire time series\n \"\"\"\n accdata_pred = np.empty((data_pred.shape[0]))\n for i in range(data_pred.shape[0]):\n accdata_pred[i] = accuracy_score(data_true,data_pred[i,:])\n \n return accdata_pred\n\ndef accuracyTPeriodTime(data_pred,data_true):\n \"\"\"\n Compute accuracy for the three periods\n \"\"\"\n time = data_true.shape[0]\n period = int(time//3)\n \n accdata_pred = np.empty((data_pred.shape[0],3))\n for i in range(data_pred.shape[0]):\n for save,j in enumerate(range(0,time,period)):\n accdata_pred[i,save] = accuracy_score(data_true[j:j+period],\n data_pred[i,j:j+period])\n \n return accdata_pred\n\n### Calculate statistics\nindextrain,meanindextrain = 
truelabel(train)\nindextest,meanindextest = truelabel(test)\n\nacctrain = accuracyTotalTime(indextrain,classesl)\nacctest = accuracyTotalTime(indextest,classesl)\nnp.savetxt(directorydata + 'train_totalaccuracy_ClassMultiDecade_ANNv1.txt',\n acctrain)\nnp.savetxt(directorydata + 'test_totalaccuracy_ClassMultiDecade_ANNv1.txt',\n acctest)\n\nperiodtrain = accuracyTPeriodTime(indextrain,classesl)\nperiodtest = accuracyTPeriodTime(indextest,classesl)\nnp.savetxt(directorydata + 'train_periodaccuracy_ClassMultiDecade_ANNv1.txt',\n periodtrain)\nnp.savetxt(directorydata + 'test_periodaccuracy_ClassMultiDecade_ANNv1.txt',\n periodtest)\n",
"\"\"\"\nFunctions are useful untilities for data processing in the NN\n \nNotes\n-----\n Author : Zachary Labe\n Date : 8 July 2020\n \nUsage\n-----\n [1] readFiles(variq,dataset)\n [2] getRegion(data,lat1,lon1,lat_bounds,lon_bounds)\n\"\"\"\n\ndef readFiles(variq,dataset,monthlychoice):\n \"\"\"\n Function reads in data for selected dataset\n\n Parameters\n ----------\n variq : string\n variable for analysis\n dataset : string\n name of data set for primary data\n \n Returns\n -------\n data : numpy array\n data from selected data set\n lat1 : 1d numpy array\n latitudes\n lon1 : 1d numpy array\n longitudes\n\n Usage\n -----\n data,lat1,lon1 = readFiles(variq,dataset)\n \"\"\"\n print('\\n>>>>>>>>>> Using readFiles function!')\n \n ### Import modules\n import numpy as np\n \n if dataset == 'lens':\n import read_LENS as LL\n directorydataLL = '/Users/zlabe/Data/LENS/monthly/'\n slicebaseLL = np.arange(1951,1980+1,1)\n sliceshapeLL = 4\n slicenanLL = 'nan'\n addclimoLL = True\n takeEnsMeanLL = False\n lat1,lon1,data,ENSmean = LL.read_LENS(directorydataLL,variq,\n monthlychoice,slicebaseLL,\n sliceshapeLL,addclimoLL,\n slicenanLL,takeEnsMeanLL)\n elif dataset == 'best':\n import read_BEST as BB\n directorydataBB = '/Users/zlabe/Data/BEST/'\n sliceyearBB = np.arange(1956,2019+1,1)\n sliceshapeBB = 3\n slicenanBB = 'nan'\n addclimoBB = True\n ENSmean = np.nan\n lat1,lon1,data = BB.read_BEST(directorydataBB,monthlychoice,\n sliceyearBB,sliceshapeBB,addclimoBB,\n slicenanBB)\n elif dataset == 'ERA5':\n import read_ERA5_monthly as ER\n directorydataER = '/Users/zlabe/Data/ERA5/'\n sliceyearER = np.arange(1979,2019+1,1)\n sliceshapeER = 3\n slicenanER = 'nan'\n addclimoER = True\n ENSmean = np.nan\n lat1,lon1,data = ER.read_ERA5_monthly(variq,directorydataER,\n monthlychoice,sliceyearER,\n sliceshapeER,addclimoER,\n slicenanER)\n elif dataset == '20CRv3':\n import read_20CRv3_monthly as TW\n directorydataTW = '/Users/zlabe/Data/20CRv3/'\n sliceyearTW = np.arange(1836,2015+1,1)\n sliceshapeTW = 3\n slicenanTW = 'nan'\n addclimoTW = True\n ENSmean = np.nan\n lat1,lon1,data = TW.read_20CRv3_monthly(variq,directorydataTW,\n monthlychoice,sliceyearTW,\n sliceshapeTW,addclimoTW,\n slicenanTW)\n elif dataset == 'RANDOM':\n import read_randomData_monthly as RA\n directorydataRA = '/Users/zlabe/Data/'\n slicebaseRA = np.arange(1951,1980+1,1)\n sliceshapeRA = 4\n slicenanRA = 'nan'\n addclimoRA = True\n takeEnsMeanRA = False\n lat1,lon1,data,ENSmean = RA.read_randomData_monthly(directorydataRA,variq,\n monthlychoice,slicebaseRA,\n sliceshapeRA,addclimoRA,\n slicenanRA,takeEnsMeanRA)\n elif any([dataset=='CCCma_canesm2',dataset=='CSIRO_MK3.6',\n dataset=='GFDL_CM3',dataset=='GFDL_ESM2M',\n dataset=='KNMI_ecearth',dataset=='MPI']):\n import read_SMILE as SM\n directorySS = '/Users/zlabe/Data/SMILE/'\n simulationSS = dataset\n slicebaseSS = np.arange(1951,1980+1,1)\n sliceshapeSS = 4\n slicenanSS = 'nan'\n addclimoSS = True\n takeEnsMeanSS = False\n lat1,lon1,data,ENSmean = SM.read_SMILE(directorySS,simulationSS,variq,monthlychoice,\n slicebaseSS,sliceshapeSS,addclimoSS,\n slicenanSS,takeEnsMeanSS) \n elif any([dataset=='XGHG',dataset=='XAER',\n dataset=='XBMB',dataset=='XLULC']):\n import read_SINGLE_LENS as SI\n directorySI = '/Users/zlabe/Data/LENS/SINGLEFORCING/'\n simulationSI = dataset\n slicebaseSI = np.arange(1951,1980+1,1)\n sliceshapeSI = 4\n slicenanSI = 'nan'\n addclimoSI = True\n takeEnsMeanSI = False\n lat1,lon1,data,ENSmean = SI.read_SINGLE_LENS(directorySI,simulationSI,variq,monthlychoice,\n 
slicebaseSI,sliceshapeSI,addclimoSI,\n slicenanSI,takeEnsMeanSI)\n else:\n ValueError('WRONG DATA SET SELECTED!')\n \n print('>>>>>>>>>> Completed: Finished readFiles function!')\n return data,lat1,lon1 \n\ndef getRegion(data,lat1,lon1,lat_bounds,lon_bounds):\n \"\"\"\n Function masks out region for data set\n\n Parameters\n ----------\n data : 3d+ numpy array\n original data set\n lat1 : 1d array\n latitudes\n lon1 : 1d array\n longitudes\n lat_bounds : 2 floats\n (latmin,latmax)\n lon_bounds : 2 floats\n (lonmin,lonmax)\n \n Returns\n -------\n data : numpy array\n MASKED data from selected data set\n lat1 : 1d numpy array\n latitudes\n lon1 : 1d numpy array\n longitudes\n\n Usage\n -----\n data,lats,lons = getRegion(data,lat1,lon1,lat_bounds,lon_bounds)\n \"\"\"\n print('\\n>>>>>>>>>> Using get_region function!')\n \n ### Import modules\n import numpy as np\n \n ### Note there is an issue with 90N latitude (fixed!)\n lat1 = np.round(lat1,3)\n \n ### Mask latitudes\n if data.ndim == 2:\n latq = np.where((lat1 >= lat_bounds[0]) & (lat1 <= lat_bounds[1]))[0]\n latn = lat1[latq]\n datalatq = data[latq,:] \n ### Mask longitudes\n lonq = np.where((lon1 >= lon_bounds[0]) & (lon1 <= lon_bounds[1]))[0]\n lonn = lon1[lonq]\n datalonq = datalatq[:,lonq]\n \n elif data.ndim == 3:\n latq = np.where((lat1 >= lat_bounds[0]) & (lat1 <= lat_bounds[1]))[0]\n latn = lat1[latq]\n datalatq = data[:,latq,:] \n ### Mask longitudes\n lonq = np.where((lon1 >= lon_bounds[0]) & (lon1 <= lon_bounds[1]))[0]\n lonn = lon1[lonq]\n datalonq = datalatq[:,:,lonq]\n \n elif data.ndim == 4:\n latq = np.where((lat1 >= lat_bounds[0]) & (lat1 <= lat_bounds[1]))[0]\n latn = lat1[latq]\n datalatq = data[:,:,latq,:] \n ### Mask longitudes\n lonq = np.where((lon1 >= lon_bounds[0]) & (lon1 <= lon_bounds[1]))[0]\n lonn = lon1[lonq]\n datalonq = datalatq[:,:,:,lonq]\n \n elif data.ndim == 6:\n latq = np.where((lat1 >= lat_bounds[0]) & (lat1 <= lat_bounds[1]))[0]\n latn = lat1[latq]\n datalatq = data[:,:,:,latq,:]\n ### Mask longitudes\n lonq = np.where((lon1 >= lon_bounds[0]) & (lon1 <= lon_bounds[1]))[0]\n lonn = lon1[lonq]\n datalonq = datalatq[:,:,:,:,lonq]\n \n ### New variable name\n datanew = datalonq\n \n print('>>>>>>>>>> Completed: getRegion function!')\n return datanew,latn,lonn \n\n### Test functions - do not use!\n# import numpy as np\n# import matplotlib.pyplot as plt\n# import calc_Utilities as UT\n# data,lat1,lon1 = readFiles('T2M','RANDOM','annual')",
"\"\"\"\nCreated on Thu Aug 13 08:20:11 2020\n\n@author: zlabe\n\"\"\"\n\n\"\"\"\nScript plots composites for large ensemble data (monthly) using \nseveral variables\n\nAuthor : Zachary M. Labe\nDate : 13 August 2020\n\"\"\"\n\n### Import modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport palettable.cubehelix as cm\nimport calc_Utilities as UT\nimport calc_dataFunctions as df\nimport calc_Stats as dSS\n\n### Set preliminaries\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \ndirectoryfigure = '/Users/zlabe/Desktop/ExtremeEvents_v1/Composites/LENS/'\nreg_name = 'Globe'\ndataset = 'lens'\nrm_ensemble_mean = False\nvariq = ['T2M']\nmonthlychoice = 'annual'\n\ndef read_primary_dataset(variq,dataset,lat_bounds,lon_bounds):\n data,lats,lons = df.readFiles(variq,dataset,monthlychoice)\n datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)\n print('\\nOur dataset: ',dataset,' is shaped',data.shape)\n return datar,lats,lons\n\nfor i in range(len(variq)):\n ### Read in data for selected region \n lat_bounds,lon_bounds = UT.regions(reg_name)\n dataall,lats,lons = read_primary_dataset(variq[i],dataset,\n lat_bounds,lon_bounds)\n \n ### Remove ensemble mean\n if rm_ensemble_mean == True:\n data= dSS.remove_ensemble_mean(dataall)\n print('*Removed ensemble mean*')\n elif rm_ensemble_mean == False:\n data = dataall\n \n ### Calculate ensemble mean\n meandata = np.nanmean(data,axis=0)\n del data #save storage\n \n ### Composite over selected period (x2)\n if monthlychoice == 'DJF':\n years = np.arange(meandata.shape[0]) + 1921\n else:\n years = np.arange(meandata.shape[0]) + 1920\n \n length = years.shape[0]//2\n historical = meandata[:length,:,:]\n future = meandata[length:,:,:]\n \n ### Average over composites for plotting\n historicalm = np.nanmean(historical,axis=0)\n futurem = np.nanmean(future,axis=0)\n \n ### Calculate significance\n pruns = UT.calc_FDR_ttest(future[:,:,:],historical[:,:,:],0.05) #FDR\n \n ###########################################################################\n ###########################################################################\n ###########################################################################\n ### Begin plots!!!\n fig = plt.figure()\n \n ### Select graphing preliminaries\n if rm_ensemble_mean == True:\n if variq[i] == 'T2M':\n label = r'\\textbf{T2M [$\\bf{^{\\circ}}$C]}'\n cmap = cm.cubehelix3_16_r.mpl_colormap \n elif variq[i] == 'SLP':\n label = r'\\textbf{SLP [hPa]}'\n cmap = cm.cubehelix3_16_r.mpl_colormap \n elif variq[i] == 'U700':\n label = r'\\textbf{U700 [m/s]}'\n cmap = cm.cubehelix3_16_r.mpl_colormap \n limit = np.linspace(futurem.min(),futurem.max(),300)\n barlim = np.linspace(futurem.min(),futurem.max(),2)\n elif rm_ensemble_mean == False:\n if variq[i] == 'T2M':\n label = r'\\textbf{T2M [$\\bf{^{\\circ}}$C]}'\n cmap = plt.cm.twilight\n limit = np.arange(-35,35.1,0.5)\n barlim = np.arange(-35,36,35)\n elif variq[i] == 'SLP':\n label = r'\\textbf{SLP [hPa]}'\n cmap = plt.cm.cividis\n limit = np.arange(985,1035.1,2)\n barlim = np.arange(985,1036,10)\n elif variq[i] == 'U700':\n label = r'\\textbf{U700 [m/s]}'\n cmap = cm.classic_16.mpl_colormap \n limit = np.arange(-10,20.1,0.5)\n barlim = np.arange(-10,21,5)\n \n ###########################################################################\n ax = plt.subplot(211)\n m = Basemap(projection='moll',lon_0=0,resolution='l') \n var, lons_cyclic = 
addcyclic(historicalm, lons)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lats)\n x, y = m(lon2d, lat2d)\n \n circle = m.drawmapboundary(fill_color='white',color='dimgray',\n linewidth=0.45)\n circle.set_clip_on(False)\n \n cs = m.contourf(x,y,var,limit,extend='both')\n \n m.drawcoastlines(color='dimgray',linewidth=0.7) \n cs.set_cmap(cmap) \n plt.text(0,0,r'\\textbf{1921-2010}',color='dimgrey',fontsize=10)\n \n ###########################################################################\n ax = plt.subplot(212)\n m = Basemap(projection='moll',lon_0=0,resolution='l') \n var, lons_cyclic = addcyclic(futurem, lons)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lats)\n x, y = m(lon2d, lat2d)\n \n circle = m.drawmapboundary(fill_color='white',color='dimgray',\n linewidth=0.45)\n circle.set_clip_on(False)\n \n cs = m.contourf(x,y,var,limit,extend='both')\n \n m.drawcoastlines(color='dimgray',linewidth=0.7) \n cs.set_cmap(cmap) \n plt.text(0,0,r'\\textbf{2011-2100}',color='dimgrey',fontsize=10)\n \n ###########################################################################\n ###########################################################################\n ###########################################################################\n cbar_ax = fig.add_axes([0.293,0.1,0.4,0.03]) \n cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',\n extend='both',extendfrac=0.07,drawedges=False)\n \n cbar.set_label(label,fontsize=14,color='k',labelpad=1.4) \n \n cbar.set_ticks(barlim)\n cbar.set_ticklabels(list(map(str,barlim)))\n cbar.ax.tick_params(axis='x', size=.01,labelsize=6)\n cbar.outline.set_edgecolor('dimgrey')\n \n plt.tight_layout()\n plt.subplots_adjust(bottom=0.16,wspace=0,hspace=0.01)\n \n if rm_ensemble_mean == True:\n plt.savefig(directoryfigure + 'Composites_LENS_%s_%s.png' \\\n % (monthlychoice,variq[i]),dpi=300)\n elif rm_ensemble_mean == False:\n plt.savefig(directoryfigure + 'Composites_LENS_%s_%s_ORIGINAL.png' \\\n % (monthlychoice,variq[i]),dpi=300)\n"
] | [
[
"numpy.abs",
"numpy.unique",
"numpy.isnan",
"numpy.arange",
"numpy.percentile",
"numpy.round",
"numpy.shape",
"numpy.nanmean",
"numpy.mod",
"numpy.where"
],
[
"matplotlib.pyplot.legend",
"scipy.stats.ks_2samp",
"numpy.linspace",
"scipy.stats.norm.pdf",
"numpy.arange",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"numpy.nanmean",
"matplotlib.pyplot.text",
"numpy.meshgrid",
"numpy.where",
"numpy.empty",
"matplotlib.pyplot.figure"
],
[
"numpy.reshape",
"numpy.arange",
"numpy.asarray",
"numpy.genfromtxt",
"numpy.concatenate",
"numpy.append",
"numpy.argmax",
"numpy.nanmean",
"numpy.savetxt",
"numpy.empty",
"sklearn.metrics.accuracy_score"
],
[
"numpy.round",
"numpy.arange",
"numpy.where"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplot",
"numpy.nanmean",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.text",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hdelecki/alpha-zero-general-ut3 | [
"1b38aad66c3ac38d815fbd21c34475bfa7573706"
] | [
"ut3/UT3Game.py"
] | [
"from __future__ import print_function\nimport sys\nsys.path.append('..')\nfrom Game import Game\nfrom .UT3Logic import Board\nimport numpy as np\n\n\"\"\"\nGame class implementation for the game of Ultimate TicTacToe.\n\nAuthor: Harrison Delecki, github.com/hdelecki\n\nBased on the OthelloGame by Surag Nair.\n\"\"\"\nclass UT3Game(Game):\n def __init__(self, n=3, conv=True):\n self.conv = conv\n self.n = n\n #self.last_move = None\n\n def getArray(self, board):\n if self.conv:\n global_rep = np.repeat(np.repeat(board.global_pieces, 3, axis=1), 3, axis=0)\n local_rep = board.local_pieces\n play_rep = np.repeat(np.repeat(board.play_map, 3, axis=1), 3, axis=0)\n #valid_rep = np.zeros(local_rep.shape)\n #0valids = board.get_legal_moves(player=1)\n #valid_rep[tuple(valids.T.tolist())] = 1.0\n return np.stack((local_rep, global_rep, play_rep))\n else:\n raise NotImplementedError()\n\n def getBoardChannels(self):\n #return 2\n if self.conv:\n return 3\n else:\n return 1\n\n def getInitBoard(self):\n # return initial board (numpy board)\n #self.last_move = None\n b = Board(self.n)\n return self.getArray(b)\n\n def getBoardSize(self):\n # (a,b) tuple\n return (self.n**2, self.n**2)\n\n def getActionSize(self):\n # return number of actions\n return self.n**4\n\n def getNextState(self, board, player, action):\n # if player takes action on board, return next (board,player)\n # action must be a valid move\n # if action == self.n*self.n:\n # return (board, -player)\n # b = Board(self.n)\n # b.pieces = np.copy(board)\n # move = (int(action/self.n), action%self.n)\n # b.execute_move(move, player)\n # return (b.pieces, -player)\n b = Board(self.n)\n b.local_pieces = np.copy(board[0])\n b.global_pieces = np.copy(board[1][::3, ::3])\n b.play_map = np.copy(board[2][::3, ::3])\n #b.last_move = self.last_move\n move = np.unravel_index(action, (self.n**2, self.n**2))\n #move = int(action/self.n**2), action%self.n**2\n b.execute_move(move, player)\n #self.last_move = b.last_move\n return self.getArray(b), -player\n\n def getValidMoves(self, board, player):\n # return a fixed size binary vector\n #valid = [0]*self.getActionSize()\n valid = np.zeros(self.getActionSize())\n b = Board(self.n)\n b.local_pieces = np.copy(board[0])\n b.global_pieces = np.copy(board[1][::3, ::3])\n b.play_map = np.copy(board[2][::3, ::3])\n valid_coords = b.get_legal_moves(player)\n valid_idx = np.ravel_multi_index(valid_coords.T, (self.n**2, self.n**2))\n valid[valid_idx] = True\n return valid\n\n\n def getGameEnded(self, board, player):\n # return 0 if not ended, 1 if player 1 won, -1 if player 1 lost\n # player = 1\n brd = Board(self.n)\n brd.local_pieces = np.copy(board[0])\n brd.global_pieces = np.copy(board[1][::3, ::3])\n brd.play_map = np.copy(board[2][::3, ::3])\n \n if brd.is_win(1):\n return player\n elif brd.is_win(-1):\n return -player\n elif brd.is_full():\n return brd.draw\n\n # for player in -1, 1:\n # if brd.is_win(player):\n # return player\n # if brd.is_full():\n # return brd.draw\n return 0\n\n def getCanonicalForm(self, board, player):\n # return state if player==1, else return -state if player==-1\n #return np.where(board, player*board, board)\n if player == 1:\n return board\n else:\n board[:2,:,:] *= -1\n return board\n \n def getSymmetries(self, board, pi):\n # rotate, mirror\n assert(len(pi) == self.getActionSize()) # 1 for pass\n pi_board = np.reshape(pi, self.getBoardSize())\n sym, x, y = [], -2, -1\n \n # sym.append((board, pi))\n # return sym\n\n for rot in range(1, 5):\n for flip in True, False:\n newB = 
np.rot90(board, rot, (x, y))\n newPi = np.rot90(pi_board, rot, (x, y))\n if flip:\n newB = np.flip(newB, y)\n newPi = np.flip(newPi, y)\n sym.append((newB, list(newPi.ravel())))\n return sym\n\n def stringRepresentation(self, board):\n return board.tostring()\n\n\n def display(self, board, indent=' '):\n # print('Last Move:')\n # print(board.last_move)\n print('')\n print(indent + ' 0 | 1 | 2 ‖ 3 | 4 | 5 ‖ 6 | 7 | 8')\n print('')\n for n, row in enumerate(board[0]):\n if n:\n if n % 3:\n sep = '---+---+---'\n print(indent + '- ' + sep + '‖' + sep + '‖' + sep)\n else:\n sep = '==========='\n print(indent + '= ' + sep + '#' + sep + '#' + sep)\n row = ' ‖ '.join(' | '.join(map(str, map(int, row[i:i+3]))) for i in range(0, len(row), 3))\n print(indent + str(n) + ' ' + row.replace('-1','O').replace('1','X').replace('0','.'))\n print('')\n\ndef display(board, indent=' '):\n # print('Last Move:')\n # print(board.last_move)\n print('')\n print(indent + ' 0 | 1 | 2 ‖ 3 | 4 | 5 ‖ 6 | 7 | 8')\n print('')\n for n, row in enumerate(board[0]):\n if n:\n if n % 3:\n sep = '---+---+---'\n print(indent + '- ' + sep + '‖' + sep + '‖' + sep)\n else:\n sep = '==========='\n print(indent + '= ' + sep + '#' + sep + '#' + sep)\n row = ' ‖ '.join(' | '.join(map(str, map(int, row[i:i+3]))) for i in range(0, len(row), 3))\n print(indent + str(n) + ' ' + row.replace('-1','O').replace('1','X').replace('0','.'))\n print('')\n\n # @staticmethod\n # def display(board):\n # n = board.shape[0]\n\n # print(\" \", end=\"\")\n # for y in range(n):\n # print (y,\"\", end=\"\")\n # print(\"\")\n # print(\" \", end=\"\")\n # for _ in range(n):\n # print (\"-\", end=\"-\")\n # print(\"--\")\n # for y in range(n):\n # print(y, \"|\",end=\"\") # print the row #\n # for x in range(n):\n # piece = board[y][x] # get the piece to print\n # if piece == -1: print(\"X \",end=\"\")\n # elif piece == 1: print(\"O \",end=\"\")\n # else:\n # if x==n:\n # print(\"-\",end=\"\")\n # else:\n # print(\"- \",end=\"\")\n # print(\"|\")\n\n # print(\" \", end=\"\")\n # for _ in range(n):\n # print (\"-\", end=\"-\")\n # print(\"--\")\n"
] | [
[
"numpy.rot90",
"numpy.stack",
"numpy.copy",
"numpy.ravel_multi_index",
"numpy.repeat",
"numpy.flip",
"numpy.unravel_index"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AmericaGL/TrashTalk_Dapp | [
"401f17289261b5f537b239e7759dc039d53211e1",
"401f17289261b5f537b239e7759dc039d53211e1",
"401f17289261b5f537b239e7759dc039d53211e1"
] | [
"opencv-3.3.0/samples/python/mouse_and_match.py",
"opencv-3.3.0/samples/python/gaussian_mix.py",
"opencv-3.3.0/samples/python/lk_track.py"
] | [
"#!/usr/bin/env python\n'''\nmouse_and_match.py [-i path | --input path: default ../data/]\n\nDemonstrate using a mouse to interact with an image:\n Read in the images in a directory one by one\n Allow the user to select parts of an image with a mouse\n When they let go of the mouse, it correlates (using matchTemplate) that patch with the image.\n\n SPACE for next image\n ESC to exit\n'''\n\n# Python 2/3 compatibility\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\n\n# built-in modules\nimport os\nimport sys\nimport glob\nimport argparse\nfrom math import *\n\n\ndrag_start = None\nsel = (0,0,0,0)\n\ndef onmouse(event, x, y, flags, param):\n global drag_start, sel\n if event == cv2.EVENT_LBUTTONDOWN:\n drag_start = x, y\n sel = 0,0,0,0\n elif event == cv2.EVENT_LBUTTONUP:\n if sel[2] > sel[0] and sel[3] > sel[1]:\n patch = gray[sel[1]:sel[3],sel[0]:sel[2]]\n result = cv2.matchTemplate(gray,patch,cv2.TM_CCOEFF_NORMED)\n result = np.abs(result)**3\n val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO)\n result8 = cv2.normalize(result,None,0,255,cv2.NORM_MINMAX,cv2.CV_8U)\n cv2.imshow(\"result\", result8)\n drag_start = None\n elif drag_start:\n #print flags\n if flags & cv2.EVENT_FLAG_LBUTTON:\n minpos = min(drag_start[0], x), min(drag_start[1], y)\n maxpos = max(drag_start[0], x), max(drag_start[1], y)\n sel = minpos[0], minpos[1], maxpos[0], maxpos[1]\n img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)\n cv2.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1)\n cv2.imshow(\"gray\", img)\n else:\n print(\"selection is complete\")\n drag_start = None\n\nif __name__ == '__main__':\n print(__doc__)\n\n parser = argparse.ArgumentParser(description='Demonstrate mouse interaction with images')\n parser.add_argument(\"-i\",\"--input\", default='../data/', help=\"Input directory.\")\n args = parser.parse_args()\n path = args.input\n\n cv2.namedWindow(\"gray\",1)\n cv2.setMouseCallback(\"gray\", onmouse)\n '''Loop through all the images in the directory'''\n for infile in glob.glob( os.path.join(path, '*.*') ):\n ext = os.path.splitext(infile)[1][1:] #get the filename extenstion\n if ext == \"png\" or ext == \"jpg\" or ext == \"bmp\" or ext == \"tiff\" or ext == \"pbm\":\n print(infile)\n\n img=cv2.imread(infile,1)\n if img is None:\n continue\n sel = (0,0,0,0)\n drag_start = None\n gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cv2.imshow(\"gray\",gray)\n if cv2.waitKey() == 27:\n break\n cv2.destroyAllWindows()\n",
"#!/usr/bin/env python\n\n# Python 2/3 compatibility\nfrom __future__ import print_function\nimport sys\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n xrange = range\n\nimport numpy as np\nfrom numpy import random\nimport cv2\n\ndef make_gaussians(cluster_n, img_size):\n points = []\n ref_distrs = []\n for i in xrange(cluster_n):\n mean = (0.1 + 0.8*random.rand(2)) * img_size\n a = (random.rand(2, 2)-0.5)*img_size*0.1\n cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)\n n = 100 + random.randint(900)\n pts = random.multivariate_normal(mean, cov, n)\n points.append( pts )\n ref_distrs.append( (mean, cov) )\n points = np.float32( np.vstack(points) )\n return points, ref_distrs\n\ndef draw_gaussain(img, mean, cov, color):\n x, y = np.int32(mean)\n w, u, vt = cv2.SVDecomp(cov)\n ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)\n s1, s2 = np.sqrt(w)*3.0\n cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.LINE_AA)\n\n\nif __name__ == '__main__':\n cluster_n = 5\n img_size = 512\n\n print('press any key to update distributions, ESC - exit\\n')\n\n while True:\n print('sampling distributions...')\n points, ref_distrs = make_gaussians(cluster_n, img_size)\n\n print('EM (opencv) ...')\n em = cv2.ml.EM_create()\n em.setClustersNumber(cluster_n)\n em.setCovarianceMatrixType(cv2.ml.EM_COV_MAT_GENERIC)\n em.trainEM(points)\n means = em.getMeans()\n covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232\n found_distrs = zip(means, covs)\n print('ready!\\n')\n\n img = np.zeros((img_size, img_size, 3), np.uint8)\n for x, y in np.int32(points):\n cv2.circle(img, (x, y), 1, (255, 255, 255), -1)\n for m, cov in ref_distrs:\n draw_gaussain(img, m, cov, (0, 255, 0))\n for m, cov in found_distrs:\n draw_gaussain(img, m, cov, (0, 0, 255))\n\n cv2.imshow('gaussian mixture', img)\n ch = cv2.waitKey(0)\n if ch == 27:\n break\n cv2.destroyAllWindows()\n",
"#!/usr/bin/env python\n\n'''\nLucas-Kanade tracker\n====================\n\nLucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack\nfor track initialization and back-tracking for match verification\nbetween frames.\n\nUsage\n-----\nlk_track.py [<video_source>]\n\n\nKeys\n----\nESC - exit\n'''\n\n# Python 2/3 compatibility\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nimport video\nfrom common import anorm2, draw_str\nfrom time import clock\n\nlk_params = dict( winSize = (15, 15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\nfeature_params = dict( maxCorners = 500,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n\nclass App:\n def __init__(self, video_src):\n self.track_len = 10\n self.detect_interval = 5\n self.tracks = []\n self.cam = video.create_capture(video_src)\n self.frame_idx = 0\n\n def run(self):\n while True:\n ret, frame = self.cam.read()\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n vis = frame.copy()\n\n if len(self.tracks) > 0:\n img0, img1 = self.prev_gray, frame_gray\n p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)\n p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)\n p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)\n d = abs(p0-p0r).reshape(-1, 2).max(-1)\n good = d < 1\n new_tracks = []\n for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):\n if not good_flag:\n continue\n tr.append((x, y))\n if len(tr) > self.track_len:\n del tr[0]\n new_tracks.append(tr)\n cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)\n self.tracks = new_tracks\n cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))\n draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))\n\n if self.frame_idx % self.detect_interval == 0:\n mask = np.zeros_like(frame_gray)\n mask[:] = 255\n for x, y in [np.int32(tr[-1]) for tr in self.tracks]:\n cv2.circle(mask, (x, y), 5, 0, -1)\n p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)\n if p is not None:\n for x, y in np.float32(p).reshape(-1, 2):\n self.tracks.append([(x, y)])\n\n\n self.frame_idx += 1\n self.prev_gray = frame_gray\n cv2.imshow('lk_track', vis)\n\n ch = cv2.waitKey(1)\n if ch == 27:\n break\n\ndef main():\n import sys\n try:\n video_src = sys.argv[1]\n except:\n video_src = 0\n\n print(__doc__)\n App(video_src).run()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.abs"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.random.multivariate_normal",
"numpy.eye",
"numpy.int32",
"numpy.arctan2",
"numpy.random.rand",
"numpy.zeros",
"numpy.vstack",
"numpy.random.randint"
],
[
"numpy.int32",
"numpy.zeros_like",
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
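The OpenCV EM demo in the row above builds each synthetic cluster from a random mean, a covariance of the form aᵀa + c·I, and a multivariate-normal sample before fitting `cv2.ml.EM`. Below is a minimal numpy-only sketch of that sampling step, mirroring the `make_gaussians` logic; the function name `sample_cluster` and its default sizes are illustrative, not part of the original script.

```python
import numpy as np

def sample_cluster(img_size=512, n_min=100, n_max=1000, rng=np.random):
    """Draw one random 2-D Gaussian cluster the way the EM demo does."""
    mean = (0.1 + 0.8 * rng.rand(2)) * img_size          # keep the centre away from the borders
    a = (rng.rand(2, 2) - 0.5) * img_size * 0.1          # random shearing matrix
    cov = np.dot(a.T, a) + img_size * 0.05 * np.eye(2)   # symmetric positive-definite covariance
    n = n_min + rng.randint(n_max - n_min)               # cluster size
    return rng.multivariate_normal(mean, cov, n), (mean, cov)

pts, (mean, cov) = sample_cluster()
print(pts.shape, mean, cov.shape)
```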
slohani-ai/machine-learning-for-physical-sciences | [
"f10f04d768b8eb0966953d76e6a553d3b11af92f"
] | [
"mlphys/deepqis/Simulator/Distributions.py"
] | [
"\"\"\"\nauthor: Sanjaya Lohani\nemail: [email protected]\nLicence: Apache-2.0\n\"\"\"\n\nimport numpy as np\nimport qiskit.quantum_info as qi\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\n\nclass Haar_State:\n\n def __init__(self, qs):\n self._qs = qs\n\n def pure_states(self, _):\n state = qi.random_statevector(dims=2 ** self._qs)\n state_dm = state.to_operator()\n state_np = state_dm.data\n return state_np\n\n def sample_dm(self, n_size): # K == D in equation (3) in the bias paper\n q_dm = list(map(self.pure_states, range(n_size)))\n q_dm = np.array(q_dm).reshape(n_size, 2 ** self._qs,\n 2 ** self._qs) # [self.n_size, 2 ** self._qs, 2 ** self._qs]\n return q_dm\n\n\nclass Hilbert_Schmidt:\n\n def __init__(self, qs):\n self._qs = qs\n\n def hs_states(self, _):\n dm = qi.random_density_matrix(dims=2 ** self._qs) # defualt is Hilbert-Schmidth\n dm_np = dm.data\n return dm_np\n\n def sample_dm(self, n_size):\n hs_dm = list(map(self.hs_states, range(n_size)))\n hs_dm = np.array(hs_dm).reshape(n_size, 2 ** self._qs, 2 ** self._qs)\n return hs_dm\n\n\nclass Bures:\n\n def __init__(self, qs):\n self._qs = qs\n\n def hs_states(self, _):\n dm = qi.random_density_matrix(dims=2 ** self._qs, method='Bures') # defualt is Hilbert-Schmidth\n dm_np = dm.data\n return dm_np\n\n def sample_dm(self, n_size):\n hs_dm = list(map(self.hs_states, range(n_size)))\n hs_dm = np.array(hs_dm).reshape(n_size, 2 ** self._qs, 2 ** self._qs)\n return hs_dm\n\n\nclass eye_NN:\n\n def __init__(self, qs):\n self._qs = qs\n\n def I_states(self, _):\n state = np.identity(2 ** self._qs)\n return state\n\n def sample_dm(self, n_size): # K == D in equation (3) in the bias paper\n q_dm = list(map(self.I_states, range(n_size)))\n q_dm = np.array(q_dm).reshape(n_size, 2 ** self._qs,\n 2 ** self._qs) # [self.n_size, 2 ** self._qs, 2 ** self._qs]\n return 1 / 4 * q_dm\n\n\nclass HS_Haar:\n\n def __init__(self, qs):\n self._qs = qs\n\n def sample_dm(self, n_size, Haar_to_HS=None): # a fraction for Haar_to_HS. For eg. 10% --> 0.1\n haar_dm = Haar_State(qs=self._qs).sample_dm(n_size=n_size)\n hs_dm = Hilbert_Schmidt(qs=self._qs).sample_dm(n_size=n_size)\n if Haar_to_HS is None:\n a = np.random.uniform(low=0.0, high=1.0, size=[n_size, 1, 1])\n else:\n a = Haar_to_HS\n hs_haar_dm = (1 - a) * hs_dm + a * haar_dm\n return hs_haar_dm\n\n\nclass Mix_eye:\n\n def __init__(self, qs):\n self._qs = qs\n\n def sample_dm(self, n_size, eye_to_mix=None, states='HS'): # a fraction for I_to_Mix. For eg. 
10% --> 0.1\n if states == 'HS':\n mix_dm = Hilbert_Schmidt(qs=self._qs).sample_dm(n_size=n_size)\n if states == 'Haar':\n mix_dm = Haar_State(qs=self._qs).sample_dm(n_size=n_size)\n\n I_dm = eye_NN(qs=self._qs).sample_dm(n_size=n_size)\n if eye_to_mix is None:\n a = np.random.uniform(low=0.0, high=1.0, size=[n_size, 1, 1])\n else:\n a = eye_to_mix\n\n hs_haar_dm = (1 - a) * mix_dm + a * I_dm\n return hs_haar_dm\n\n\nclass MaiAlquierDist_Symmetric:\n\n def __init__(self,\n qs=2,\n alpha=tf.TensorSpec(shape=1, dtype=tf.float32)) -> object:\n self.alpha = alpha\n self._qs = qs\n\n @staticmethod\n def _cast_complex(x):\n return tf.cast(x, tf.complex128)\n\n def sample_alpha(self, n_size=tf.TensorSpec(shape=1, dtype=tf.int64)):\n alpha = tf.repeat(self.alpha, [2 ** self._qs])\n dist = tfp.distributions.Dirichlet(alpha)\n sampled = dist.sample(n_size) # [n_size, self._qs]\n sampled = tf.expand_dims(sampled, axis=-1) # [n_size, self._qs, 1]\n sampled = tf.expand_dims(sampled, axis=-1) # [n_size, self._qs, 1, 1]\n return sampled\n\n def sample_dm(self, n_size, numpy_array=True):\n q_dm = Haar_State(qs=self._qs).sample_dm(n_size=n_size * 2 ** self._qs) # [self.n_size * 2**self._qs,\n # 2 ** self._qs, 2 ** self._qs]\n haar_dm = tf.reshape(q_dm, [n_size, 2 ** self._qs, 2 ** self._qs, 2 ** self._qs]) # [n_size, self._qs,\n # self._qs, self._qs]\n alphas = self.sample_alpha(n_size) # [n_size, self._qs, 1, 1]\n alphas = self._cast_complex(alphas)\n ma_states_array = tf.multiply(alphas, haar_dm) # [n_size, self._qs, self._qs, self._qs]\n ma_states = tf.reduce_sum(ma_states_array,\n axis=1) # [n_size, self._qs --> traced out and dropped, self._qs, self._qs]\n # --> [n_size, self._qs, self._qs]\n if numpy_array:\n ma_states = ma_states.numpy()\n return ma_states\n\n\nclass MaiAlquierDist_Asymmetric:\n\n def __init__(self,\n qs=2,\n k_params=None,\n alpha=[0.1, 0.2, 0.3, 0.4]) -> object:\n self.alpha = alpha\n self._qs = qs\n self.D = 2 ** self._qs\n self.K = self.D\n if k_params is not None:\n self.K = k_params\n\n @staticmethod\n def _cast_complex(x):\n return tf.cast(x, tf.complex128)\n\n def sample_alpha(self, n_size=tf.TensorSpec(shape=1, dtype=tf.int64)):\n # if purity is not None:\n # self.alpha = self.D * (1 - purity) / (self.D * (purity * self.D - 2) + 1)\n # alpha = tf.repeat(self.alpha, [2 ** self._qs])\n dist = tfp.distributions.Dirichlet(self.alpha)\n if isinstance(self.alpha, np.ndarray):\n tf.debugging.assert_equal(self.alpha.ndim, 2, '|The given alpha must be a rank 2 tensor.')\n sampled = dist.sample(1)\n sampled = tf.squeeze(sampled)\n else:\n sampled = dist.sample(n_size) # [n_size, self._qs]\n sampled = tf.expand_dims(sampled, axis=-1) # [n_size, self._qs, 1]\n sampled = tf.expand_dims(sampled, axis=-1) # [n_size, self._qs, 1, 1]\n return sampled\n\n def sample_dm(self, n_size, numpy_array=True):\n q_dm = Haar_State(qs=self._qs).sample_dm(n_size=n_size * self.K) # [self.n_size * 2**self._qs,\n # 2 ** self._qs, 2 ** self._qs]\n haar_dm = tf.reshape(q_dm, [n_size, self.K, 2 ** self._qs, 2 ** self._qs]) # [n_size, self._qs,\n # self._qs, self._qs]\n alphas = self.sample_alpha(n_size) # [n_size, self._qs, 1, 1]\n alphas = self._cast_complex(alphas)\n ma_states_array = tf.multiply(alphas, haar_dm) # [n_size, self._qs, self._qs, self._qs]\n ma_states = tf.reduce_sum(ma_states_array,\n axis=1) # [n_size, self._qs --> traced out and dropped, self._qs, self._qs]\n # --> [n_size, self._qs, self._qs]\n if numpy_array:\n ma_states = ma_states.numpy()\n return ma_states\n\n\nclass 
MaiAlquierDist_Gamma:\n\n def __init__(self,\n qs=tf.TensorSpec(shape=1, dtype=tf.int64),\n alpha=tf.TensorSpec(shape=1, dtype=tf.float32)) -> object:\n self.alpha = alpha\n self._qs = qs\n\n @staticmethod\n def _cast_complex(x):\n return tf.cast(x, tf.complex128)\n\n @tf.function\n def sample_dm(self, n_size=tf.TensorSpec(shape=1, dtype=tf.int64), numpy_array=False):\n self.n_size = n_size\n x = tf.random.normal([self.n_size, 2 * 2 ** self._qs * 2 ** self._qs], 0., 1.)\n Xr = tf.reshape(x[:, :2 ** self._qs * 2 ** self._qs], [self.n_size, 2 ** self._qs, 2 ** self._qs])\n Xi = tf.reshape(x[:, 2 ** self._qs * 2 ** self._qs:], [self.n_size, 2 ** self._qs, 2 ** self._qs])\n Xr = self._cast_complex(Xr)\n Xi = self._cast_complex(Xi)\n X = Xr + 1j * Xi\n W = X / tf.expand_dims(tf.norm(X, axis=1), axis=1)\n # print('shape of W', W.shape)\n if isinstance(self.alpha, float):\n gamma_factor = self._cast_complex(tf.random.gamma([self.n_size, 2 ** self._qs], alpha=self.alpha, beta=1.))\n else:\n g_tensor = tf.vectorized_map(lambda x: tf.random.gamma([2 ** self._qs], x), self.alpha)\n gamma_factor = self._cast_complex(tf.reshape(g_tensor, [-1, 2 ** self._qs]))\n gamma_factor_norm = gamma_factor / tf.expand_dims(tf.reduce_sum(gamma_factor, axis=1), axis=1)\n gama_diag_batch = tf.vectorized_map(lambda x: tf.linalg.diag(x), gamma_factor_norm) # rank 3 tensors\n rho = tf.linalg.matmul(W, tf.linalg.matmul(gama_diag_batch, W, adjoint_b=True))\n return rho\n"
] | [
[
"tensorflow.debugging.assert_equal",
"tensorflow.norm",
"tensorflow.multiply",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.repeat",
"tensorflow.squeeze",
"tensorflow.linalg.matmul",
"tensorflow.random.gamma",
"tensorflow.linalg.diag",
"numpy.identity",
"numpy.random.uniform",
"numpy.array",
"tensorflow.random.normal",
"tensorflow.TensorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
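`Distributions.py` in the row above mixes Haar-random pure states with Dirichlet-distributed weights (`MaiAlquierDist_Symmetric.sample_alpha`). The sketch below reproduces just that weight-sampling step, assuming TensorFlow 2.x and `tensorflow_probability` are installed; `sample_dirichlet_weights` is an illustrative name, not an API of the repo.

```python
import tensorflow as tf
import tensorflow_probability as tfp

def sample_dirichlet_weights(alpha, n_qubits, n_size):
    """Symmetric Dirichlet weights shaped for broadcasting over density matrices,
    mirroring MaiAlquierDist_Symmetric.sample_alpha in the file above."""
    concentration = tf.repeat(alpha, [2 ** n_qubits])      # [2**qs]
    dist = tfp.distributions.Dirichlet(concentration)
    w = dist.sample(n_size)                                 # [n_size, 2**qs]
    return w[:, :, tf.newaxis, tf.newaxis]                  # [n_size, 2**qs, 1, 1]

weights = sample_dirichlet_weights(tf.constant([0.5]), n_qubits=2, n_size=4)
print(weights.shape)  # (4, 4, 1, 1)
```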
meichenfang/inferelator | [
"47f8ebcc5f303264a75814897c52026b47c57aef"
] | [
"inferelator/distributed/dask_functions.py"
] | [
"from inferelator.distributed.inferelator_mp import MPControl\nfrom inferelator.regression import base_regression\nfrom inferelator import utils\nimport copy\n\nimport numpy as np\nimport scipy.sparse as sps\nfrom dask import distributed\n\n\"\"\"\nThis package contains the dask-specific multiprocessing functions (these are used in place of map calls to allow the\nmore advanced memory and task tools of dask to be used)\n\"\"\"\n\nDASK_SCATTER_TIMEOUT = 120\n\n\ndef amusr_regress_dask(X, Y, priors, prior_weight, n_tasks, genes, tfs, G, remove_autoregulation=True):\n \"\"\"\n Execute multitask (AMUSR)\n\n :return: list\n Returns a list of regression results that the amusr_regression pileup_data can process\n \"\"\"\n\n assert MPControl.is_dask()\n\n from inferelator.regression.amusr_regression import format_prior, run_regression_EBIC\n DaskController = MPControl.client\n\n # Gets genes, n_tasks, prior_weight, and remove_autoregulation from regress_dask()\n # Other arguments are passed in\n def regression_maker(j, x_df, y_list, prior, tf):\n level = 0 if j % 100 == 0 else 2\n utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=genes[j], i=j, total=G),\n level=level)\n\n gene = genes[j]\n x, y, tasks = [], [], []\n\n if remove_autoregulation:\n tf = [t for t in tf if t != gene]\n else:\n pass\n\n for k, y_data in y_list:\n x.append(x_df[k].get_gene_data(tf)) # list([N, K])\n y.append(y_data)\n tasks.append(k) # [T,]\n\n prior = format_prior(prior, gene, tasks, prior_weight)\n return j, run_regression_EBIC(x, y, tf, tasks, gene, prior)\n\n def response_maker(y_df, i):\n y = []\n gene = genes[i]\n for k in range(n_tasks):\n if gene in y_df[k].gene_names:\n y.append((k, y_df[k].get_gene_data(gene, force_dense=True).reshape(-1, 1)))\n return y\n\n # Scatter common data to workers\n [scatter_x] = DaskController.client.scatter([X], broadcast=True, hash=False)\n [scatter_priors] = DaskController.client.scatter([priors], broadcast=True, hash=False)\n\n # Wait for scattering to finish before creating futures\n distributed.wait(scatter_x, timeout=DASK_SCATTER_TIMEOUT)\n distributed.wait(scatter_priors, timeout=DASK_SCATTER_TIMEOUT)\n\n future_list = [DaskController.client.submit(regression_maker, i, scatter_x, response_maker(Y, i), scatter_priors,\n tfs)\n for i in range(G)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n result_list = process_futures_into_list(future_list)\n\n DaskController.client.cancel(scatter_x)\n DaskController.client.cancel(scatter_priors)\n\n return result_list\n\n\ndef bbsr_regress_dask(X, Y, pp_mat, weights_mat, G, genes, nS):\n \"\"\"\n Execute regression (BBSR)\n\n :return: list\n Returns a list of regression results that the pileup_data can process\n \"\"\"\n assert MPControl.is_dask()\n\n from inferelator.regression import bayes_stats\n DaskController = MPControl.client\n\n def regression_maker(j, x, y, pp, weights):\n level = 0 if j % 100 == 0 else 2\n utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=genes[j], i=j, total=G), level=level)\n data = bayes_stats.bbsr(x, utils.scale_vector(y), pp[j, :].flatten(), weights[j, :].flatten(), nS)\n data['ind'] = j\n return j, data\n\n # Scatter common data to workers\n [scatter_x] = DaskController.client.scatter([X.values], broadcast=True, hash=False)\n [scatter_pp] = DaskController.client.scatter([pp_mat.values], broadcast=True, hash=False)\n [scatter_weights] = DaskController.client.scatter([weights_mat.values], broadcast=True, hash=False)\n\n # Wait for scattering to finish 
before creating futures\n distributed.wait(scatter_x, timeout=DASK_SCATTER_TIMEOUT)\n distributed.wait(scatter_pp, timeout=DASK_SCATTER_TIMEOUT)\n distributed.wait(scatter_weights, timeout=DASK_SCATTER_TIMEOUT)\n\n future_list = [DaskController.client.submit(regression_maker, i, scatter_x,\n Y.get_gene_data(i, force_dense=True).flatten(),\n scatter_pp, scatter_weights)\n for i in range(G)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n result_list = process_futures_into_list(future_list)\n\n DaskController.client.cancel(scatter_x)\n DaskController.client.cancel(scatter_pp)\n DaskController.client.cancel(scatter_weights)\n\n return result_list\n\n\ndef sklearn_regress_dask(X, Y, model, G, genes, min_coef):\n \"\"\"\n Execute regression (SKLearn)\n\n :return: list\n Returns a list of regression results that the pileup_data can process\n \"\"\"\n assert MPControl.is_dask()\n\n from inferelator.regression import sklearn_regression\n DaskController = MPControl.client\n\n def regression_maker(j, x, y):\n level = 0 if j % 100 == 0 else 2\n utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=genes[j], i=j, total=G), level=level)\n data = sklearn_regression.sklearn_gene(x, utils.scale_vector(y), copy.copy(model))\n data['ind'] = j\n return j, data\n\n # Scatter common data to workers\n [scatter_x] = DaskController.client.scatter([X.values], broadcast=True, hash=False)\n\n # Wait for scattering to finish before creating futures\n distributed.wait(scatter_x, timeout=DASK_SCATTER_TIMEOUT)\n\n future_list = [DaskController.client.submit(regression_maker, i, scatter_x,\n Y.get_gene_data(i, force_dense=True).flatten())\n for i in range(G)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n result_list = process_futures_into_list(future_list)\n\n DaskController.client.cancel(scatter_x)\n\n return result_list\n\n\ndef lasso_stars_regress_dask(X, Y, alphas, num_subsamples, random_seed, method, params, G, genes):\n \"\"\"\n Execute regression (LASSO-StARS)\n\n :return: list\n Returns a list of regression results that the pileup_data can process\n \"\"\"\n assert MPControl.is_dask()\n\n from inferelator.regression import stability_selection\n DaskController = MPControl.client\n\n def regression_maker(j, x, y):\n level = 0 if j % 100 == 0 else 2\n utils.Debug.allprint(base_regression.PROGRESS_STR.format(gn=genes[j], i=j, total=G), level=level)\n data = stability_selection.stars_model_select(x, utils.scale_vector(y), alphas, num_subsamples=num_subsamples,\n method=method, random_seed=random_seed, **params)\n data['ind'] = j\n return j, data\n\n # Scatter common data to workers\n [scatter_x] = DaskController.client.scatter([X.values], broadcast=True, hash=False)\n\n # Wait for scattering to finish before creating futures\n distributed.wait(scatter_x, timeout=DASK_SCATTER_TIMEOUT)\n\n future_list = [DaskController.client.submit(regression_maker, i, scatter_x,\n Y.get_gene_data(i, force_dense=True).flatten())\n for i in range(G)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n result_list = process_futures_into_list(future_list)\n\n DaskController.client.cancel(scatter_x)\n\n return result_list\n\n\ndef build_mi_array_dask(X, Y, bins, logtype):\n \"\"\"\n Calculate MI into an array with dask (the naive map is very inefficient)\n\n :param X: np.ndarray (n x m1)\n Discrete array of bins\n :param Y: np.ndarray (n x m2)\n Discrete array of bins\n :param bins: int\n The total number of bins that were 
used to make the arrays discrete\n :param logtype: np.log func\n Which log function to use (log2 gives bits, ln gives nats)\n :return mi: np.ndarray (m1 x m2)\n Returns the mutual information array\n \"\"\"\n\n assert MPControl.is_dask()\n\n from inferelator.regression.mi import _calc_mi, _make_table, _make_discrete\n\n # Get a reference to the Dask controller\n DaskController = MPControl.client\n\n m1, m2 = X.shape[1], Y.shape[1]\n\n def mi_make(i, x, y):\n x = _make_discrete(x, bins)\n return i, [_calc_mi(_make_table(x, y[:, j], bins), logtype=logtype) for j in range(m2)]\n\n # Scatter Y to workers and keep track as Futures\n [scatter_y] = DaskController.client.scatter([Y], broadcast=True, hash=False)\n\n # Wait for scattering to finish before creating futures\n distributed.wait(scatter_y, timeout=DASK_SCATTER_TIMEOUT)\n\n # Build an asynchronous list of Futures for each calculation of mi_make\n future_list = [DaskController.client.submit(mi_make, i,\n X[:, i].A.flatten() if sps.isspmatrix(X) else X[:, i].flatten(),\n scatter_y)\n for i in range(m1)]\n\n # Collect results as they finish instead of waiting for all workers to be done\n mi_list = process_futures_into_list(future_list)\n\n # Convert the list of lists to an array\n mi = np.array(mi_list)\n assert (m1, m2) == mi.shape, \"Array {sh} produced [({m1}, {m2}) expected]\".format(sh=mi.shape, m1=m1, m2=m2)\n\n DaskController.client.cancel(scatter_y)\n\n return mi\n\n\ndef process_futures_into_list(future_list, raise_on_error=True, check_results=True):\n \"\"\"\n Take a list of futures and turn them into a list of results\n Results must be of the form i, data (where i is the output order)\n\n :param future_list: A list of executing futures\n :type future_list: list\n :param raise_on_error: Should an error be raised if a job can't be restarted or just move on from it.\n Defaults to True\n :type raise_on_error: bool\n :param check_results: Should the result object be checked (and restarted if there's a problem)\n If False, this will raise an error with the result of a failed future is retrieved.\n Defaults to True.\n :type check_results: bool\n :return output_list: A list of results from the completed futures\n :rtype: list\n \"\"\"\n\n DaskController = MPControl.client\n output_list = [None] * len(future_list)\n complete_gen = distributed.as_completed(future_list)\n\n for finished_future in complete_gen:\n\n DaskController.check_cluster_state()\n\n # Jobs can be cancelled in certain situations\n if check_results and (finished_future.cancelled() or (finished_future.status == \"erred\")):\n error = finished_future.exception()\n utils.Debug.vprint(\"Restarting job (Error: {er})\".format(er=error), level=0)\n\n # Restart cancelled futures and put them back into the work pile\n try:\n DaskController.client.retry(finished_future)\n complete_gen.update([finished_future])\n except KeyError:\n if raise_on_error:\n raise\n\n # In the event of success, get the data\n else:\n i, result_data = finished_future.result()\n output_list[i] = result_data\n finished_future.cancel()\n\n return output_list\n\n"
] | [
[
"scipy.sparse.isspmatrix",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
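`dask_functions.py` above follows one pattern throughout: scatter shared data once with `broadcast=True`, submit one future per gene, then reassemble results in submission order via `as_completed` (`process_futures_into_list`). A stripped-down sketch of that pattern on a local cluster is shown below; the toy payload, worker counts, and the `scatter_map_collect` name are assumptions for illustration only.

```python
import numpy as np
from dask import distributed

def scatter_map_collect(X, n_jobs):
    """Broadcast a shared array once, submit one future per index, and
    put results back in submission order, as the file above does."""
    client = distributed.get_client()
    [scatter_x] = client.scatter([X], broadcast=True, hash=False)
    distributed.wait(scatter_x, timeout=120)

    futures = [client.submit(lambda i, x: (i, float(x[i].sum())), i, scatter_x)
               for i in range(n_jobs)]

    out = [None] * n_jobs
    for fut in distributed.as_completed(futures):
        i, val = fut.result()
        out[i] = val
        fut.cancel()
    client.cancel(scatter_x)
    return out

if __name__ == "__main__":
    with distributed.Client(n_workers=2, threads_per_worker=1):
        print(scatter_map_collect(np.arange(12).reshape(4, 3), n_jobs=4))
```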
sdss/coordio | [
"61f5c962b8e3f335259168c9f8e872b4d3fe25d8"
] | [
"coordio/sky.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author: José Sánchez-Gallego ([email protected])\n# @Date: 2020-08-17\n# @Filename: sky.py\n# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)\n\n# IAU-defined sky coordinate systems and transformations.\n\nimport ctypes\n\nimport numpy\n\nfrom . import sofa\nfrom .coordinate import Coordinate, Coordinate2D, verifySite, verifyWavelength\nfrom .exceptions import CoordinateError, CoordIOError\nfrom .time import Time\nfrom .site import Site\n# from .telescope import Field\nfrom . import defaults\nfrom . import conv\n\n\n__all__ = ['ICRS', 'Observed']\n\n\nclass ICRS(Coordinate2D):\n \"\"\"A representation of ICRS coordinates.\n\n Parameters\n ----------\n value : numpy.ndarray\n A Nx2 Numpy array with the RA and Dec coordinates of the targets.\n epoch : numpy.ndarray\n A 1D array with the epoch of the coordinates for each target,\n as a TDB Julian date (although for most applications the small\n differences between scales will not matter). Defaults to J2000.\n pmra : numpy.ndarray\n A 1D array with the proper motion in the RA axis for the N targets,\n in milliarcsec/yr. Must be a true angle, i.e, it must include the\n ``cos(dec)`` term.\n pmdec : numpy.ndarray\n A 1D array with the proper motion in the RA axis for the N targets,\n in milliarcsec/yr.\n parallax : numpy.ndarray\n A 1D array with the parallax for the N targets, in milliarcsec.\n rvel : numpy.ndarray\n A 1D array with the radial velocity in km/s, positive when receding.\n wavelength : numpy.ndarray\n A 1D array with he observing wavelength, in angstrom.\n Defaults to the value in `defaults.WAVELENGTH` (GFA, sdss-r)\n\n \"\"\"\n\n __extra_arrays__ = ['epoch', 'pmra', 'pmdec', 'parallax', 'rvel', 'wavelength']\n\n def __new__(cls, value, **kwargs):\n\n verifyWavelength(kwargs, len(value), strict=False)\n\n obj = super().__new__(cls, value, **kwargs)\n\n if kwargs.get('epoch', None) is None:\n obj.epoch += defaults.EPOCH\n\n # if kwargs.get('wavelength', None) is None:\n # if hasattr(value, \"wavelength\"):\n # obj.wavelength = value.wavelength\n # else:\n # obj.wavelength += defaults.wavelength\n\n # check if a coordinate was passed that we can just\n # 'cast' into Observed\n if isinstance(value, Coordinate):\n\n if value.coordSysName == 'Observed':\n obj._fromObserved(value)\n\n else:\n raise CoordIOError(\n 'Cannot convert to ICRS from %s'%value.coordSysName\n )\n\n return obj\n\n def _fromObserved(self, obsCoords):\n \"\"\"Converts from `.Observed` coordinates. Epoch is the\n time specifified by the site.\n\n \"\"\"\n\n # We need the epoch to be J2000.0 because that's what iauAtco13 likes.\n # icrs_2000 = icrsCoords.to_epoch(2451545.0, site=self.site)\n\n # rra = numpy.radians(icrs_2000[:, 0])\n # rdec = numpy.radians(icrs_2000[:, 1])\n # rpmra = numpy.radians(icrs_2000.pmra / 1000. / 3600.) / numpy.cos(rdec)\n # rpmdec = numpy.radians(icrs_2000.pmdec / 1000. 
/ 3600.)\n\n rlong = numpy.radians(obsCoords.site.longitude)\n rlat = numpy.radians(obsCoords.site.latitude)\n rZD = numpy.radians(90 - obsCoords[:,0])\n rAz = numpy.radians(obsCoords[:,1])\n wavelength = obsCoords.wavelength / 10000.\n _type = \"A\".encode() # coords are azimuth, zenith dist\n\n time = obsCoords.site.time\n\n utc = time.to_utc()\n utc1 = int(utc)\n utc2 = utc - utc1\n dut1 = time.get_dut1()\n\n _ra = ctypes.c_double()\n _dec = ctypes.c_double()\n\n ra = numpy.zeros(len(obsCoords))\n dec = numpy.zeros(len(obsCoords))\n\n for ii in range(len(rAz)):\n\n sofa.iauAtoc13(\n _type, rAz[ii], rZD[ii], utc1, utc2, dut1,\n rlong, rlat, obsCoords.site.altitude, 0.0, 0.0,\n obsCoords.site.pressure, obsCoords.site.temperature,\n obsCoords.site.rh, wavelength[ii], _ra, _dec\n )\n ra[ii] = numpy.degrees(_ra.value)\n dec[ii] = numpy.degrees(_dec.value)\n\n self[:, 0] = ra\n self[:, 1] = dec\n\n def to_epoch(self, jd, site=None):\n \"\"\"Convert the coordinates to a new epoch.\n\n Parameters\n ----------\n jd : float\n The Julian date, in TAI scale, of the output epoch.\n site : .Site\n The site of the observation. Used to determine the TDB-TT offset.\n If not provided, it assumes longitude and latitude zero.\n\n Returns\n -------\n icrs : `.ICRS`\n A new `.ICRS` object with the coordinates, proper motion, etc. in\n the new epoch.\n\n \"\"\"\n\n rra = numpy.radians(self[:, 0])\n rdec = numpy.radians(self[:, 1])\n rpmra = numpy.radians(self.pmra / 1000. / 3600.) / numpy.cos(rdec)\n rpmdec = numpy.radians(self.pmdec / 1000. / 3600.)\n\n # Using TDB is probably an overkill.\n\n tai = Time(jd, scale='TAI')\n\n if site:\n epoch2 = tai.to_tdb(longitude=site.longitude,\n latitude=site.latitude,\n altitude=site.altitude)\n else:\n epoch2 = tai.to_tdb()\n\n epoch2_1 = int(epoch2)\n epoch2_2 = epoch2 - epoch2_1\n\n ra2 = ctypes.c_double()\n dec2 = ctypes.c_double()\n pmra2 = ctypes.c_double()\n pmdec2 = ctypes.c_double()\n parallax2 = ctypes.c_double()\n rvel2 = ctypes.c_double()\n\n new_icrs = self.copy()\n\n for ii in range(self.shape[0]):\n\n epoch1_1 = float(int(self.epoch[ii]))\n epoch1_2 = self.epoch[ii] - epoch1_1\n\n res = sofa.iauPmsafe(rra[ii], rdec[ii], rpmra[ii], rpmdec[ii],\n self.parallax[ii] / 1000., self.rvel[ii],\n epoch1_1, epoch1_2, epoch2_1, epoch2_2,\n ra2, dec2, pmra2, pmdec2, parallax2, rvel2)\n\n if res > 1 or res < 0:\n raise CoordinateError(f'iauPmsafe return with '\n f'error code {res}.')\n\n new_icrs[ii, :] = numpy.rad2deg([ra2.value, dec2.value])\n new_icrs.pmra[ii] = numpy.rad2deg(pmra2.value) * 3600. * 1000.\n new_icrs.pmra[ii] *= numpy.cos(dec2.value)\n new_icrs.pmdec[ii] = numpy.rad2deg(pmdec2.value) * 3600. * 1000.\n new_icrs.parallax[ii] = parallax2.value * 1000.\n new_icrs.rvel[ii] = rvel2.value\n\n return new_icrs\n\n\nclass Observed(Coordinate2D):\n \"\"\"The observed coordinates of a series of targets.\n\n The array contains the Alt/Az coordinates of the targets. Their RA/Dec\n coordinates can be accessed via the ``ra`` and ``dec`` attributes.\n If `.ICRS` or `.Field` is passed, Alt/Az coordinates are computed.\n\n Parameters\n ----------\n value : numpy.ndarray\n A Nx2 Numpy array with the Alt and Az coordinates of the targets,\n in degrees. Or `.ICRS` instance. 
Or a `.Field` instance.\n wavelength : numpy.ndarray\n A 1D array with he observing wavelength, in angstrom.\n If not explicitly passed, it tries to inheret from value.wavelength,\n if that doesn't exist, it is set to default specified in:\n `defaults.wavelength`\n site : .Site\n The site from where observations will occur, along with the time\n of the observation. Mandatory argument.\n\n Attributes\n -----------\n ra : numpy.ndarray\n Nx1 Numpy array, observed RA in degrees\n dec : numpy.ndarray\n Nx1 Numpy array, observed Dec in degrees\n ha : numpy.ndarray\n Nx1 Numpy array, hour angle in degrees\n pa : numpy.ndarray\n Nx1 Numpy array, position angle in degrees. By SOFA: the angle between\n the direction to the north celestial pole and direction to the zenith.\n range is [-180, 180]. The sign is according to:\n -ha --> -pa, +ha --> +pa\n\n \"\"\"\n\n __extra_arrays__ = ['wavelength']\n __extra_params__ = ['site'] # mandatory\n __computed_arrays__ = ['ra', 'dec', 'ha', 'pa']\n\n def __new__(cls, value, **kwargs):\n # should we do range checks (eg alt < 90)? probably.\n\n verifySite(kwargs)\n\n # if kwargs.get('site', None) is None:\n # raise CoordIOError('Site must be passed to Observed')\n\n # else:\n # site = kwargs.get('site')\n # if not isinstance(site, Site):\n # raise CoordIOError('Must pass Site to Observed')\n # if site.time is None:\n # raise CoordIOError(\n # \"Time of observation must be specified on Site\"\n # )\n\n # should we prefer wavelength passed, or wavelength\n # existing on value (if it does exist). Here preferring passed\n # if kwargs.get('wavelength', None) is None:\n # if hasattr(value, \"wavelength\"):\n # kwargs[\"wavelength\"] = value.wavelength\n verifyWavelength(\n kwargs, len(value), strict=False\n )\n\n obj = super().__new__(cls, value, **kwargs)\n\n # if kwargs.get('wavelength', None) is None:\n # obj.wavelength += defaults.wavelength\n\n\n\n # check if a coordinate was passed that we can just\n # 'cast' into Observed\n if isinstance(value, Coordinate):\n\n if value.coordSysName == 'ICRS':\n obj._fromICRS(value)\n\n elif value.coordSysName == 'Field':\n obj._fromField(value)\n\n else:\n raise CoordIOError(\n 'Cannot convert to Observed from %s'%value.coordSysName\n )\n\n else:\n # raw numpy array supplied compute values\n obj._fromRaw()\n\n return obj\n\n def _fromICRS(self, icrsCoords):\n \"\"\"Converts from ICRS to topocentric observed coordinates for a site.\n Automatically executed after initialization with `.ICRS`.\n\n Computes and sets ra, dec, ha, pa arrays.\n\n Parameters:\n ------------\n icrsCoords : `.ICRS`\n ICRS coordinates from which to convert to observed coordinates\n\n \"\"\"\n\n # eventually move this to coordio.conv?\n\n # Prepare to call iauAtco13\n # Given:\n # rc,dc double ICRS right ascension at J2000.0 (radians)\n # pr double RA proper motion (radians/year)\n # pd double Dec proper motion (radians/year)\n # px double parallax (arcsec)\n # rv double radial velocity (km/s, +ve if receding)\n # utc1 double UTC as a 2-part...\n # utc2 double ...quasi Julian Date\n # dut1 double UT1-UTC (seconds)\n # elong double longitude (radians, east +ve)\n # phi double latitude (geodetic, radians)\n # hm double height above ellipsoid (m, geodetic)\n # xp,yp double polar motion coordinates (radians)\n # phpa double pressure at the observer (hPa = mB)\n # tc double ambient temperature at the observer (deg C)\n # rh double relative humidity at the observer (range 0-1)\n # wl double wavelength (micrometers)\n #\n # Returned:\n # aob double* observed 
azimuth (radians: N=0,E=90)\n # zob double* observed zenith distance (radians)\n # hob double* observed hour angle (radians)\n # dob double* observed declination (radians)\n # rob double* observed right ascension (CIO-based, radians)\n # eo double* equation of the origins (ERA-GST)\n\n # TODO: maybe write this as Cython or C?\n\n # We need the epoch to be J2000.0 because that's what iauAtco13 likes.\n icrs_2000 = icrsCoords.to_epoch(2451545.0, site=self.site)\n\n rra = numpy.radians(icrs_2000[:, 0])\n rdec = numpy.radians(icrs_2000[:, 1])\n rpmra = numpy.radians(icrs_2000.pmra / 1000. / 3600.) / numpy.cos(rdec)\n rpmdec = numpy.radians(icrs_2000.pmdec / 1000. / 3600.)\n\n rlong = numpy.radians(self.site.longitude)\n rlat = numpy.radians(self.site.latitude)\n\n time = self.site.time\n\n utc = time.to_utc()\n utc1 = int(utc)\n utc2 = utc - utc1\n dut1 = time.get_dut1()\n\n az_obs = ctypes.c_double()\n zen_obs = ctypes.c_double()\n ha_obs = ctypes.c_double()\n dec_obs = ctypes.c_double()\n ra_obs = ctypes.c_double()\n eo_obs = ctypes.c_double()\n\n for ii in range(len(rra)):\n\n sofa.iauAtco13(\n rra[ii], rdec[ii], rpmra[ii], rpmdec[ii],\n icrs_2000.parallax[ii] / 1000., icrs_2000.rvel[ii],\n utc1, utc2, dut1,\n rlong, rlat, self.site.altitude, 0.0, 0.0,\n self.site.pressure, self.site.temperature,\n self.site.rh, icrs_2000.wavelength[ii] / 10000.,\n az_obs, zen_obs, ha_obs, dec_obs, ra_obs, eo_obs\n )\n\n altAz = [\n 90 - numpy.rad2deg(zen_obs.value),\n numpy.rad2deg(az_obs.value)\n ]\n self[ii, :] = altAz\n\n self.ra[ii] = numpy.rad2deg(ra_obs.value)\n self.dec[ii] = numpy.rad2deg(dec_obs.value)\n self.ha[ii] = numpy.rad2deg(ha_obs.value)\n\n # compute the pa\n self.pa[ii] = numpy.rad2deg(\n sofa.iauHd2pa(ha_obs.value, dec_obs.value, rlat)\n )\n\n def _fromField(self, fieldCoords):\n \"\"\"Converts from field coordinates to topocentric observed\n coordinates for a site. Automatically executed after initialization\n with `.Field`.\n\n Computes and sets ra, dec, ha, pa arrays.\n\n Parameters:\n ------------\n fieldCoords : `.Field`\n Field coordinates from which to convert to observed coordinates\n\n \"\"\"\n # get field center info\n altCenter, azCenter = fieldCoords.field_center.flatten()\n pa = float(fieldCoords.field_center.pa) # parallactic angle\n\n alt, az = conv.fieldToObserved(\n fieldCoords.x, fieldCoords.y, fieldCoords.z,\n altCenter, azCenter, pa\n )\n\n self[:,0] = alt\n self[:,1] = az\n\n self._fromRaw()\n\n def _fromRaw(self):\n \"\"\"Automatically executed after initialization with\n an Nx2 `numpy.ndarray` of Alt/Az coords.\n\n Computes and sets ra, dec, ha, pa arrays.\n\n \"\"\"\n\n self[:, 1] = self[:, 1] % 360\n\n # compute ra, dec, ha, pa here...\n dec_obs = ctypes.c_double()\n ha_obs = ctypes.c_double()\n rlat = numpy.radians(self.site.latitude)\n rlong = numpy.radians(self.site.longitude)\n ut1 = self.site.time.to_ut1()\n\n for ii, (alt, az) in enumerate(self):\n raz = numpy.radians(az)\n ralt = numpy.radians(alt)\n sofa.iauAe2hd(raz, ralt, rlat, ha_obs, dec_obs)\n self.ha[ii] = numpy.degrees(ha_obs.value)\n self.dec[ii] = numpy.degrees(dec_obs.value)\n self.pa[ii] = numpy.degrees(\n sofa.iauHd2pa(ha_obs.value, dec_obs.value, rlat)\n )\n # earth rotation angle (from SOFA docs)\n # https://www.iausofa.org/2017_0420_C/sofa/sofa_ast_c.pdf\n era = sofa.iauEra00(ut1, 0) # time is sum of the 2 args\n _ra = numpy.degrees(era + rlong - ha_obs.value)\n _ra = _ra % 360 # wrap ra\n\n self.ra[ii] = _ra\n\n\n"
] | [
[
"numpy.degrees",
"numpy.radians",
"numpy.cos",
"numpy.rad2deg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
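`ICRS.to_epoch` and `Observed._fromICRS` above both convert catalogue proper motions (mas/yr, with the cos(dec) factor folded into pmra) into the radian-per-year quantities the SOFA routines expect. The helper below restates that conversion on its own; `pm_to_radians` and the example numbers are illustrative, not part of coordio.

```python
import numpy as np

def pm_to_radians(ra_deg, dec_deg, pmra_masyr, pmdec_masyr):
    """Catalogue values -> radians, as in ICRS.to_epoch: pmra is a true angle
    (includes cos(dec)), so divide that factor back out for SOFA."""
    rra = np.radians(ra_deg)
    rdec = np.radians(dec_deg)
    rpmra = np.radians(pmra_masyr / 1000. / 3600.) / np.cos(rdec)   # rad/yr
    rpmdec = np.radians(pmdec_masyr / 1000. / 3600.)                # rad/yr
    return rra, rdec, rpmra, rpmdec

print(pm_to_radians(10.0, 41.2, 50.0, -30.0))
```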
arpanmangal/coinaction | [
"488eb4fea833ecf5df65acdf12e55592099efc93"
] | [
"mmaction/models/tenons/backbones/resnet_s3d.py"
] | [
"import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\n\nfrom ....utils.misc import rgetattr, rhasattr\nfrom .resnet import ResNet \nfrom mmcv.cnn import constant_init, kaiming_init\nfrom mmcv.runner import load_checkpoint\n\nfrom ....ops.trajectory_conv_package.traj_conv import TrajConv\nfrom .. import flownets\n\n\nfrom ...registry import BACKBONES\n\ndef conv3x3x3(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1):\n \"3x3x3 convolution with padding\"\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=(temporal_stride, spatial_stride, spatial_stride),\n padding=dilation,\n dilation=dilation,\n bias=False)\n\n\ndef conv1x3x3(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1):\n \"1x3x3 convolution with padding\"\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=(1,3,3),\n stride=(temporal_stride, spatial_stride, spatial_stride),\n padding=(0, dilation, dilation),\n dilation=dilation,\n bias=False)\n\n\ndef conv3x1x1(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1, bias=False):\n \"3x1x1 convolution with padding\"\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=(3,1,1),\n stride=(temporal_stride, spatial_stride, spatial_stride),\n padding=(dilation,0,0),\n dilation=dilation,\n bias=bias)\n\ndef trajconv3x1x1(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1, bias=False):\n \"3x1x1 convolution with padding\"\n return TrajConv(\n in_planes,\n out_planes,\n kernel_size=(3,1,1),\n stride=(temporal_stride, spatial_stride, spatial_stride),\n padding=(dilation,0,0),\n dilation=dilation,\n bias=bias)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self,\n inplanes,\n planes,\n spatial_stride=1,\n temporal_stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n if_inflate=True,\n with_cp=False,\n with_trajectory=False):\n super(BasicBlock, self).__init__()\n self.conv1 = conv1x3x3(inplanes, planes, spatial_stride, 1, dilation)\n self.bn1 = nn.BatchNorm3d(planes)\n self.relu = nn.ReLU(inplace=True)\n \n self.conv2 = conv1x3x3(planes, planes)\n self.bn2 = nn.BatchNorm3d(planes)\n\n self.if_inflate = if_inflate\n\n if self.if_inflate:\n self.conv1_t = conv3x1x1(planes, planes, 1, temporal_stride, dilation, bias=True)\n self.bn1_t = nn.BatchNorm3d(planes)\n if with_trajectory:\n self.conv2_t = trajconv3x1x1(planes, planes, bias=True)\n else:\n self.conv2_t = conv3x1x1(planes, planes, bias=True)\n self.bn2_t = nn.BatchNorm3d(planes)\n\n self.downsample = downsample\n self.spatial_stride = spatial_stride\n self.temporal_stride = temporal_stride\n self.dilation = dilation\n assert not with_cp\n\n self.with_trajectory = with_trajectory\n\n def forward(self, input):\n x, traj_src = input\n\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n if self.if_inflate:\n out = self.conv1_t(out)\n out = self.bn1_t(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n \n if self.if_inflate:\n out = self.relu(out)\n if self.with_trajectory:\n assert traj_src[0] is not None\n out = self.conv2_t(out, traj_src[0])\n else:\n out = self.conv2_t(out)\n out = self.bn2_t(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out, traj_src[1:]\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n spatial_stride=1,\n temporal_stride=1,\n dilation=1,\n 
downsample=None,\n style='pytorch',\n if_inflate=True,\n with_cp=False,\n with_trajectory=False):\n \"\"\"Bottleneck block for ResNet.\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer,\n if it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n super(Bottleneck, self).__init__()\n assert style in ['pytorch', 'caffe']\n self.inplanes = inplanes\n self.planes = planes\n if style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = spatial_stride\n self.conv1_stride_t = 1\n self.conv2_stride_t = temporal_stride\n else:\n self.conv1_stride = spatial_stride\n self.conv2_stride = 1\n self.conv1_stride_t = temporal_stride\n self.conv2_stride_t = 1\n\n self.conv1 = nn.Conv3d(\n inplanes,\n planes,\n kernel_size=1,\n stride=(self.conv1_stride_t, self.conv1_stride, self.conv1_stride),\n bias=False)\n\n self.conv2 = nn.Conv3d(\n planes,\n planes,\n kernel_size=(1,3,3),\n stride=(1, self.conv2_stride, self.conv2_stride),\n padding=(0, dilation, dilation),\n dilation=(1, dilation, dilation),\n bias=False)\n\n self.if_inflate = if_inflate\n if self.if_inflate:\n self.conv2_t = nn.Conv3d(\n planes,\n planes,\n kernel_size=(3,1,1),\n stride=(self.conv2_stride_t,1,1),\n padding=(1,0,0),\n dilation=1,\n bias=True)\n self.bn2_t = nn.BatchNorm3d(planes)\n\n self.bn1 = nn.BatchNorm3d(planes)\n self.bn2 = nn.BatchNorm3d(planes)\n self.conv3 = nn.Conv3d(\n planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.spatial_tride = spatial_stride\n self.temporal_tride = temporal_stride\n self.dilation = dilation\n self.with_cp = with_cp\n\n self.with_trajectory = with_trajectory\n\n def forward(self, x):\n\n def _inner_forward(xx):\n x, traj_src = xx\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n if self.if_inflate:\n if self.with_trajectory:\n assert traj_src is not None\n out = self.conv2_t(out, traj_src[0])\n else:\n out = self.conv2_t(out)\n out = self.bn2_t(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out, traj_src[1:]\n\n if self.with_cp and x.requires_grad:\n out, traj_remains = cp.checkpoint(_inner_forward, x)\n else:\n out, traj_remains = _inner_forward(x)\n\n out = self.relu(out)\n\n return out, traj_remains\n\n\ndef make_res_layer(block,\n inplanes,\n planes,\n blocks,\n spatial_stride=1,\n temporal_stride=1,\n dilation=1,\n style='pytorch',\n inflate_freq=1,\n with_cp=False,\n traj_src_indices=-1):\n traj_src_indices = traj_src_indices if not isinstance(traj_src_indices, int) else (traj_src_indices, ) * blocks\n inflate_freq = inflate_freq if not isinstance(inflate_freq, int) else (inflate_freq, ) * blocks\n assert len(inflate_freq) == blocks\n downsample = None\n if spatial_stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv3d(\n inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=(temporal_stride, spatial_stride, spatial_stride),\n bias=False),\n nn.BatchNorm3d(planes * block.expansion),\n )\n\n layers = []\n layers.append(\n block(\n inplanes,\n planes,\n spatial_stride,\n temporal_stride,\n dilation,\n downsample,\n style=style,\n if_inflate=(inflate_freq[0] == 1),\n with_trajectory=(traj_src_indices[0]>-1),\n with_cp=with_cp))\n inplanes = planes * 
block.expansion\n for i in range(1, blocks):\n layers.append(\n block(inplanes,\n planes,\n 1, 1,\n dilation,\n style=style,\n if_inflate= (inflate_freq[i] == 1),\n with_trajectory=(traj_src_indices[i]>-1),\n with_cp=with_cp))\n\n return nn.Sequential(*layers)\n\n\[email protected]_module\nclass ResNet_S3D(nn.Module):\n \"\"\"ResNet_S3D backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n num_stages (int): Resnet stages, normally 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n not freezing any parameters.\n bn_eval (bool): Whether to set BN layers to eval mode, namely, freeze\n running stats (mean and var).\n bn_frozen (bool): Whether to freeze weight and bias of BN layers.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n \"\"\"\n\n arch_settings = {\n 18: (BasicBlock, (2, 2, 2, 2)),\n 34: (BasicBlock, (3, 4, 6, 3)),\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self,\n depth,\n pretrained=None,\n num_stages=4,\n spatial_strides=(1, 2, 2, 2),\n temporal_strides=(1, 1, 1, 1),\n dilations=(1, 1, 1, 1),\n out_indices=(0, 1, 2, 3),\n conv1_kernel_t=5,\n conv1_stride_t=2,\n pool1_kernel_t=1,\n pool1_stride_t=2,\n use_pool2=True,\n style='pytorch',\n frozen_stages=-1,\n inflate_freq=(1, 1, 1, 1), # For C2D baseline, this is set to -1.\n bn_eval=True,\n bn_frozen=False,\n partial_bn=False,\n with_cp=False,\n with_trajectory=False,\n trajectory_source_indices=-1,\n trajectory_downsample_method='ave',\n conv_bias=0.2):\n super(ResNet_S3D, self).__init__()\n if depth not in self.arch_settings:\n raise KeyError('invalid depth {} for resnet'.format(depth))\n self.depth = depth\n self.pretrained = pretrained\n self.num_stages = num_stages\n assert num_stages >= 1 and num_stages <= 4\n self.spatial_strides = spatial_strides\n self.temporal_strides = temporal_strides\n self.dilations = dilations\n assert len(spatial_strides) == len(temporal_strides) == len(dilations) == num_stages\n self.out_indices = out_indices\n assert max(out_indices) < num_stages\n self.style = style\n self.frozen_stages = frozen_stages\n self.inflate_freqs = inflate_freq if not isinstance(inflate_freq, int) else (inflate_freq, ) * num_stages\n self.bn_eval = bn_eval\n self.bn_frozen = bn_frozen\n self.partial_bn = partial_bn\n self.with_cp = with_cp\n\n self.with_trajectory = with_trajectory\n self.trajectory_source_indices = trajectory_source_indices \\\n if not isinstance(trajectory_source_indices, int) else [trajectory_source_indices, ] * num_stages\n self.trajectory_downsample_method = trajectory_downsample_method\n\n self.conv_bias = conv_bias\n\n self.block, stage_blocks = self.arch_settings[depth]\n self.stage_blocks = stage_blocks[:num_stages]\n for stage in range(num_stages):\n self.trajectory_source_indices[stage] = self.trajectory_source_indices[stage] \\\n if not isinstance(self.trajectory_source_indices[stage], int) else (self.trajectory_source_indices[stage], ) * self.stage_blocks[stage]\n self.inplanes = 64\n\n if conv1_kernel_t > 1:\n self.conv1 = nn.Conv3d(\n 3, 64, 
kernel_size=(1,7,7), stride=(1,2,2), padding=(0,3,3), bias=False)\n self.conv1_t = nn.Conv3d(\n 64, 64, kernel_size=(conv1_kernel_t,1,1), stride=(conv1_stride_t,1,1), padding=((conv1_kernel_t-1)//2,1,1), bias=True)\n self.bn1_t = nn.BatchNorm3d(64)\n else:\n self.conv1 = nn.Conv3d(\n 3, 64, kernel_size=(1,7,7), stride=(conv1_stride_t,2,2), padding=(0,3,3), bias=False)\n self.bn1 = nn.BatchNorm3d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(pool1_kernel_t,3,3), stride=(pool1_stride_t,2,2), padding=(pool1_kernel_t//2,1,1))\n self.use_pool2 = use_pool2\n if self.use_pool2:\n self.pool2 = nn.MaxPool3d(kernel_size=(3,1,1), stride=(2,1,1), padding=(1,0,0))\n\n self.res_layers = []\n for i, num_blocks in enumerate(self.stage_blocks):\n traj_src_indices = self.trajectory_source_indices[i] \\\n if not isinstance(self.trajectory_source_indices[i], int) \\\n else (self.trajectory_source_indices[i], ) * num_blocks\n spatial_stride = spatial_strides[i]\n temporal_stride = temporal_strides[i]\n dilation = dilations[i]\n planes = 64 * 2**i\n res_layer = make_res_layer(\n self.block,\n self.inplanes,\n planes,\n num_blocks,\n spatial_stride=spatial_stride,\n temporal_stride=temporal_stride,\n dilation=dilation,\n style=self.style,\n inflate_freq=self.inflate_freqs[i],\n with_cp=with_cp,\n traj_src_indices=traj_src_indices)\n self.inplanes = planes * self.block.expansion\n layer_name = 'layer{}'.format(i + 1)\n self.add_module(layer_name, res_layer)\n self.res_layers.append(layer_name)\n\n self.feat_dim = self.block.expansion * 64 * 2**(\n len(self.stage_blocks) - 1)\n\n def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = logging.getLogger()\n resnet2d = ResNet(self.depth)\n load_checkpoint(resnet2d, self.pretrained, strict=False, logger=logger)\n for name, module in self.named_modules():\n if isinstance(module, nn.Conv3d) or isinstance(module, TrajConv):\n if rhasattr(resnet2d, name):\n new_weight = rgetattr(resnet2d, name).weight.data.unsqueeze(2).expand_as(module.weight) / module.weight.data.shape[2]\n module.weight.data.copy_(new_weight)\n if hasattr(module, 'bias') and module.bias is not None:\n new_bias = rgetattr(resnet2d, name).bias.data\n module.bias.data.copy_(new_bias)\n else:\n kaiming_init(module, bias=self.conv_bias)\n elif isinstance(module, nn.BatchNorm3d):\n if rhasattr(resnet2d, name):\n for attr in ['weight', 'bias', 'running_mean', 'running_var']:\n setattr(module, attr, getattr(rgetattr(resnet2d, name), attr))\n else:\n constant_init(module, 1)\n elif self.pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n kaiming_init(m, bias=self.conv_bias)\n elif isinstance(m, nn.BatchNorm3d):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x, trajectory_forward=None, trajectory_backward=None):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n outs = []\n for i, layer_name in enumerate(self.res_layers):\n res_layer = getattr(self, layer_name)\n y = []\n for j in self.trajectory_source_indices[i]:\n if j > -1:\n flow_forward = trajectory_forward[j] ## N, 2*T, H, W (..x3y3x4y4..)\n flow_backward = trajectory_backward[j]\n flow_forward = flow_forward.view((flow_forward.size(0), -1, 2, flow_forward.size(2), flow_forward.size(3)))\n flow_backward = flow_backward.view((flow_backward.size(0), -1, 2, flow_backward.size(2), flow_backward.size(3)))\n flow_forward_x, flow_forward_y = torch.split(flow_forward, 1, 2)\n flow_backward_x, 
flow_backward_y = torch.split(flow_backward, 1, 2)\n flow_backward_x = flow_backward_x.flip(1).view((flow_backward_x.size(0), 1, flow_backward_x.size(1),\n flow_backward_x.size(3), flow_backward_x.size(4))) # N,T,1,H,W => N,1,T,H,W\n flow_backward_y = flow_backward_y.flip(1).view((flow_backward_y.size(0), 1, flow_backward_y.size(1),\n flow_backward_y.size(3), flow_backward_y.size(4)))\n flow_forward_x = flow_forward_x.view((flow_forward_x.size(0), 1, flow_forward_x.size(1),\n flow_forward_x.size(3), flow_forward_x.size(4)))\n flow_forward_y = flow_forward_y.view((flow_forward_y.size(0), 1, flow_forward_y.size(1),\n flow_forward_y.size(3), flow_forward_y.size(4)))\n flow_zero = torch.zeros_like(flow_forward_x)\n y.append(torch.cat((flow_backward_y, flow_backward_x, flow_zero, flow_zero, flow_forward_y, flow_forward_x), 1))\n else:\n y.append(None)\n \n x, remains = res_layer((x, y))\n assert len(remains) == 0 ## TODO: delete if check passes\n if i in self.out_indices:\n outs.append(x)\n if self.use_pool2 and i == 0:\n x = self.pool2(x)\n if len(outs) == 1:\n return outs[0]\n else:\n return tuple(outs)\n\n def train(self, mode=True):\n super(ResNet_S3D, self).train(mode)\n if self.bn_eval:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm3d):\n m.eval()\n if self.bn_frozen:\n for params in m.parameters():\n params.requires_grad = False\n if self.partial_bn:\n for i in range(1, self.frozen_stages + 1):\n mod = getattr(self, 'layer{}'.format(i))\n for m in mod.modules():\n if isinstance(m, nn.BatchNorm3d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n if mode and self.frozen_stages >= 0:\n for param in self.conv1.parameters():\n param.requires_grad = False\n for param in self.bn1.parameters():\n param.requires_grad = False\n self.bn1.eval()\n self.bn1.weight.requires_grad = False\n self.bn1.bias.requires_grad = False\n for i in range(1, self.frozen_stages + 1):\n mod = getattr(self, 'layer{}'.format(i))\n mod.eval()\n for param in mod.parameters():\n param.requires_grad = False\n"
] | [
[
"torch.nn.Sequential",
"torch.cat",
"torch.zeros_like",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.utils.checkpoint.checkpoint",
"torch.split",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
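The `BasicBlock` in `resnet_s3d.py` above factorises each 3-D convolution into a 1x3x3 spatial conv (`conv1x3x3`) followed by a 3x1x1 temporal conv (`conv3x1x1`), the S3D idea. The module below is a condensed sketch of that separable pair, not the block's exact normalisation/activation layering; `SeparableConv3d` is an illustrative name.

```python
import torch
import torch.nn as nn

class SeparableConv3d(nn.Module):
    """S3D-style factorisation: 1x3x3 spatial conv then 3x1x1 temporal conv
    (kernel sizes are ordered (T, H, W))."""
    def __init__(self, in_planes, out_planes, spatial_stride=1, temporal_stride=1):
        super().__init__()
        self.spatial = nn.Conv3d(in_planes, out_planes, kernel_size=(1, 3, 3),
                                 stride=(1, spatial_stride, spatial_stride),
                                 padding=(0, 1, 1), bias=False)
        self.temporal = nn.Conv3d(out_planes, out_planes, kernel_size=(3, 1, 1),
                                  stride=(temporal_stride, 1, 1),
                                  padding=(1, 0, 0), bias=True)
        self.bn = nn.BatchNorm3d(out_planes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):                       # x: [N, C, T, H, W]
        return self.relu(self.bn(self.temporal(self.spatial(x))))

x = torch.randn(2, 3, 8, 56, 56)
print(SeparableConv3d(3, 64)(x).shape)          # torch.Size([2, 64, 8, 56, 56])
```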
RJPenic/Orpheus | [
"1a795fa732a10d4f16b48cf00808a125548e00d5"
] | [
"orpheus/dataset.py"
] | [
"import torch\nfrom dataclasses import dataclass, field\nfrom typing import List\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torch.nn.utils.rnn import pad_sequence\nimport csv\n\nfrom nltk.corpus import stopwords\nimport random\n\nfrom tqdm import tqdm\n\n@dataclass\nclass Instance:\n label : int\n text : List[List[str]] = field(default_factory = list)\n\n def __init__(self, label : int, text : List[List[str]]):\n self.label = label\n self.text = text\n\nclass LyricsDataset(torch.utils.data.Dataset):\n def __init__(self, instances, max_vocab_size = 30000, max_lines = 30, max_words_per_line = 10, remove_stop_words = False):\n self.instances = instances\n self.stop_words = set(stopwords.words('english'))\n self.stop_words.update(['you\\'re', 'i\\'m', 'she\\'s', 'he\\'s', 'it\\'s', '\\'re', '\\'m', '\\'s'])\n\n self.max_vocab_size = max_vocab_size\n self.max_lines = max_lines\n self.max_words_per_line = max_words_per_line\n self.remove_stop_words = remove_stop_words\n\n self.text_vocab = self.construct_vocab(instances)\n\n def get_subset_vocab(self, indices):\n return self.construct_vocab([self.instances[i] for i in indices])\n\n def construct_vocab(self, instances):\n ct_txt = {}\n\n for instance in instances:\n for line in instance.text:\n for token in line:\n if not (self.remove_stop_words and token in self.stop_words):\n ct_txt[token] = ct_txt.get(token, 0) + 1\n\n return Vocab(ct_txt, self.max_lines, self.max_words_per_line, max_size = self.max_vocab_size)\n\n @staticmethod\n def from_file(filename, labels, take_rates = None, max_lines = 30, max_words_per_line = 10, skip_first_line = True, remove_stop_words = True, max_vocab_size = 30000):\n instances = []\n\n if take_rates is None:\n take_rates = [1.0] * len(labels)\n\n with open(filename) as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\n\n print(\"Loading dataset...\")\n for i, row in tqdm(enumerate(csv_reader)):\n if i == 0 and skip_first_line:\n continue\n\n label = row[5].lower()\n\n if label not in labels:\n continue\n\n if take_rates[labels.index(label)] < random.random():\n continue\n\n instances.append(Instance(\n int(labels.index(label)),\n [line.split() for line in row[6].split('\\n')[:max_lines]]\n ))\n\n print(f'Number of instances : {len(instances)}')\n\n print(\"-- Labels --\")\n for i, l in enumerate(labels):\n print(f'{i} : {l}')\n print(\"------------\")\n\n return LyricsDataset(instances, max_vocab_size, max_lines, max_words_per_line, remove_stop_words)\n\n def __getitem__(self, i):\n return self.text_vocab.encode(self.instances[i].text), self.instances[i].label\n \n def __len__(self):\n return len(self.instances)\n\nclass Vocab:\n def __init__(self, frequencies, max_lines, max_words_per_line, max_size = -1, min_freq = 0,\n special = [\"<PAD>\", \"<UNK>\"]): # maybe add additional special for line padding ???\n self.stoi = {}\n self.max_lines = max_lines\n self.max_words_per_line = max_words_per_line\n\n for s in special:\n self.stoi[s] = len(self.stoi)\n\n sorted_tokens = sorted(frequencies.keys(), key = lambda k: -frequencies[k])\n\n for t in sorted_tokens:\n if min_freq > frequencies[t] or (len(self.stoi) >= max_size and max_size != -1) :\n break\n self.stoi[t.lower()] = len(self.stoi)\n\n def encode(self, text):\n encoded = []\n\n for j, line in enumerate(text):\n if j >= self.max_lines:\n break\n\n temp = []\n for i, token in enumerate(line):\n if i >= self.max_words_per_line:\n break\n\n temp.append(self.stoi.get(token.lower(), 
self.stoi[\"<UNK>\"]))\n \n encoded.append(temp)\n\n return encoded\n\ndef load_vec_file_to_dict(filename):\n with open(filename, encoding=\"utf8\") as f:\n content = f.readlines()\n \n content = [x.strip() for x in content]\n \n vecs = {}\n\n print(\"Loading word vector representation...\")\n for line in tqdm(content):\n elems = line.split()\n vecs[elems[0]] = torch.Tensor([float(n) for n in elems[1:]])\n \n return vecs\n \n \ndef load_vec_repr(vocab, d = 300, file = None, freeze = False):\n emb_mat = torch.randn(len(vocab.stoi), d)\n emb_mat[0] = torch.zeros(d)\n\n if file is not None:\n vecs = load_vec_file_to_dict(file)\n \n for k in vocab.stoi:\n if k in vecs:\n emb_mat[vocab.stoi[k]] = vecs[k]\n\n\n return nn.Embedding.from_pretrained(emb_mat, padding_idx = 0, freeze = freeze)\n\ndef pad_collate_fn(batch, pad_index = 0):\n texts, labels = list(zip(*batch))\n bsz = len(labels)\n\n nums_lines = [len(lines) for lines in texts]\n nums_words = [[len(line) for line in lines] for lines in texts]\n\n max_lines = max(nums_lines)\n max_words = max([max(nw) for nw in nums_words])\n\n texts_tensor = torch.full((bsz, max_lines, max_words), pad_index).long()\n line_lens_tensor = torch.full((bsz, max_lines), pad_index).long()\n\n for i, text in enumerate(texts):\n text_len = nums_lines[i]\n line_lens_tensor[i, :text_len] = torch.LongTensor(nums_words[i])\n for j, line in enumerate(text):\n line_len = nums_words[i][j]\n texts_tensor[i, j, :line_len] = torch.LongTensor(line)\n\n return texts_tensor, torch.LongTensor(labels)\n"
] | [
[
"torch.LongTensor",
"torch.nn.Embedding.from_pretrained",
"torch.full",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
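`pad_collate_fn` above pads a batch of variable-length lyrics, each a list of lines of word ids, into a dense (batch, max_lines, max_words) tensor plus per-line lengths. A self-contained sketch of that hierarchical padding follows; `pad_hierarchical` is an illustrative name and the tiny example batch is made up.

```python
import torch

def pad_hierarchical(batch_texts, pad_index=0):
    """Pad a batch of [lines][words] id lists into a (B, max_lines, max_words)
    LongTensor plus per-line lengths, mirroring pad_collate_fn above."""
    bsz = len(batch_texts)
    num_lines = [len(t) for t in batch_texts]
    num_words = [[len(line) for line in t] for t in batch_texts]
    max_lines = max(num_lines)
    max_words = max(max(nw) for nw in num_words)

    texts = torch.full((bsz, max_lines, max_words), pad_index, dtype=torch.long)
    line_lens = torch.zeros(bsz, max_lines, dtype=torch.long)
    for i, text in enumerate(batch_texts):
        line_lens[i, :num_lines[i]] = torch.tensor(num_words[i])
        for j, line in enumerate(text):
            texts[i, j, :len(line)] = torch.tensor(line)
    return texts, line_lens

texts, lens = pad_hierarchical([[[5, 7, 2], [3]], [[9]]])
print(texts.shape, lens.tolist())   # torch.Size([2, 2, 3]) [[3, 1], [1, 0]]
```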
jdailey/solarpanelcount | [
"39643e97d628c9317aca398d28e37ed25472a7f6"
] | [
"train_classification.py"
] | [
"\"\"\"Train the inception-v3 model on Solar Panel Identification dataset.\"\"\"\n\nfrom datetime import datetime\nimport os.path\nimport time\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nimport skimage\nimport skimage.io\nimport skimage.transform\nimport random\nimport pickle\nfrom collections import deque\n\nfrom inception import inception_model as inception\nfrom inception.slim import slim\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('ckpt_save_dir', 'ckpt/inception_classification',\n \"\"\"Directory for saving model checkpoint. \"\"\")\n\ntf.app.flags.DEFINE_string('ckpt_restore_dir', 'ckpt/inception_classification',\n \"\"\"Directory for restoring old model checkpoint. \"\"\")\n\ntf.app.flags.DEFINE_string('pretrained_model_ckpt_path', 'ckpt/inception-v3/model.ckpt-157585',\n \"\"\"If specified, restore this pretrained model \"\"\"\n \"\"\"before beginning any training.\"\"\")\n\ntf.app.flags.DEFINE_string('train_set_dir', 'SPI_train',\n \"\"\"Directory of training set\"\"\")\n\ntf.app.flags.DEFINE_integer('max_steps', 200000,\n \"\"\"Number of batches/steps to run.\"\"\")\n\ntf.app.flags.DEFINE_integer('num_gpus', 1,\n \"\"\"How many GPUs to use.\"\"\")\n\ntf.app.flags.DEFINE_boolean('fine_tune', True,\n \"\"\"If true, start from well-trained model on SPI dataset, else start from\n pretrained model on ImageNet\"\"\")\n\ntf.app.flags.DEFINE_float('initial_learning_rate', 0.001,\n \"\"\"Initial learning rate.\"\"\")\n\ntf.app.flags.DEFINE_float('num_epochs_per_decay', 5.0,\n \"\"\"Epochs after which learning rate decays.\"\"\")\n\ntf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.5,\n \"\"\"Learning rate decay factor.\"\"\")\n\n# basic parameters\nBATCH_SIZE = 32\nIMAGE_SIZE = 299\nNUM_CLASSES = 2\n\n# Constants dictating the learning rate schedule.\nRMSPROP_DECAY = 0.9 # Decay term for RMSProp.\nRMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.\nRMSPROP_EPSILON = 0.1 # Epsilon term for RMSProp.\n\ndef load_image(path):\n # load image and prepocess.\n rotate_angle_list = [0, 90, 180, 270]\n img = skimage.io.imread(path)\n resized_img = skimage.transform.resize(img, (IMAGE_SIZE, IMAGE_SIZE))\n if resized_img.shape[2] != 3:\n resized_img = resized_img[:, :, 0:3]\n rotate_angle = random.choice(rotate_angle_list)\n image = skimage.transform.rotate(resized_img, rotate_angle)\n return image\n\ndef train():\n # load train set list and transform it to queue.\n try:\n with open('train_set_list.pickle', 'r') as f:\n train_set_list = pickle.load(f)\n except:\n raise EnvironmentError('Data list not existed. Please run generate_data_list.py first.')\n random.shuffle(train_set_list)\n train_set_queue = deque(train_set_list)\n train_set_size = len(train_set_list)\n del train_set_list\n print ('Training set built. 
Size: '+str(train_set_size))\n\n # build the tensorflow graph.\n with tf.Graph().as_default() as g:\n\n global_step = tf.get_variable(\n 'global_step', [],\n initializer=tf.constant_initializer(0), trainable=False)\n\n num_batches_per_epoch = train_set_size / BATCH_SIZE\n decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,\n global_step,\n decay_steps,\n FLAGS.learning_rate_decay_factor,\n staircase=True)\n tf.summary.scalar('learning_rate', lr)\n\n # Create an optimizer that performs gradient descent.\n opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,\n momentum=RMSPROP_MOMENTUM,\n epsilon=RMSPROP_EPSILON)\n\n images = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])\n\n labels = tf.placeholder(tf.int32, shape=[BATCH_SIZE])\n\n logits = inception.inference(images, NUM_CLASSES, for_training=True,\n restore_logits=FLAGS.fine_tune,\n scope=None)\n\n inception.loss(logits, labels, batch_size=BATCH_SIZE)\n\n # Assemble all of the losses for the current tower only.\n losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope = None)\n\n # Calculate the total loss for the current tower.\n regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n total_loss = tf.add_n(losses + regularization_losses, name='total_loss')\n\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n\n with tf.control_dependencies([loss_averages_op]):\n total_loss = tf.identity(total_loss)\n\n batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,\n scope=None)\n\n # Calculate the gradients for the batch of data on this ImageNet\n # tower.\n grads = opt.compute_gradients(total_loss)\n\n # Apply gradients.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n tf.summary.histogram(var.op.name + '/gradients', grad)\n\n # Track the moving averages of all trainable variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n inception.MOVING_AVERAGE_DECAY, global_step)\n\n variables_to_average = (tf.trainable_variables() +\n tf.moving_average_variables())\n variables_averages_op = variable_averages.apply(variables_to_average)\n\n # Group all updates to into a single train op.\n batchnorm_updates_op = tf.group(*batchnorm_updates)\n train_op = tf.group(apply_gradient_op, variables_averages_op,\n batchnorm_updates_op)\n\n # Create a saver.\n saver = tf.train.Saver(tf.all_variables())\n\n # Build the summary operation from the last tower summaries.\n summary_op = tf.summary.merge_all()\n\n # Build an initialization operation to run below.\n init = tf.global_variables_initializer()\n\n # open session and initialize\n sess = tf.Session(config=tf.ConfigProto(\n log_device_placement=True))\n sess.run(init)\n\n # restore old 
checkpoint\n if FLAGS.fine_tune:\n checkpoint = tf.train.get_checkpoint_state(FLAGS.ckpt_restore_dir)\n if checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n else:\n print(\"Could not find old network weights\")\n else:\n variables_to_restore = tf.get_collection(\n slim.variables.VARIABLES_TO_RESTORE)\n restorer = tf.train.Saver(variables_to_restore)\n restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)\n print('%s: Pre-trained model restored from %s' %\n (datetime.now(), FLAGS.pretrained_model_checkpoint_path))\n\n summary_writer = tf.summary.FileWriter(\n FLAGS.ckpt_save_dir,\n graph_def=sess.graph.as_graph_def(add_shapes=True))\n\n step = 1\n while step <= FLAGS.max_steps:\n start_time = time.time()\n # construct image batch and label batch for one step train\n minibatch = []\n for count in xrange(0, BATCH_SIZE):\n element = train_set_queue.pop()\n minibatch.append(element)\n train_set_queue.appendleft(element)\n\n image_list = [load_image(d[0]) for d in minibatch]\n label_list = [d[1] for d in minibatch]\n\n image_batch = np.array(image_list)\n label_batch = np.array(label_list)\n\n image_batch = np.reshape(image_batch, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])\n label_batch = np.reshape(label_batch, [BATCH_SIZE])\n\n _, loss_value = sess.run([train_op, total_loss], feed_dict={images: image_batch, labels: label_batch})\n\n duration = time.time() - start_time\n\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step == 1 or step % 10 == 0:\n num_examples_per_step = BATCH_SIZE\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n\n print(format_str % (datetime.now(), step, loss_value,\n examples_per_sec, sec_per_batch))\n\n # shuttle the image list per epoch\n if step % num_batches_per_epoch == 0:\n random.shuffle(train_set_queue)\n\n # write summary periodically\n if step == 1 or step % 100 == 0:\n summary_str = sess.run(summary_op, feed_dict={images: image_batch, labels: label_batch})\n summary_writer.add_summary(summary_str, step)\n\n # Save the model checkpoint periodically.\n if step % 1000 == 0:\n checkpoint_path = os.path.join(FLAGS.ckpt_save_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n\n step += 1\n\n\nif __name__ == '__main__':\n train()\n"
] | [
[
"tensorflow.control_dependencies",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.moving_average_variables",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.group",
"tensorflow.add_n",
"tensorflow.summary.scalar",
"tensorflow.all_variables",
"tensorflow.Graph",
"tensorflow.get_collection",
"numpy.reshape",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.train.exponential_decay",
"tensorflow.ConfigProto",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"tensorflow.train.RMSPropOptimizer",
"numpy.isnan",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"numpy.array",
"tensorflow.summary.histogram",
"tensorflow.train.get_checkpoint_state",
"tensorflow.constant_initializer",
"tensorflow.app.flags.DEFINE_float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
maheshacherrypick/MLAlgorithms | [
"829c74cf7d79307fc6ca1d849e65b959fb10e5de"
] | [
"mla/metrics/tests/test_metrics.py"
] | [
"from __future__ import division\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_almost_equal\n\nfrom mla.metrics.base import check_data, validate_input\nfrom mla.metrics.metrics import get_metric\n\n\ndef test_data_validation():\n with pytest.raises(ValueError):\n check_data([], 1)\n\n with pytest.raises(ValueError):\n check_data([1, 2, 3], [3, 2])\n\n a, b = check_data([1, 2, 3], [3, 2, 1])\n\n assert np.all(a == np.array([1, 2, 3]))\n assert np.all(b == np.array([3, 2, 1]))\n\n\ndef metric(name):\n return validate_input(get_metric(name))\n\n\ndef test_classification_error():\n f = metric('classification_error')\n assert f([1, 2, 3, 4], [1, 2, 3, 4]) == 0\n assert f([1, 2, 3, 4], [1, 2, 3, 5]) == 0.25\n assert f([1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0]) == (1.0 / 6)\n\n\ndef test_absolute_error():\n f = metric('absolute_error')\n assert f([3], [5]) == [2]\n assert f([-1], [-4]) == [3]\n\n\ndef test_mean_absolute_error():\n f = metric('mean_absolute_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f([1, 2, 3], [3, 2, 1]) == 4 / 3\n\n\ndef test_squared_error():\n f = metric('squared_error')\n assert f([1], [1]) == [0]\n assert f([3], [1]) == [4]\n\n\ndef test_squared_log_error():\n f = metric('squared_log_error')\n assert f([1], [1]) == [0]\n assert f([3], [1]) == [np.log(2) ** 2]\n assert f([np.exp(2) - 1], [np.exp(1) - 1]) == [1.0]\n\n\ndef test_mean_squared_log_error():\n f = metric('mean_squared_log_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f([1, 2, 3, np.exp(1) - 1], [1, 2, 3, np.exp(2) - 1]) == 0.25\n\n\ndef test_root_mean_squared_log_error():\n f = metric('root_mean_squared_log_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f([1, 2, 3, np.exp(1) - 1], [1, 2, 3, np.exp(2) - 1]) == 0.5\n\n\ndef test_mean_squared_error():\n f = metric('mean_squared_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f(range(1, 5), [1, 2, 3, 6]) == 1\n\n\ndef test_root_mean_squared_error():\n f = metric('root_mean_squared_error')\n assert f([1, 2, 3], [1, 2, 3]) == 0\n assert f(range(1, 5), [1, 2, 3, 5]) == 0.5\n\n\ndef test_multiclass_logloss():\n f = metric('logloss')\n assert_almost_equal(f([1], [1]), 0)\n assert_almost_equal(f([1, 1], [1, 1]), 0)\n assert_almost_equal(f([1], [0.5]), -np.log(0.5))\n"
] | [
[
"numpy.exp",
"numpy.log",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wikfeldt/intro-to-dl | [
"7fb1fb6c520941143000c5e1b46c48c95db17ed6",
"7fb1fb6c520941143000c5e1b46c48c95db17ed6",
"7fb1fb6c520941143000c5e1b46c48c95db17ed6"
] | [
"day2/tf2-dvc-cnn-evaluate.py",
"day2/pytorch_dvc_cnn_pretrained.py",
"day2/tf2-dvc-cnn-simple-hvd.py"
] | [
"\n# coding: utf-8\n\n# # Dogs-vs-cats classification with CNNs\n# \n# In this notebook, we'll train a convolutional neural network (CNN,\n# ConvNet) to classify images of dogs from images of cats using\n# TensorFlow 2.0 / Keras. This notebook is largely based on the blog\n# post [Building powerful image classification models using very\n# little data]\n# (https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html)\n# by François Chollet.\n# \n# **Note that using a GPU with this notebook is highly recommended.**\n# \n# First, the needed imports.\n\nimport os, datetime, sys\nimport random\nimport pathlib\n\nimport tensorflow as tf\n\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import (Dense, Activation, Dropout, Conv2D,\n Flatten, MaxPooling2D, InputLayer)\nfrom tensorflow.keras.preprocessing.image import (ImageDataGenerator, \n array_to_img, \n img_to_array, load_img)\nfrom tensorflow.keras import applications, optimizers\n\nfrom tensorflow.keras.callbacks import TensorBoard\n\nimport numpy as np\n\nprint('Using Tensorflow version:', tf.__version__,\n 'Keras version:', tf.keras.__version__,\n 'backend:', tf.keras.backend.backend())\n\n# ## Data\n# \n# The test set consists of 22000 images.\n\nif 'DATADIR' in os.environ:\n DATADIR = os.environ['DATADIR']\nelse:\n DATADIR = \"/scratch/project_2003747/data/\"\n\ndatapath = os.path.join(DATADIR, \"dogs-vs-cats/train-2000/\")\n\nnimages = dict()\nnimages['test'] = 22000\n\n# ### Image paths and labels\n\ndef get_paths(dataset):\n data_root = pathlib.Path(datapath+dataset)\n image_paths = list(data_root.glob('*/*'))\n image_paths = [str(path) for path in image_paths]\n image_count = len(image_paths)\n assert image_count == nimages[dataset], \"Found {} images, expected {}\".format(image_count, nimages[dataset])\n return image_paths\n\nimage_paths = dict()\nimage_paths['test'] = get_paths('test')\n\nlabel_names = sorted(item.name for item in pathlib.Path(datapath+'train').glob('*/')\n if item.is_dir())\nlabel_to_index = dict((name, index) for index,name in enumerate(label_names))\n\ndef get_labels(dataset):\n return [label_to_index[pathlib.Path(path).parent.name]\n for path in image_paths[dataset]]\n \nimage_labels = dict()\nimage_labels['test'] = get_labels('test')\n\n# ### Data augmentation\n# \n# We need to resize all test images to a fixed size. Here we'll use\n# 160x160 pixels.\n# \n# Unlike the training images, we do not apply any random\n# transformations to the test images.\n\nINPUT_IMAGE_SIZE = [160, 160, 3]\n\ndef preprocess_image(image, augment):\n image = tf.image.decode_jpeg(image, channels=3)\n if augment:\n image = tf.image.resize(image, [256, 256])\n image = tf.image.random_crop(image, INPUT_IMAGE_SIZE)\n if random.random() < 0.5:\n image = tf.image.flip_left_right(image)\n else:\n image = tf.image.resize(image, INPUT_IMAGE_SIZE[:2])\n image /= 255.0 # normalize to [0,1] range\n return image\n\ndef load_and_augment_image(path, label):\n image = tf.io.read_file(path)\n return preprocess_image(image, True), label\n\ndef load_and_not_augment_image(path, label):\n image = tf.io.read_file(path)\n return preprocess_image(image, False), label\n\n# ### TF Datasets\n# \n# Let's now define our TF Dataset\n# (https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/data/Dataset#class_dataset)\n# for the test data. 
First the Datasets contain the filenames of the\n# images and the corresponding labels.\n\ntest_dataset = tf.data.Dataset.from_tensor_slices((image_paths['test'],\n image_labels['test']))\n\n# We then map() the filenames to the actual image data and decode the images.\n\nBATCH_SIZE = 32\n\ntest_dataset = test_dataset.map(load_and_not_augment_image, num_parallel_calls=10)\ntest_dataset = test_dataset.batch(BATCH_SIZE, drop_remainder=False)\ntest_dataset = test_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n# ### Initialization\n\nif len(sys.argv)<2:\n print('ERROR: model file missing')\n sys.exit()\n \nmodel = load_model(sys.argv[1])\n\nprint(model.summary())\n\n# ### Inference\n\nprint('Evaluating model', sys.argv[1])\nscores = model.evaluate(test_dataset, verbose=2)\nprint(\"Test set %s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n",
"# coding: utf-8\n\n# Dogs-vs-cats classification with CNNs\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import models\nfrom datetime import datetime\n\nfrom pytorch_dvc_cnn import get_train_loader, get_validation_loader, get_test_loader\nfrom pytorch_dvc_cnn import device, train, evaluate, get_tensorboard\n\nmodel_file = 'dvc_pretrained_cnn.pt'\nmodel_file_ft = 'dvc_pretrained_finetune.pt'\n\n\n# Option 2: Reuse a pre-trained CNN\n\nclass PretrainedNet(nn.Module):\n def __init__(self):\n super(PretrainedNet, self).__init__()\n self.vgg_features = models.vgg16(pretrained=True).features\n\n # Freeze the VGG16 layers\n for param in self.vgg_features.parameters():\n param.requires_grad = False\n\n self.fc1 = nn.Linear(512*4*4, 64)\n self.fc2 = nn.Linear(64, 1)\n\n def forward(self, x):\n x = self.vgg_features(x)\n\n # flattened 2D to 1D\n x = x.view(-1, 512*4*4)\n\n x = F.relu(self.fc1(x))\n return torch.sigmoid(self.fc2(x))\n\n\ndef train_main():\n # Learning 1: New layers\n\n model = PretrainedNet().to(device)\n\n params = filter(lambda p: p.requires_grad, model.parameters())\n optimizer = optim.SGD(params, lr=0.01)\n criterion = nn.BCELoss()\n\n print(model)\n\n batch_size = 25\n train_loader = get_train_loader(batch_size)\n validation_loader = get_validation_loader(batch_size)\n\n log = get_tensorboard('pretrained')\n epochs = 10\n\n start_time = datetime.now()\n for epoch in range(1, epochs + 1):\n train(model, train_loader, criterion, optimizer, epoch, log)\n\n with torch.no_grad():\n print('\\nValidation:')\n evaluate(model, validation_loader, criterion, epoch, log)\n\n end_time = datetime.now()\n print('Total training time: {}.'.format(end_time - start_time))\n\n torch.save(model.state_dict(), model_file)\n print('Wrote model to', model_file)\n\n # Learning 2: Fine-tuning\n log = get_tensorboard('finetuned')\n\n for name, layer in model.vgg_features.named_children():\n note = ' '\n for param in layer.parameters():\n note = '-'\n if int(name) >= 24:\n param.requires_grad = True\n note = '+'\n print(name, note, layer, len(param))\n\n params = filter(lambda p: p.requires_grad, model.parameters())\n # optimizer = optim.SGD(model.parameters(), lr=1e-3)\n optimizer = optim.RMSprop(params, lr=1e-5)\n criterion = nn.BCELoss()\n\n print(model)\n\n prev_epochs = epoch\n epochs = 20\n\n start_time = datetime.now()\n for epoch in range(1, epochs + 1):\n train(model, train_loader, criterion, optimizer, prev_epochs+epoch, log)\n\n with torch.no_grad():\n print('\\nValidation:')\n evaluate(model, validation_loader, criterion, prev_epochs+epoch, log)\n\n end_time = datetime.now()\n print('Total training time: {}.'.format(end_time - start_time))\n\n torch.save(model.state_dict(), model_file_ft)\n print('Wrote finetuned model to', model_file_ft)\n\n\ndef test_main():\n model = PretrainedNet()\n model.load_state_dict(torch.load(model_file))\n model.to(device)\n\n test_loader = get_test_loader(25)\n\n print('=========')\n print('Pretrained:')\n with torch.no_grad():\n evaluate(model, test_loader)\n\n model = PretrainedNet()\n model.load_state_dict(torch.load(model_file_ft))\n model.to(device)\n\n print('=========')\n print('Finetuned:')\n with torch.no_grad():\n evaluate(model, test_loader)\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--test', action='store_true')\n parser.add_argument('--model_file')\n args = parser.parse_args()\n\n if args.model_file:\n model_file 
= args.model_file\n\n if args.test:\n test_main()\n else:\n train_main()\n",
"\n\n# coding: utf-8\n\n# # Dogs-vs-cats classification with CNNs\n# \n# In this notebook, we'll train a convolutional neural network (CNN,\n# ConvNet) to classify images of dogs from images of cats using\n# TensorFlow 2.0 / Keras. This notebook is largely based on the blog\n# post [Building powerful image classification models using very\n# little data]\n# (https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html)\n# by François Chollet.\n# \n# **Note that using a GPU with this notebook is highly recommended.**\n# \n# First, the needed imports.\n\nimport os, datetime\nimport random\nimport pathlib\n\nimport tensorflow as tf\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import (Dense, Activation, Dropout, Conv2D,\n Flatten, MaxPooling2D, InputLayer)\nfrom tensorflow.keras.preprocessing.image import (ImageDataGenerator, \n array_to_img, \n img_to_array, load_img)\nfrom tensorflow.keras import applications, optimizers\n\nfrom tensorflow.keras.callbacks import TensorBoard\n\nimport numpy as np\n\n# Horovod: import\nimport horovod.tensorflow.keras as hvd\n\n# Horovod: initialize Horovod\nhvd.init()\n\nif hvd.rank() == 0:\n print('Using Tensorflow version:', tf.__version__,\n 'Keras version:', tf.keras.__version__,\n 'backend:', tf.keras.backend.backend())\n print('Using Horovod with', hvd.size(), 'workers')\n\n# Horovod: pin GPU to be used to process local rank (one GPU per process)\ngpus = tf.config.experimental.list_physical_devices('GPU')\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\nif gpus:\n tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')\n\n# ## Data\n# \n# The training dataset consists of 2000 images of dogs and cats, split\n# in half. In addition, the validation set consists of 1000 images,\n\nif 'DATADIR' in os.environ:\n DATADIR = os.environ['DATADIR']\nelse:\n DATADIR = \"/scratch/project_2003747/data/\"\n\ndatapath = os.path.join(DATADIR, \"dogs-vs-cats/train-2000/\")\n\nnimages = dict()\nnimages['train'] = 2000\nnimages['validation'] = 1000\n\n# ### Image paths and labels\n\ndef get_paths(dataset):\n data_root = pathlib.Path(datapath+dataset)\n image_paths = list(data_root.glob('*/*'))\n image_paths = [str(path) for path in image_paths]\n image_count = len(image_paths)\n assert image_count == nimages[dataset], \"Found {} images, expected {}\".format(image_count, nimages[dataset])\n return image_paths\n\nimage_paths = dict()\nimage_paths['train'] = get_paths('train')\nimage_paths['validation'] = get_paths('validation')\n\nlabel_names = sorted(item.name for item in pathlib.Path(datapath+'train').glob('*/')\n if item.is_dir())\nlabel_to_index = dict((name, index) for index,name in enumerate(label_names))\n\ndef get_labels(dataset):\n return [label_to_index[pathlib.Path(path).parent.name]\n for path in image_paths[dataset]]\n \nimage_labels = dict()\nimage_labels['train'] = get_labels('train')\nimage_labels['validation'] = get_labels('validation')\n\n# ### Data augmentation\n# \n# We need to resize all training and validation images to a fixed\n# size. Here we'll use 160x160 pixels.\n# \n# Then, to make the most of our limited number of training examples,\n# we'll apply random transformations (crop and horizontal flip) to\n# them each time we are looping over them. This way, we \"augment\" our\n# training dataset to contain more data. 
There are various\n# transformations readily available in TensorFlow, see tf.image\n# (https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/image)\n# for more information.\n\nINPUT_IMAGE_SIZE = [160, 160, 3]\n\ndef preprocess_image(image, augment):\n image = tf.image.decode_jpeg(image, channels=3)\n if augment:\n image = tf.image.resize(image, [256, 256])\n image = tf.image.random_crop(image, INPUT_IMAGE_SIZE)\n if random.random() < 0.5:\n image = tf.image.flip_left_right(image)\n else:\n image = tf.image.resize(image, INPUT_IMAGE_SIZE[:2])\n image /= 255.0 # normalize to [0,1] range\n return image\n\ndef load_and_augment_image(path, label):\n image = tf.io.read_file(path)\n return preprocess_image(image, True), label\n\ndef load_and_not_augment_image(path, label):\n image = tf.io.read_file(path)\n return preprocess_image(image, False), label\n\n\n# ### TF Datasets\n# \n# Let's now define our TF Datasets\n# (https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/data/Dataset#class_dataset)\n# for training and validation data. First the Datasets contain the\n# filenames of the images and the corresponding labels.\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices((image_paths['train'],\n image_labels['train']))\nvalidation_dataset = tf.data.Dataset.from_tensor_slices((image_paths['validation'],\n image_labels['validation']))\n\n# We then map() the filenames to the actual image data and decode the images.\n# Note that we shuffle and augment only the training data.\n\nBATCH_SIZE = 32\n\ntrain_dataset = train_dataset.map(load_and_augment_image, num_parallel_calls=10)\ntrain_dataset = train_dataset.shuffle(2000).batch(BATCH_SIZE, drop_remainder=True)\ntrain_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\nvalidation_dataset = validation_dataset.map(load_and_not_augment_image,\n num_parallel_calls=10)\nvalidation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=True)\nvalidation_dataset = validation_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n# ## Train a small CNN from scratch\n# \n# Similarly as with MNIST digits, we can start from scratch and train\n# a CNN for the classification task. 
However, due to the small number\n# of training images, a large network will easily overfit, regardless\n# of the data augmentation.\n# \n# ### Initialization\n\nmodel = Sequential()\n\nmodel.add(Conv2D(32, (3, 3), input_shape=INPUT_IMAGE_SIZE, activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1, activation='sigmoid'))\n\n# Horovod: adjust learning rate based on number of GPUs.\nopt = tf.keras.optimizers.RMSprop(0.001 * hvd.size())\n\n# Horovod: add Horovod DistributedOptimizer.\nopt = hvd.DistributedOptimizer(opt)\n\n# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow\n# uses hvd.DistributedOptimizer() to compute gradients.\nmodel.compile(loss='binary_crossentropy',\n optimizer=opt,\n metrics=['accuracy'],\n experimental_run_tf_function=False)\n#model.compile(loss='binary_crossentropy',\n# optimizer='rmsprop',\n# metrics=['accuracy'])\n\nif hvd.rank() == 0:\n print(model.summary())\n\ncallbacks = [\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n\n # Horovod: average metrics among workers at the end of every epoch.\n #\n # Note: This callback must be in the list before the ReduceLROnPlateau,\n # TensorBoard or other metrics-based callbacks.\n hvd.callbacks.MetricAverageCallback(),\n\n # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final\n # accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during\n # the first three epochs. See https://arxiv.org/abs/1706.02677 for details.\n hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=3, verbose=1),\n]\n\n# ### Learning\n\n# We'll use TensorBoard to visualize our progress during training.\n\n# Horovod: \nlogfile = \"dvc-cnn-simple-hvd-{}-\".format(hvd.rank())\nlogfile = logfile+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\nlogdir = os.path.join(os.getcwd(), \"logs\", logfile)\nprint('Rank:', hvd.rank(), 'TensorBoard log directory:', logdir)\nos.makedirs(logdir)\ncallbacks.append(TensorBoard(log_dir=logdir))\n\n# Horovod: reduce epochs\nepochs = 20 // hvd.size()\n\n# Horovod: write logs on worker 0.\nverbose = 2 if hvd.rank() == 0 else 0\n\nhistory = model.fit(train_dataset, epochs=epochs,\n validation_data=validation_dataset,\n callbacks=callbacks, verbose=verbose)\n\n# Horovod: \nif hvd.rank() == 0:\n fname = \"dvc-cnn-simple-hvd.h5\"\n print('Saving model to', fname)\n model.save(fname)\n"
] | [
[
"tensorflow.keras.models.load_model",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.image.flip_left_right",
"tensorflow.keras.backend.backend",
"tensorflow.image.random_crop",
"tensorflow.image.resize",
"tensorflow.io.read_file",
"tensorflow.image.decode_jpeg"
],
[
"torch.load",
"torch.nn.BCELoss",
"torch.optim.RMSprop",
"torch.nn.Linear",
"torch.no_grad",
"torch.optim.SGD"
],
[
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.layers.Dense",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.keras.layers.Conv2D",
"tensorflow.image.flip_left_right",
"tensorflow.keras.backend.backend",
"tensorflow.image.random_crop",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.image.resize",
"tensorflow.io.read_file",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"tensorflow.image.decode_jpeg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
msohaibalam/grove | [
"8c27a5d12923d6ace57956db6a249e8d01e33f35",
"8c27a5d12923d6ace57956db6a249e8d01e33f35"
] | [
"grove/tests/jordan_gradient/test_gradient_utils.py",
"grove/tests/simon/test_simon.py"
] | [
"import numpy as np\n\nfrom grove.alpha.jordan_gradient.gradient_utils import binary_to_real, \\\n measurements_to_bf\n\n\ndef test_binary_to_real():\n for sign in [1, -1]:\n decimal_rep = sign * 0.345703125\n binary_rep = str(sign * 0.010110001)\n\n decimal_convert = binary_to_real(binary_rep)\n\n assert(np.isclose(decimal_rep, decimal_convert))\n\n\ndef test_measurements_to_bf():\n measurements = [[1, 0, 0], [1, 0, 0], [1, 1, 0], [1, 0, 0]]\n true_bf = 0.01\n\n bf_from_measurements = measurements_to_bf(measurements)\n\n assert(np.isclose(true_bf, bf_from_measurements))\n",
"\"\"\"Test class for helper methods found simon\"\"\"\n\nfrom os.path import abspath, dirname\n\nimport numpy as np\nfrom mock import patch\nfrom pyquil.quil import Program\n\nfrom grove.simon.simon import Simon, create_1to1_bitmap, create_valid_2to1_bitmap\n\npackage_path = abspath(dirname(dirname(__file__)))\n\nEXPECTED_SIMON_ORACLE = np.load(package_path + '/simon/data/simon_test_oracle.npy')\n\n\ndef _create_expected_program():\n expected_prog = Program()\n expected_prog.defgate(\"SIMON_ORACLE\", EXPECTED_SIMON_ORACLE)\n expected_prog.inst(\"H 0\")\n expected_prog.inst(\"H 1\")\n expected_prog.inst(\"H 2\")\n\n expected_prog.inst(\"SIMON_ORACLE 5 4 3 2 1 0\")\n\n expected_prog.inst(\"H 0\")\n expected_prog.inst(\"H 1\")\n expected_prog.inst(\"H 2\")\n return expected_prog\n\n\ndef test_simon_class():\n \"\"\"Test is based on worked example of Watrous lecture\n https://cs.uwaterloo.ca/~watrous/CPSC519/LectureNotes/06.pdf\"\"\"\n simon_algo = Simon()\n\n with patch(\"pyquil.api.QVMConnection\") as qvm:\n # Need to mock multiple returns as an iterable\n qvm.run_and_measure.side_effect = [\n (np.asarray([1, 1, 1], dtype=int), ),\n (np.asarray([1, 1, 1], dtype=int), ),\n (np.asarray([1, 0, 0], dtype=int), ),\n (np.asarray([1, 1, 1], dtype=int), ),\n (np.asarray([0, 0, 0], dtype=int), ),\n (np.asarray([0, 1, 1], dtype=int), ),\n ]\n\n bit_string_mapping = {\n '000': '101',\n '001': '010',\n '010': '000',\n '011': '110',\n\n '100': '000',\n '101': '110',\n '110': '101',\n '111': '010'\n }\n\n mask = simon_algo.find_mask(qvm, bit_string_mapping)\n\n assert simon_algo.n_qubits == 3\n assert simon_algo.n_ancillas == 3\n assert simon_algo._qubits == [0, 1, 2, 3, 4, 5]\n assert simon_algo.computational_qubits == [0, 1, 2]\n assert simon_algo.ancillas == [3, 4, 5]\n\n assert mask == [1, 1, 0]\n assert simon_algo.simon_circuit.__str__() == _create_expected_program().__str__()\n\n\ndef test_unitary_function_return():\n simon_algo = Simon()\n bit_string_mapping = {\n '000': '101',\n '001': '010',\n '010': '000',\n '011': '110',\n\n '100': '000',\n '101': '110',\n '110': '101',\n '111': '010'\n }\n\n actual_return = simon_algo._compute_unitary_oracle_matrix(bit_string_mapping)\n np.testing.assert_equal(actual_return[0], EXPECTED_SIMON_ORACLE)\n\n\ndef test_unitary_oracle_func_computer():\n bit_string_mapping = {\n '0': '1',\n '1': '0',\n }\n np.testing.assert_equal(Simon()._compute_unitary_oracle_matrix(bit_string_mapping)[0],\n [[0., 0., 1., 0.],\n [0., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 0., 0., 1.]]\n )\n\n\ndef test_unitary_oracle_func_computer_2():\n bit_string_mapping = {\n '00': '10',\n '01': '11',\n '10': '00',\n '11': '01'\n }\n np.testing.assert_equal(Simon()._compute_unitary_oracle_matrix(bit_string_mapping)[0],\n [[0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],\n [0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 
0., 0., 0., 0., 0., 0., 1.],\n [0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.]]\n )\n\n\ndef test_no_substitution():\n simon_algo = Simon()\n simon_algo._dict_of_linearly_indep_bit_vectors = {\n 0: [1, 0, 1, 0, 0],\n 1: [0, 1, 0, 0, 0],\n 3: [0, 0, 0, 1, 0]\n }\n z = np.array([1, 1, 1, 0, 0]) # linear combination of first two rows hence won't add\n\n simon_algo._add_to_dict_of_indep_bit_vectors(z)\n W_actual = simon_algo._dict_of_linearly_indep_bit_vectors\n\n W_expected = {\n 0: [1, 0, 1, 0, 0],\n 1: [0, 1, 0, 0, 0],\n 3: [0, 0, 0, 1, 0]\n }\n\n np.testing.assert_equal(W_actual, W_expected)\n\n\ndef test_simple_conflict():\n simon_algo = Simon()\n simon_algo._dict_of_linearly_indep_bit_vectors = {\n 0: [1, 0, 1, 0, 0],\n 1: [0, 1, 0, 0, 0],\n 3: [0, 0, 0, 1, 0]\n }\n z = np.array([1, 0, 0, 0, 1]) # conflict with first row.\n\n simon_algo._add_to_dict_of_indep_bit_vectors(z)\n W_actual = simon_algo._dict_of_linearly_indep_bit_vectors\n\n W_expected = {\n 0: [1, 0, 1, 0, 0],\n 1: [0, 1, 0, 0, 0],\n 2: [0, 0, 1, 0, 1],\n 3: [0, 0, 0, 1, 0]\n }\n\n np.testing.assert_equal(W_actual, W_expected)\n\n\ndef test_insert_directly():\n simon_algo = Simon()\n simon_algo._dict_of_linearly_indep_bit_vectors = {\n 0: [1, 1, 0, 0, 0],\n 1: [0, 1, 0, 1, 0]\n }\n z = np.array([0, 0, 1, 0, 1])\n\n simon_algo._add_to_dict_of_indep_bit_vectors(z)\n W_actual = simon_algo._dict_of_linearly_indep_bit_vectors\n W_expected = {\n 0: [1, 1, 0, 0, 0],\n 1: [0, 1, 0, 1, 0],\n 2: [0, 0, 1, 0, 1]\n }\n\n np.testing.assert_equal(W_actual, W_expected)\n\n\ndef test_insert_after_xor():\n simon_algo = Simon()\n simon_algo._dict_of_linearly_indep_bit_vectors = {\n 0: [1, 0, 0, 0, 0, 0],\n 1: [0, 1, 1, 0, 0, 0]\n }\n\n z = np.array([0, 0, 1, 0, 1, 1])\n\n simon_algo._add_to_dict_of_indep_bit_vectors(z)\n W_actual = simon_algo._dict_of_linearly_indep_bit_vectors\n W_expected = {\n 0: [1, 0, 0, 0, 0, 0],\n 1: [0, 1, 1, 0, 0, 0],\n 2: [0, 0, 1, 0, 1, 1]\n }\n\n np.testing.assert_equal(W_actual, W_expected)\n\n\ndef test_add_row_at_top():\n simon_algo = Simon()\n simon_algo.n_qubits = 4\n simon_algo._dict_of_linearly_indep_bit_vectors = {\n 1: [0, 1, 0, 1],\n 2: [0, 0, 1, 0],\n 3: [0, 0, 0, 1]\n }\n insert_row_num = simon_algo._add_missing_msb_vector()\n\n W_actual = simon_algo._dict_of_linearly_indep_bit_vectors\n W_expected = {\n 0: [1, 0, 0, 0],\n 1: [0, 1, 0, 1],\n 2: [0, 0, 1, 0],\n 3: [0, 0, 0, 1]\n }\n\n assert insert_row_num == 0\n\n np.testing.assert_equal(W_actual, W_expected)\n\n\ndef test_add_row_at_bottom():\n simon_algo = Simon()\n simon_algo.n_qubits = 4\n simon_algo._dict_of_linearly_indep_bit_vectors = {\n 0: [1, 0, 0, 0],\n 1: [0, 1, 0, 1],\n 2: [0, 0, 1, 0]\n }\n insert_row_num = simon_algo._add_missing_msb_vector()\n\n W_actual = simon_algo._dict_of_linearly_indep_bit_vectors\n W_expected = {\n 0: [1, 0, 0, 0],\n 1: [0, 1, 0, 1],\n 2: [0, 0, 1, 0],\n 3: [0, 0, 0, 1]\n }\n assert insert_row_num == 3\n\n np.testing.assert_equal(W_actual, W_expected)\n\n\ndef test_add_row_in_middle():\n simon_algo = Simon()\n simon_algo.n_qubits = 5\n simon_algo._dict_of_linearly_indep_bit_vectors = {\n 0: [1, 1, 0, 0, 0],\n 2: [0, 0, 1, 0, 1],\n 3: [0, 0, 0, 1, 0],\n 4: [0, 0, 0, 0, 1]\n }\n insert_row_num = simon_algo._add_missing_msb_vector()\n\n W_actual = simon_algo._dict_of_linearly_indep_bit_vectors\n 
W_expected = {\n 0: [1, 1, 0, 0, 0],\n 1: [0, 1, 0, 0, 0],\n 2: [0, 0, 1, 0, 1],\n 3: [0, 0, 0, 1, 0],\n 4: [0, 0, 0, 0, 1]\n }\n\n assert insert_row_num == 1\n\n np.testing.assert_equal(W_actual, W_expected)\n\n\ndef test_bit_map_generation():\n mask = '101'\n expected_map = {\n '000': '101',\n '001': '100',\n '010': '111',\n '011': '110',\n '100': '001',\n '101': '000',\n '110': '011',\n '111': '010'\n }\n actual_map = create_1to1_bitmap(mask)\n assert actual_map == expected_map\n\n\ndef test_2to1_bit_map_generation():\n mask = '101'\n expected_map = {\n '000': '001',\n '101': '001',\n '001': '101',\n '100': '101',\n '010': '000',\n '111': '000',\n '011': '111',\n '110': '111'\n }\n # need to patch numpy as random seed behaves differently on\n # py27 vs. py36\n with patch(\"numpy.random.choice\") as rd_fake:\n rd_fake.return_value = ['001', '101', '000', '111']\n\n actual_map = create_valid_2to1_bitmap(mask)\n assert actual_map == expected_map\n\n\ndef test_check_mask_correct():\n sa = Simon()\n\n sa.mask = [1, 1, 0]\n sa.bit_map = {\n '000': '101',\n '001': '010',\n '010': '000',\n '011': '110',\n\n '100': '000',\n '101': '110',\n '110': '101',\n '111': '010'\n }\n\n assert sa._check_mask_correct()\n"
] | [
[
"numpy.isclose"
],
[
"numpy.asarray",
"numpy.load",
"numpy.array",
"numpy.testing.assert_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
spatialucr/lne | [
"951df7334c1495a133f6fc982eaa1d366f08a68b"
] | [
"LNCE/python/INCS.py"
] | [
"\"\"\"\nIndicators of Neighborhood Change\n\"\"\"\n\nfrom collections import defaultdict\nimport numpy as np\n\ndef _labels_to_neighborhoods(labels):\n \"\"\"Convert a list of labels to neighborhoods dictionary\n Arguments\n ---------\n labels: list of neighborhood labels\n Returns\n -------\n neighborhoods: dictionary\n key is the label for each neighborhood, value is the list of\n area indexes defining that neighborhood\n Examples\n --------\n >>> labels = [1,1,1,2,2,3]\n >>> neighborhoods = _labels_to_neighborhoods(labels)\n >>> neighborhoods[1]\n [0, 1, 2]\n >>> neighborhoods[2]\n [3, 4]\n >>> neighborhoods[3]\n [5]\n \"\"\"\n neighborhoods = defaultdict(list)\n for i, label in enumerate(labels):\n #if label != -9999:\n neighborhoods[label].append(i)\n return neighborhoods\n\n\ndef linc(labels_sequence):\n \"\"\"Local Indicator of Neighborhood Change\n Arguments\n ---------\n labels_sequence: sequence of neighborhood labels (n,t)\n n areas in n periods\n first element is a list of neighborhood labels per area in\n period 0, second element is a list of neighborhood labels\n per area in period 1, and so on for all T periods.\n Returns\n -------\n lincs: array\n local indicator of neighborhood change over all periods\n Notes\n -----\n The local indicator of neighborhood change defined here allows for\n singleton neighborhoods (i.e., neighborhoods composed of a single primitive\n area such as a tract or block.). This is in contrast to the initial\n implementation in :cite:`rey2011` which prohibited singletons.\n Examples\n --------\n Time period 0 has the city defined as four neighborhoods on 10 tracts:\n >>> labels_0 = [1, 1, 1, 1, 2, 2, 3, 3, 3, 4]\n Time period 1 in the same city, with slight change in composition of the four neighborhoods\n >>> labels_1 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]\n >>> res = linc([labels_0, labels_1])\n >>> res[4]\n 1.0\n >>> res[1]\n 0.25\n >>> res[7]\n 0.0\n >>> res[-1]\n 0.0\n And, in period 2, no change\n >>> labels_2 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]\n >>> res = linc([labels_1, labels_2])\n >>> res[0]\n 0.0\n We can pass more than two time periods, and get a \"time-wise global linc\"\n for each unit\n >>> res = linc([labels_0, labels_1, labels_2])\n >>> res[0]\n 0.25\n \"\"\"\n ltn = _labels_to_neighborhoods\n #print(labels_sequence)\n neighborhood_sequences = [ltn(labels) for labels in labels_sequence]\n #print(neighborhood_sequences[0])\n #print(neighborhood_sequences[1])\n ns = neighborhood_sequences\n n_areas = len(labels_sequence[0])\n lincs = np.zeros((n_areas,))\n\n T = len(labels_sequence)\n for i in range(n_areas):\n neighbors = []\n for t in range(T):\n if (labels_sequence[t][i] == -9999): continue\n neighbors.append(set(ns[t][labels_sequence[t][i]]))\n if (len(neighbors) < 2): \n lincs[i] = -9999\n else:\n intersection = set.intersection(*neighbors)\n union = set.union(*neighbors)\n n_union = len(union)\n if n_union == 1: # singleton at all points in time\n lincs[i] = 0.\n else:\n lincs[i] = round(1.0 - ((len(intersection)-1)/(n_union-1)),2)\n #print(\"Tract ID #\", i, \"-----------------------------------\")\t\t\n #print(\"*neighbors=\",*neighbors)\t\t\n #print(\"intersection= \",intersection)\n #print(\"union=\",union)\n #print(\" \")\n #print(\" \") \n return lincs\n\t\nif __name__ == '__main__':\t\n\t #0, 1, 2, 3, 4, 5, 6, 7, 8, 9\n tract70 = [1, 1, 2, 2, 3, 3, 1, 2, 2, 1 ]\t\n tract80 = [1, 1, 1, 3, 3, 3, 3, 2, 2, 3 ]\n tract90 = [1, 1, 3, 3, 2, 2, 3, 2, 2, 3 ]\t\n #0, 1, 2, 3, 4, 5, 6, 7, 8, 9\n #labels_0 = [1, 1, 1, 1, 2, 2, 3, 3, 4, 4] \n 
#labels_1 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]\n #INC_bw_70_80 = linc([tract70, tract80])\n INC_bw_80_90 = linc([tract80, tract90])\n #INC_bw_70_80_90 = linc([tract70, tract80, tract90])\n #print(\"INC_bw_70_80= \",INC_bw_70_80)\n print(\"INC_bw_80_90= \",INC_bw_80_90)\n #print(\"INC_bw_70_80_90= \",INC_bw_70_80_90)\n\n\t #tractID: 0 1 2 3 4 5 6 7 8 9\n#labels_0 = [1, 1, 1, 1, 2, 2, 3, 3, 4, 4] \n#labels_1 = [1, 1, 1, 1, 1, 2, 3, 3, 3, 4]\n#Res = [0.25, 0.25, 0.25, 0.25, 1.00, 1.00 ,0.5, 0.5, 1.00, 1.00 ]\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
justinpak13/Watch-Analysis | [
"a7ce05c4fef859d66675d801e8a05f3f7e8e8d4e"
] | [
"dashboard.py"
] | [
"import dash\nfrom dash.dependencies import Input, Output\nfrom dash import dash_table\nfrom dash import dcc\nfrom dash import html\nimport pandas as pd\n\n# Import data into pandas\ndf = pd.read_csv(\"data.csv\")\ndf[\"Condition\"] = df[\"Condition Category\"]\ndf = df.drop([\"Condition Category\", \"Missed Prices\", \"Index\", \"SKU\"], axis=1)\n\ndf = df[\n [\n \"Brand\",\n \"Model\",\n \"Reference\",\n \"Year\",\n \"Condition\",\n \"Papers\",\n \"Box\",\n \"Movement\",\n \"Dimensions\",\n \"Gender\",\n \"Case\",\n \"Bracelet\",\n \"Crystal\",\n \"Dial Color\",\n \"Price\",\n \"Features\",\n \"Link\",\n ]\n]\n\napp = dash.Dash(__name__)\n\nmoney = dash_table.FormatTemplate.money(0)\n# App Layout\napp.layout = html.Div(\n [\n # Title\n html.H1(\"Watch Data\", style={\"text-align\": \"center\"}),\n # Dropdowns\n html.Div(\n className=\"row\",\n children=[\n # First dropdown\n html.Div(\n children=[\n html.Label([\"Brand\"], style={\"text-align\": \"center\"},),\n dcc.Dropdown(\n id=\"brand_dropdown\",\n options=[\n {\"label\": i, \"value\": i}\n for i in df[\"Brand\"].sort_values().unique()\n ],\n value=None,\n clearable=True,\n searchable=True,\n ),\n ],\n style=dict(width=\"50%\"),\n ),\n # Second dropdown\n html.Div(\n children=[\n html.Label([\"Model\"], style={\"text-align\": \"center\"},),\n dcc.Dropdown(\n id=\"model_dropdown\",\n value=None, # [![enter image description here][1]][1]\n clearable=True,\n searchable=True,\n ),\n ],\n style=dict(width=\"50%\"),\n ),\n html.Div(\n children=[\n html.Label([\"Price\"], style={\"text-align\": \"center\"},),\n dcc.RangeSlider(\n id=\"range_slider\",\n tooltip={\"placement\": \"bottom\", \"always_visible\": True},\n ),\n ],\n style=dict(width=\"50%\"),\n ),\n ],\n style=dict(display=\"flex\"),\n ),\n html.Br(),\n html.Div(\n [\n dash_table.DataTable(\n id=\"table\",\n filter_action=\"native\",\n sort_action=\"native\",\n style_cell={\"textAlign\": \"left\", \"minWidth\": 110, \"width\": 110},\n style_table={\"minWidth\": \"100%\"},\n style_cell_conditional=[\n {\"if\": {\"column_id\": \"Features\"}, \"textAlign\": \"right\",},\n {\"if\": {\"column_id\": \"Link\"}, \"textAlign\": \"right\"},\n ],\n style_data_conditional=[\n {\n \"if\": {\"row_index\": \"odd\"},\n \"backgroundColor\": \"rgb(220, 220, 220)\",\n }\n ],\n style_header={\n \"backgroundColor\": \"rgb(210, 210, 210)\",\n \"color\": \"black\",\n \"fontWeight\": \"bold\",\n },\n )\n ]\n ),\n ]\n)\n\n# Connecting Dash Components\[email protected](\n [Output(component_id=\"model_dropdown\", component_property=\"options\")],\n [Input(component_id=\"brand_dropdown\", component_property=\"value\")],\n)\ndef update_model(brand_selected):\n\n dff = df[df[\"Brand\"] == brand_selected]\n return [[{\"label\": i, \"value\": i} for i in dff[\"Model\"].sort_values().unique()]]\n\n\[email protected](\n [\n Output(component_id=\"range_slider\", component_property=\"min\"),\n Output(component_id=\"range_slider\", component_property=\"max\"),\n Output(component_id=\"range_slider\", component_property=\"value\"),\n ],\n [\n Input(component_id=\"brand_dropdown\", component_property=\"value\"),\n Input(component_id=\"model_dropdown\", component_property=\"value\"),\n ],\n)\ndef update_slider(brand_selected, model_selected):\n\n dff = df[(df[\"Brand\"] == brand_selected) & (df[\"Model\"] == model_selected)]\n return (\n dff[\"Price\"].min(),\n dff[\"Price\"].max(),\n [dff[\"Price\"].min(), dff[\"Price\"].max()],\n )\n\n\[email protected](\n [\n Output(component_id=\"table\", 
component_property=\"columns\"),\n Output(component_id=\"table\", component_property=\"data\"),\n ],\n [\n Input(component_id=\"brand_dropdown\", component_property=\"value\"),\n Input(component_id=\"model_dropdown\", component_property=\"value\"),\n Input(component_id=\"range_slider\", component_property=\"value\"),\n ],\n)\ndef update_table(brand_selected, model_selected, range):\n if brand_selected is None and model_selected is None:\n dff = df\n elif model_selected is None:\n dff = df[df[\"Brand\"] == brand_selected]\n else:\n dff = df[\n (df[\"Brand\"] == brand_selected)\n & (df[\"Model\"] == model_selected)\n & (df[\"Price\"] >= range[0])\n & (df[\"Price\"] <= range[1])\n ]\n return (\n [\n {\"name\": i, \"id\": i, \"hideable\": True, \"type\": \"numeric\", \"format\": money}\n if i == \"Price\"\n else {\"name\": i, \"id\": i, \"hideable\": True}\n for i in dff.columns\n ],\n dff.to_dict(\"records\"),\n )\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
xsir317/AlphaRenju | [
"d5fdcf8d1442e4e43661a4cee88c95d5c25fd45a"
] | [
"policy_value_net.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nAn implementation of the policyValueNet in Tensorflow\nTested in Tensorflow 1.4 and 1.5\n\n@author: Xiang Zhong\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass PolicyValueNet():\n def __init__(self, model_file=None):\n self.model_file = model_file\n self.loss_weight = [1.0,0.1] # policy weight and value weight\n\n #TODO https://github.com/NeymarL/ChineseChess-AlphaZero/blob/distributed/cchess_alphazero/agent/model.py 参考这个来弄一个残差网络,5层据说就很好用了。\n # Define the tensorflow neural network\n # 1. Input:\n self.input_states = tf.placeholder(\n tf.float32, shape=[None, 3, 15, 15])\n self.input_state = tf.transpose(self.input_states, [0, 2, 3, 1])\n # 2. Common Networks Layers\n self.conv1 = tf.layers.conv2d(inputs=self.input_state,\n filters=32, kernel_size=[3, 3],\n padding=\"same\", data_format=\"channels_last\",\n activation=tf.nn.relu)\n self.conv2 = tf.layers.conv2d(inputs=self.conv1, filters=64,\n kernel_size=[3, 3], padding=\"same\",\n data_format=\"channels_last\",\n activation=tf.nn.relu)\n self.conv3 = tf.layers.conv2d(inputs=self.conv2, filters=128,\n kernel_size=[3, 3], padding=\"same\",\n data_format=\"channels_last\",\n activation=tf.nn.relu)\n # 3-1 Action Networks\n self.action_conv = tf.layers.conv2d(inputs=self.conv3, filters=3,\n kernel_size=[1, 1], padding=\"same\",\n data_format=\"channels_last\",\n activation=tf.nn.relu)\n # Flatten the tensor\n self.action_conv_flat = tf.reshape(\n self.action_conv, [-1, 3 * 15 * 15])\n # 3-2 Full connected layer, the output is the log probability of moves\n # on each slot on the board\n self.action_fc = tf.layers.dense(inputs=self.action_conv_flat,\n units=15 * 15,\n activation=tf.nn.log_softmax)\n # 4 Evaluation Networks\n self.evaluation_conv = tf.layers.conv2d(inputs=self.conv3, filters=2,\n kernel_size=[1, 1],\n padding=\"same\",\n data_format=\"channels_last\",\n activation=tf.nn.relu)\n self.evaluation_conv_flat = tf.reshape(\n self.evaluation_conv, [-1, 2 * 15 * 15])\n self.evaluation_fc1 = tf.layers.dense(inputs=self.evaluation_conv_flat,\n units=64, activation=tf.nn.relu)\n # output the score of evaluation on current state\n self.evaluation_fc2 = tf.layers.dense(inputs=self.evaluation_fc1,\n units=1, activation=tf.nn.tanh)\n\n # Define the Loss function\n # 1. Label: the array containing if the game wins or not for each state\n self.labels = tf.placeholder(tf.float32, shape=[None, 1])\n # 2. Predictions: the array containing the evaluation score of each state\n # which is self.evaluation_fc2\n # 3-1. Value Loss function\n self.value_loss = tf.losses.mean_squared_error(self.labels,\n self.evaluation_fc2)\n # 3-2. Policy Loss function\n self.mcts_probs = tf.placeholder(\n tf.float32, shape=[None, 15 * 15])\n self.policy_loss = tf.negative(tf.reduce_mean(\n tf.reduce_sum(tf.multiply(self.mcts_probs, self.action_fc), 1)))\n # 3-3. 
L2 penalty (regularization)\n l2_penalty_beta = 1e-4\n vars = tf.trainable_variables()\n l2_penalty = l2_penalty_beta * tf.add_n(\n [tf.nn.l2_loss(v) for v in vars if 'bias' not in v.name.lower()])\n # 3-4 Add up to be the Loss function\n self.loss = self.loss_weight[0] * self.policy_loss + self.loss_weight[1] * self.value_loss + l2_penalty\n\n # Define the optimizer we use for training\n self.learning_rate = tf.placeholder(tf.float32)\n self.optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate).minimize(self.loss)\n\n # Make a session\n self.session = tf.Session()\n\n # calc policy entropy, for monitoring only\n self.entropy = tf.negative(tf.reduce_mean(\n tf.reduce_sum(tf.exp(self.action_fc) * self.action_fc, 1)))\n\n # Initialize variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n\n # For saving and restoring\n self.saver = tf.train.Saver()\n if self.model_file is not None and tf.train.checkpoint_exists(self.model_file):\n self.restore_model()\n print (\"restore from :\" , self.model_file)\n else:\n print (\"no file to load\")\n\n def policy_value(self, state_batch):\n \"\"\"\n input: a batch of states\n output: a batch of action probabilities and state values\n \"\"\"\n log_act_probs, value = self.session.run(\n [self.action_fc, self.evaluation_fc2],\n feed_dict={self.input_states: state_batch}\n )\n act_probs = np.exp(log_act_probs)\n return act_probs, value\n\n def policy_value_fn(self, board):\n \"\"\"\n input: board\n output: a list of (action, probability) tuples for each available\n action and the score of the board state\n \"\"\"\n legal_positions = board.availables\n current_state = np.ascontiguousarray(board.current_state().reshape(\n -1, 3, 15, 15))\n act_probs, value = self.policy_value(current_state)\n act_probs = zip(legal_positions, act_probs[0][legal_positions])\n return act_probs, value\n\n def train_step(self, state_batch, mcts_probs, winner_batch, lr):\n \"\"\"perform a training step\"\"\"\n winner_batch = np.reshape(winner_batch, (-1, 1))\n loss, entropy, _ = self.session.run(\n [self.loss, self.entropy, self.optimizer],\n feed_dict={self.input_states: state_batch,\n self.mcts_probs: mcts_probs,\n self.labels: winner_batch,\n self.learning_rate: lr})\n return loss, entropy\n\n def save_model(self):\n self.saver.save(self.session, self.model_file)\n\n def restore_model(self):\n self.saver.restore(self.session, self.model_file)\n"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.losses.mean_squared_error",
"tensorflow.transpose",
"tensorflow.multiply",
"numpy.reshape",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.layers.dense",
"tensorflow.exp",
"tensorflow.global_variables_initializer",
"tensorflow.nn.l2_loss",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"numpy.exp",
"tensorflow.train.checkpoint_exists"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
lsb-riken/CUBIC-informatics | [
"e7982072bb5d892f55e86cdf671376ab379b9b29",
"e7982072bb5d892f55e86cdf671376ab379b9b29"
] | [
"script/MergeBrain.py",
"script/HalfBrainCells.py"
] | [
"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n\"\"\"Overview:\n Downscale images & cells for altas mapping\n\nUsage:\n MergeBrain.py images PARAM_FILE [-p NUM_CPUS] [--exec <path>]\n MergeBrain.py cells PARAM_FILE\n MergeBrain.py full PARAM_FILE [-p NUM_CPUS] [--exec <path>]\n\nOptions:\n -h --help Show this screen.\n --version Show version.\n -p NUM_CPUS Number of cpus to be used [default: -1](all available).\n --exec <path> Location of the executable [default: ./build/ScaleMerge]\n\"\"\"\n\nimport json, glob, os.path, shutil\nimport tifffile\nimport functools\nfrom docopt import docopt\nimport joblib\nimport subprocess as sp\nimport pandas as pd\nimport numpy as np\n\nfrom HalfBrainCells import HalfBrainCells\nfrom HalfBrainImages import HalfBrainImages\n\n\ndt_scalemerged = np.dtype([\n ('scaled_x','f4'), ('scaled_y', 'f4'), ('scaled_z', 'f4'),\n ('is_valid', 'bool'),\n])\n\ndef run_ScaleMerge(paramfile, mergedfile, path_exec, logfile=None, print_output=True):\n mergedfile_mean,mergedfile_max,mergedfile_min = mergedfile\n cmd = \" \".join([path_exec, paramfile,\n mergedfile_mean,mergedfile_max,mergedfile_min])\n print(\"[*] Executing : {}\".format(cmd))\n out = sp.check_output([path_exec, paramfile,\n mergedfile_mean,mergedfile_max,mergedfile_min])\n if logfile:\n with open(logfile, \"wb\") as f:\n f.write(out)\n else:\n if print_output:\n print(out.decode())\n return\n\nclass WholeBrainImages(object):\n def __init__(self, paramfile, ):\n print(\"\\n[*] Initializing WholeBrain({})\".format(paramfile))\n with open(paramfile) as f:\n self.params = json.load(f)\n\n self.halfbrain_FW = HalfBrainImages(self.params[\"HDoG_paramfile\"][\"FW\"])\n self.halfbrain_RV = HalfBrainImages(self.params[\"HDoG_paramfile\"][\"RV\"])\n\n # asuume scale is equivalent for FW & RV except for direction\n assert abs(self.halfbrain_FW.scale_xy) == abs(self.halfbrain_RV.scale_xy)\n assert abs(self.halfbrain_FW.scale_z) == abs(self.halfbrain_RV.scale_z)\n self.fnames_FW = self.halfbrain_FW.list_fnames_all\n self.fnames_RV = self.halfbrain_RV.list_fnames_all\n self.zs_FW = self.halfbrain_FW.list_zs_all\n self.zs_RV = self.halfbrain_RV.list_zs_all\n self.zs_global_FW = self.halfbrain_FW.list_zs_global_all\n self.zs_global_RV = self.halfbrain_RV.list_zs_global_all\n\n # boundary position\n fname_boundary_FW = self.params[\"merge_info\"][\"boundary_fname\"][\"FW\"]\n fname_boundary_RV = self.params[\"merge_info\"][\"boundary_fname\"][\"RV\"]\n if len(self.zs_FW) > 0:\n self.iz_FW_boundary = self.zs_FW.index(int(fname_boundary_FW))\n else:\n self.iz_FW_boundary = None\n if len(self.zs_RV) > 0:\n self.iz_RV_boundary = self.zs_RV.index(int(fname_boundary_RV))\n else:\n self.iz_RV_boundary = None\n\n print(\"\\t boundary for FW ({}) at i={}\".format(fname_boundary_FW, self.iz_FW_boundary))\n print(\"\\t boundary for RV ({}) at i={}\".format(fname_boundary_RV, self.iz_RV_boundary))\n\n self.skip_z_FW = 1\n self.skip_z_RV = 1\n self.param_header_FW = \"\"\n self.param_header_RV = \"\"\n self.precompute_param_header(is_FW=True)\n self.precompute_param_header(is_FW=False)\n self.bound_z_global_FW = (-np.inf, +np.inf)\n self.bound_z_global_RV = (-np.inf, +np.inf)\n self.merged_depth = None\n\n self.single_mergedfile_mean = os.path.join(self.params[\"dst_basedir\"], \"whole.tif\")\n self.single_mergedfile_max = os.path.join(self.params[\"dst_basedir\"], \"whole_max.tif\")\n self.single_mergedfile_min = os.path.join(self.params[\"dst_basedir\"], \"whole_min.tif\")\n\n def precompute_param_header(self, is_FW):\n if is_FW:\n 
print(\"[*] Precomputng param header for FW\")\n halfbrain = self.halfbrain_FW\n flip_rot_before_info = self.params[\"merge_info\"][\"flip_rot\"][\"FW\"]\n else:\n print(\"[*] Precomputng param header for RV\")\n halfbrain = self.halfbrain_RV\n flip_rot_before_info = self.params[\"merge_info\"][\"flip_rot\"][\"RV\"]\n\n input_image_info = halfbrain.params[\"input_image_info\"]\n flip_rot_after_info = self.params[\"scale_info\"][\"flip_rot\"]\n\n # downscale ratio\n down_scale_xyz = self.params[\"scale_info\"][\"downscale_unit\"]\n downscale_ratio_xy = float(abs(halfbrain.scale_xy)) / down_scale_xyz # [um / um] = dimensionless\n assert down_scale_xyz % halfbrain.scale_z == 0\n downscale_ratio_z = float(abs(halfbrain.scale_z)) / down_scale_xyz # [um / um] = dimensionless\n skip_z = int(down_scale_xyz / abs(halfbrain.scale_z))\n print(\"\\t downscale ratio for xy : {}\".format(downscale_ratio_xy))\n print(\"\\t downscale ratio for z : {} (skip={})\".format(downscale_ratio_z, skip_z))\n\n flip_rot_before = 0\n flip_rot_before += 1 if flip_rot_before_info[\"flipX\"] else 0\n flip_rot_before += 2 if flip_rot_before_info[\"flipY\"] else 0\n flip_rot_before += 4 if flip_rot_before_info[\"rotCCW\"] else 0\n flip_rot_before += 8 if flip_rot_before_info[\"rotCW\"] else 0\n flip_rot_after = 0\n flip_rot_after += 1 if flip_rot_after_info[\"flipX\"] else 0\n flip_rot_after += 2 if flip_rot_after_info[\"flipY\"] else 0\n if flip_rot_before_info[\"rotCCW\"] or flip_rot_before_info[\"rotCW\"]:\n width_loaded = input_image_info[\"height\"]\n height_loaded = input_image_info[\"width\"]\n else:\n width_loaded = input_image_info[\"width\"]\n height_loaded = input_image_info[\"height\"]\n num_xnames = len(halfbrain.list_xnames)\n num_ynames = len(halfbrain.list_ynames)\n param_dict = {\n \"width\": width_loaded,\n \"height\": height_loaded,\n \"num_xnames\": num_xnames,\n \"num_ynames\": num_ynames,\n \"downscale_ratio_xy\": downscale_ratio_xy,\n \"downscale_ratio_z\": downscale_ratio_z,\n \"overlap_left\": input_image_info[\"left_margin\"],\n \"overlap_right\": input_image_info[\"right_margin\"],\n \"overlap_top\": input_image_info[\"top_margin\"],\n \"overlap_bottom\": input_image_info[\"bottom_margin\"],\n \"flip_rot_before\": flip_rot_before,\n \"flip_rot_after\": flip_rot_after,\n \"imgformat\": 1, # bin\n \"showgrid\": 0, # no grid\n }\n\n # compute ScaleMerged parameters for cell coordinate transformation\n # apply transformation as in ScaleMerge\n strip_width = input_image_info[\"width\"] - input_image_info[\"left_margin\"] - input_image_info[\"right_margin\"]\n strip_height = input_image_info[\"height\"] - input_image_info[\"top_margin\"] - input_image_info[\"bottom_margin\"]\n if flip_rot_before_info[\"rotCCW\"] or flip_rot_before_info[\"rotCW\"]:\n strip_width,strip_height = strip_height,strip_width\n # max int less than or equal strip_width * downscale_ratio_xy\n sampled_width = int(strip_width * downscale_ratio_xy)\n sampled_height = int(strip_height * downscale_ratio_xy)\n actual_downscale_ratio_x = sampled_width / strip_width # [pixel / pixel] = dimensionless\n actual_downscale_ratio_y = sampled_height / strip_height # [pixel / pixel] = dimensionless\n kernel_width = strip_width / sampled_width\n kernel_height = strip_height / sampled_height\n merged_width = sampled_width * num_xnames\n merged_height = sampled_height * num_ynames\n\n margin_left = input_image_info[\"left_margin\"] * actual_downscale_ratio_x\n margin_right = input_image_info[\"right_margin\"] * actual_downscale_ratio_x\n 
margin_top = input_image_info[\"top_margin\"] * actual_downscale_ratio_y\n margin_bottom = input_image_info[\"bottom_margin\"] * actual_downscale_ratio_y\n if flip_rot_before_info[\"flipX\"]:\n margin_left,margin_right = margin_right,margin_left\n if flip_rot_before_info[\"flipY\"]:\n margin_top,margin_bottom = margin_bottom,margin_top\n if flip_rot_before_info[\"rotCCW\"]:\n margin_left,margin_top,margin_right,margin_bottom = margin_top,margin_right,margin_bottom,margin_left\n if flip_rot_before_info[\"rotCW\"]:\n margin_left,margin_top,margin_right,margin_bottom = margin_bottom,margin_left,margin_top,margin_right\n if flip_rot_after_info[\"flipX\"]:\n margin_left,margin_right = margin_right,margin_left\n if flip_rot_after_info[\"flipY\"]:\n margin_top,margin_bottom = margin_bottom,margin_top\n print(\"\\t original: {} x {} x ({} x {})\".format(input_image_info[\"width\"], input_image_info[\"height\"], num_xnames, num_ynames))\n print(\"\\t strip: {} x {} x ({} x {})\".format(strip_width, strip_height, num_xnames, num_ynames))\n print(\"\\t sampled: {} x {} x ({} x {})\".format(sampled_width, sampled_height, num_xnames, num_ynames))\n print(\"\\t merged: {} x {}\".format(merged_width, merged_height))\n print(\"\\t actual downscale ratio : {:.7f} x {:.7f}\".format(actual_downscale_ratio_x, actual_downscale_ratio_y))\n print(\"\\t merged_mergin: L:{:.3f} R:{:.3f} T:{:.3f} B:{:.3f}\".format(margin_left,margin_right,margin_top, margin_bottom))\n param_dict.update({\n \"merged_margin_left\": margin_left,\n \"merged_margin_right\": margin_right,\n \"merged_margin_top\": margin_top,\n \"merged_margin_bottom\": margin_bottom,\n \"strip_width\": strip_width,\n \"strip_height\": strip_height,\n \"sampled_width\": sampled_width,\n \"sampled_height\": sampled_height,\n \"actual_downscale_ratio_x\": actual_downscale_ratio_x,\n \"actual_downscale_ratio_y\": actual_downscale_ratio_y,\n \"kernel_width\": kernel_width,\n \"kernel_height\": kernel_height,\n \"merged_width\": merged_width,\n \"merged_height\": merged_height,\n })\n if is_FW:\n self.skip_z_FW = skip_z\n self.param_scalemerge_FW = param_dict\n else:\n self.skip_z_RV = skip_z\n self.param_scalemerge_RV = param_dict\n return\n\n def scalemerge(self, num_cpus=-1, dry_run=False, path_exec=\"./ScaleMerge\"):\n print(\"[*] Starting scalemerge...\")\n # Let's start merging FW & RV using boundary information\n scale_z_FW = self.halfbrain_FW.scale_z\n scale_z_RV = self.halfbrain_RV.scale_z\n if self.params[\"merge_info\"][\"use_at_boundary\"] == \"FW\":\n use_FW_at_boundary = True\n elif self.params[\"merge_info\"][\"use_at_boundary\"] == \"RV\":\n use_FW_at_boundary = False\n else:\n raise TypeError\n\n print(\"\\t FW length: {}\".format(len(self.fnames_FW)))\n print(\"\\t RV length: {}\".format(len(self.fnames_RV)))\n indices_FW = range(len(self.fnames_FW))\n indices_RV = range(len(self.fnames_RV))\n zflip = self.params[\"scale_info\"][\"flip_rot\"][\"flipZ\"]\n print(\"\\t z flip: {}\".format(\"on\" if zflip else \"off\"))\n\n is_halfsize = False\n if len(self.zs_global_FW) > 0:\n zs_global_FW0 = self.zs_global_FW[0]\n zs_global_FW1 = self.zs_global_FW[-1]\n else:\n zs_global_FW0 = None\n zs_global_FW1 = None\n is_halfsize = True\n if len(self.zs_global_RV) > 0:\n zs_global_RV0 = self.zs_global_RV[0]\n zs_global_RV1 = self.zs_global_RV[-1]\n else:\n zs_global_RV0 = None\n zs_global_RV1 = None\n is_halfsize = True\n\n if scale_z_FW * scale_z_RV > 0:\n print(\"[Case 1-4]\")\n print(\"\\t scale_z_FW\", scale_z_FW)\n print(\"\\t zs_global_FW[0]:\", 
zs_global_FW0)\n print(\"\\t zs_global_FW[-1]:\", zs_global_FW1)\n print(\"\\t zs_global_RV[0]:\", zs_global_RV0)\n print(\"\\t zs_global_RV[-1]:\", zs_global_RV1)\n # suppose FW & RV is growing in the same direction,\n # there is 4 scenarios for merging.\n if scale_z_FW > 0 and (is_halfsize or self.zs_global_FW[0] < self.zs_global_RV[0]):\n print(\"->[Case 1]\")\n # [case 1]\n # merged: |-FW-->|--RV-->\n # FW: |-FW---->\n # RV: |---RV-->\n # if halfsize, case2 and case1 comes to the same\n indices_FW_strip = indices_FW[:self.iz_FW_boundary+1][::-1][::self.skip_z_FW][::-1]\n indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV]\n\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[1:]\n else:\n indices_FW_strip = indices_FW_strip[:-1]\n is_FWs = [True for _ in indices_FW_strip] + [False for _ in indices_RV_strip]\n merging_fnames = [self.fnames_FW[i] for i in indices_FW_strip] + [self.fnames_RV[i] for i in indices_RV_strip]\n\n elif scale_z_FW > 0 and self.zs_global_RV[0] < self.zs_global_FW[0]:\n print(\"->[Case 2]\")\n # [case 2]\n # mergped: |-RV-->|--FW-->\n # FW: |---FW-->\n # RV: |-RV---->\n indices_RV_strip = indices_RV[:self.iz_RV_boundary+1][::-1][::self.skip_z_RV][::-1]\n indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW]\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[:-1]\n else:\n indices_FW_strip = indices_FW_strip[1:]\n is_FWs = [False for _ in indices_RV_strip] + [True for _ in indices_FW_strip]\n merging_fnames = [self.fnames_RV[i] for i in indices_RV_strip] + [self.fnames_FW[i] for i in indices_FW_strip]\n\n elif scale_z_FW < 0 and (is_halfsize or self.zs_global_FW[0] < self.zs_global_RV[0]):\n print(\"->[Case 3]\")\n # [case 3] (reverse case 1)\n # merged: |-FW-->|--RV-->\n # FW: <-FW----|\n # RV: <---RV--|\n # if halfsize, case3 and case4 comes to the same\n indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW][::-1]\n indices_RV_strip = indices_RV[:self.iz_RV_boundary+1][::-1][::self.skip_z_RV]\n\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[1:]\n else:\n indices_FW_strip = indices_FW_strip[:-1]\n is_FWs = [True for _ in indices_FW_strip] + [False for _ in indices_RV_strip]\n merging_fnames = [self.fnames_FW[i] for i in indices_FW_strip] + [self.fnames_RV[i] for i in indices_RV_strip]\n\n elif scale_z_FW < 0 and self.zs_global_RV[0] < self.zs_global_FW[0]:\n print(\"->[Case 4]\")\n # [case 4] : reverse case2\n # mergped: |-RV-->|--FW-->\n # FW: <---FW--|\n # RV: <-RV----|\n indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV][::-1]\n indices_FW_strip = indices_FW[:self.iz_FW_boundary+1][::-1][::self.skip_z_FW]\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[:-1]\n else:\n indices_FW_strip = indices_FW_strip[1:]\n is_FWs = [False for _ in indices_RV_strip] + [True for _ in indices_FW_strip]\n merging_fnames = [self.fnames_RV[i] for i in indices_RV_strip] + [self.fnames_FW[i] for i in indices_FW_strip]\n\n else:\n raise TypeError\n elif scale_z_FW * scale_z_RV < 0:\n # suppose FW & RV is growing in the opposite direction,\n # there is 4 scenarios\n print(\"[Case 5-8]\")\n print(\"\\t scale_z_FW\", scale_z_FW)\n print(\"\\t zs_global_FW[0]:\", zs_global_FW0)\n print(\"\\t zs_global_FW[-1]:\", zs_global_FW1)\n print(\"\\t zs_global_RV[0]:\", zs_global_RV0)\n print(\"\\t zs_global_RV[-1]:\", zs_global_RV1)\n if scale_z_FW < 0 and (is_halfsize or self.zs_global_FW[-1] < self.zs_global_RV[0]):\n print(\"->[Case 5]\")\n # [case 5]\n # merged: 
|-FW-->|--RV-->\n # FW: <-FW----|\n # RV: |---RV-->\n indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW][::-1]\n indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV]\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[1:]\n else:\n indices_FW_strip = indices_FW_strip[:-1]\n is_FWs = [True for _ in indices_FW_strip] + [False for _ in indices_RV_strip]\n merging_fnames = [self.fnames_FW[i] for i in indices_FW_strip] + [self.fnames_RV[i] for i in indices_RV_strip]\n\n elif scale_z_FW > 0 and (is_halfsize or self.zs_global_FW[-1] > self.zs_global_RV[0]):\n print(\"->[Case 6]\")\n # [case 6]\n # merged: |-RV-->|--FW-->\n # FW: |---FW-->\n # RV: <-RV----|\n indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV][::-1]\n indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW]\n if use_FW_at_boundary:\n indices_RV_strip = indices_RV_strip[:-1]\n else:\n indices_FW_strip = indices_FW_strip[1:]\n is_FWs = [False for _ in indices_RV_strip] + [True for _ in indices_FW_strip]\n merging_fnames = [self.fnames_RV[i] for i in indices_RV_strip] + [self.fnames_FW[i] for i in indices_FW_strip]\n\n elif scale_z_FW > 0 and self.zs_global_FW[-1] < self.zs_global_RV[0]:\n print(\"->[Case 7]\")\n # [case 7] : reverse case5\n raise NotImplementedError\n elif scale_z_FW < 0 and self.zs_global_FW[-1] > self.zs_global_RV[0]:\n print(\"->[Case 8]\")\n # [case 8] : reverse case6\n raise NotImplementedError\n else:\n raise TypeError\n else:\n raise TypeError\n\n # save boundary point for picking valid cell candidates\n if is_halfsize:\n self.bound_z_global_FW = (-np.inf, +np.inf)\n self.bound_z_global_RV = (-np.inf, +np.inf)\n elif is_FWs[0]:\n self.bound_z_global_FW = (-np.inf, self.zs_global_FW[self.iz_FW_boundary])\n self.bound_z_global_RV = (self.zs_global_RV[self.iz_RV_boundary], +np.inf)\n else:\n self.bound_z_global_RV = (-np.inf, self.zs_global_RV[self.iz_RV_boundary])\n self.bound_z_global_FW = (self.zs_global_FW[self.iz_FW_boundary], +np.inf)\n\n self.merged_depth = len(merging_fnames)\n print(\"\\tmerged depth: {}\".format(self.merged_depth))\n if is_FWs[0]:\n self.new_origin_z_global = self.zs_global_FW[indices_FW_strip[0]]\n else:\n self.new_origin_z_global = self.zs_global_RV[indices_RV_strip[0]]\n print(\"\\tnew z_global origin : {}\".format(self.new_origin_z_global))\n\n if zflip:\n is_FWs = is_FWs[::-1]\n merging_fnames = merging_fnames[::-1]\n\n # write paramfiles for each process of ScaleMerge\n total_z_merged = len(merging_fnames)\n mergedfile_mean_basedir = os.path.join(self.params[\"dst_basedir\"], \"zs_mean\")\n mergedfile_max_basedir = os.path.join(self.params[\"dst_basedir\"], \"zs_max\")\n mergedfile_min_basedir = os.path.join(self.params[\"dst_basedir\"], \"zs_min\")\n if not os.path.exists(mergedfile_mean_basedir):\n os.makedirs(mergedfile_mean_basedir)\n if not os.path.exists(mergedfile_max_basedir):\n os.makedirs(mergedfile_max_basedir)\n if not os.path.exists(mergedfile_min_basedir):\n os.makedirs(mergedfile_min_basedir)\n mergedfile_mean_basename = os.path.join(mergedfile_mean_basedir, \"{i:04d}.tif\")\n mergedfile_max_basename = os.path.join(mergedfile_max_basedir, \"{i:04d}.tif\")\n mergedfile_min_basename = os.path.join(mergedfile_min_basedir, \"{i:04d}.tif\")\n mergedfiles = [(\n mergedfile_mean_basename.format(i=i),\n mergedfile_max_basename.format(i=i),\n mergedfile_min_basename.format(i=i),\n )for i in range(total_z_merged)]\n\n paramfiles = [self.write_paramfile(i,is_FW,merging_fname)\n for 
i,(is_FW,merging_fname) in enumerate(zip(is_FWs, merging_fnames))]\n if not dry_run:\n joblib.Parallel(n_jobs=num_cpus, verbose=10)([\n joblib.delayed(run_ScaleMerge)(paramfile,mergedfile, path_exec, print_output=False)\n for paramfile, mergedfile in zip(paramfiles,mergedfiles)\n ])\n print(\"[*] Concatenating tiff images to single tiff({})\".format(self.single_mergedfile_mean))\n img_mergedsingle_mean = np.empty((len(mergedfiles), self.param_scalemerge_FW[\"merged_height\"], self.param_scalemerge_FW[\"merged_width\"]), dtype=np.uint16)\n img_mergedsingle_max = np.empty_like(img_mergedsingle_mean)\n img_mergedsingle_min = np.empty_like(img_mergedsingle_mean)\n\n for i,(mergedfile_mean,mergedfile_max,mergedfile_min) in enumerate(mergedfiles):\n img_mergedsingle_mean[i,:,:] = tifffile.imread(mergedfile_mean)\n img_mergedsingle_max[i,:,:] = tifffile.imread(mergedfile_max)\n img_mergedsingle_min[i,:,:] = tifffile.imread(mergedfile_min)\n tifffile.imsave(self.single_mergedfile_mean, img_mergedsingle_mean)\n tifffile.imsave(self.single_mergedfile_max, img_mergedsingle_max)\n tifffile.imsave(self.single_mergedfile_min, img_mergedsingle_min)\n\n print(\"[*] Deleting temporary tiff images\")\n shutil.rmtree(mergedfile_mean_basedir)\n shutil.rmtree(mergedfile_max_basedir)\n shutil.rmtree(mergedfile_min_basedir)\n else:\n print(\"[*] Skipping ScaleMerge for images\")\n\n for paramfile in paramfiles:\n os.remove(paramfile)\n return\n\n def write_paramfile(self, i, is_FW, merging_fname):\n paramfile = \"/tmp/param_merge_{randomID}_{i:04d}.txt\".format(randomID = np.random.randint(2**31), i=i)\n if is_FW:\n param_dict = self.param_scalemerge_FW\n halfbrain = self.halfbrain_FW\n else:\n param_dict = self.param_scalemerge_RV\n halfbrain = self.halfbrain_RV\n param_text = \"{width}:{height}:{num_xnames}:{num_ynames}:{downscale_ratio_xy}:{overlap_left}:{overlap_right}:{overlap_top}:{overlap_bottom}:{flip_rot_before}:{flip_rot_after}:{imgformat}:{showgrid}\\n\".format(**param_dict)\n\n for yname in halfbrain.list_ynames:\n for xname in halfbrain.list_xnames:\n imagestack = halfbrain.get_imagestack_by_xyname(xname,yname)\n img = imagestack.get_imagefile_by_fname(merging_fname)\n fullpath = img.fullpath if not img.is_dummy else \"\"\n param_text += fullpath + \"\\n\"\n\n with open(paramfile, \"w\") as f:\n f.write(param_text)\n\n return paramfile\n\nclass WholeBrainCells(object):\n def __init__(self, paramfile, wholebrain_images=None, clf=None):\n if wholebrain_images:\n self.wholebrain_images = wholebrain_images\n else:\n self.wholebrain_images = WholeBrainImages(paramfile)\n\n self.halfbrain_cells_FW = HalfBrainCells(\n self.wholebrain_images.params[\"HDoG_paramfile\"][\"FW\"],\n is_FW = True,\n halfbrain_images=self.wholebrain_images.halfbrain_FW,\n clf=clf\n )\n self.halfbrain_cells_RV = HalfBrainCells(\n self.wholebrain_images.params[\"HDoG_paramfile\"][\"RV\"],\n is_FW = False,\n halfbrain_images=self.wholebrain_images.halfbrain_RV,\n clf=clf\n )\n # average mode or not (default: false)\n is_ave_FW = self.halfbrain_cells_FW.halfbrain_images.params[\"HDoG_param\"].get(\"is_ave_mode\", False)\n is_ave_RV = self.halfbrain_cells_RV.halfbrain_images.params[\"HDoG_param\"].get(\"is_ave_mode\", False)\n assert is_ave_FW == is_ave_RV\n self.is_ave = is_ave_FW\n\n def scalemerge(self):\n # should be called after scalemerge()\n print(\"[*] Starting scalemerge for HDoG result...\")\n cellstacks_FW = self.halfbrain_cells_FW.dict_stacks\n cellstacks_RV = self.halfbrain_cells_RV.dict_stacks\n param_scalemerge_FW = 
self.wholebrain_images.param_scalemerge_FW\n param_scalemerge_RV = self.wholebrain_images.param_scalemerge_RV\n # scale and merge\n org_scale_xy_FW = float(abs(self.wholebrain_images.halfbrain_FW.scale_xy))\n org_scale_z_FW = float(abs(self.wholebrain_images.halfbrain_FW.scale_z))\n org_scale_xy_RV = float(abs(self.wholebrain_images.halfbrain_RV.scale_xy))\n org_scale_z_RV = float(abs(self.wholebrain_images.halfbrain_RV.scale_z))\n offset_x_FW = self.wholebrain_images.halfbrain_FW.list_offset_xs[0]\n offset_y_FW = self.wholebrain_images.halfbrain_FW.list_offset_ys[0]\n offset_x_RV = self.wholebrain_images.halfbrain_RV.list_offset_xs[0]\n offset_y_RV = self.wholebrain_images.halfbrain_RV.list_offset_ys[0]\n print(\"\\t offset_FW: {},{},{}\".format(offset_x_FW,offset_y_FW,self.wholebrain_images.new_origin_z_global))\n print(\"\\t offset_RV: {},{},{}\".format(offset_x_RV,offset_y_RV,self.wholebrain_images.new_origin_z_global))\n # flip rot after\n flip_rot_after_info = self.wholebrain_images.params[\"scale_info\"][\"flip_rot\"]\n A_FW = np.zeros((3,3))\n A_FW[:2,:2] = np.array(self.wholebrain_images.halfbrain_FW.params[\"coordinate_info\"][\"affine_global\"])[:2,:2]\n A_FW[2,2] = 1.\n A_FW[np.nonzero(A_FW)] = 1.\n b_FW = np.zeros(3)\n A_RV = np.zeros((3,3))\n A_RV[:2,:2] = np.array(self.wholebrain_images.halfbrain_RV.params[\"coordinate_info\"][\"affine_global\"])[:2,:2]\n A_RV[2,2] = 1.\n A_RV[np.nonzero(A_RV)] = 1.\n b_RV = np.zeros(3)\n if flip_rot_after_info[\"flipX\"]:\n b_FW[0] += param_scalemerge_FW[\"merged_width\"]\n A_FW[0,:] *= -1\n b_RV[0] += param_scalemerge_RV[\"merged_width\"]\n A_RV[0,:] *= -1\n if flip_rot_after_info[\"flipY\"]:\n b_FW[1] += param_scalemerge_FW[\"merged_height\"]\n A_FW[1,:] *= -1\n b_RV[1] += param_scalemerge_RV[\"merged_height\"]\n A_RV[1,:] *= -1\n if flip_rot_after_info[\"flipZ\"]:\n b_FW[2] += self.wholebrain_images.merged_depth\n A_FW[2,:] *= -1\n b_RV[2] += self.wholebrain_images.merged_depth\n A_RV[2,:] *= -1\n\n def process_stack(dst_path, cellstack, bound_z, margin_left, margin_top,\n offset_x, offset_y, offset_z, coeff_x, coeff_y, coeff_z, A, b):\n print(\"[*] Dumping merged data to {}\".format(dst_path))\n if bound_z[0] > bound_z[1]:\n smallest_z,largest_z = bound_z[1],bound_z[0]\n else:\n smallest_z,largest_z = bound_z\n\n data_scaled = np.zeros(cellstack.data_global.shape[0], dtype=dt_scalemerged)\n data_scaled[\"is_valid\"] = np.bitwise_and(\n smallest_z <= cellstack.data_global[\"merged_z\"],\n cellstack.data_global[\"merged_z\"] <= largest_z)\n #print(\"\\tz_range 1: {} - {}\".format(data_valid[\"centroid_z\"].min(), data_valid[\"centroid_z\"].max()))\n centroid_scaled = np.zeros((cellstack.data_global.shape[0],3), dtype=np.float32)\n centroid_scaled[:,0] = (cellstack.data_global[\"merged_x\"] - offset_x) * coeff_x - margin_left\n centroid_scaled[:,1] = (cellstack.data_global[\"merged_y\"] - offset_y) * coeff_y - margin_top\n centroid_scaled[:,2] = (cellstack.data_global[\"merged_z\"] - offset_z) * coeff_z\n #print(\"\\tz_range 2: {} - {}\".format(centroid_scaled[:,2].min(), centroid_scaled[:,2].max()))\n centroid_fliprot = A.dot(centroid_scaled.T).T + b\n data_scaled[\"scaled_x\"] = centroid_fliprot[:,0]\n data_scaled[\"scaled_y\"] = centroid_fliprot[:,1]\n data_scaled[\"scaled_z\"] = centroid_fliprot[:,2]\n #print(\"\\tz_range 3: {} - {}\".format(data_valid[\"centroid_z\"].min(), data_valid[\"centroid_z\"].max()))\n joblib.dump(data_scaled, dst_path, compress=3)\n return np.count_nonzero(data_scaled[\"is_valid\"])\n\n dst_basedir = 
self.wholebrain_images.params[\"dst_basedir\"]\n dst_basedir_FW = os.path.join(dst_basedir,\"FW\")\n dst_basedir_RV = os.path.join(dst_basedir,\"RV\")\n if not os.path.exists(dst_basedir_FW):\n os.makedirs(dst_basedir_FW)\n if not os.path.exists(dst_basedir_RV):\n os.makedirs(dst_basedir_RV)\n # Note: parallelizable loop\n dict_num_cells = {}\n for xyname,cellstack in cellstacks_FW.items():\n if cellstack.is_dummy: continue\n dst_path = os.path.join(dst_basedir_FW, \"{}_{}.pkl\".format(xyname[1],xyname[0]))\n num_cells = process_stack(dst_path, cellstack,\n self.wholebrain_images.bound_z_global_FW,\n param_scalemerge_FW[\"merged_margin_left\"],\n param_scalemerge_FW[\"merged_margin_top\"],\n offset_x_FW, offset_y_FW, self.wholebrain_images.new_origin_z_global,\n param_scalemerge_FW[\"actual_downscale_ratio_x\"] / org_scale_xy_FW,\n param_scalemerge_FW[\"actual_downscale_ratio_y\"] / org_scale_xy_FW,\n param_scalemerge_FW[\"downscale_ratio_z\"] / org_scale_z_FW,\n A_FW, b_FW)\n dict_num_cells[dst_path] = num_cells\n\n for xyname,cellstack in cellstacks_RV.items():\n if cellstack.is_dummy: continue\n dst_path = os.path.join(dst_basedir_RV, \"{}_{}.pkl\".format(xyname[1],xyname[0]))\n num_cells = process_stack(dst_path, cellstack,\n self.wholebrain_images.bound_z_global_RV,\n param_scalemerge_RV[\"merged_margin_left\"],\n param_scalemerge_RV[\"merged_margin_top\"],\n offset_x_RV, offset_y_RV, self.wholebrain_images.new_origin_z_global,\n param_scalemerge_RV[\"actual_downscale_ratio_x\"] / org_scale_xy_RV,\n param_scalemerge_RV[\"actual_downscale_ratio_y\"] / org_scale_xy_RV,\n param_scalemerge_RV[\"downscale_ratio_z\"] / org_scale_z_RV,\n A_RV, b_RV)\n dict_num_cells[dst_path] = num_cells\n\n # saving information\n joblib.dump(dict_num_cells, os.path.join(dst_basedir, \"info.pkl\"), compress=3)\n return\n\ndef main():\n args = docopt(__doc__)\n\n wb_images = WholeBrainImages(args[\"PARAM_FILE\"])\n\n if args[\"images\"]:\n wb_images.scalemerge(num_cpus=int(args[\"-p\"]), dry_run=False, path_exec=args[\"--exec\"])\n elif args[\"cells\"]:\n wb_images.scalemerge(num_cpus=int(args[\"-p\"]), dry_run=True, path_exec=args[\"--exec\"])\n wb_cells = WholeBrainCells(args[\"PARAM_FILE\"], wholebrain_images=wb_images)\n wb_cells.scalemerge()\n elif args[\"full\"]:\n wb_images.scalemerge(num_cpus=int(args[\"-p\"]), dry_run=False, path_exec=args[\"--exec\"])\n wb_cells = WholeBrainCells(args[\"PARAM_FILE\"], wholebrain_images=wb_images)\n wb_cells.scalemerge()\n\n\nif __name__ == \"__main__\":\n main()\n",
"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport os, os.path\nimport tifffile\n\nfrom HalfBrainImages import HalfBrainImages\n\ndt_local = np.dtype([\n ('local_x', 'f4'), ('local_y', 'f4'), ('local_z', 'f4'),\n ('structureness', 'f4'), ('blobness', 'f4'),('intensity', 'f4'),\n ('size', 'u2'),('padding', 'u2'),\n])\ndt_global = np.dtype([\n ('merged_x', 'f4'), ('merged_y', 'f4'), ('merged_z', 'f4'),\n ('i_x', 'u1'), ('i_y', 'u1'), # should be < 128\n ('FWRV', 'u1'), # 0 for FW(thetaoff), 1 for RV(thetaon)\n])\n\nclass HDoGResultStack(object):\n def __init__(self, result_file, imagestack, is_FW, i_xy, scales, image_size, clf=None):\n width,height = image_size\n self.imagestack = imagestack\n self.clf = clf\n\n if not os.path.exists(result_file):\n self.is_dummy = True\n self.data_local = np.zeros(0, dtype=dt_local)\n self.data_global = np.zeros(0, dtype=dt_global)\n else:\n self.is_dummy = False\n\n scale_x,scale_y,scale_z = scales\n # load data\n data_local = np.fromfile(result_file, dtype=dt_local)\n if clf is not None:\n # use cells predicted as positive by the classifier\n from HDoG_classifier import get_X_3d\n X = get_X_3d(data_local)\n pred = clf.predict(X)\n data_local = data_local[pred]\n data_global = np.zeros(data_local.shape[0], dtype=dt_global)\n data_global[\"FWRV\"] = 0 if is_FW else 1\n data_global[\"i_x\"] = i_xy[0]\n data_global[\"i_y\"] = i_xy[1]\n data_global[\"merged_x\"] = imagestack.offset_x + data_local[\"local_x\"] * scale_x\n data_global[\"merged_y\"] = imagestack.offset_y + data_local[\"local_y\"] * scale_y\n data_global[\"merged_z\"] = imagestack.offset_z + data_local[\"local_z\"] * scale_z\n\n # flip\n if scale_x < 0:\n data_global[\"merged_x\"] -= width * scale_x\n if scale_y < 0:\n data_global[\"merged_y\"] -= height * scale_y\n\n self.data_local = data_local\n self.data_global = data_global\n\n def get_stack_src_img(self, zlim=None, verbose=True):\n # zlim = (start_z, end_z)\n if verbose: print(self.imagestack.path)\n list_imagefiles = self.imagestack.list_imagefiles_no_dummy\n if verbose: print(\"\\tnumber of images:{}\".format(len(list_imagefiles)))\n if zlim:\n if verbose: print(\"\\tspecified images:{}\".format(len(list_imagefiles[zlim[0]:zlim[1]])))\n list_imagefiles = list_imagefiles[zlim[0]:zlim[1]]\n\n imgs = []\n for imgfile in list_imagefiles:\n imgs.append(imgfile.load_image())\n imgs = np.array(imgs)\n return imgs\n\n def save_stack_src_img(self, dst_path, zlim=None, verbose=True):\n src_img = self.get_stack_src_img(zlim,verbose)\n tifffile.imsave(dst_path, src_img)\n return\n\n def get_substack_indicator(self, zlim):\n # zlim = (start_z, end_z)\n in_substack = np.bitwise_and(self.data_local[\"local_z\"] >= zlim[0], self.data_local[\"local_z\"] < zlim[1])\n return in_substack\n\nclass HalfBrainCells(object):\n def __init__(self, paramfile, is_FW=True, halfbrain_images=None, clf=None):\n print(\"\\n[*] Initializing CellHalfBrain({})\".format(paramfile))\n if halfbrain_images is not None:\n assert isinstance(halfbrain_images, HalfBrainImages)\n self.halfbrain_images = halfbrain_images\n else:\n self.halfbrain_images = HalfBrainImages(paramfile)\n\n self.dict_stacks = {}\n self.clf = clf\n # load result\n result_dir = os.path.join(self.halfbrain_images.params[\"dst_basedir\"])\n if self.clf is None:\n print(\"<candidate mode>\")\n else:\n print(\"<predicted mode>\")\n\n is_exists_result = np.zeros((len(self.halfbrain_images.list_ynames), len(self.halfbrain_images.list_xnames)), dtype=np.bool)\n width = 
self.halfbrain_images.params[\"input_image_info\"][\"width\"]\n height = self.halfbrain_images.params[\"input_image_info\"][\"height\"]\n total_regions = 0\n list_centroid_xs = []\n list_centroid_ys = []\n list_centroid_zs = []\n for iy,yname in enumerate(self.halfbrain_images.list_ynames):\n for ix,xname in enumerate(self.halfbrain_images.list_xnames):\n result_file = os.path.join(result_dir, \"{}_{}.bin\".format(yname,xname))\n imagestack = self.halfbrain_images.get_imagestack_by_xyname(xname,yname)\n cellstack = HDoGResultStack(\n result_file, imagestack, is_FW, (ix,iy),\n (\n self.halfbrain_images.scale_x,\n self.halfbrain_images.scale_y,\n self.halfbrain_images.scale_z,\n ), (width, height), self.clf)\n if not cellstack.is_dummy:\n is_exists_result[iy,ix] = True\n total_regions += len(cellstack.data_global)\n\n self.dict_stacks[(xname,yname)] = cellstack\n list_centroid_xs.append(cellstack.data_global[\"merged_x\"])\n list_centroid_ys.append(cellstack.data_global[\"merged_y\"])\n list_centroid_zs.append(cellstack.data_global[\"merged_z\"])\n self.centroid_xs = np.concatenate(list_centroid_xs)\n self.centroid_ys = np.concatenate(list_centroid_ys)\n self.centroid_zs = np.concatenate(list_centroid_zs)\n\n print(\"\\tResult exists for {} stacks per {} x {}\".format(\n np.count_nonzero(is_exists_result),\n len(self.halfbrain_images.list_xnames),\n len(self.halfbrain_images.list_ynames)\n ))\n print(\"\\tTotal Regions: {}\".format(total_regions))\n if total_regions > 0:\n print(\"\\tcentroid_x range: {} - {}\".format(np.min(self.centroid_xs),np.max(self.centroid_xs)))\n print(\"\\tcentroid_y range: {} - {}\".format(np.min(self.centroid_ys),np.max(self.centroid_ys)))\n print(\"\\tcentroid_z range: {} - {}\".format(np.min(self.centroid_zs),np.max(self.centroid_zs)))\n\n def get_stack_by_xyname(self, xname, yname):\n stack = self.dict_stacks.get((xname,yname), None)\n if not stack:\n raise ValueError\n else:\n return stack\n\n def get_stack(self, pos_xy=None, i_xy=None, verbose=True):\n # specify either `pos_xy` or `i_xy`\n assert pos_xy or i_xy\n\n stack_xs = self.halfbrain_images.list_offset_xs\n stack_ys = self.halfbrain_images.list_offset_ys\n if not i_xy and pos_xy:\n i_xy = (np.where(np.array(stack_xs) <= pos_xy[0])[0][-1],\n np.where(np.array(stack_ys) <= pos_xy[1])[0][-1])\n xname = self.halfbrain_images.list_xnames[i_xy[0]]\n yname = self.halfbrain_images.list_ynames[i_xy[1]]\n if verbose:\n print(\"\\txname: {}\\tyname:{}\".format(xname,yname))\n\n return self.dict_stacks[(xname,yname)]\n"
] | [
[
"numpy.nonzero",
"numpy.empty_like",
"numpy.dtype",
"numpy.bitwise_and",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.fromfile",
"numpy.min",
"numpy.dtype",
"numpy.concatenate",
"numpy.bitwise_and",
"numpy.max",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
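The flip/rot bookkeeping in WholeBrainImages.precompute_param_header in the entry above is the easiest part to misread, so here is a minimal stand-alone sketch of just that logic. It only restates what is visible in the entry's code; the function names and the example values are hypothetical and not part of the dataset.

    # Sketch (assumed helper names / values) of the ScaleMerge flip/rot bookkeeping above.
    def encode_flip_rot(flipX, flipY, rotCCW, rotCW):
        """Pack the four booleans into the bitmask passed to ScaleMerge (1/2/4/8)."""
        return (1 if flipX else 0) | (2 if flipY else 0) | (4 if rotCCW else 0) | (8 if rotCW else 0)

    def transform_margins(margins, flipX=False, flipY=False, rotCCW=False, rotCW=False):
        """Permute (left, right, top, bottom) margins exactly as the original code does."""
        left, right, top, bottom = margins
        if flipX:
            left, right = right, left
        if flipY:
            top, bottom = bottom, top
        if rotCCW:
            left, top, right, bottom = top, right, bottom, left
        if rotCW:
            left, top, right, bottom = bottom, left, top, right
        return left, right, top, bottom

    print(encode_flip_rot(flipX=True, flipY=False, rotCCW=True, rotCW=False))  # -> 5
    print(transform_margins((10, 20, 30, 40), rotCCW=True))                    # -> (30, 40, 20, 10)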
neulab/xnmt | [
"d93f8f3710f986f36eb54e9ff3976a6b683da2a4",
"d93f8f3710f986f36eb54e9ff3976a6b683da2a4",
"d93f8f3710f986f36eb54e9ff3976a6b683da2a4"
] | [
"xnmt/models/retrievers.py",
"xnmt/tensor_tools.py",
"xnmt/transducers/positional.py"
] | [
"import numpy as np\n\nfrom xnmt.settings import settings\nimport xnmt\nfrom xnmt import batchers, expression_seqs\nfrom xnmt.models import base as models\nfrom xnmt.persistence import serializable_init, Serializable\n\nif xnmt.backend_dynet:\n import dynet as dy\n\n##### A class for retrieval databases\n# This file contains databases used for retrieval.\n# At the moment it includes only a standard database that keeps all of the things\n# to be retrieved in a list.\n\nclass StandardRetrievalDatabase(Serializable):\n \"\"\"This is a database to be used for retrieval. Its database member\"\"\"\n\n yaml_tag = \"!StandardRetrievalDatabase\"\n\n @serializable_init\n def __init__(self, reader, database_file, dev_id_file=None, test_id_file=None):\n self.reader = reader\n self.database_file = database_file\n self.data = list(reader.read_sents(database_file))\n self.indexed = []\n self.dev_id_file = dev_id_file\n self.test_id_file = test_id_file\n\n def __getitem__(self, indices):\n trg_examples, trg_masks = batchers.pad([self.data[index] for index in indices])\n return batchers.mark_as_batch(trg_examples), trg_masks\n\n##### The actual retriever class\nclass Retriever(models.ConditionedModel, models.GeneratorModel):\n \"\"\"\n A template class implementing a retrieval model.\n \"\"\"\n\n def calc_loss(self, src, db_idx):\n \"\"\"Calculate loss based on a database index.\n\n Args:\n src: The source input.\n db_idx: The correct index in the database to be retrieved.\n Returns:\n An expression representing the loss.\n \"\"\"\n raise NotImplementedError('calc_loss must be implemented for Retriever subclasses')\n\n def index_database(self, indices=None):\n \"\"\"A function that can be called before actually performing retrieval.\n\n This will perform any necessary pre-processing to make retrieval more efficient.\n If the model is updated, assume that the indexing result is stale and no longer applicable.\n \"\"\"\n pass\n\n def generate(self, src):\n \"\"\"Perform retrieval, trying to get the sentence that most closely matches in the database.\n\n Args:\n src: The source.\n i: Id of the input\n Returns:\n The ID of the example that most closely matches in the database.\n \"\"\"\n raise NotImplementedError('generate must be implemented for Retriever subclasses')\n\n def initialize_generator(self, **kwargs):\n candidates = None\n if kwargs[\"candidate_id_file\"] is not None:\n with open(kwargs[\"candidate_id_file\"], \"r\") as f:\n candidates = sorted({int(x):1 for x in f}.keys())\n self.index_database(candidates)\n\[email protected]_dynet\nclass DotProductRetriever(Retriever, Serializable):\n \"\"\"\n A retriever trains using max-margin methods.\n \"\"\"\n\n yaml_tag = '!DotProductRetriever'\n\n @serializable_init\n def __init__(self, src_embedder, src_encoder, trg_embedder, trg_encoder, database, loss_direction=\"forward\"):\n \"\"\"Constructor.\n\n Args:\n src_embedder: A word embedder for the source language\n src_encoder: An encoder for the source language\n trg_embedder: A word embedder for the target language\n trg_encoder: An encoder for the target language\n database: A database of things to retrieve\n \"\"\"\n self.src_embedder = src_embedder\n self.src_encoder = src_encoder\n self.trg_embedder = trg_embedder\n self.trg_encoder = trg_encoder\n self.database = database\n self.loss_direction = loss_direction\n\n def exprseq_pooling(self, exprseq):\n # Reduce to vector\n exprseq = expression_seqs.ExpressionSequence(expr_tensor=exprseq.mask.add_to_tensor_expr(exprseq.as_tensor(),-1e10), 
mask=exprseq.mask)\n if exprseq.expr_tensor is not None:\n if len(exprseq.expr_tensor.dim()[0]) > 1:\n return dy.max_dim(exprseq.expr_tensor, d=1)\n else:\n return exprseq.expr_tensor\n else:\n return dy.emax(exprseq.expr_list)\n\n def calc_loss(self, src, db_idx, src_mask=None, trg_mask=None):\n src_embeddings = self.src_embedder.embed_sent(src, mask=src_mask)\n self.src_encoder.set_input(src)\n src_encodings = self.exprseq_pooling(self.src_encoder.transduce(src_embeddings))\n trg_batch, trg_mask = self.database[db_idx]\n # print(\"trg_mask=\\n\",trg_mask)\n trg_encodings = self.encode_trg_example(trg_batch, mask=trg_mask)\n dim = trg_encodings.dim()\n trg_reshaped = dy.reshape(trg_encodings, (dim[0][0], dim[1]))\n # ### DEBUG\n # trg_npv = trg_reshaped.npvalue()\n # for i in range(dim[1]):\n # print(\"--- trg_reshaped {}: {}\".format(i,list(trg_npv[:,i])))\n # ### DEBUG\n prod = dy.transpose(src_encodings) * trg_reshaped\n # ### DEBUG\n # prod_npv = prod.npvalue()\n # for i in range(dim[1]):\n # print(\"--- prod {}: {}\".format(i,list(prod_npv[0].transpose()[i])))\n # ### DEBUG\n id_range = list(range(len(db_idx)))\n # This is ugly:\n if self.loss_direction == \"forward\":\n prod = dy.transpose(prod)\n loss = dy.sum_batches(dy.hinge_batch(prod, id_range))\n elif self.loss_direction == \"bidirectional\":\n prod = dy.reshape(prod, (len(db_idx), len(db_idx)))\n loss = dy.sum_elems(\n dy.hinge_dim(prod, id_range, d=0) + dy.hinge_dim(prod, id_range, d=1))\n else:\n raise RuntimeError(\"Illegal loss direction {}\".format(self.loss_direction))\n\n return loss\n\n def index_database(self, indices=None):\n # Create the inverted index if necessary\n if indices is None:\n indices = range(len(self.database.data))\n self.database.inverted_index = None\n else:\n self.database.inverted_index = indices\n # Actually index everything\n self.database.indexed = []\n for index in indices:\n item = self.database.data[int(index)]\n dy.renew_cg(immediate_compute=settings.IMMEDIATE_COMPUTE, check_validity=settings.CHECK_VALIDITY)\n self.database.indexed.append(self.encode_trg_example(item).npvalue())\n # ### DEBUG\n # for i, x in enumerate(self.database.indexed):\n # print(\"--- database {}: {}\".format(i,list(x)))\n # ### DEBUG\n self.database.indexed = np.stack(self.database.indexed, axis=1)\n\n def encode_trg_example(self, example, mask=None):\n embeddings = self.trg_embedder.embed_sent(example, mask=mask)\n self.trg_encoder.set_input(example)\n encodings = self.exprseq_pooling(self.trg_encoder.transduce(embeddings))\n return encodings\n\n def generate(self, src, return_type=\"idxscore\", nbest=10):\n src_embedding = self.src_embedder.embed_sent(src)\n self.src_encoder.set_input(src)\n src_encoding = dy.transpose(self.exprseq_pooling(self.src_encoder.transduce(src_embedding))).npvalue()\n scores = np.dot(src_encoding, self.database.indexed)\n # print(\"--- scores: {}\".format(list(scores[0])))\n kbest = np.argsort(scores, axis=1)[0,-nbest:][::-1]\n # print(\"--- kbest: {}\".format(kbest))\n ids = kbest if self.database.inverted_index is None else [self.database.inverted_index[x] for x in kbest]\n\n if return_type == \"idxscore\":\n return [(i,scores[0,x]) for i, x in zip(ids, kbest)]\n elif return_type == \"idx\":\n return list(ids)\n elif return_type == \"score\":\n return [scores[0,x] for x in kbest]\n else:\n raise RuntimeError(\"Illegal return_type to retrieve: {}\".format(return_type))\n\n",
"\"\"\"\nA collection of backend-agnostic utilities.\n\nThe goal of this module is to provide a bridge between DyNet and Torch code, by providing commonly used functionality\nthat allows writing code that works with either backend.\n\nThis is *not* meant as a complete wrapper around each backend. Rather, only important high level functionality is\ncovered, dealing with tensor dimensions, reshaping, aggregation, etc.\n\n\"\"\"\n\nfrom abc import ABC\nfrom typing import Callable, Sequence\nimport numbers\n\nimport xnmt\nfrom xnmt.settings import settings\nfrom xnmt import param_collections, trace\n\nclass Tensor(ABC): pass\n\nif xnmt.backend_dynet:\n import dynet as dy\n Tensor.register(dy.Expression)\nif xnmt.backend_torch:\n import torch\n import torch.nn as nn\n Tensor.register(torch.Tensor)\n\n\ndef reset_graph(zero_grad: bool = True) -> None:\n \"\"\"\n Reset graph and/or gradients.\n\n DyNet case: reset computation graph (this is done implicitly by Pytorch garbage collection)\n Pytorch case: zero gradients (unless zero_grad is set to False). This is done automatically upon update() in DyNet.\n\n Args:\n zero_grad: Whether to zero gradients with Pytorch backend.\n \"\"\"\n if xnmt.backend_dynet:\n dy.renew_cg(immediate_compute=settings.IMMEDIATE_COMPUTE, check_validity=settings.CHECK_VALIDITY)\n if xnmt.backend_torch:\n torch.autograd.set_detect_anomaly(settings.CHECK_VALIDITY)\n if zero_grad:\n param_collections.ParamManager.global_collection().zero_grad()\n trace.reset()\n\ndef sent_len(x):\n \"\"\"\n Get the sentence length of a sequence tensor.\n\n Args:\n x: Tensor of matrix shape (+ batch dim)\n\n Returns:\n Sentence length.\n \"\"\"\n if xnmt.backend_dynet:\n return x.dim()[0][-1]\n else:\n return x.size()[1]\n\ndef sent_len_transp(x):\n \"\"\"\n Get the sentence length of a transposed sequence tensor (with flipped hidden/time dims).\n\n Args:\n x: Tensor of matrix shape (+ batch dim)\n\n Returns:\n Sentence length.\n \"\"\"\n if xnmt.backend_dynet:\n return x.dim()[0][0]\n else:\n return x.size()[-1]\n\ndef batch_size(x: Tensor) -> int:\n \"\"\"\n Get batch size of tensor.\n\n Args:\n x: a DyNet expression or PyTorch tensor.\n\n Returns:\n The batch size.\n \"\"\"\n if xnmt.backend_dynet:\n return x.dim()[1]\n else:\n return x.size()[0]\n\ndef hidden_size(x: Tensor) -> int:\n \"\"\"\n Get the hidden dimension of a batched tensor, e.g. 
a vector or a sequence tensor.\n\n Args:\n x: batched tensor\n\n Returns:\n vector size\n \"\"\"\n if xnmt.backend_dynet:\n return x.dim()[0][0]\n else:\n return x.size()[-1]\n\ndef hidden_size_transp(x: Tensor) -> int:\n \"\"\"\n Get the vector dimension of a transposed batched vector.\n\n Args:\n x: vector\n\n Returns:\n vector size\n \"\"\"\n if xnmt.backend_dynet:\n return x.dim()[0][-1]\n else:\n return x.size()[1]\n\ndef dim_desc(x: Tensor) -> tuple:\n \"\"\"\n Get a tuple describing the tensor dimensions.\n\n DyNet case: ((dim_1, ..dim_n), batch_size)\n PyTorch case: (batch_size, dim_n, .., dim_1)\n\n Args:\n x: tensor\n\n Returns:\n dimension description\n \"\"\"\n if xnmt.backend_dynet:\n return x.dim()\n else:\n return x.size()\n\ndef merge_time_batch_dims(x: Tensor) -> Tensor:\n \"\"\"\n Pack the time dimension into the batch dimension.\n\n Args:\n x: input tensor\n\n Returns:\n output tensor\n \"\"\"\n if xnmt.backend_dynet:\n ((hidden_dim, seq_len), batch_size_) = x.dim()\n return dy.reshape(x, (hidden_dim,), batch_size=batch_size_ * seq_len)\n else:\n batch_size_, seq_len, hidden_dim = x.size()\n return x.view((batch_size_ * seq_len, hidden_dim))\n\ndef unmerge_time_batch_dims(x: Tensor, batch_size_: numbers.Integral) -> Tensor:\n \"\"\"\n Undo packing of the time dimension into the batch dimension.\n\n Args:\n x: input tensor\n batch_size_: original batch size\n\n Returns:\n output tensor\n \"\"\"\n if xnmt.backend_dynet:\n seq_len = x.dim()[1] // batch_size_\n hidden_dim = x.dim()[0]\n if hidden_dim == (1,): hidden_dim = tuple()\n return dy.reshape(x, hidden_dim + (seq_len,), batch_size=batch_size_)\n else:\n seq_len = x.size()[0] // batch_size_\n hidden_dim = x.size()[1:]\n return x.view((batch_size_, seq_len) + hidden_dim)\n\ndef aggregate_masked_loss(x: Tensor, mask: 'xnmt.batchers.Mask'=None) -> Tensor:\n \"\"\"\n Aggregate loss values for unmasked entries.\n\n Args:\n x: Batched sequence of losses.\n mask: An optional mask for the case of outputs of unequal lengths.\n\n Returns:\n Batched sequence of losses, with masked ones zeroed out.\n \"\"\"\n if xnmt.backend_dynet:\n if mask:\n x = dy.cmult(x, dy.inputTensor(1.0 - mask.np_arr.T, batched=True))\n return dy.sum_elems(x)\n else:\n if mask:\n x = torch.mul(x, torch.as_tensor(1.0 - mask.np_arr, dtype=x.dtype, device=xnmt.device))\n return torch.sum(x, dim=tuple(range(1, len(x.size())))) # sum over all but batch elems\n\ndef esum(x: Sequence[Tensor]) -> Tensor:\n \"\"\"\n Perform an elementwise sum over all the given expressions.\n\n Args:\n x: list of tensor expressions of equal size to sum over.\n\n Returns:\n Summed tensor.\n \"\"\"\n if xnmt.backend_dynet:\n return dy.esum(x)\n else:\n return sum(x)\n\ndef zeroes(hidden_dim: numbers.Integral, batch_size: numbers.Integral=1) -> Tensor:\n \"\"\"\n Create a possibly batched zero vector.\n\n Args:\n hidden_dim: vector size\n batch_size: batch size\n\n Returns:\n DyNet expression of size ((hidden_dim,),batch_size) or PyTorch tensor of size (batch_size,hidden_dim)\n \"\"\"\n if xnmt.backend_dynet:\n return dy.zeroes((hidden_dim,), batch_size=batch_size)\n else:\n return torch.zeros(size=(batch_size, hidden_dim,), device=xnmt.device)\n\ndef concatenate(l: Sequence[Tensor]) -> Tensor:\n \"\"\"\n Stack batched vectors to form a longer batched vector.\n\n Args:\n l: list of batched vectors (DyNet dims: ((vec_size),batch_size); PyTorch dims: (batch_size,vec_size)).\n\n Returns:\n A batched vector.\n \"\"\"\n if xnmt.backend_dynet:\n return dy.concatenate(l)\n else:\n return 
torch.cat(l, dim=1)\n\ndef npvalue(t: Tensor) -> 'np.ndarray':\n \"\"\"\n Numpy array in column-major format (i.e., results will be in DyNet format, regardless of backend)\n\n Args:\n t: Tensor\n\n Returns:\n Numpy array\n \"\"\"\n if xnmt.backend_dynet:\n return t.npvalue()\n else:\n ret = t.cpu().data.numpy()\n if batch_size(t)==1 and t.dim()>1:\n ret = ret.squeeze(0)\n return ret.T\n\ndef average(l: Sequence[Tensor]) -> Tensor:\n \"\"\"\n Perform an elementwise average over all the given tensor expressions.\n\n Args:\n l: list of tensor expressions of matching size.\n\n Returns:\n Averaged tensor expression.\n \"\"\"\n if xnmt.backend_dynet:\n return dy.average(l)\n else:\n return sum(l) / len(l)\n\ndef dropout(t: Tensor, p: numbers.Real) -> Tensor:\n \"\"\"\n Dropout elements of the given tensor with probability p, and rescale accordingly.\n\n Args:\n t: input tensor\n p: dropout probability\n\n Returns:\n output tensor\n \"\"\"\n if xnmt.backend_dynet:\n return dy.dropout(t, p)\n else:\n return nn.Dropout(p=p)(t)\n\n\ndef identity(x: Tensor) -> Tensor:\n \"\"\"\n Identity function.\n\n Args:\n x: input\n\n Returns:\n output, same as input.\n \"\"\"\n return x\n\ndef activation_by_name(activation: str) -> Callable[[Tensor],Tensor]:\n \"\"\"\n Get a callable activation function, resolving potential different namings between backends.\n\n Args:\n activation: name of activation (tanh|rectify|relu|sigmoid|elu|selu|asinh|identity)\n\n Returns:\n A unary tensor activation function.\n \"\"\"\n if activation == 'tanh':\n return dy.tanh if xnmt.backend_dynet else torch.tanh\n elif activation in ['rectify','relu']:\n return dy.rectify if xnmt.backend_dynet else torch.relu\n elif activation == 'sigmoid':\n return dy.sigmoid if xnmt.backend_dynet else torch.sigmoid\n elif activation == 'elu':\n return dy.elu if xnmt.backend_dynet else torch.elu\n elif activation == 'selu':\n return dy.selu if xnmt.backend_dynet else torch.selu\n elif activation == 'asinh':\n if xnmt.backend_dynet:\n return dy.asinh\n else:\n raise ValueError(f\"Unknown activation {activation}\")\n elif activation == 'identity':\n return identity\n else:\n raise ValueError(f\"Unknown activation {activation}\")\n",
"from typing import List\nimport numbers\n\nimport xnmt\nimport xnmt.tensor_tools as tt\nfrom xnmt import events, expression_seqs, param_collections, param_initializers\nfrom xnmt.transducers import base as transducers\nfrom xnmt.persistence import serializable_init, Serializable, bare, Ref\n\nif xnmt.backend_dynet:\n import dynet as dy\nif xnmt.backend_torch:\n import torch\n import torch.nn as nn\n\n# Note: alternatively, this could wrap \"PositionEmbedder\", but it seems to me\n# that PositionEmbedder is probably not necessary in the first place, so\n# it probably makes more sense to have this as a SeqTransducer that\n# adds positional embeddings to an input\[email protected]_dynet\nclass PositionalSeqTransducerDynet(transducers.SeqTransducer, Serializable):\n yaml_tag = '!PositionalSeqTransducer'\n\n @events.register_xnmt_handler\n @serializable_init\n def __init__(self,\n max_pos: numbers.Integral,\n op: str = 'sum',\n emb_type: str = 'param',\n input_dim: numbers.Integral = Ref(\"exp_global.default_layer_dim\"),\n dropout: numbers.Real = Ref(\"exp_global.dropout\", default=0.0),\n param_init: param_initializers.ParamInitializer = Ref(\"exp_global.param_init\", default=bare(param_initializers.GlorotInitializer))) \\\n -> None:\n \"\"\"\n max_pos: largest embedded position\n op: how to combine positional encodings with the original encodings, can be \"sum\" or \"concat\"\n type: what type of embddings to use, \"param\"=parameterized (others, such as the trigonometric embeddings are todo)\n input_dim: embedding size\n dropout: apply dropout to output of this transducer\n param_init: how to initialize embedding matrix\n \"\"\"\n self.max_pos = max_pos\n self.input_dim = input_dim\n self.dropout = dropout\n self.op = op\n self.emb_type = emb_type\n param_init = param_init\n dim = (self.input_dim, max_pos)\n my_params = param_collections.ParamManager.my_params(self)\n self.embedder = my_params.add_parameters(dim, init=param_init.initializer(dim, is_lookup=True))\n\n @ events.handle_xnmt_event\n def on_set_train(self, val):\n self.train = val\n\n def get_final_states(self) -> List[transducers.FinalTransducerState]:\n return self._final_states\n\n def transduce(self, src: expression_seqs.ExpressionSequence) -> expression_seqs.ExpressionSequence:\n sent_len = src.sent_len()\n embeddings = dy.strided_select(dy.parameter(self.embedder), [1,1], [0,0], [self.input_dim, sent_len])\n if self.op == 'sum':\n output = embeddings + src.as_tensor()\n elif self.op == 'concat':\n output = dy.concatenate([embeddings, src.as_tensor()])\n else:\n raise ValueError(f'Illegal op {op} in PositionalTransducer (options are \"sum\"/\"concat\")')\n if self.train and self.dropout > 0.0:\n output = dy.dropout(output, self.dropout)\n output_seq = expression_seqs.ExpressionSequence(expr_tensor=output, mask=src.mask)\n self._final_states = [transducers.FinalTransducerState(output_seq[-1])]\n return output_seq\n\[email protected]_torch\nclass PositionalSeqTransducerTorch(transducers.SeqTransducer, Serializable):\n yaml_tag = '!PositionalSeqTransducer'\n\n @events.register_xnmt_handler\n @serializable_init\n def __init__(self,\n max_pos: numbers.Integral,\n op: str = 'sum',\n emb_type: str = 'param',\n input_dim: numbers.Integral = Ref(\"exp_global.default_layer_dim\"),\n dropout: numbers.Real = Ref(\"exp_global.dropout\", default=0.0),\n param_init: param_initializers.ParamInitializer = Ref(\"exp_global.param_init\", default=bare(param_initializers.GlorotInitializer))) \\\n -> None:\n \"\"\"\n max_pos: largest embedded 
position\n op: how to combine positional encodings with the original encodings, can be \"sum\" or \"concat\"\n type: what type of embddings to use, \"param\"=parameterized (others, such as the trigonometric embeddings are todo)\n input_dim: embedding size\n dropout: apply dropout to output of this transducer\n param_init: how to initialize embedding matrix\n \"\"\"\n self.max_pos = max_pos\n self.input_dim = input_dim\n self.dropout = dropout\n self.op = op\n self.emb_type = emb_type\n my_params = param_collections.ParamManager.my_params(self)\n self.embeddings = nn.Embedding(self.max_pos, self.input_dim).to(xnmt.device)\n my_params.append(self.embeddings)\n my_params.init_params(param_init)\n\n @ events.handle_xnmt_event\n def on_set_train(self, val):\n self.train = val\n\n def get_final_states(self) -> List[transducers.FinalTransducerState]:\n return self._final_states\n\n def transduce(self, src: expression_seqs.ExpressionSequence) -> expression_seqs.ExpressionSequence:\n sent_len = src.sent_len()\n batch_size = tt.batch_size(src[0])\n embeddings = self.embeddings(torch.tensor([list(range(sent_len))] * batch_size).to(xnmt.device))\n # embeddings = dy.strided_select(dy.parameter(self.embedder), [1,1], [0,0], [self.input_dim, sent_len])\n if self.op == 'sum':\n output = embeddings + src.as_tensor()\n elif self.op == 'concat':\n output = tt.concatenate([embeddings, src.as_tensor()])\n else:\n raise ValueError(f'Illegal op {op} in PositionalTransducer (options are \"sum\"/\"concat\")')\n if self.train and self.dropout > 0.0:\n output = tt.dropout(output, self.dropout)\n output_seq = expression_seqs.ExpressionSequence(expr_tensor=output, mask=src.mask)\n self._final_states = [transducers.FinalTransducerState(output_seq[-1])]\n return output_seq\n\nPositionalSeqTransducer = xnmt.resolve_backend(PositionalSeqTransducerDynet, PositionalSeqTransducerTorch)\n"
] | [
[
"numpy.argsort",
"numpy.dot",
"numpy.stack"
],
[
"torch.nn.Dropout",
"torch.autograd.set_detect_anomaly",
"torch.cat",
"torch.zeros",
"torch.as_tensor"
],
[
"torch.nn.Embedding"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
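As a reference point for the xnmt entry above, the retrieval step in DotProductRetriever.generate reduces to a dot product between the pooled source encoding and the pre-indexed database followed by an n-best argsort. The toy snippet below is self-contained and uses random data with assumed shapes; the real code builds these tensors with DyNet.

    # Toy illustration of the dot-product n-best lookup performed in generate() above.
    import numpy as np

    rng = np.random.default_rng(0)
    hidden, n_items, nbest = 8, 100, 5
    database_indexed = rng.standard_normal((hidden, n_items))   # like self.database.indexed (stacked on axis=1)
    src_encoding = rng.standard_normal((1, hidden))             # like the pooled, transposed source encoding

    scores = np.dot(src_encoding, database_indexed)             # shape (1, n_items)
    kbest = np.argsort(scores, axis=1)[0, -nbest:][::-1]        # highest-scoring columns first
    print([(int(i), float(scores[0, i])) for i in kbest])       # "idxscore"-style result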
VamshikShetty/Neural-Style | [
"44806accdfb9bb98aa15d3145563bf6759e9e604"
] | [
"Fast Style Transfer/TensorFlow/train_fast_style_transfer.py"
] | [
"\nimport tensorflow as tf\ntf.reset_default_graph() \n\n\nfrom keras.applications.vgg19 import VGG19\nimport os \n\nfrom tensorflow.python.keras.preprocessing import image as kp_image\nfrom keras.models import Model\nfrom keras.layers import Dense, BatchNormalization,Dropout,concatenate \nfrom keras import backend as K\nfrom keras.models import Model,load_model,model_from_json #,evaluate_generator\nfrom keras import losses\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D,Flatten,GlobalAveragePooling2D\n\nimport numpy as np\nfrom PIL import Image\nimport cv2\nimport scipy\nimport transform_network as TNET\nfrom loss import Loss, get_VGG19\n\ncontent_layers = ['block3_conv3']\nstyle_layers = ['block1_conv1','block2_conv2', 'block3_conv3', 'block4_conv3']\n\nnum_content_layers = len(content_layers)\nnum_style_layers = len(style_layers)\n\n\nseed = 791\ntf.set_random_seed(seed)\nnp.random.seed(seed)\n\ncontent_dir = 'content/'\nstyle_image = 'udnie.jpg'\n\n\nheight = 352\nwidth = 352\n\ndef load_img(path_to_img, expand = True, img_shape=(height,width)):\n \n img = scipy.misc.imread(path_to_img)\n\n img = scipy.misc.imresize(img, img_shape)\n img = img.astype(\"float32\")\n if expand:\n img = np.expand_dims(img, axis=0)\n \n img = tf.keras.applications.vgg19.preprocess_input(img)\n\n return img\n\ndef load_batch(image_paths):\n x = []\n for image_path in image_paths:\n img = load_img(image_path, False)\n\n x.append(img)\n\n x = np.asarray(x)\n return x\n\n\ndef deprocess_img(processed_img, shape):\n x = processed_img.copy()\n if len(x.shape) == 4:\n x = np.squeeze(x, 0)\n assert len(x.shape) == 3, (\"Input to deprocess image must be an image of \"\n \"dimension [1, height, width, channel] or [height, width, channel]\")\n if len(x.shape) != 3:\n raise ValueError(\"Invalid input to deprocessing image\")\n \n # perform the inverse of the preprocessiing step\n x[:, :, 0] += 103.939\n x[:, :, 1] += 116.779\n x[:, :, 2] += 123.68\n\n\n\n x = np.clip(x, 0, 255).astype('uint8')\n img = scipy.misc.imresize(x, shape)\n return img\n\n\ndef run_fast_style_transfer(content_training_images, style_image_path, epochs, batch_size, content_weight=0.6, style_weight=0.4, total_variation_weight = 1e-5): \n\n with tf.Session() as sess:\n K.set_session(sess)\n\n \n input_batch = tf.placeholder(tf.float32, shape=(None, height, width, 3), name=\"input_batch\")\n init_image = TNET.get_TransformNet('transform_network', input_batch)\n\n loss = Loss(init_image, content_layers, style_layers)\n\n\n content_loss = loss.content_loss(input_batch)\n\n \n style_var = load_img(style_image_path)\n\n\n\n style_var = tf.Variable(style_var)\n style_loss = loss.style_loss(style_var)\n \n\n tv_loss = loss.tv_loss(init_image)\n\n total_loss = style_weight*style_loss + content_weight*content_loss + total_variation_weight*tv_loss\n\n\n transform_net = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='transform_network')\n opt = tf.train.AdamOptimizer(learning_rate=0.0005, beta1=0.9, epsilon=1e-08).minimize(total_loss, var_list=[transform_net])\n\n\n #sess.run(tf.variables_initializer(var_list=[input_batch]))\n \n sess.run(tf.global_variables_initializer())\n # saver = tf.train.Saver()\n\n Tnet_saver = tf.train.Saver(transform_net)\n\n # loading the weights again because tf.global_variables_initializer() resets the weights\n loss.load_weights_to_vgg19(\"vgg_weights/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5\")\n # 
init_image.load_weights('0-transform_network.h5')\n\n\n dir_model = \"weights/\"+style_image.split('.')[0]+\"_weights/\"\n if not os.path.exists(dir_model):\n os.makedirs(dir_model)\n \n # Tnet_saver.restore(sess, dir_model+\"model.ckpt\")\n\n\n for i in range(epochs):\n\n avg_loss = 0\n avg_cnt = 1\n\n for j in range(0, int(len(content_training_images)/batch_size)):\n\n batch = load_batch(content_training_images[j: j+batch_size])\n\n temp = sess.run([total_loss, style_loss, content_loss, tv_loss, init_image, opt],feed_dict={input_batch:batch})\n\n print('epoch: ',i,'batch: ',j,' loss: ', temp[:4], 'avg loss: ', avg_loss )\n\n avg_loss = (avg_loss*(avg_cnt-1) + temp[0] )/avg_cnt\n avg_cnt += 1\n\n\n if j%50==0: # and i%50==0:\n image = deprocess_img(temp[4][2], batch[2].shape[:-1])\n cv2.imwrite(str(i)+'-'+str(j)+'-temp.jpg',image)\n if i==0:\n image_ori = deprocess_img(batch[2], batch[2].shape[:-1])\n cv2.imwrite(str(i)+'-'+str(j)+'-temp-orgi.jpg',image_ori)\n\n\n # if (i+1)%100==0:\n print('\\n Data Saved ... ')\n Tnet_saver.save(sess, dir_model+\"model.ckpt\")\n\n sess.close()\n\n\n\ncontent_training_images = os.listdir(content_dir) # http://cocodataset.org/#download 2017 val images [5k/1GB]\nfor i in range(len(content_training_images)):\n content_training_images[i] = content_dir+content_training_images[i]\n\n#print(content_training_images)\nrun_fast_style_transfer(content_training_images, style_image, epochs=5, batch_size=6)\n#cv2.imwrite(str(num_iterations)+'-'+save_name,best)\n\n"
] | [
[
"scipy.misc.imresize",
"numpy.expand_dims",
"tensorflow.keras.applications.vgg19.preprocess_input",
"numpy.random.seed",
"tensorflow.Variable",
"numpy.asarray",
"tensorflow.get_collection",
"numpy.squeeze",
"numpy.clip",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"scipy.misc.imread",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.set_random_seed",
"tensorflow.train.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.10",
"0.16",
"0.19",
"0.18",
"0.12",
"1.0",
"0.17",
"1.2"
],
"tensorflow": [
"1.10"
]
}
] |
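The fast-style-transfer training script above saves intermediate images via deprocess_img, which undoes the VGG19 preprocessing before writing to disk. Below is a small numpy-only sketch of that inverse step; the channel means are the same constants used in the script, the dummy input is made up, and the resize that the original also performs is omitted.

    # Sketch of the deprocess step: add back the VGG19 BGR channel means, clip, cast.
    import numpy as np

    VGG_BGR_MEANS = (103.939, 116.779, 123.68)  # same constants as in the script

    def deprocess(x):
        x = x.copy()
        for c, mean in enumerate(VGG_BGR_MEANS):
            x[..., c] += mean
        return np.clip(x, 0, 255).astype('uint8')

    dummy = np.zeros((4, 4, 3), dtype='float32') - 50.0
    print(deprocess(dummy)[0, 0])  # -> [53 66 73] (means added back, then truncated to uint8)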
Aarthif-Nawaz/Home3Circuit | [
"3e2d71fe7a839a7fd820d18fbef9bb31fde608e0"
] | [
"homestayAdmin/FinancialForecasting.py"
] | [
"import pickle\r\nimport warnings\r\nimport itertools\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nwarnings.filterwarnings(\"ignore\")\r\nplt.style.use('fivethirtyeight')\r\nimport pandas as pd\r\nimport statsmodels.api as sm\r\nimport matplotlib\r\nimport mysql.connector\r\nimport os\r\nimport sys\r\n\r\nmatplotlib.rcParams['axes.labelsize'] = 14\r\nmatplotlib.rcParams['xtick.labelsize'] = 12\r\nmatplotlib.rcParams['ytick.labelsize'] = 12\r\nmatplotlib.rcParams['text.color'] = 'k'\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n passwd=\"\",\r\n database=\"homestay\"\r\n)\r\n\r\n\r\ndf = pd.read_csv(\"Forecasting.csv\", parse_dates=[1] ,header=0)\r\n\r\nname = sys.argv[1]\r\nprint(name)\r\nhomestay = df.loc[df['Name'] == str(name)]\r\n# homestay['Date'].min(), homestay['Date'].max()\r\n#\r\n# homestay = homestay.sort_values('Date')\r\n# homestay.isnull().sum()\r\n#\r\n# homestay = homestay.groupby('Date')['Sales'].sum().reset_index()\r\nhomestay.index = homestay['Date']\r\ny = homestay['Sales'].resample('MS').mean()\r\ny.plot(figsize=(15, 6))\r\nfrom pandas.plotting import autocorrelation_plot\r\nautocorrelation_plot(homestay['Sales'])\r\n\r\nfrom statsmodels.graphics.tsaplots import plot_pacf\r\nplot_pacf(homestay['Sales'], lags=15)\r\n\r\n\r\nfrom statsmodels.tsa.arima_model import ARIMA, ARIMAResults\r\n\r\nmodel = ARIMA(df['Sales'], order=(1,0,1))\r\nmodel_fit = model.fit()\r\nprint(model_fit.summary())\r\nresiduals = model_fit.resid\r\nresiduals.plot()\r\nprint(residuals.describe())\r\noutput = model_fit.forecast()\r\nprint(output)\r\nprint(model_fit.forecast(5)[0])\r\ntrain_size = int(df.shape[0]*0.7)\r\ntrain, test = df.Sales[0:train_size], df.Sales[train_size:]\r\ndata = train\r\npredict =[]\r\nfor t in test:\r\n model = ARIMA(data, order=(0,0,1))\r\n model_fit = model.fit()\r\n y = model_fit.forecast()\r\n print(y[0][0])\r\n predict.append(y[0][0])\r\n data = np.append(data, t)\r\n data = pd.Series(data)\r\n\r\nfrom sklearn.metrics import mean_squared_error\r\nmse = mean_squared_error(test.values, predict)\r\n# print(mse)\r\n# print(predict)\r\nmodel_fit.save('model.pkl')\r\nloaded = ARIMAResults.load('model.pkl')\r\nvalues = loaded.predict()\r\nmycursor = mydb.cursor()\r\ndelsql = \"truncate table price\"\r\n\r\nmycursor.execute(delsql)\r\n\r\n\r\n\r\nimport datetime\r\ntoday = datetime.datetime.today()\r\ndatem = datetime.datetime(today.year, today.month,1)\r\n\r\nfrom dateutil.rrule import rrule, MONTHLY\r\nfrom datetime import datetime\r\n\r\n\r\n\r\n\r\ndef months(start_month, start_year, end_month, end_year):\r\n start = datetime(start_year, start_month, 1)\r\n end = datetime(end_year, end_month, 1)\r\n return [(d.month, d.year) for d in rrule(MONTHLY, dtstart=start, until=end)]\r\n\r\nvalue = sys.argv[2]\r\nyear = today.year\r\nif(today.month + int(value) > 12):\r\n year = year +1\r\n predict_month = months(today.month, today.year, ((int(value) + 5)-12), year)\r\nelse:\r\n predict_month = months(today.month,today.year,(int(value)+5),year)\r\nlist = []\r\nfor j in predict_month:\r\n list.append(j[0])\r\nprint(list)\r\nfor i in range (1,len(values)):\r\n if(i <= int(value)):\r\n if (today.month + i <= 12):\r\n year = today.year\r\n mycursor.execute(\"INSERT INTO price(month , price) VALUES (%s,%s)\", (str(list[i])+\"-\"+str(year), float(values[i])))\r\n else:\r\n year = today.year+1\r\n mycursor.execute(\"INSERT INTO price(month , price) VALUES (%s,%s)\", (str(list[i]) + \"-\" + str(year), 
float(values[i])))\r\nmydb.commit()\r\n\r\n\r\n\r\n\r\n# p = d = q = range(0, 2)\r\n# pdq = list(itertools.product(p, d, q))\r\n# seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]\r\n# print('Examples of parameter combinations for Seasonal ARIMA...')\r\n# print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))\r\n# print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))\r\n# print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))\r\n# print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))\r\n#\r\n# for param in pdq:\r\n# for param_seasonal in seasonal_pdq:\r\n# try:\r\n# mod = sm.tsa.statespace.SARIMAX(y,order=param,seasonal_order=param_seasonal,enforce_stationarity=False,enforce_invertibility=False)\r\n# results = mod.fit()\r\n# #print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))\r\n# except:\r\n# continue\r\n# mod = sm.tsa.statespace.SARIMAX(y,order=(1, 1, 1),seasonal_order=(1, 1, 0, 2),enforce_stationarity=False,enforce_invertibility=False)\r\n# results = mod.fit()\r\n# #print(results.summary().tables[1])\r\n# # results.plot_diagnostics(figsize=(16, 8))\r\n# # plt.show()\r\n# pred = results.get_prediction(start=pd.to_datetime('2020-01-01'), dynamic=False)\r\n# pred_ci = pred.conf_int()\r\n# ax = y['2020-01-07':].plot(label='observed')\r\n# pred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7, figsize=(14, 7))\r\n# ax.fill_between(pred_ci.index,\r\n# pred_ci.iloc[:, 0],\r\n# pred_ci.iloc[:, 1], color='k', alpha=.2)\r\n# ax.set_xlabel('Date')\r\n# ax.set_ylabel('Sales')\r\n# plt.legend()\r\n# plt.show()\r\n#\r\n# y_forecasted = pred.predicted_mean\r\n# y_truth = y['2020-01-07':]\r\n# mse = ((y_forecasted - y_truth) ** 2).mean()\r\n# print('The Mean Squared Error of our forecasts is {}'.format(round(mse, 2)))\r\n#\r\n# pred_uc = results.get_forecast(steps=100)\r\n# pred_ci = pred_uc.conf_int()\r\n# ax = y.plot(label='observed', figsize=(14, 7))\r\n# pred_uc.predicted_mean.plot(ax=ax, label='Forecast')\r\n# ax.fill_between(pred_ci.index,pred_ci.iloc[:, 0],pred_ci.iloc[:, 1], color='k', alpha=.25)\r\n# ax.set_xlabel('Date')\r\n# ax.set_ylabel('Sales')\r\n# plt.legend()\r\n# plt.show()\r\n"
] | [
[
"pandas.read_csv",
"pandas.Series",
"sklearn.metrics.mean_squared_error",
"numpy.append",
"pandas.plotting.autocorrelation_plot",
"matplotlib.pyplot.style.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
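The record above (homestayAdmin/FinancialForecasting.py) drives a walk-forward ARIMA evaluation: fit on the training slice, forecast one step ahead, append the observed value, refit. Purely as an illustration of the listed pandas/statsmodels/sklearn calls, here is a minimal sketch of that loop; the synthetic series, the (0, 0, 1) order, and the use of the current statsmodels.tsa.arima.model.ARIMA class (the statsmodels.tsa.arima_model module imported in the record has since been removed from statsmodels) are assumptions, not part of the original script.

import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima.model import ARIMA

# Hypothetical monthly sales series standing in for Forecasting.csv.
rng = np.random.default_rng(0)
sales = pd.Series(100.0 + np.cumsum(rng.normal(0.0, 5.0, size=48)))

# 70/30 train/test split, as in the record above.
train_size = int(len(sales) * 0.7)
history = sales[:train_size].to_numpy()
test = sales[train_size:].to_numpy()

predictions = []
for actual in test:
    fit = ARIMA(history, order=(0, 0, 1)).fit()   # refit on everything seen so far
    predictions.append(fit.forecast(steps=1)[0])  # one-step-ahead forecast
    history = np.append(history, actual)          # walk forward by one observation

print("walk-forward MSE:", mean_squared_error(test, predictions))

Refitting inside the loop is the expensive part of this pattern; it mirrors the record faithfully but would normally be replaced by appending observations to a fitted results object for longer series.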
eherr/anim_utils | [
"2274b86ff410c8f6feb588626cbf83664382abca"
] | [
"anim_utils/motion_editing/fabrik_chain.py"
] | [
"#!/usr/bin/env python\n#\n# Copyright 2019 DFKI GmbH.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the\n# following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n# USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n https://www.sciencedirect.com/science/article/pii/S1524070311000178?via%3Dihub\n\n based on the pseudocode by Renzo Poddighe\n https://project.dke.maastrichtuniversity.nl/robotlab/wp-content/uploads/Bachelor-thesis-Renzo-Poddighe.pdf\n\"\"\"\nimport math\nimport numpy as np\nfrom transformations import quaternion_inverse, quaternion_multiply, quaternion_from_matrix, euler_from_quaternion\nfrom .analytical_inverse_kinematics import calculate_limb_joint_rotation, calculate_limb_root_rotation, to_local_coordinate_system\n\ndef sign(x):\n return 1 if x >= 0 else -1\n\ndef quaternion_to_av(q):\n \"\"\" according to lee 2000\n the purely imaginary quaternion is identical to the angular velocity\n the sign of the real part gives the direction\n Since the unit quaternion space is folded by the antipodal equivalence,\n the angular velocity is twice as fast\n \"\"\"\n return 2 * np.array(q[1:]) * sign(q[0])\n\ndef normalize(v):\n return v/ np.linalg.norm(v)\n\ndef get_quaternion_delta(a, b):\n return quaternion_multiply(quaternion_inverse(b), a)\n\n\ndef quaternion_from_axis_angle(axis, angle):\n q = [1,0,0,0]\n q[1] = axis[0] * math.sin(angle / 2)\n q[2] = axis[1] * math.sin(angle / 2)\n q[3] = axis[2] * math.sin(angle / 2)\n q[0] = math.cos(angle / 2)\n return normalize(q)\n\n\n\ndef get_offset_quat(a, b):\n a_len = np.linalg.norm(a)\n b_len = np.linalg.norm(b)\n if a_len > 0 and b_len > 0:\n q = quaternion_from_vector_to_vector(a/a_len,b/b_len)\n q /= np.linalg.norm(q)\n return q\n else:\n return [1,0,0,0]\n\ndef quaternion_from_vector_to_vector(a, b):\n \"\"\"src: http://stackoverflow.com/questions/1171849/finding-quaternion-representing-the-rotation-from-one-vector-to-another\n http://wiki.ogre3d.org/Quaternion+and+Rotation+Primer\"\"\"\n\n v = np.cross(a, b)\n w = np.sqrt((np.linalg.norm(a) ** 2) * (np.linalg.norm(b) ** 2)) + np.dot(a, b)\n q = np.array([w, v[0], v[1], v[2]])\n if np.dot(q,q) != 0:\n return q/ np.linalg.norm(q)\n else:\n idx = np.nonzero(a)[0]\n q = np.array([0, 0, 0, 0])\n q[1 + ((idx + 1) % 2)] = 1 # [0, 0, 1, 0] for a rotation of 180 around y axis\n return q\n\ndef quaternion_from_vector_to_vector2(a, b):\n \"\"\"http://math.stackexchange.com/questions/293116/rotating-one-3d-vector-to-another\"\"\"\n if np.array_equiv(a, b):\n return [1, 0, 0, 0]\n\n axis = normalize(np.cross(a, b))\n dot = np.dot(a, b)\n if dot >= 1.0:\n return [1, 0, 0, 0]\n angle 
= math.acos(dot)\n q = quaternion_from_axis_angle(axis, angle)\n return q\n\ndef to_local_cos(skeleton, node_name, frame, q):\n # bring into parent coordinate system\n pm = skeleton.nodes[node_name].get_global_matrix(frame)[:3,:3]\n inv_p = quaternion_inverse(quaternion_from_matrix(pm))\n inv_p /= np.linalg.norm(inv_p)\n return quaternion_multiply(inv_p, q)\n\n\ndef orient_joint_to_target(skeleton, node, frame, src_pos, target_pos):\n if skeleton.nodes[node].parent is None:\n parent_pos = [0, 0, 0]\n else:\n parent_pos = skeleton.nodes[node].parent.get_global_position(frame)\n src_dir = normalize(src_pos - parent_pos)\n target_dir = normalize(target_pos - parent_pos)\n delta_q = quaternion_from_vector_to_vector(src_dir, target_dir)\n return normalize(delta_q)\n\n\nclass FABRIKBone(object):\n def __init__(self, name, child):\n self.name = name\n self.child = child\n self.position = np.array([0, 0, 0], np.float) # position of joint\n self.length = 0\n self.is_root = False\n self.is_leaf = False\n\n\nROOT_OFFSET = np.array([0,0,0], np.float)\n\nclass FABRIKChain(object):\n def __init__(self, skeleton, bones, node_order, tolerance=0.01, delta_tolerance=0.0001, max_iter=500, frame_offset=3, root_offset=ROOT_OFFSET, activate_constraints=False):\n self.skeleton = skeleton\n self.bones = bones\n self.node_order = node_order\n self.reversed_node_order = list(reversed(node_order))\n self.tolerance = tolerance\n self.max_iter = max_iter\n self.target = None\n self.root_pos = None\n self.chain_length = 0\n self.root_offset = root_offset\n self.activate_constraints = activate_constraints\n self.frame_offset = frame_offset\n self.delta_tolerance = delta_tolerance\n\n def set_positions_from_frame(self, frame, parent_length):\n self.skeleton.clear_cached_global_matrices()\n for idx, node in enumerate(self.node_order):\n p = self.skeleton.nodes[node].get_global_position(frame, use_cache=True)\n #print(\"pos \", node, p)\n self.bones[node].position = p\n if idx ==0:\n self.root_pos = p\n self.chain_length = 0\n for node in self.node_order:\n next_node = self.bones[node].child\n if next_node is None:\n break\n d = np.linalg.norm(self.bones[next_node].position - self.bones[node].position)\n self.bones[node].length = d\n #print(\"length \",node, d)\n self.chain_length += d\n self.parent_length = parent_length\n\n def target_is_reachable(self):\n dist = np.linalg.norm(self.target - self.root_pos)\n #print(\"unreachable\", dist, self.chain_length)\n return dist < self.chain_length+ self.parent_length\n\n def run(self, frame, target):\n self.target = target\n self.set_positions_from_frame(frame, 0)\n self.solve()\n return self.get_joint_parameters()\n\n def run_partial(self, frame, target):\n self.target = target\n self.set_positions_from_frame(frame, 0)\n self.solve()\n return self.set_partial_joint_parameters(frame)\n\n def run_partial_with_constraints(self, frame, target):\n self.target = target\n self.set_positions_from_frame(frame, 0)\n self.solve_partial(frame)\n return self.set_partial_joint_parameters(frame)\n\n def run_with_constraints(self, frame, target):\n self.target = target\n self.set_positions_from_frame(frame, 0)\n self.solve_with_constraints()\n return self.get_joint_parameters()\n\n def solve(self):\n if not self.target_is_reachable():\n print(\"unreachable\")\n # if unreachable orient joints to target\n self.orient_to_target()\n else:\n print(\"reachable\")\n # if reachable perform forward and backward reaching until tolerance is reached\n iter = 0\n distance = self.get_error()\n while 
distance > self.tolerance and iter< self.max_iter:\n self.backward()\n self.forward()\n iter+=1\n distance = self.get_error()\n print(\"iter\",iter, distance)\n\n def solve_with_constraints(self):\n if not self.target_is_reachable():\n print(\"unreachable\")\n # if unreachable orient joints to target\n self.orient_to_target()\n else:\n print(\"reachable\")\n # if reachable perform forward and backward reaching until tolerance is reached\n iter = 0\n distance = self.get_error()\n while distance > self.tolerance and iter < self.max_iter:\n self.backward()\n self.forward()\n self.apply_constraints()\n iter+=1\n distance = self.get_error()\n print(\"iter\",iter, distance)\n\n def solve_partial(self, frame):\n if not self.target_is_reachable():\n print(\"unreachable\")\n # if unreachable orient joints to target\n self.orient_to_target()\n else:\n print(\"reachable\")\n # if reachable perform forward and backward reaching until tolerance is reached\n iter = 0\n distance = self.get_error()\n while distance > self.tolerance and iter < self.max_iter:\n self.backward()\n self.forward()\n\n self.set_partial_joint_parameters(frame)\n self.set_positions_from_frame(frame, 0)\n\n iter += 1\n distance = self.get_error()\n print(\"iter\", iter, distance)\n\n def get_error(self):\n end_effector = self.node_order[-1]\n return np.linalg.norm(self.bones[end_effector].position - self.target)\n\n def orient_to_target(self):\n for idx, node in enumerate(self.node_order[:-1]):\n next_node = self.bones[node].child\n if next_node is None:\n print(\"Error: none at \",node)\n break\n r = np.linalg.norm(self.target - self.bones[node].position)\n if r > 0:\n l = self.bones[node].length / r\n self.bones[next_node].position = (1 - l) * self.bones[node].position + l * self.target\n\n def backward(self):\n end_effector = self.node_order[-1]\n self.bones[end_effector].position = np.array(self.target)\n n_points = len(self.node_order)\n for idx in range(n_points - 2, -1, -1):\n node = self.node_order[idx]\n next_node = self.bones[node].child\n r = np.linalg.norm(self.bones[next_node].position - self.bones[node].position)\n if r > 0:\n l = self.bones[node].length / r\n self.bones[node].position = (1 - l) * self.bones[next_node].position + l * self.bones[node].position\n\n def forward(self):\n root_node = self.node_order[0]\n self.bones[root_node].position = self.root_pos\n for idx, node in enumerate(self.node_order[:-1]): #for p_idx in range(0, self.n_points - 1, 1):\n #next_node = self.node_order[idx + 1]\n next_node = self.bones[node].child\n r = np.linalg.norm(self.bones[next_node].position - self.bones[node].position)\n if r > 0:\n l = self.bones[node].length / r\n self.bones[next_node].position = l * self.bones[next_node].position + (1 - l) * self.bones[node].position\n\n def apply_constraints(self):\n frame = self.get_joint_parameters()\n self.set_positions_from_frame(frame, 0)\n return\n\n def get_joint_parameters(self):\n n_joints = len(self.node_order) - 1\n frame = np.zeros(n_joints*4+3)\n o = 3\n prev_point = self.root_offset\n for idx, node in enumerate(self.node_order[:-1]):\n #for node in self.skeleton.animated_joints:\n next_node = self.bones[node].child\n q = self.get_global_rotation(node, next_node)\n frame[o:o + 4] = to_local_cos(self.skeleton, node, frame, q)\n if self.skeleton.nodes[node].joint_constraint is not None and self.activate_constraints:\n self.apply_constraint_with_swing(node, frame, o)\n prev_point = self.bones[next_node].position\n o += 4\n return frame\n\n def 
set_partial_joint_parameters(self, frame):\n o = self.frame_offset\n for idx, node in enumerate(self.node_order[:-1]):\n next_node = self.bones[node].child\n q = self.get_global_rotation_non_cos(node, next_node, frame)\n frame[o:o + 4] = to_local_coordinate_system(self.skeleton,frame, node, q)\n if self.skeleton.nodes[node].joint_constraint is not None and self.activate_constraints:\n self.apply_constraint_with_swing(node, frame, o)\n o += 4\n return frame\n\n def apply_constraint_with_swing(self, node, frame, o, eps=0.01):\n old_q = np.array(frame[o:o + 4])\n\n #remove twist rotation\n swing_q, twist_q = self.skeleton.nodes[node].joint_constraint.split(old_q)\n frame[o:o + 4] = twist_q\n\n # apply swing_q to parent\n parent_q = np.array(frame[o - 4:o])\n new_parent_q = quaternion_multiply(parent_q, swing_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n new_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n diff = np.linalg.norm(new_node_pos - self.target)\n\n # calculate rotation fix if necessary\n if diff > eps:\n delta_q = orient_joint_to_target(self.skeleton, node, frame, new_node_pos, self.target)\n aligned_parent_q = quaternion_multiply(delta_q, new_parent_q)\n aligned_parent_q = normalize(aligned_parent_q)\n frame[o - 4:o] = aligned_parent_q\n\n def apply_constraint_with_swing_global(self, node, frame, o, eps=0.01):\n old_q = np.array(frame[o:o + 4])\n next_node = self.bones[node].child\n parent_m = self.skeleton.nodes[node].get_global_matrix(frame)[:3, :3]\n node_m = self.skeleton.nodes[next_node].get_global_matrix(frame)[:3, :3]\n node_q = quaternion_from_matrix(node_m)\n node_q = normalize(node_q)\n # remove twist rotation\n swing_q, twist_q = self.skeleton.nodes[node].joint_constraint.split_global(parent_m, node_q)\n frame[o:o + 4] = twist_q\n\n # apply swing_q to parent\n parent_q = np.array(frame[o - 4:o])\n new_parent_q = quaternion_multiply(parent_q, swing_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n new_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n diff = np.linalg.norm(new_node_pos - self.target)\n return\n # calculate rotation fix if necessary\n if diff > eps:\n delta_q = orient_joint_to_target(self.skeleton, node, frame, new_node_pos, self.target)\n aligned_parent_q = quaternion_multiply(delta_q, new_parent_q)\n aligned_parent_q = normalize(aligned_parent_q)\n frame[o - 4:o] = aligned_parent_q\n\n\n def apply_constraint_with_swing_and_lee(self, node, frame, o, eps=0.01):\n old_q = frame[o:o + 4]\n old_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n delta_q, new_q = self.skeleton.nodes[node].joint_constraint.split(old_q)\n frame[o:o + 4] = new_q\n new_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n diff = np.linalg.norm(new_node_pos - old_node_pos)\n if diff > eps:\n parent_q = frame[o - 4:o]\n new_parent_q = quaternion_multiply(parent_q, delta_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n new_node_pos = self.skeleton.nodes[node].children[0].get_global_position(frame)\n diff = np.linalg.norm(new_node_pos -old_node_pos)\n if diff > eps:\n parent_q = frame[o - 4:o]\n root = None\n if self.skeleton.nodes[node].parent is not None:\n root = self.skeleton.nodes[node].parent.node_name\n end_effector = self.skeleton.nodes[node].children[0].node_name\n print(\"apply lee tolani\",root, node, end_effector, diff)\n local_axis = 
self.skeleton.nodes[node].joint_constraint.axis\n #frame[o:o + 4] = calculate_limb_joint_rotation(self.skeleton, root, node, end_effector, local_axis, frame, self.target)\n delta_q = calculate_limb_root_rotation(self.skeleton, root, end_effector, frame, self.target)\n new_parent_q = quaternion_multiply(parent_q, delta_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n\n def apply_constraint_with_swing2(self, node, parent_node, frame, o):\n next_node = self.bones[node].child\n target_global_m = self.skeleton.nodes[next_node].get_global_matrix(frame)[:3,:3]\n old_q = frame[o:o + 4]\n delta_q, new_q = self.skeleton.nodes[node].joint_constraint.split(old_q)\n frame[o:o + 4] = new_q\n new_global_m = self.skeleton.nodes[next_node].get_global_matrix(frame)[:3,:3]\n delta_global_m = np.dot(np.linalg.inv(new_global_m), target_global_m)\n actual_delta_q = normalize(quaternion_from_matrix(delta_global_m))\n parent_q = frame[o - 4:o]\n new_parent_q = quaternion_multiply(actual_delta_q, parent_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n\n\n\n def apply_constraint_with_vector(self, node, parent_node, frame, o):\n next_node = self.bones[node].child\n old_pos = self.bones[next_node].position\n old_q = frame[o:o + 4]\n twist_q, swing_q = self.skeleton.nodes[node].joint_constraint.split(old_q)\n frame[o:o + 4] = swing_q\n\n # get global delta quaternion to apply on parent\n parent_pos = self.skeleton.nodes[parent_node].get_global_position(frame)\n next_node_pos = self.skeleton.nodes[next_node].get_global_position(frame)\n position_delta = np.linalg.norm(old_pos-next_node_pos)\n print(\"position delta\", position_delta)\n if position_delta < 0.001:\n return\n desired_offset = normalize(old_pos - parent_pos)\n offset = normalize(next_node_pos - parent_pos)\n delta_q = quaternion_from_vector_to_vector(offset, desired_offset)\n print(\"deltaq\", parent_node, node, next_node, next_node_pos, old_pos, twist_q, swing_q)\n # apply global delta on parent\n if True:\n global_m = self.skeleton.nodes[parent_node].get_global_matrix(frame)\n global_parent_q = normalize(quaternion_from_matrix(global_m))\n new_parent_q = quaternion_multiply(global_parent_q, delta_q)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = to_local_cos(self.skeleton, parent_node, frame, new_parent_q)\n else:\n local_parent_q = frame[o - 4:o]\n local_delta = to_local_cos(self.skeleton, parent_node, frame, delta_q)\n\n new_parent_q = quaternion_multiply(local_parent_q, local_delta)\n new_parent_q = normalize(new_parent_q)\n frame[o - 4:o] = new_parent_q\n print(new_parent_q, local_parent_q, local_delta, delta_q)\n\n\n\n def get_global_rotation(self, node, next_node):\n \"\"\" FIXME works only when identity frame coordinate system is the same as the offset \"\"\"\n\n #print(\"set\", node, next_node)\n target = self.bones[next_node].position - self.bones[node].position\n next_offset = np.array(self.skeleton.nodes[next_node].offset)\n target_len = np.linalg.norm(target)\n if target_len > 0:\n target /= target_len\n next_offset /= np.linalg.norm(next_offset)\n\n # 1. sum over offsets of static nodes\n local_offset = self.get_child_offset(node, next_node)\n actual_offset = next_offset + local_offset\n actual_offset /= np.linalg.norm(actual_offset) # actual_offset = [0.5, 0.5,0]\n # 2. 
get global rotation\n q = quaternion_from_vector_to_vector(actual_offset, target)\n return q\n\n else:\n #print(\"skip\", target_len, self.bones[next_node].position)\n return [1, 0, 0, 0]\n\n def get_global_rotation_non_cos(self, node, next_node, frame):\n target_position = self.bones[next_node].position\n root_pos = self.skeleton.nodes[node].get_global_position(frame)\n #src_dir = np.dot(m, offset)\n end_effector_pos = self.skeleton.nodes[next_node].get_global_position(frame)\n src_delta = end_effector_pos - root_pos\n src_dir = src_delta / np.linalg.norm(src_delta)\n\n target_delta = target_position - root_pos\n target_dir = target_delta / np.linalg.norm(target_delta)\n\n #q = quaternion_from_vector_to_vector(offset, target_dir)\n q = quaternion_from_vector_to_vector2(src_dir, target_dir)\n q = normalize(q)\n return q\n\n\n def get_child_offset(self, node, child_node):\n \"\"\"\n \"\"\"\n actual_offset = np.array([0, 0, 0], np.float)\n while node is not None and self.skeleton.nodes[node].children[0].node_name != child_node:\n local_offset = np.array(self.skeleton.nodes[node].children[0].offset)\n local_offset /= np.linalg.norm(local_offset)\n actual_offset = actual_offset + local_offset\n node = self.skeleton.nodes[node].children[0].node_name\n if len(self.skeleton.nodes[node].children) < 1:\n node = None\n\n return actual_offset\n\n def get_joint_parameters_global(self):\n n_joints = len(self.node_order)-1\n frame = np.zeros(n_joints*4+3)\n o = self.frame_offset\n for idx, node in enumerate(self.node_order[:-1]):\n offset = np.array(self.skeleton.nodes[node].children[0].offset)\n offset /= np.linalg.norm(offset)\n next_node = self.bones[node].child\n if idx == 0:\n if self.skeleton.root == node:\n dir_vector = self.bones[next_node].position\n dir_vector_len = np.linalg.norm(dir_vector)\n if dir_vector_len > 0 and np.linalg.norm(offset) > 0:\n dir_vector /= dir_vector_len\n q = quaternion_from_vector_to_vector(offset, dir_vector)\n frame[o:o + 4] = q\n else:\n print(\"work around\", offset,dir_vector_len, node)\n frame[o:o + 4] = [1, 0, 0, 0]\n else:\n print(\"work root around\")\n frame[o:o + 4] = [1, 0, 0, 0]\n\n else:\n q = self.get_global_rotation(node, next_node)\n frame[o:o+4] =q\n o += 4\n print(frame)\n return frame\n\n def get_global_positions(self):\n position_dict = dict()\n for node in self.node_order:\n position_dict[node] = self.bones[node].position\n return position_dict\n\n\n def get_end_effector_position(self):\n root_node = self.node_order[-1]\n return self.bones[root_node].position\n\n def get_next_nodes(self, next_nodes):\n for idx, n in enumerate(self.node_order):\n if idx+1 < len(self.node_order):\n next_nodes[n] = self.node_order[idx+1]\n else:\n next_nodes[n] = None\n"
] | [
[
"numpy.dot",
"numpy.nonzero",
"numpy.linalg.inv",
"numpy.linalg.norm",
"numpy.cross",
"numpy.array",
"numpy.array_equiv",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
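The fabrik_chain.py record above implements FABRIK on top of a skeleton class, with joint constraints and local coordinate systems. As a point of reference only, the sketch below shows the bare forward/backward reaching passes the class builds on, operating on plain 3-D joint positions with the same interpolation formula as the module's backward() and forward() methods; the function name, the toy chain and the target are illustrative assumptions and not part of the module's API.

import numpy as np

def fabrik(points, target, tol=1e-3, max_iter=100):
    points = [np.asarray(p, dtype=float) for p in points]
    lengths = [np.linalg.norm(points[i + 1] - points[i]) for i in range(len(points) - 1)]
    root = points[0].copy()
    target = np.asarray(target, dtype=float)
    # NOTE: the unreachable case (target farther than the chain length) is handled
    # separately in the module by orienting every bone toward the target; omitted here.
    for _ in range(max_iter):
        if np.linalg.norm(points[-1] - target) < tol:
            break
        # backward pass: pin the end effector to the target, walk toward the root
        points[-1] = target.copy()
        for i in range(len(points) - 2, -1, -1):
            r = np.linalg.norm(points[i + 1] - points[i])
            l = lengths[i] / r
            points[i] = (1 - l) * points[i + 1] + l * points[i]
        # forward pass: pin the root back in place, walk toward the end effector
        points[0] = root.copy()
        for i in range(len(points) - 1):
            r = np.linalg.norm(points[i + 1] - points[i])
            l = lengths[i] / r
            points[i + 1] = (1 - l) * points[i] + l * points[i + 1]
    return points

chain = [[0, 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]]
solved = fabrik(chain, target=[2.0, 1.5, 0.5])
print(np.round(solved[-1], 3))  # lands near the target after a few iterations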
apayeur/GIF-Ca | [
"7ba9e715d79aa3a733f417f7dfce81842041e7ec"
] | [
"Summary_DynamicIV.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# List separate experiments in separate folder\n# data_folders_for_separate_experiments = ['tenth_set']\ndata_folders_for_separate_experiments = ['seventh_set', 'eighth_set', 'ninth_set', 'tenth_set']\n\n# For all experiments, extract the cell names\nCellNames = {}\nfor experiment_folder in data_folders_for_separate_experiments:\n folder_path = './' + experiment_folder + '/'\n CellNames[experiment_folder] = [name for name in os.listdir(folder_path) if\n os.path.isdir(folder_path + name) and '_5HT' in name]\nCellNames['eighth_set'].remove('DRN165_5HT') # problematic cell\nCellNames['eighth_set'].remove('DRN094_5HT') # problematic cell\nCellNames['eighth_set'].remove('DRN156_5HT') # problematic cell\nCellNames['seventh_set'].remove('DRN543_5HT') # problematic cell\nCellNames['ninth_set'].remove('DRN654_5HT') # problematic cell\nCellNames['tenth_set'].remove('DRN656_5HT') # problematic cell\n\n\ndata = np.array([[0,0,0,0,0]])\nfor experiment_folder in data_folders_for_separate_experiments:\n for cell_name in CellNames[experiment_folder]:\n path_data = '../../../Dropbox/Recherches/Raphe/GIF-Ca/Results/' + cell_name + '/'\n path_results = '../../../Dropbox/Recherches/Raphe/GIF-Ca/Results/'\n data = np.concatenate((data, (np.loadtxt(path_data + 'params_IV.dat', delimiter='\\n')).reshape((1,5))), axis=0)\n\nEL = data[1:,0]\ntaum = data[1:,1]\nDeltaV = data[1:,2]\nV_T = data[1:,3]\nC = data[1:,4]\n\n\n\n\n\nfig = plt.figure(1, figsize=(8,3))\n#fig.suptitle('EIF model parameters for 5-HT neurons', y=0.99)\nax1 = fig.add_subplot(141)\nax1.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off') # labels along the bottom edge are off\nax1.boxplot(EL, showmeans=True)\nplt.ylabel(r'$E_L$ (mV)')\nax2 = fig.add_subplot(142)\nax2.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off') # labels along the bottom edge are of\nplt.ylabel(r'$\\tau_m$ (ms)')\nax2.boxplot(taum, showmeans=True)\nax3 = fig.add_subplot(143)\nax3.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off') # labels along the bottom edge are off\nplt.ylabel(r'$\\Delta V$ (mV)')\nax3.boxplot(DeltaV, showmeans=True)\nax4 = fig.add_subplot(144)\nax4.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off') # labels along the bottom edge are off\nplt.ylabel(r'$V_T$ (mV)')\nax4.boxplot(V_T, showmeans=True)\nfig.tight_layout()\nplt.savefig(path_results+'DynamicIV_Params5HT.png', format='png')\nplt.close(fig)\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
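Summary_DynamicIV.py above collects one row of EIF parameters (E_L, tau_m, Delta V, V_T, C) per cell and summarizes each column as a box plot. A compact sketch of that summary layout follows; the random numbers stand in for the per-cell params_IV.dat files, and the loop over axes is an assumption that merely de-duplicates the repeated tick_params/boxplot calls of the original.

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical parameter matrix: one row per cell, one column per EIF parameter.
rng = np.random.default_rng(0)
params = rng.normal(loc=[-60.0, 20.0, 2.0, -45.0],
                    scale=[5.0, 4.0, 0.5, 3.0], size=(25, 4))
labels = [r'$E_L$ (mV)', r'$\tau_m$ (ms)', r'$\Delta V$ (mV)', r'$V_T$ (mV)']

fig, axes = plt.subplots(1, 4, figsize=(8, 3))
for ax, column, label in zip(axes, params.T, labels):
    ax.boxplot(column, showmeans=True)   # one box per parameter, mean marker shown
    ax.set_ylabel(label)
    ax.set_xticks([])                    # the x position carries no information here
fig.tight_layout()
plt.savefig('DynamicIV_Params5HT_sketch.png')
plt.close(fig)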
jkochNU/scqubits | [
"9a405759665b96284e9a449188935cd06b42d580",
"9a405759665b96284e9a449188935cd06b42d580",
"9a405759665b96284e9a449188935cd06b42d580"
] | [
"scqubits/core/zeropi.py",
"scqubits/tests/test_fluxqubit.py",
"scqubits/core/explorer.py"
] | [
"# zeropi.py\n#\n# This file is part of scqubits.\n#\n# Copyright (c) 2019, Jens Koch and Peter Groszkowski\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n############################################################################\n\nimport numpy as np\nfrom scipy import sparse\n\nimport scqubits.core.constants as constants\nimport scqubits.utils.plotting as plot\nfrom scqubits.core.discretization import Grid1d, GridSpec\nfrom scqubits.core.qubit_base import QubitBaseClass\nfrom scqubits.core.storage import WaveFunctionOnGrid\nfrom scqubits.utils.misc import is_numerical, key_in_grid1d\nfrom scqubits.utils.spectrum_utils import standardize_phases, order_eigensystem\n\n\n# -Symmetric 0-pi qubit, phi discretized, theta in charge basis---------------------------------------------------------\n\nclass ZeroPi(QubitBaseClass):\n r\"\"\"Zero-Pi Qubit\n\n | [1] Brooks et al., Physical Review A, 87(5), 052306 (2013). http://doi.org/10.1103/PhysRevA.87.052306\n | [2] Dempster et al., Phys. Rev. B, 90, 094518 (2014). http://doi.org/10.1103/PhysRevB.90.094518\n | [3] Groszkowski et al., New J. Phys. 20, 043053 (2018). https://doi.org/10.1088/1367-2630/aab7cd\n\n Zero-Pi qubit without coupling to the `zeta` mode, i.e., no disorder in `EC` and `EL`,\n see Eq. (4) in Groszkowski et al., New J. Phys. 20, 043053 (2018),\n\n .. math::\n\n H &= -2E_\\text{CJ}\\partial_\\phi^2+2E_{\\text{C}\\Sigma}(i\\partial_\\theta-n_g)^2\n +2E_{C\\Sigma}dC_J\\,\\partial_\\phi\\partial_\\theta\n -2E_\\text{J}\\cos\\theta\\cos(\\phi-\\varphi_\\text{ext}/2)+E_L\\phi^2\\\\\n &\\qquad +2E_\\text{J} + E_J dE_J \\sin\\theta\\sin(\\phi-\\phi_\\text{ext}/2).\n\n Formulation of the Hamiltonian matrix proceeds by discretization of the `phi` variable, and using charge basis for\n the `theta` variable.\n\n Parameters\n ----------\n EJ: float\n mean Josephson energy of the two junctions\n EL: float\n inductive energy of the two (super-)inductors\n ECJ: float\n charging energy associated with the two junctions\n EC: float or None\n charging energy of the large shunting capacitances; set to `None` if `ECS` is provided instead\n dEJ: float\n relative disorder in EJ, i.e., (EJ1-EJ2)/EJavg\n dCJ: float\n relative disorder of the junction capacitances, i.e., (CJ1-CJ2)/CJavg\n ng: float\n offset charge associated with theta\n flux: float\n magnetic flux through the circuit loop, measured in units of flux quanta (h/2e)\n grid: Grid1d object\n specifies the range and spacing of the discretization lattice\n ncut: int\n charge number cutoff for `n_theta`, `n_theta = -ncut, ..., ncut`\n ECS: float, optional\n total charging energy including large shunting capacitances and junction capacitances; may be provided instead\n of EC\n truncated_dim: int, optional\n desired dimension of the truncated quantum system\n \"\"\"\n\n def __init__(self, EJ, EL, ECJ, EC, ng, flux, grid, ncut, dEJ=0, dCJ=0, ECS=None, truncated_dim=None):\n self.EJ = EJ\n self.EL = EL\n self.ECJ = ECJ\n\n if EC is None and ECS is None:\n raise ValueError(\"Argument missing: must either provide EC or ECS\")\n if EC and ECS:\n raise ValueError(\"Argument error: can only provide either EC or ECS\")\n if EC:\n self.EC = EC\n else:\n self.EC = 1 / (1 / ECS - 1 / self.ECJ)\n\n self.dEJ = dEJ\n self.dCJ = dCJ\n self.ng = ng\n self.flux = flux\n self.grid = grid\n self.ncut = ncut\n self.truncated_dim = truncated_dim\n self._sys_type = '0-pi'\n self._evec_dtype = np.complex_\n 
self._default_grid = Grid1d(-np.pi / 2, 3 * np.pi / 2, 100) # for theta, needed for plotting wavefunction\n\n def _evals_calc(self, evals_count):\n hamiltonian_mat = self.hamiltonian()\n evals = sparse.linalg.eigsh(hamiltonian_mat, k=evals_count, return_eigenvectors=False, which='SA')\n return np.sort(evals)\n\n def _esys_calc(self, evals_count):\n hamiltonian_mat = self.hamiltonian()\n evals, evecs = sparse.linalg.eigsh(hamiltonian_mat, k=evals_count, return_eigenvectors=True, which='SA')\n # TODO consider normalization of zeropi wavefunctions\n # evecs /= np.sqrt(self.grid.grid_spacing())\n evals, evecs = order_eigensystem(evals, evecs)\n return evals, evecs\n\n def get_ECS(self):\n return 1 / (1 / self.EC + 1 / self.ECJ)\n\n def set_ECS(self, value):\n raise ValueError(\"It's not possible to directly set ECS. Instead one can set EC or ECJ,\\nor use \"\n \"set_EC_via_ECS() to update EC indirectly.\")\n\n ECS = property(get_ECS, set_ECS)\n\n def set_EC_via_ECS(self, ECS):\n \"\"\"Helper function to set `EC` by providing `ECS`, keeping `ECJ` constant.\"\"\"\n self.EC = 1 / (1 / ECS - 1 / self.ECJ)\n\n def hilbertdim(self):\n \"\"\"Returns Hilbert space dimension\"\"\"\n return self.grid.pt_count * (2 * self.ncut + 1)\n\n def potential(self, phi, theta):\n \"\"\"\n Parameters\n ----------\n phi: float\n theta: float\n\n Returns\n -------\n float\n value of the potential energy evaluated at phi, theta\n \"\"\"\n return (-2.0 * self.EJ * np.cos(theta) * np.cos(phi - 2.0 * np.pi * self.flux / 2.0)\n + self.EL * phi ** 2 + 2.0 * self.EJ\n + self.EJ * self.dEJ * np.sin(theta) * np.sin(phi - 2.0 * np.pi * self.flux / 2.0))\n\n def sparse_kinetic_mat(self):\n \"\"\"\n Kinetic energy portion of the Hamiltonian.\n TODO: update this method to use single-variable operator methods\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the kinetic energy operator\n \"\"\"\n pt_count = self.grid.pt_count\n dim_theta = 2 * self.ncut + 1\n identity_phi = sparse.identity(pt_count, format='csc', dtype=np.complex_)\n identity_theta = sparse.identity(dim_theta, format='csc', dtype=np.complex_)\n\n kinetic_matrix_phi = self.grid.second_derivative_matrix(prefactor=-2.0 * self.ECJ)\n\n diag_elements = 2.0 * self.ECS * np.square(np.arange(-self.ncut + self.ng, self.ncut + 1 + self.ng))\n kinetic_matrix_theta = sparse.dia_matrix((diag_elements, [0]), shape=(dim_theta, dim_theta)).tocsc()\n\n kinetic_matrix = (sparse.kron(kinetic_matrix_phi, identity_theta, format='csc')\n + sparse.kron(identity_phi, kinetic_matrix_theta, format='csc'))\n\n kinetic_matrix -= 2.0 * self.ECS * self.dCJ * self.i_d_dphi_operator() * self.n_theta_operator()\n return kinetic_matrix\n\n def sparse_potential_mat(self):\n \"\"\"\n Potential energy portion of the Hamiltonian.\n TODO: update this method to use single-variable operator methods\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the potential energy operator\n \"\"\"\n pt_count = self.grid.pt_count\n grid_linspace = self.grid.make_linspace()\n dim_theta = 2 * self.ncut + 1\n\n phi_inductive_vals = self.EL * np.square(grid_linspace)\n phi_inductive_potential = sparse.dia_matrix((phi_inductive_vals, [0]), shape=(pt_count, pt_count)).tocsc()\n phi_cos_vals = np.cos(grid_linspace - 2.0 * np.pi * self.flux / 2.0)\n phi_cos_potential = sparse.dia_matrix((phi_cos_vals, [0]), shape=(pt_count, pt_count)).tocsc()\n phi_sin_vals = np.sin(grid_linspace - 2.0 * np.pi * self.flux / 2.0)\n phi_sin_potential = sparse.dia_matrix((phi_sin_vals, [0]), 
shape=(pt_count, pt_count)).tocsc()\n\n theta_cos_potential = (-self.EJ\n * (sparse.dia_matrix(([1.0] * dim_theta, [-1]), shape=(dim_theta, dim_theta)) +\n sparse.dia_matrix(([1.0] * dim_theta, [1]), shape=(dim_theta, dim_theta)))).tocsc()\n potential_mat = (sparse.kron(phi_cos_potential, theta_cos_potential, format='csc')\n + sparse.kron(phi_inductive_potential, self._identity_theta(), format='csc')\n + 2 * self.EJ * sparse.kron(self._identity_phi(), self._identity_theta(), format='csc'))\n potential_mat += (self.EJ * self.dEJ * sparse.kron(phi_sin_potential, self._identity_theta(), format='csc')\n * self.sin_theta_operator())\n return potential_mat\n\n def hamiltonian(self):\n \"\"\"Calculates Hamiltonian in basis obtained by discretizing phi and employing charge basis for theta.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the potential energy operator\n \"\"\"\n return self.sparse_kinetic_mat() + self.sparse_potential_mat()\n\n def sparse_d_potential_d_flux_mat(self):\n r\"\"\"Calculates a of the potential energy w.r.t flux, at the current value of flux,\n as stored in the object.\n\n The flux is assumed to be given in the units of the ratio \\Phi_{ext}/\\Phi_0.\n So if \\frac{\\partial U}{ \\partial \\Phi_{\\rm ext}}, is needed, the expression returned\n by this function, needs to be multiplied by 1/\\Phi_0.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the derivative of the potential energy\n \"\"\"\n op_1 = sparse.kron(self._sin_phi_operator(x=- 2.0 * np.pi * self.flux / 2.0),\n self._cos_theta_operator(), format='csc')\n op_2 = sparse.kron(self._cos_phi_operator(x=- 2.0 * np.pi * self.flux / 2.0),\n self._sin_theta_operator(), format='csc')\n return - 2.0 * np.pi * self.EJ * op_1 - np.pi * self.EJ * self.dEJ * op_2\n\n def d_hamiltonian_d_flux(self):\n r\"\"\"Calculates a derivative of the Hamiltonian w.r.t flux, at the current value of flux,\n as stored in the object.\n\n The flux is assumed to be given in the units of the ratio \\Phi_{ext}/\\Phi_0.\n So if \\frac{\\partial H}{ \\partial \\Phi_{\\rm ext}}, is needed, the expression returned\n by this function, needs to be multiplied by 1/\\Phi_0.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n matrix representing the derivative of the Hamiltonian\n \"\"\"\n return self.sparse_d_potential_d_flux_mat()\n\n def _identity_phi(self):\n r\"\"\"\n Identity operator acting only on the `\\phi` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n pt_count = self.grid.pt_count\n return sparse.identity(pt_count, format='csc')\n\n def _identity_theta(self):\n r\"\"\"\n Identity operator acting only on the `\\theta` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n dim_theta = 2 * self.ncut + 1\n return sparse.identity(dim_theta, format='csc')\n\n def i_d_dphi_operator(self):\n r\"\"\"\n Operator :math:`i d/d\\varphi`.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n return sparse.kron(self.grid.first_derivative_matrix(prefactor=1j), self._identity_theta(), format='csc')\n\n def _phi_operator(self):\n r\"\"\"\n Operator :math:`\\varphi`, acting only on the `\\varphi` Hilbert subspace.\n\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n pt_count = self.grid.pt_count\n\n phi_matrix = sparse.dia_matrix((pt_count, pt_count), dtype=np.complex_)\n diag_elements = self.grid.make_linspace()\n phi_matrix.setdiag(diag_elements)\n return phi_matrix\n\n def phi_operator(self):\n r\"\"\"\n Operator :math:`\\varphi`.\n\n Returns\n 
-------\n scipy.sparse.csc_matrix\n \"\"\"\n return sparse.kron(self._phi_operator(), self._identity_theta(), format='csc')\n\n def n_theta_operator(self):\n r\"\"\"\n Operator :math:`n_\\theta`.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n dim_theta = 2 * self.ncut + 1\n diag_elements = np.arange(-self.ncut, self.ncut + 1)\n n_theta_matrix = sparse.dia_matrix((diag_elements, [0]), shape=(dim_theta, dim_theta)).tocsc()\n return sparse.kron(self._identity_phi(), n_theta_matrix, format='csc')\n\n def _sin_phi_operator(self, x=0):\n r\"\"\"\n Operator :math:`\\sin(\\phi + x)`, acting only on the `\\phi` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n pt_count = self.grid.pt_count\n\n vals = np.sin(self.grid.make_linspace() + x)\n sin_phi_matrix = sparse.dia_matrix((vals, [0]), shape=(pt_count, pt_count)).tocsc()\n return sin_phi_matrix\n\n def _cos_phi_operator(self, x=0):\n r\"\"\"\n Operator :math:`\\cos(\\phi + x)`, acting only on the `\\phi` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n pt_count = self.grid.pt_count\n\n vals = np.cos(self.grid.make_linspace() + x)\n cos_phi_matrix = sparse.dia_matrix((vals, [0]), shape=(pt_count, pt_count)).tocsc()\n return cos_phi_matrix\n\n def _cos_theta_operator(self):\n r\"\"\"\n Operator :math:`\\cos(\\theta)`, acting only on the `\\theta` Hilbert subspace.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n dim_theta = 2 * self.ncut + 1\n cos_theta_matrix = 0.5 * (sparse.dia_matrix(([1.0] * dim_theta, [-1]), shape=(dim_theta, dim_theta)) +\n sparse.dia_matrix(([1.0] * dim_theta, [1]), shape=(dim_theta, dim_theta))).tocsc()\n return cos_theta_matrix\n\n def cos_theta_operator(self):\n r\"\"\"\n Operator :math:`\\cos(\\theta)`.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n return sparse.kron(self._identity_phi(), self._cos_phi_operator(), format='csc')\n\n def _sin_theta_operator(self):\n r\"\"\"\n Operator :math:`\\sin(\\theta)`, acting only on the `\\theta` Hilbert space.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n dim_theta = 2 * self.ncut + 1\n sin_theta_matrix = (-0.5 * 1j\n * (sparse.dia_matrix(([1.0] * dim_theta, [1]), shape=(dim_theta, dim_theta)) -\n sparse.dia_matrix(([1.0] * dim_theta, [-1]), shape=(dim_theta, dim_theta))).tocsc())\n return sin_theta_matrix\n\n def sin_theta_operator(self):\n r\"\"\"\n Operator :math:`\\sin(\\theta)`.\n\n Returns\n -------\n scipy.sparse.csc_matrix\n \"\"\"\n return sparse.kron(self._identity_phi(), self._sin_theta_operator(), format='csc')\n\n def plot_potential(self, theta_grid=None, contour_vals=None, **kwargs):\n \"\"\"Draw contour plot of the potential energy.\n\n Parameters\n ----------\n theta_grid: Grid1d, optional\n used for setting a custom grid for theta; if None use self._default_grid\n contour_vals: list, optional\n **kwargs:\n plotting parameters\n \"\"\"\n theta_grid = self._try_defaults(theta_grid)\n\n x_vals = self.grid.make_linspace()\n y_vals = theta_grid.make_linspace()\n return plot.contours(x_vals, y_vals, self.potential, contour_vals=contour_vals, **kwargs)\n\n def wavefunction(self, esys=None, which=0, theta_grid=None):\n \"\"\"Returns a zero-pi wave function in `phi`, `theta` basis\n\n Parameters\n ----------\n esys: ndarray, ndarray\n eigenvalues, eigenvectors\n which: int, optional\n index of desired wave function (default value = 0)\n theta_grid: Grid1d, optional\n used for setting a custom grid for theta; if None use self._default_grid\n\n Returns\n -------\n 
WaveFunctionOnGrid object\n \"\"\"\n evals_count = max(which + 1, 3)\n if esys is None:\n _, evecs = self.eigensys(evals_count)\n else:\n _, evecs = esys\n\n theta_grid = self._try_defaults(theta_grid)\n dim_theta = 2 * self.ncut + 1\n state_amplitudes = evecs[:, which].reshape(self.grid.pt_count, dim_theta)\n\n # Calculate psi_{phi, theta} = sum_n state_amplitudes_{phi, n} A_{n, theta}\n # where a_{n, theta} = 1/sqrt(2 pi) e^{i n theta}\n n_vec = np.arange(-self.ncut, self.ncut + 1)\n theta_vec = theta_grid.make_linspace()\n a_n_theta = np.exp(1j * np.outer(n_vec, theta_vec)) / (2 * np.pi) ** 0.5\n wavefunc_amplitudes = np.matmul(state_amplitudes, a_n_theta).T\n wavefunc_amplitudes = standardize_phases(wavefunc_amplitudes)\n\n grid2d = GridSpec(np.asarray([[self.grid.min_val, self.grid.max_val, self.grid.pt_count],\n [theta_grid.min_val, theta_grid.max_val, theta_grid.pt_count]]))\n return WaveFunctionOnGrid(grid2d, wavefunc_amplitudes)\n\n def plot_wavefunction(self, esys=None, which=0, theta_grid=None, mode='abs', zero_calibrate=True, **kwargs):\n \"\"\"Plots 2d phase-basis wave function.\n\n Parameters\n ----------\n esys: ndarray, ndarray\n eigenvalues, eigenvectors as obtained from `.eigensystem()`\n which: int, optional\n index of wave function to be plotted (default value = (0)\n theta_grid: Grid1d, optional\n used for setting a custom grid for theta; if None use self._default_grid\n mode: str, optional\n choices as specified in `constants.MODE_FUNC_DICT` (default value = 'abs_sqr')\n zero_calibrate: bool, optional\n if True, colors are adjusted to use zero wavefunction amplitude as the neutral color in the palette\n **kwargs:\n plot options\n\n Returns\n -------\n Figure, Axes\n \"\"\"\n theta_grid = self._try_defaults(theta_grid)\n\n amplitude_modifier = constants.MODE_FUNC_DICT[mode]\n wavefunc = self.wavefunction(esys, theta_grid=theta_grid, which=which)\n wavefunc.amplitudes = amplitude_modifier(wavefunc.amplitudes)\n return plot.wavefunction2d(wavefunc, zero_calibrate=zero_calibrate, **kwargs)\n\n def set_params_from_dict(self, meta_dict):\n \"\"\"Set object parameters by given metadata dictionary\n\n Parameters\n ----------\n meta_dict: dict\n \"\"\"\n for param_name, param_value in meta_dict.items():\n if key_in_grid1d(param_name):\n setattr(self.grid, param_name, param_value)\n elif is_numerical(param_value):\n setattr(self, param_name, param_value)\n\n @classmethod\n def create_from_dict(cls, meta_dict):\n \"\"\"Set object parameters by given metadata dictionary\n\n Parameters\n ----------\n meta_dict: dict\n \"\"\"\n filtered_dict = {}\n grid_dict = {}\n for param_name, param_value in meta_dict.items():\n if key_in_grid1d(param_name):\n grid_dict[param_name] = param_value\n elif is_numerical(param_value):\n filtered_dict[param_name] = param_value\n\n grid = Grid1d(**grid_dict)\n filtered_dict['grid'] = grid\n return cls(**filtered_dict)\n",
"# test_fluxqubit.py\n# meant to be run with 'pytest'\n#\n# This file is part of scqubits.\n#\n# Copyright (c) 2019, Jens Koch and Peter Groszkowski\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n############################################################################\n\nimport numpy as np\n\nimport scqubits.settings\nfrom scqubits import FluxQubit\nfrom scqubits.core.constants import FileType\nfrom scqubits.tests.conftest import StandardTests\n\nscqubits.settings.FILE_FORMAT = FileType.h5\n\n\nclass TestFluxQubit(StandardTests):\n @classmethod\n def setup_class(cls):\n cls.qbt = None\n cls.qbt_type = FluxQubit\n cls.file_str = 'fluxqubit'\n cls.op1_str = 'n_1_operator'\n cls.op2_str = 'n_2_operator'\n cls.param_name = 'flux'\n cls.param_list = np.linspace(0.45, 0.55, 50)\n",
"# explorer.py\n#\n# This file is part of scqubits.\n#\n# Copyright (c) 2019, Jens Koch and Peter Groszkowski\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n############################################################################\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ntry:\n import ipywidgets\nexcept ImportError:\n raise Exception(\"ImportError: failed to import ipywidgets. For use of scqubits.explorer,\"\n \"ipywidgets must be installed\")\n\ntry:\n from IPython.display import display\nexcept ImportError:\n raise Exception(\"ImportError: failed to import IPython. For use of scqubits.explorer,\"\n \"IPython must be installed\")\n\nimport scqubits.core.sweep_generators as swp\nimport scqubits.utils.explorer_panels as panels\n\n\nclass Explorer:\n \"\"\"\n This class allows interactive exploration of coupled quantum systems. The generate() method pre-calculates spectral\n data as a function of a given parameter, which can then be displayed and modified by sliders (when inside jupyter\n notebook or jupyter lab).\n\n Parameters\n ----------\n sweep: ParameterSweep\n evals_count: int\n figsize: tuple(int,int), optional\n \"\"\"\n def __init__(self, sweep, evals_count, figsize=(10, 8)):\n self.param_name = sweep.param_name\n self.param_vals = sweep.param_vals\n self.param_count = sweep.param_count\n self.sweep = sweep\n self.evals_count = evals_count\n self.figsize = figsize\n\n swp.generate_chi_sweep(sweep)\n swp.generate_charge_matrixelem_sweep(sweep)\n\n def plot_explorer_panels(self, param_val, photonnumber, initial_index, final_index, qbt_index, osc_index):\n \"\"\"\n Create a panel of plots (bare spectra, bare wavefunctions, dressed spectrum, n-photon qubit transitions, chi).\n\n Parameters\n ----------\n param_val: float\n current value of the external parameter\n photonnumber: int\n photon number n used for display of n-photon qubit transition\n initial_index: int\n dressed-state index of the initial state used in transition\n final_index: int\n dressed-state index of the final state used in transition (in dressed spectrum display)\n qbt_index: int\n index of qubit subsystem for which matrix elements and chi's are displayed\n osc_index: int\n index of oscillator subsystem for which chi's are calculated\n\n Returns\n -------\n Figure, Axes: matplotlib.Figure, matplotlib.Axes\n \"\"\"\n def fig_ax(index):\n return fig, axes_list_flattened[index]\n\n param_index = np.searchsorted(self.param_vals, param_val)\n param_val = self.param_vals[param_index]\n\n initial_bare = self.sweep.lookup.bare_index(initial_index, param_index)\n final_bare = self.sweep.lookup.bare_index(final_index, param_index)\n energy_ground = self.sweep.lookup.energy_dressed_index(0, param_index)\n energy_initial = self.sweep.lookup.energy_dressed_index(initial_index, param_index) - energy_ground\n energy_final = self.sweep.lookup.energy_dressed_index(final_index, param_index) - energy_ground\n qbt_subsys = self.sweep.hilbertspace[qbt_index]\n\n nrows = 3\n ncols = 2\n fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=self.figsize)\n axes_list_flattened = [elem for sublist in axs for elem in sublist]\n\n # Panel 1 ----------------------------------\n panels.display_bare_spectrum(self.sweep, qbt_subsys, param_val, fig_ax(0))\n\n # Panels 2 and 6----------------------------\n if type(qbt_subsys).__name__ in ['Transmon', 'Fluxonium']: # do not plot wavefunctions if multi-dimensional\n 
panels.display_bare_wavefunctions(self.sweep, qbt_subsys, param_val, fig_ax(1))\n panels.display_charge_matrixelems(self.sweep, initial_bare, qbt_subsys, param_val, fig_ax(5))\n\n # Panel 3 ----------------------------------\n panels.display_dressed_spectrum(self.sweep, initial_bare, final_bare, energy_initial, energy_final, param_val,\n fig_ax(2))\n\n # Panel 4 ----------------------------------\n panels.display_n_photon_qubit_transitions(self.sweep, photonnumber, initial_bare, param_val, fig_ax(3))\n\n # Panel 5 ----------------------------------\n panels.display_chi_01(self.sweep, qbt_index, osc_index, param_index, fig_ax(4))\n\n fig.tight_layout()\n return fig, axs\n\n def interact(self):\n \"\"\"Drives the interactive display of the plot explorer panels\"\"\"\n param_min = self.param_vals[0]\n param_max = self.param_vals[-1]\n param_step = self.param_vals[1] - self.param_vals[0]\n\n qbt_indices = [index for (index, subsystem) in self.sweep.hilbertspace.qbt_subsys_list]\n osc_indices = [index for (index, subsystem) in self.sweep.hilbertspace.osc_subsys_list]\n\n param_slider = ipywidgets.FloatSlider(min=param_min, max=param_max, step=param_step,\n description=self.param_name, continuous_update=False)\n photon_slider = ipywidgets.IntSlider(value=1, min=1, max=4, description='photon number')\n initial_slider = ipywidgets.IntSlider(value=0, min=0, max=self.evals_count, description='initial state index')\n final_slider = ipywidgets.IntSlider(value=1, min=1, max=self.evals_count, description='final state index')\n\n qbt_dropdown = ipywidgets.Dropdown(options=qbt_indices, description='qubit subsys')\n osc_dropdown = ipywidgets.Dropdown(options=osc_indices, description='oscillator subsys')\n\n def update_min_final_index(*args):\n final_slider.min = initial_slider.value + 1\n\n initial_slider.observe(update_min_final_index, 'value')\n\n out = ipywidgets.interactive_output(self.plot_explorer_panels,\n {'param_val': param_slider,\n 'photonnumber': photon_slider,\n 'initial_index': initial_slider,\n 'final_index': final_slider,\n 'qbt_index': qbt_dropdown,\n 'osc_index': osc_dropdown\n })\n\n left_box = ipywidgets.VBox([param_slider])\n mid_box = ipywidgets.VBox([initial_slider, final_slider, photon_slider])\n right_box = ipywidgets.VBox([qbt_dropdown, osc_dropdown])\n\n user_interface = ipywidgets.HBox([left_box, mid_box, right_box])\n display(user_interface, out)\n"
] | [
[
"numpy.square",
"numpy.asarray",
"numpy.arange",
"numpy.matmul",
"numpy.cos",
"numpy.sort",
"numpy.sin",
"scipy.sparse.identity",
"numpy.outer",
"scipy.sparse.kron",
"scipy.sparse.dia_matrix",
"scipy.sparse.linalg.eigsh"
],
[
"numpy.linspace"
],
[
"matplotlib.pyplot.subplots",
"numpy.searchsorted"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
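The scqubits record above (zeropi.py in particular) assembles its Hamiltonian from single-variable sparse operators: finite differences on the discretized phi grid, diagonal and banded matrices in the theta charge basis, combined with scipy.sparse.kron and diagonalized with eigsh(..., which='SA'). The sketch below reproduces only that construction pattern on a toy two-variable operator; the grid sizes, energies and the simplified Hamiltonian are illustrative assumptions, not the ZeroPi model itself.

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import eigsh

ncut = 10                      # charge cutoff for the theta-like variable
dim_theta = 2 * ncut + 1
pt_count = 60                  # number of grid points for the phi-like variable
phi = np.linspace(-np.pi, np.pi, pt_count)
dphi = phi[1] - phi[0]

# second derivative on the phi grid (Dirichlet ends), as a sparse banded matrix
d2 = sparse.diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(pt_count, pt_count)) / dphi**2
# charge-number operator squared on the theta subspace (diagonal in the charge basis)
n_sq = sparse.diags(np.arange(-ncut, ncut + 1) ** 2)
# cos(theta) in the charge basis: 1/2 on the two off-diagonals
cos_theta = sparse.diags([0.5, 0.5], [1, -1], shape=(dim_theta, dim_theta))
cos_phi = sparse.diags(np.cos(phi))

id_phi = sparse.identity(pt_count, format='csc')
id_theta = sparse.identity(dim_theta, format='csc')

EC, ECJ, EJ = 0.5, 10.0, 5.0   # toy energies, not calibrated to any device
H = (-2.0 * ECJ * sparse.kron(d2, id_theta, format='csc')
     + 2.0 * EC * sparse.kron(id_phi, n_sq, format='csc')
     - 2.0 * EJ * sparse.kron(cos_phi, cos_theta, format='csc'))

# lowest few eigenvalues of the real symmetric sparse operator, sorted as in _evals_calc
evals = np.sort(eigsh(H, k=4, which='SA', return_eigenvectors=False))
print(evals)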
MarcinKonowalczyk/scikit-learn | [
"8b18d4cbfc3a10ce85decec292d30470c69f40d7",
"8b18d4cbfc3a10ce85decec292d30470c69f40d7",
"8b18d4cbfc3a10ce85decec292d30470c69f40d7",
"8b18d4cbfc3a10ce85decec292d30470c69f40d7",
"3a69ade5cf068b640a0d6d1f176ff0d0e2040501",
"8b18d4cbfc3a10ce85decec292d30470c69f40d7",
"8b18d4cbfc3a10ce85decec292d30470c69f40d7",
"0d1e63366c6e361ba89b8588ccc26b01c47a5563",
"3a69ade5cf068b640a0d6d1f176ff0d0e2040501",
"8b18d4cbfc3a10ce85decec292d30470c69f40d7"
] | [
"examples/miscellaneous/plot_isotonic_regression.py",
"examples/ensemble/plot_bias_variance.py",
"examples/linear_model/plot_sgd_penalties.py",
"examples/cluster/plot_affinity_propagation.py",
"sklearn/preprocessing/_discretization.py",
"examples/neighbors/plot_nca_classification.py",
"examples/model_selection/plot_nested_cross_validation_iris.py",
"sklearn/linear_model/_omp.py",
"sklearn/ensemble/_stacking.py",
"examples/neighbors/plot_classification.py"
] | [
"\"\"\"\n===================\nIsotonic Regression\n===================\n\nAn illustration of the isotonic regression on generated data (non-linear\nmonotonic trend with homoscedastic uniform noise).\n\nThe isotonic regression algorithm finds a non-decreasing approximation of a\nfunction while minimizing the mean squared error on the training data. The\nbenefit of such a non-parametric model is that it does not assume any shape for\nthe target function besides monotonicity. For comparison a linear regression is\nalso presented.\n\nThe plot on the right-hand side shows the model prediction function that\nresults from the linear interpolation of thresholds points. The thresholds\npoints are a subset of the training input observations and their matching\ntarget values are computed by the isotonic non-parametric fit.\n\n\"\"\"\nprint(__doc__)\n\n# Author: Nelle Varoquaux <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# License: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.isotonic import IsotonicRegression\nfrom sklearn.utils import check_random_state\n\nn = 100\nx = np.arange(n)\nrs = check_random_state(0)\ny = rs.randint(-50, 50, size=(n,)) + 50.0 * np.log1p(np.arange(n))\n\n# %%\n# Fit IsotonicRegression and LinearRegression models:\n\nir = IsotonicRegression(out_of_bounds=\"clip\")\ny_ = ir.fit_transform(x, y)\n\nlr = LinearRegression()\nlr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression\n\n# %%\n# Plot results:\n\nsegments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]\nlc = LineCollection(segments, zorder=0)\nlc.set_array(np.ones(len(y)))\nlc.set_linewidths(np.full(n, 0.5))\n\nfig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 6))\n\nax0.plot(x, y, \"C0.\", markersize=12)\nax0.plot(x, y_, \"C1.-\", markersize=12)\nax0.plot(x, lr.predict(x[:, np.newaxis]), \"C2-\")\nax0.add_collection(lc)\nax0.legend((\"Training data\", \"Isotonic fit\", \"Linear fit\"), loc=\"lower right\")\nax0.set_title(\"Isotonic regression fit on noisy data (n=%d)\" % n)\n\nx_test = np.linspace(-10, 110, 1000)\nax1.plot(x_test, ir.predict(x_test), \"C1-\")\nax1.plot(ir.X_thresholds_, ir.y_thresholds_, \"C1.\", markersize=12)\nax1.set_title(\"Prediction function (%d thresholds)\" % len(ir.X_thresholds_))\n\nplt.show()\n\n# %%\n# Note that we explicitly passed `out_of_bounds=\"clip\"` to the constructor of\n# `IsotonicRegression` to control the way the model extrapolates outside of the\n# range of data observed in the training set. This \"clipping\" extrapolation can\n# be seen on the plot of the decision function on the right-hand.\n",
"\"\"\"\n============================================================\nSingle estimator versus bagging: bias-variance decomposition\n============================================================\n\nThis example illustrates and compares the bias-variance decomposition of the\nexpected mean squared error of a single estimator against a bagging ensemble.\n\nIn regression, the expected mean squared error of an estimator can be\ndecomposed in terms of bias, variance and noise. On average over datasets of\nthe regression problem, the bias term measures the average amount by which the\npredictions of the estimator differ from the predictions of the best possible\nestimator for the problem (i.e., the Bayes model). The variance term measures\nthe variability of the predictions of the estimator when fit over different\ninstances LS of the problem. Finally, the noise measures the irreducible part\nof the error which is due the variability in the data.\n\nThe upper left figure illustrates the predictions (in dark red) of a single\ndecision tree trained over a random dataset LS (the blue dots) of a toy 1d\nregression problem. It also illustrates the predictions (in light red) of other\nsingle decision trees trained over other (and different) randomly drawn\ninstances LS of the problem. Intuitively, the variance term here corresponds to\nthe width of the beam of predictions (in light red) of the individual\nestimators. The larger the variance, the more sensitive are the predictions for\n`x` to small changes in the training set. The bias term corresponds to the\ndifference between the average prediction of the estimator (in cyan) and the\nbest possible model (in dark blue). On this problem, we can thus observe that\nthe bias is quite low (both the cyan and the blue curves are close to each\nother) while the variance is large (the red beam is rather wide).\n\nThe lower left figure plots the pointwise decomposition of the expected mean\nsquared error of a single decision tree. It confirms that the bias term (in\nblue) is low while the variance is large (in green). It also illustrates the\nnoise part of the error which, as expected, appears to be constant and around\n`0.01`.\n\nThe right figures correspond to the same plots but using instead a bagging\nensemble of decision trees. In both figures, we can observe that the bias term\nis larger than in the previous case. In the upper right figure, the difference\nbetween the average prediction (in cyan) and the best possible model is larger\n(e.g., notice the offset around `x=2`). In the lower right figure, the bias\ncurve is also slightly higher than in the lower left figure. In terms of\nvariance however, the beam of predictions is narrower, which suggests that the\nvariance is lower. Indeed, as the lower right figure confirms, the variance\nterm (in green) is lower than for single decision trees. Overall, the bias-\nvariance decomposition is therefore no longer the same. The tradeoff is better\nfor bagging: averaging several decision trees fit on bootstrap copies of the\ndataset slightly increases the bias term but allows for a larger reduction of\nthe variance, which results in a lower overall mean squared error (compare the\nred curves int the lower figures). The script output also confirms this\nintuition. 
The total error of the bagging ensemble is lower than the total\nerror of a single decision tree, and this difference indeed mainly stems from a\nreduced variance.\n\nFor further details on bias-variance decomposition, see section 7.3 of [1]_.\n\nReferences\n----------\n\n.. [1] T. Hastie, R. Tibshirani and J. Friedman,\n \"Elements of Statistical Learning\", Springer, 2009.\n\n\"\"\"\nprint(__doc__)\n\n# Author: Gilles Louppe <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.tree import DecisionTreeRegressor\n\n# Settings\nn_repeat = 50 # Number of iterations for computing expectations\nn_train = 50 # Size of the training set\nn_test = 1000 # Size of the test set\nnoise = 0.1 # Standard deviation of the noise\nnp.random.seed(0)\n\n# Change this for exploring the bias-variance decomposition of other\n# estimators. This should work well for estimators with high variance (e.g.,\n# decision trees or KNN), but poorly for estimators with low variance (e.g.,\n# linear models).\nestimators = [\n (\"Tree\", DecisionTreeRegressor()),\n (\"Bagging(Tree)\", BaggingRegressor(DecisionTreeRegressor())),\n]\n\nn_estimators = len(estimators)\n\n\n# Generate data\ndef f(x):\n x = x.ravel()\n\n return np.exp(-(x ** 2)) + 1.5 * np.exp(-((x - 2) ** 2))\n\n\ndef generate(n_samples, noise, n_repeat=1):\n X = np.random.rand(n_samples) * 10 - 5\n X = np.sort(X)\n\n if n_repeat == 1:\n y = f(X) + np.random.normal(0.0, noise, n_samples)\n else:\n y = np.zeros((n_samples, n_repeat))\n\n for i in range(n_repeat):\n y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)\n\n X = X.reshape((n_samples, 1))\n\n return X, y\n\n\nX_train = []\ny_train = []\n\nfor i in range(n_repeat):\n X, y = generate(n_samples=n_train, noise=noise)\n X_train.append(X)\n y_train.append(y)\n\nX_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)\n\nplt.figure(figsize=(10, 8))\n\n# Loop over estimators to compare\nfor n, (name, estimator) in enumerate(estimators):\n # Compute predictions\n y_predict = np.zeros((n_test, n_repeat))\n\n for i in range(n_repeat):\n estimator.fit(X_train[i], y_train[i])\n y_predict[:, i] = estimator.predict(X_test)\n\n # Bias^2 + Variance + Noise decomposition of the mean squared error\n y_error = np.zeros(n_test)\n\n for i in range(n_repeat):\n for j in range(n_repeat):\n y_error += (y_test[:, j] - y_predict[:, i]) ** 2\n\n y_error /= n_repeat * n_repeat\n\n y_noise = np.var(y_test, axis=1)\n y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2\n y_var = np.var(y_predict, axis=1)\n\n print(\n \"{0}: {1:.4f} (error) = {2:.4f} (bias^2) \"\n \" + {3:.4f} (var) + {4:.4f} (noise)\".format(\n name, np.mean(y_error), np.mean(y_bias), np.mean(y_var), np.mean(y_noise)\n )\n )\n\n # Plot figures\n plt.subplot(2, n_estimators, n + 1)\n plt.plot(X_test, f(X_test), \"b\", label=\"$f(x)$\")\n plt.plot(X_train[0], y_train[0], \".b\", label=\"LS ~ $y = f(x)+noise$\")\n\n for i in range(n_repeat):\n if i == 0:\n plt.plot(X_test, y_predict[:, i], \"r\", label=r\"$\\^y(x)$\")\n else:\n plt.plot(X_test, y_predict[:, i], \"r\", alpha=0.05)\n\n plt.plot(X_test, np.mean(y_predict, axis=1), \"c\", label=r\"$\\mathbb{E}_{LS} \\^y(x)$\")\n\n plt.xlim([-5, 5])\n plt.title(name)\n\n if n == n_estimators - 1:\n plt.legend(loc=(1.1, 0.5))\n\n plt.subplot(2, n_estimators, n_estimators + n + 1)\n plt.plot(X_test, y_error, \"r\", label=\"$error(x)$\")\n plt.plot(X_test, y_bias, \"b\", 
label=\"$bias^2(x)$\"),\n plt.plot(X_test, y_var, \"g\", label=\"$variance(x)$\"),\n plt.plot(X_test, y_noise, \"c\", label=\"$noise(x)$\")\n\n plt.xlim([-5, 5])\n plt.ylim([0, 0.1])\n\n if n == n_estimators - 1:\n\n plt.legend(loc=(1.1, 0.5))\n\nplt.subplots_adjust(right=0.75)\nplt.show()\n",
"\"\"\"\n==============\nSGD: Penalties\n==============\n\nContours of where the penalty is equal to 1\nfor the three penalties L1, L2 and elastic-net.\n\nAll of the above are supported by :class:`~sklearn.linear_model.SGDClassifier`\nand :class:`~sklearn.linear_model.SGDRegressor`.\n\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nl1_color = \"navy\"\nl2_color = \"c\"\nelastic_net_color = \"darkorange\"\n\nline = np.linspace(-1.5, 1.5, 1001)\nxx, yy = np.meshgrid(line, line)\n\nl2 = xx ** 2 + yy ** 2\nl1 = np.abs(xx) + np.abs(yy)\nrho = 0.5\nelastic_net = rho * l1 + (1 - rho) * l2\n\nplt.figure(figsize=(10, 10), dpi=100)\nax = plt.gca()\n\nelastic_net_contour = plt.contour(\n xx, yy, elastic_net, levels=[1], colors=elastic_net_color\n)\nl2_contour = plt.contour(xx, yy, l2, levels=[1], colors=l2_color)\nl1_contour = plt.contour(xx, yy, l1, levels=[1], colors=l1_color)\nax.set_aspect(\"equal\")\nax.spines[\"left\"].set_position(\"center\")\nax.spines[\"right\"].set_color(\"none\")\nax.spines[\"bottom\"].set_position(\"center\")\nax.spines[\"top\"].set_color(\"none\")\n\nplt.clabel(\n elastic_net_contour,\n inline=1,\n fontsize=18,\n fmt={1.0: \"elastic-net\"},\n manual=[(-1, -1)],\n)\nplt.clabel(l2_contour, inline=1, fontsize=18, fmt={1.0: \"L2\"}, manual=[(-1, -1)])\nplt.clabel(l1_contour, inline=1, fontsize=18, fmt={1.0: \"L1\"}, manual=[(-1, -1)])\n\nplt.tight_layout()\nplt.show()\n",
"\"\"\"\n=================================================\nDemo of affinity propagation clustering algorithm\n=================================================\n\nReference:\nBrendan J. Frey and Delbert Dueck, \"Clustering by Passing Messages\nBetween Data Points\", Science Feb. 2007\n\n\"\"\"\nprint(__doc__)\n\nfrom sklearn.cluster import AffinityPropagation\nfrom sklearn import metrics\nfrom sklearn.datasets import make_blobs\n\n# #############################################################################\n# Generate sample data\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(\n n_samples=300, centers=centers, cluster_std=0.5, random_state=0\n)\n\n# #############################################################################\n# Compute Affinity Propagation\naf = AffinityPropagation(preference=-50, random_state=0).fit(X)\ncluster_centers_indices = af.cluster_centers_indices_\nlabels = af.labels_\n\nn_clusters_ = len(cluster_centers_indices)\n\nprint(\"Estimated number of clusters: %d\" % n_clusters_)\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels_true, labels))\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels_true, labels))\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels_true, labels))\nprint(\"Adjusted Rand Index: %0.3f\" % metrics.adjusted_rand_score(labels_true, labels))\nprint(\n \"Adjusted Mutual Information: %0.3f\"\n % metrics.adjusted_mutual_info_score(labels_true, labels)\n)\nprint(\n \"Silhouette Coefficient: %0.3f\"\n % metrics.silhouette_score(X, labels, metric=\"sqeuclidean\")\n)\n\n# #############################################################################\n# Plot result\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nplt.close(\"all\")\nplt.figure(1)\nplt.clf()\n\ncolors = cycle(\"bgrcmykbgrcmykbgrcmykbgrcmyk\")\nfor k, col in zip(range(n_clusters_), colors):\n class_members = labels == k\n cluster_center = X[cluster_centers_indices[k]]\n plt.plot(X[class_members, 0], X[class_members, 1], col + \".\")\n plt.plot(\n cluster_center[0],\n cluster_center[1],\n \"o\",\n markerfacecolor=col,\n markeredgecolor=\"k\",\n markersize=14,\n )\n for x in X[class_members]:\n plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)\n\nplt.title(\"Estimated number of clusters: %d\" % n_clusters_)\nplt.show()\n",
"# -*- coding: utf-8 -*-\n\n# Author: Henry Lin <[email protected]>\n# Tom Dupré la Tour\n\n# License: BSD\n\n\nimport numbers\nimport numpy as np\nimport warnings\n\nfrom . import OneHotEncoder\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..utils.validation import check_array\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import _check_feature_names_in\n\n\nclass KBinsDiscretizer(TransformerMixin, BaseEstimator):\n \"\"\"\n Bin continuous data into intervals.\n\n Read more in the :ref:`User Guide <preprocessing_discretization>`.\n\n .. versionadded:: 0.20\n\n Parameters\n ----------\n n_bins : int or array-like of shape (n_features,), default=5\n The number of bins to produce. Raises ValueError if ``n_bins < 2``.\n\n encode : {'onehot', 'onehot-dense', 'ordinal'}, default='onehot'\n Method used to encode the transformed result.\n\n onehot\n Encode the transformed result with one-hot encoding\n and return a sparse matrix. Ignored features are always\n stacked to the right.\n onehot-dense\n Encode the transformed result with one-hot encoding\n and return a dense array. Ignored features are always\n stacked to the right.\n ordinal\n Return the bin identifier encoded as an integer value.\n\n strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile'\n Strategy used to define the widths of the bins.\n\n uniform\n All bins in each feature have identical widths.\n quantile\n All bins in each feature have the same number of points.\n kmeans\n Values in each bin have the same nearest center of a 1D k-means\n cluster.\n\n dtype : {np.float32, np.float64}, default=None\n The desired data-type for the output. If None, output dtype is\n consistent with input dtype. Only np.float32 and np.float64 are\n supported.\n\n .. versionadded:: 0.24\n\n Attributes\n ----------\n bin_edges_ : ndarray of ndarray of shape (n_features,)\n The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )``\n Ignored features will have empty arrays.\n\n n_bins_ : ndarray of shape (n_features,), dtype=np.int_\n Number of bins per feature. Bins whose width are too small\n (i.e., <= 1e-8) are removed with a warning.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n Binarizer : Class used to bin values as ``0`` or\n ``1`` based on a parameter ``threshold``.\n\n Notes\n -----\n In bin edges for feature ``i``, the first and last values are used only for\n ``inverse_transform``. During transform, bin edges are extended to::\n\n np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])\n\n You can combine ``KBinsDiscretizer`` with\n :class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess\n part of the features.\n\n ``KBinsDiscretizer`` might produce constant features (e.g., when\n ``encode = 'onehot'`` and certain bins do not contain any data).\n These features can be removed with feature selection algorithms\n (e.g., :class:`~sklearn.feature_selection.VarianceThreshold`).\n\n Examples\n --------\n >>> from sklearn.preprocessing import KBinsDiscretizer\n >>> X = [[-2, 1, -4, -1],\n ... [-1, 2, -3, -0.5],\n ... [ 0, 3, -2, 0.5],\n ... 
[ 1, 4, -1, 2]]\n >>> est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')\n >>> est.fit(X)\n KBinsDiscretizer(...)\n >>> Xt = est.transform(X)\n >>> Xt # doctest: +SKIP\n array([[ 0., 0., 0., 0.],\n [ 1., 1., 1., 0.],\n [ 2., 2., 2., 1.],\n [ 2., 2., 2., 2.]])\n\n Sometimes it may be useful to convert the data back into the original\n feature space. The ``inverse_transform`` function converts the binned\n data into the original feature space. Each value will be equal to the mean\n of the two bin edges.\n\n >>> est.bin_edges_[0]\n array([-2., -1., 0., 1.])\n >>> est.inverse_transform(Xt)\n array([[-1.5, 1.5, -3.5, -0.5],\n [-0.5, 2.5, -2.5, -0.5],\n [ 0.5, 3.5, -1.5, 0.5],\n [ 0.5, 3.5, -1.5, 1.5]])\n \"\"\"\n\n def __init__(self, n_bins=5, *, encode=\"onehot\", strategy=\"quantile\", dtype=None):\n self.n_bins = n_bins\n self.encode = encode\n self.strategy = strategy\n self.dtype = dtype\n\n def fit(self, X, y=None):\n \"\"\"\n Fit the estimator.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data to be discretized.\n\n y : None\n Ignored. This parameter exists only for compatibility with\n :class:`~sklearn.pipeline.Pipeline`.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n X = self._validate_data(X, dtype=\"numeric\")\n\n supported_dtype = (np.float64, np.float32)\n if self.dtype in supported_dtype:\n output_dtype = self.dtype\n elif self.dtype is None:\n output_dtype = X.dtype\n else:\n raise ValueError(\n \"Valid options for 'dtype' are \"\n f\"{supported_dtype + (None,)}. Got dtype={self.dtype} \"\n \" instead.\"\n )\n\n valid_encode = (\"onehot\", \"onehot-dense\", \"ordinal\")\n if self.encode not in valid_encode:\n raise ValueError(\n \"Valid options for 'encode' are {}. Got encode={!r} instead.\".format(\n valid_encode, self.encode\n )\n )\n valid_strategy = (\"uniform\", \"quantile\", \"kmeans\")\n if self.strategy not in valid_strategy:\n raise ValueError(\n \"Valid options for 'strategy' are {}. 
\"\n \"Got strategy={!r} instead.\".format(valid_strategy, self.strategy)\n )\n\n n_features = X.shape[1]\n n_bins = self._validate_n_bins(n_features)\n\n bin_edges = np.zeros(n_features, dtype=object)\n for jj in range(n_features):\n column = X[:, jj]\n col_min, col_max = column.min(), column.max()\n\n if col_min == col_max:\n warnings.warn(\n \"Feature %d is constant and will be replaced with 0.\" % jj\n )\n n_bins[jj] = 1\n bin_edges[jj] = np.array([-np.inf, np.inf])\n continue\n\n if self.strategy == \"uniform\":\n bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)\n\n elif self.strategy == \"quantile\":\n quantiles = np.linspace(0, 100, n_bins[jj] + 1)\n bin_edges[jj] = np.asarray(np.percentile(column, quantiles))\n\n elif self.strategy == \"kmeans\":\n from ..cluster import KMeans # fixes import loops\n\n # Deterministic initialization with uniform spacing\n uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)\n init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5\n\n # 1D k-means procedure\n km = KMeans(\n n_clusters=n_bins[jj], init=init, n_init=1, algorithm=\"full\"\n )\n centers = km.fit(column[:, None]).cluster_centers_[:, 0]\n # Must sort, centers may be unsorted even with sorted init\n centers.sort()\n bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5\n bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]\n\n # Remove bins whose width are too small (i.e., <= 1e-8)\n if self.strategy in (\"quantile\", \"kmeans\"):\n mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8\n bin_edges[jj] = bin_edges[jj][mask]\n if len(bin_edges[jj]) - 1 != n_bins[jj]:\n warnings.warn(\n \"Bins whose width are too small (i.e., <= \"\n \"1e-8) in feature %d are removed. Consider \"\n \"decreasing the number of bins.\" % jj\n )\n n_bins[jj] = len(bin_edges[jj]) - 1\n\n self.bin_edges_ = bin_edges\n self.n_bins_ = n_bins\n\n if \"onehot\" in self.encode:\n self._encoder = OneHotEncoder(\n categories=[np.arange(i) for i in self.n_bins_],\n sparse=self.encode == \"onehot\",\n dtype=output_dtype,\n )\n # Fit the OneHotEncoder with toy datasets\n # so that it's ready for use after the KBinsDiscretizer is fitted\n self._encoder.fit(np.zeros((1, len(self.n_bins_))))\n\n return self\n\n def _validate_n_bins(self, n_features):\n \"\"\"Returns n_bins_, the number of bins per feature.\"\"\"\n orig_bins = self.n_bins\n if isinstance(orig_bins, numbers.Number):\n if not isinstance(orig_bins, numbers.Integral):\n raise ValueError(\n \"{} received an invalid n_bins type. \"\n \"Received {}, expected int.\".format(\n KBinsDiscretizer.__name__, type(orig_bins).__name__\n )\n )\n if orig_bins < 2:\n raise ValueError(\n \"{} received an invalid number \"\n \"of bins. Received {}, expected at least 2.\".format(\n KBinsDiscretizer.__name__, orig_bins\n )\n )\n return np.full(n_features, orig_bins, dtype=int)\n\n n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False)\n\n if n_bins.ndim > 1 or n_bins.shape[0] != n_features:\n raise ValueError(\"n_bins must be a scalar or array of shape (n_features,).\")\n\n bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)\n\n violating_indices = np.where(bad_nbins_value)[0]\n if violating_indices.shape[0] > 0:\n indices = \", \".join(str(i) for i in violating_indices)\n raise ValueError(\n \"{} received an invalid number \"\n \"of bins at indices {}. 
Number of bins \"\n \"must be at least 2, and must be an int.\".format(\n KBinsDiscretizer.__name__, indices\n )\n )\n return n_bins\n\n def transform(self, X):\n \"\"\"\n Discretize the data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data to be discretized.\n\n Returns\n -------\n Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}\n Data in the binned space. Will be a sparse matrix if\n `self.encode='onehot'` and ndarray otherwise.\n \"\"\"\n check_is_fitted(self)\n\n # check input and attribute dtypes\n dtype = (np.float64, np.float32) if self.dtype is None else self.dtype\n Xt = self._validate_data(X, copy=True, dtype=dtype, reset=False)\n\n bin_edges = self.bin_edges_\n for jj in range(Xt.shape[1]):\n # Values which are close to a bin edge are susceptible to numeric\n # instability. Add eps to X so these values are binned correctly\n # with respect to their decimal truncation. See documentation of\n # numpy.isclose for an explanation of ``rtol`` and ``atol``.\n rtol = 1.0e-5\n atol = 1.0e-8\n eps = atol + rtol * np.abs(Xt[:, jj])\n Xt[:, jj] = np.digitize(Xt[:, jj] + eps, bin_edges[jj][1:])\n np.clip(Xt, 0, self.n_bins_ - 1, out=Xt)\n\n if self.encode == \"ordinal\":\n return Xt\n\n dtype_init = None\n if \"onehot\" in self.encode:\n dtype_init = self._encoder.dtype\n self._encoder.dtype = Xt.dtype\n try:\n Xt_enc = self._encoder.transform(Xt)\n finally:\n # revert the initial dtype to avoid modifying self.\n self._encoder.dtype = dtype_init\n return Xt_enc\n\n def inverse_transform(self, Xt):\n \"\"\"\n Transform discretized data back to original feature space.\n\n Note that this function does not regenerate the original data\n due to discretization rounding.\n\n Parameters\n ----------\n Xt : array-like of shape (n_samples, n_features)\n Transformed data in the binned space.\n\n Returns\n -------\n Xinv : ndarray, dtype={np.float32, np.float64}\n Data in the original feature space.\n \"\"\"\n check_is_fitted(self)\n\n if \"onehot\" in self.encode:\n Xt = self._encoder.inverse_transform(Xt)\n\n Xinv = check_array(Xt, copy=True, dtype=(np.float64, np.float32))\n n_features = self.n_bins_.shape[0]\n if Xinv.shape[1] != n_features:\n raise ValueError(\n \"Incorrect number of features. Expecting {}, received {}.\".format(\n n_features, Xinv.shape[1]\n )\n )\n\n for jj in range(n_features):\n bin_edges = self.bin_edges_[jj]\n bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5\n Xinv[:, jj] = bin_centers[np.int_(Xinv[:, jj])]\n\n return Xinv\n\n def get_feature_names_out(self, input_features=None):\n \"\"\"Get output feature names.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then names are generated: `[x0, x1, ..., x(n_features_in_)]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n \"\"\"\n input_features = _check_feature_names_in(self, input_features)\n return self._encoder.get_feature_names_out(input_features)\n",
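As a quick illustration of the strategy options documented above, the hedged sketch below (toy, right-skewed data; four bins chosen arbitrarily) prints the bin edges each strategy produces for a single feature.

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

rng = np.random.RandomState(0)
X = rng.exponential(size=(1000, 1))  # one right-skewed feature

for strategy in ("uniform", "quantile", "kmeans"):
    est = KBinsDiscretizer(n_bins=4, encode="ordinal", strategy=strategy).fit(X)
    print(f"{strategy:>8}: edges = {np.round(est.bin_edges_[0], 2)}")

"uniform" splits the observed range into equal-width intervals, "quantile" equalises the number of samples per bin, and "kmeans" places edges midway between 1-D k-means centers, as described in the class docstring.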
"\"\"\"\n=============================================================================\nComparing Nearest Neighbors with and without Neighborhood Components Analysis\n=============================================================================\n\nAn example comparing nearest neighbors classification with and without\nNeighborhood Components Analysis.\n\nIt will plot the class decision boundaries given by a Nearest Neighbors\nclassifier when using the Euclidean distance on the original features, versus\nusing the Euclidean distance after the transformation learned by Neighborhood\nComponents Analysis. The latter aims to find a linear transformation that\nmaximises the (stochastic) nearest neighbor classification accuracy on the\ntraining set.\n\"\"\"\n\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis\nfrom sklearn.pipeline import Pipeline\n\n\nprint(__doc__)\n\nn_neighbors = 1\n\ndataset = datasets.load_iris()\nX, y = dataset.data, dataset.target\n\n# we only take two features. We could avoid this ugly\n# slicing by using a two-dim dataset\nX = X[:, [0, 2]]\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, test_size=0.7, random_state=42\n)\n\nh = 0.01 # step size in the mesh\n\n# Create color maps\ncmap_light = ListedColormap([\"#FFAAAA\", \"#AAFFAA\", \"#AAAAFF\"])\ncmap_bold = ListedColormap([\"#FF0000\", \"#00FF00\", \"#0000FF\"])\n\nnames = [\"KNN\", \"NCA, KNN\"]\n\nclassifiers = [\n Pipeline(\n [\n (\"scaler\", StandardScaler()),\n (\"knn\", KNeighborsClassifier(n_neighbors=n_neighbors)),\n ]\n ),\n Pipeline(\n [\n (\"scaler\", StandardScaler()),\n (\"nca\", NeighborhoodComponentsAnalysis()),\n (\"knn\", KNeighborsClassifier(n_neighbors=n_neighbors)),\n ]\n ),\n]\n\nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\nfor name, clf in zip(names, classifiers):\n\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, x_max]x[y_min, y_max].\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.figure()\n plt.pcolormesh(xx, yy, Z, cmap=cmap_light, alpha=0.8)\n\n # Plot also the training and testing points\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold, edgecolor=\"k\", s=20)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n plt.title(\"{} (k = {})\".format(name, n_neighbors))\n plt.text(\n 0.9,\n 0.1,\n \"{:.2f}\".format(score),\n size=15,\n ha=\"center\",\n va=\"center\",\n transform=plt.gca().transAxes,\n )\n\nplt.show()\n",
"\"\"\"\n=========================================\nNested versus non-nested cross-validation\n=========================================\n\nThis example compares non-nested and nested cross-validation strategies on a\nclassifier of the iris data set. Nested cross-validation (CV) is often used to\ntrain a model in which hyperparameters also need to be optimized. Nested CV\nestimates the generalization error of the underlying model and its\n(hyper)parameter search. Choosing the parameters that maximize non-nested CV\nbiases the model to the dataset, yielding an overly-optimistic score.\n\nModel selection without nested CV uses the same data to tune model parameters\nand evaluate model performance. Information may thus \"leak\" into the model\nand overfit the data. The magnitude of this effect is primarily dependent on\nthe size of the dataset and the stability of the model. See Cawley and Talbot\n[1]_ for an analysis of these issues.\n\nTo avoid this problem, nested CV effectively uses a series of\ntrain/validation/test set splits. In the inner loop (here executed by\n:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is\napproximately maximized by fitting a model to each training set, and then\ndirectly maximized in selecting (hyper)parameters over the validation set. In\nthe outer loop (here in :func:`cross_val_score\n<sklearn.model_selection.cross_val_score>`), generalization error is estimated\nby averaging test set scores over several dataset splits.\n\nThe example below uses a support vector classifier with a non-linear kernel to\nbuild a model with optimized hyperparameters by grid search. We compare the\nperformance of non-nested and nested CV strategies by taking the difference\nbetween their scores.\n\n.. topic:: See Also:\n\n - :ref:`cross_validation`\n - :ref:`grid_search`\n\n.. topic:: References:\n\n .. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and\n subsequent selection bias in performance evaluation.\n J. Mach. Learn. 
Res 2010,11, 2079-2107.\n <http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_\n\n\"\"\"\nfrom sklearn.datasets import load_iris\nfrom matplotlib import pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, KFold\nimport numpy as np\n\nprint(__doc__)\n\n# Number of random trials\nNUM_TRIALS = 30\n\n# Load the dataset\niris = load_iris()\nX_iris = iris.data\ny_iris = iris.target\n\n# Set up possible values of parameters to optimize over\np_grid = {\"C\": [1, 10, 100], \"gamma\": [0.01, 0.1]}\n\n# We will use a Support Vector Classifier with \"rbf\" kernel\nsvm = SVC(kernel=\"rbf\")\n\n# Arrays to store scores\nnon_nested_scores = np.zeros(NUM_TRIALS)\nnested_scores = np.zeros(NUM_TRIALS)\n\n# Loop for each trial\nfor i in range(NUM_TRIALS):\n\n # Choose cross-validation techniques for the inner and outer loops,\n # independently of the dataset.\n # E.g \"GroupKFold\", \"LeaveOneOut\", \"LeaveOneGroupOut\", etc.\n inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)\n outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)\n\n # Non_nested parameter search and scoring\n clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=outer_cv)\n clf.fit(X_iris, y_iris)\n non_nested_scores[i] = clf.best_score_\n\n # Nested CV with parameter optimization\n clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)\n nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)\n nested_scores[i] = nested_score.mean()\n\nscore_difference = non_nested_scores - nested_scores\n\nprint(\n \"Average difference of {:6f} with std. dev. of {:6f}.\".format(\n score_difference.mean(), score_difference.std()\n )\n)\n\n# Plot scores on each trial for nested and non-nested CV\nplt.figure()\nplt.subplot(211)\n(non_nested_scores_line,) = plt.plot(non_nested_scores, color=\"r\")\n(nested_line,) = plt.plot(nested_scores, color=\"b\")\nplt.ylabel(\"score\", fontsize=\"14\")\nplt.legend(\n [non_nested_scores_line, nested_line],\n [\"Non-Nested CV\", \"Nested CV\"],\n bbox_to_anchor=(0, 0.4, 0.5, 0),\n)\nplt.title(\n \"Non-Nested and Nested Cross Validation on Iris Dataset\",\n x=0.5,\n y=1.1,\n fontsize=\"15\",\n)\n\n# Plot bar chart of the difference.\nplt.subplot(212)\ndifference_plot = plt.bar(range(NUM_TRIALS), score_difference)\nplt.xlabel(\"Individual Trial #\")\nplt.legend(\n [difference_plot],\n [\"Non-Nested CV - Nested CV Score\"],\n bbox_to_anchor=(0, 1, 0.8, 0),\n)\nplt.ylabel(\"score difference\", fontsize=\"14\")\n\nplt.show()\n",
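Stripped of the plotting and the repeated trials, the nested pattern used above reduces to an inner GridSearchCV wrapped by an outer cross_val_score. The sketch below shows only that skeleton; the grid and fold counts are the illustrative values from the example.

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, KFold, cross_val_score
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

inner_cv = KFold(n_splits=4, shuffle=True, random_state=0)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=1)

# Inner loop: hyperparameter search; outer loop: generalization estimate.
clf = GridSearchCV(SVC(kernel="rbf"),
                   param_grid={"C": [1, 10, 100], "gamma": [0.01, 0.1]},
                   cv=inner_cv)
nested_scores = cross_val_score(clf, X, y, cv=outer_cv)
print("nested CV accuracy: %.3f +/- %.3f"
      % (nested_scores.mean(), nested_scores.std()))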
"\"\"\"Orthogonal matching pursuit algorithms\n\"\"\"\n\n# Author: Vlad Niculae\n#\n# License: BSD 3 clause\n\nimport warnings\nfrom math import sqrt\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.linalg.lapack import get_lapack_funcs\nfrom joblib import Parallel\n\nfrom ._base import LinearModel, _pre_fit, _deprecate_normalize\nfrom ..base import RegressorMixin, MultiOutputMixin\nfrom ..utils import as_float_array, check_array\nfrom ..utils.fixes import delayed\nfrom ..model_selection import check_cv\n\npremature = (\n \"Orthogonal matching pursuit ended prematurely due to linear\"\n \" dependence in the dictionary. The requested precision might\"\n \" not have been met.\"\n)\n\n\ndef _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False):\n \"\"\"Orthogonal Matching Pursuit step using the Cholesky decomposition.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input dictionary. Columns are assumed to have unit norm.\n\n y : ndarray of shape (n_samples,)\n Input targets.\n\n n_nonzero_coefs : int\n Targeted number of non-zero elements.\n\n tol : float, default=None\n Targeted squared error, if not None overrides n_nonzero_coefs.\n\n copy_X : bool, default=True\n Whether the design matrix X must be copied by the algorithm. A false\n value is only helpful if X is already Fortran-ordered, otherwise a\n copy is made anyway.\n\n return_path : bool, default=False\n Whether to return every value of the nonzero coefficients along the\n forward path. Useful for cross-validation.\n\n Returns\n -------\n gamma : ndarray of shape (n_nonzero_coefs,)\n Non-zero elements of the solution.\n\n idx : ndarray of shape (n_nonzero_coefs,)\n Indices of the positions of the elements in gamma within the solution\n vector.\n\n coef : ndarray of shape (n_features, n_nonzero_coefs)\n The first k values of column k correspond to the coefficient value\n for the active features at that step. The lower left triangle contains\n garbage. 
Only returned if ``return_path=True``.\n\n n_active : int\n Number of active features at convergence.\n \"\"\"\n if copy_X:\n X = X.copy(\"F\")\n else: # even if we are allowed to overwrite, still copy it if bad order\n X = np.asfortranarray(X)\n\n min_float = np.finfo(X.dtype).eps\n nrm2, swap = linalg.get_blas_funcs((\"nrm2\", \"swap\"), (X,))\n (potrs,) = get_lapack_funcs((\"potrs\",), (X,))\n\n alpha = np.dot(X.T, y)\n residual = y\n gamma = np.empty(0)\n n_active = 0\n indices = np.arange(X.shape[1]) # keeping track of swapping\n\n max_features = X.shape[1] if tol is not None else n_nonzero_coefs\n\n L = np.empty((max_features, max_features), dtype=X.dtype)\n\n if return_path:\n coefs = np.empty_like(L)\n\n while True:\n lam = np.argmax(np.abs(np.dot(X.T, residual)))\n if lam < n_active or alpha[lam] ** 2 < min_float:\n # atom already selected or inner product too small\n warnings.warn(premature, RuntimeWarning, stacklevel=2)\n break\n\n if n_active > 0:\n # Updates the Cholesky decomposition of X' X\n L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])\n linalg.solve_triangular(\n L[:n_active, :n_active],\n L[n_active, :n_active],\n trans=0,\n lower=1,\n overwrite_b=True,\n check_finite=False,\n )\n v = nrm2(L[n_active, :n_active]) ** 2\n Lkk = linalg.norm(X[:, lam]) ** 2 - v\n if Lkk <= min_float: # selected atoms are dependent\n warnings.warn(premature, RuntimeWarning, stacklevel=2)\n break\n L[n_active, n_active] = sqrt(Lkk)\n else:\n L[0, 0] = linalg.norm(X[:, lam])\n\n X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])\n alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]\n indices[n_active], indices[lam] = indices[lam], indices[n_active]\n n_active += 1\n\n # solves LL'x = X'y as a composition of two triangular systems\n gamma, _ = potrs(\n L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False\n )\n\n if return_path:\n coefs[:n_active, n_active - 1] = gamma\n residual = y - np.dot(X[:, :n_active], gamma)\n if tol is not None and nrm2(residual) ** 2 <= tol:\n break\n elif n_active == max_features:\n break\n\n if return_path:\n return gamma, indices[:n_active], coefs[:, :n_active], n_active\n else:\n return gamma, indices[:n_active], n_active\n\n\ndef _gram_omp(\n Gram,\n Xy,\n n_nonzero_coefs,\n tol_0=None,\n tol=None,\n copy_Gram=True,\n copy_Xy=True,\n return_path=False,\n):\n \"\"\"Orthogonal Matching Pursuit step on a precomputed Gram matrix.\n\n This function uses the Cholesky decomposition method.\n\n Parameters\n ----------\n Gram : ndarray of shape (n_features, n_features)\n Gram matrix of the input data matrix.\n\n Xy : ndarray of shape (n_features,)\n Input targets.\n\n n_nonzero_coefs : int\n Targeted number of non-zero elements.\n\n tol_0 : float, default=None\n Squared norm of y, required if tol is not None.\n\n tol : float, default=None\n Targeted squared error, if not None overrides n_nonzero_coefs.\n\n copy_Gram : bool, default=True\n Whether the gram matrix must be copied by the algorithm. A false\n value is only helpful if it is already Fortran-ordered, otherwise a\n copy is made anyway.\n\n copy_Xy : bool, default=True\n Whether the covariance vector Xy must be copied by the algorithm.\n If False, it may be overwritten.\n\n return_path : bool, default=False\n Whether to return every value of the nonzero coefficients along the\n forward path. 
Useful for cross-validation.\n\n Returns\n -------\n gamma : ndarray of shape (n_nonzero_coefs,)\n Non-zero elements of the solution.\n\n idx : ndarray of shape (n_nonzero_coefs,)\n Indices of the positions of the elements in gamma within the solution\n vector.\n\n coefs : ndarray of shape (n_features, n_nonzero_coefs)\n The first k values of column k correspond to the coefficient value\n for the active features at that step. The lower left triangle contains\n garbage. Only returned if ``return_path=True``.\n\n n_active : int\n Number of active features at convergence.\n \"\"\"\n Gram = Gram.copy(\"F\") if copy_Gram else np.asfortranarray(Gram)\n\n if copy_Xy or not Xy.flags.writeable:\n Xy = Xy.copy()\n\n min_float = np.finfo(Gram.dtype).eps\n nrm2, swap = linalg.get_blas_funcs((\"nrm2\", \"swap\"), (Gram,))\n (potrs,) = get_lapack_funcs((\"potrs\",), (Gram,))\n\n indices = np.arange(len(Gram)) # keeping track of swapping\n alpha = Xy\n tol_curr = tol_0\n delta = 0\n gamma = np.empty(0)\n n_active = 0\n\n max_features = len(Gram) if tol is not None else n_nonzero_coefs\n\n L = np.empty((max_features, max_features), dtype=Gram.dtype)\n\n L[0, 0] = 1.0\n if return_path:\n coefs = np.empty_like(L)\n\n while True:\n lam = np.argmax(np.abs(alpha))\n if lam < n_active or alpha[lam] ** 2 < min_float:\n # selected same atom twice, or inner product too small\n warnings.warn(premature, RuntimeWarning, stacklevel=3)\n break\n if n_active > 0:\n L[n_active, :n_active] = Gram[lam, :n_active]\n linalg.solve_triangular(\n L[:n_active, :n_active],\n L[n_active, :n_active],\n trans=0,\n lower=1,\n overwrite_b=True,\n check_finite=False,\n )\n v = nrm2(L[n_active, :n_active]) ** 2\n Lkk = Gram[lam, lam] - v\n if Lkk <= min_float: # selected atoms are dependent\n warnings.warn(premature, RuntimeWarning, stacklevel=3)\n break\n L[n_active, n_active] = sqrt(Lkk)\n else:\n L[0, 0] = sqrt(Gram[lam, lam])\n\n Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])\n Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])\n indices[n_active], indices[lam] = indices[lam], indices[n_active]\n Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]\n n_active += 1\n # solves LL'x = X'y as a composition of two triangular systems\n gamma, _ = potrs(\n L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False\n )\n if return_path:\n coefs[:n_active, n_active - 1] = gamma\n beta = np.dot(Gram[:, :n_active], gamma)\n alpha = Xy - beta\n if tol is not None:\n tol_curr += delta\n delta = np.inner(gamma, beta[:n_active])\n tol_curr -= delta\n if abs(tol_curr) <= tol:\n break\n elif n_active == max_features:\n break\n\n if return_path:\n return gamma, indices[:n_active], coefs[:, :n_active], n_active\n else:\n return gamma, indices[:n_active], n_active\n\n\ndef orthogonal_mp(\n X,\n y,\n *,\n n_nonzero_coefs=None,\n tol=None,\n precompute=False,\n copy_X=True,\n return_path=False,\n return_n_iter=False,\n):\n r\"\"\"Orthogonal Matching Pursuit (OMP).\n\n Solves n_targets Orthogonal Matching Pursuit problems.\n An instance of the problem has the form:\n\n When parametrized by the number of non-zero coefficients using\n `n_nonzero_coefs`:\n argmin ||y - X\\gamma||^2 subject to ||\\gamma||_0 <= n_{nonzero coefs}\n\n When parametrized by error using the parameter `tol`:\n argmin ||\\gamma||_0 subject to ||y - X\\gamma||^2 <= tol\n\n Read more in the :ref:`User Guide <omp>`.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data. 
Columns are assumed to have unit norm.\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Input targets.\n\n n_nonzero_coefs : int, default=None\n Desired number of non-zero entries in the solution. If None (by\n default) this value is set to 10% of n_features.\n\n tol : float, default=None\n Maximum norm of the residual. If not None, overrides n_nonzero_coefs.\n\n precompute : 'auto' or bool, default=False\n Whether to perform precomputations. Improves performance when n_targets\n or n_samples is very large.\n\n copy_X : bool, default=True\n Whether the design matrix X must be copied by the algorithm. A false\n value is only helpful if X is already Fortran-ordered, otherwise a\n copy is made anyway.\n\n return_path : bool, default=False\n Whether to return every value of the nonzero coefficients along the\n forward path. Useful for cross-validation.\n\n return_n_iter : bool, default=False\n Whether or not to return the number of iterations.\n\n Returns\n -------\n coef : ndarray of shape (n_features,) or (n_features, n_targets)\n Coefficients of the OMP solution. If `return_path=True`, this contains\n the whole coefficient path. In this case its shape is\n (n_features, n_features) or (n_features, n_targets, n_features) and\n iterating over the last axis yields coefficients in increasing order\n of active features.\n\n n_iters : array-like or int\n Number of active features across every target. Returned only if\n `return_n_iter` is set to True.\n\n See Also\n --------\n OrthogonalMatchingPursuit\n orthogonal_mp_gram\n lars_path\n sklearn.decomposition.sparse_encode\n\n Notes\n -----\n Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,\n Matching pursuits with time-frequency dictionaries, IEEE Transactions on\n Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.\n (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)\n\n This implementation is based on Rubinstein, R., Zibulevsky, M. 
and Elad,\n M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal\n Matching Pursuit Technical Report - CS Technion, April 2008.\n https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf\n\n \"\"\"\n X = check_array(X, order=\"F\", copy=copy_X)\n copy_X = False\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n y = check_array(y)\n if y.shape[1] > 1: # subsequent targets will be affected\n copy_X = True\n if n_nonzero_coefs is None and tol is None:\n # default for n_nonzero_coefs is 0.1 * n_features\n # but at least one.\n n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)\n if tol is not None and tol < 0:\n raise ValueError(\"Epsilon cannot be negative\")\n if tol is None and n_nonzero_coefs <= 0:\n raise ValueError(\"The number of atoms must be positive\")\n if tol is None and n_nonzero_coefs > X.shape[1]:\n raise ValueError(\n \"The number of atoms cannot be more than the number of features\"\n )\n if precompute == \"auto\":\n precompute = X.shape[0] > X.shape[1]\n if precompute:\n G = np.dot(X.T, X)\n G = np.asfortranarray(G)\n Xy = np.dot(X.T, y)\n if tol is not None:\n norms_squared = np.sum((y ** 2), axis=0)\n else:\n norms_squared = None\n return orthogonal_mp_gram(\n G,\n Xy,\n n_nonzero_coefs=n_nonzero_coefs,\n tol=tol,\n norms_squared=norms_squared,\n copy_Gram=copy_X,\n copy_Xy=False,\n return_path=return_path,\n )\n\n if return_path:\n coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))\n else:\n coef = np.zeros((X.shape[1], y.shape[1]))\n n_iters = []\n\n for k in range(y.shape[1]):\n out = _cholesky_omp(\n X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path\n )\n if return_path:\n _, idx, coefs, n_iter = out\n coef = coef[:, :, : len(idx)]\n for n_active, x in enumerate(coefs.T):\n coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1]\n else:\n x, idx, n_iter = out\n coef[idx, k] = x\n n_iters.append(n_iter)\n\n if y.shape[1] == 1:\n n_iters = n_iters[0]\n\n if return_n_iter:\n return np.squeeze(coef), n_iters\n else:\n return np.squeeze(coef)\n\n\ndef orthogonal_mp_gram(\n Gram,\n Xy,\n *,\n n_nonzero_coefs=None,\n tol=None,\n norms_squared=None,\n copy_Gram=True,\n copy_Xy=True,\n return_path=False,\n return_n_iter=False,\n):\n \"\"\"Gram Orthogonal Matching Pursuit (OMP).\n\n Solves n_targets Orthogonal Matching Pursuit problems using only\n the Gram matrix X.T * X and the product X.T * y.\n\n Read more in the :ref:`User Guide <omp>`.\n\n Parameters\n ----------\n Gram : ndarray of shape (n_features, n_features)\n Gram matrix of the input data: X.T * X.\n\n Xy : ndarray of shape (n_features,) or (n_features, n_targets)\n Input targets multiplied by X: X.T * y.\n\n n_nonzero_coefs : int, default=None\n Desired number of non-zero entries in the solution. If None (by\n default) this value is set to 10% of n_features.\n\n tol : float, default=None\n Maximum norm of the residual. If not None, overrides n_nonzero_coefs.\n\n norms_squared : array-like of shape (n_targets,), default=None\n Squared L2 norms of the lines of y. Required if tol is not None.\n\n copy_Gram : bool, default=True\n Whether the gram matrix must be copied by the algorithm. A false\n value is only helpful if it is already Fortran-ordered, otherwise a\n copy is made anyway.\n\n copy_Xy : bool, default=True\n Whether the covariance vector Xy must be copied by the algorithm.\n If False, it may be overwritten.\n\n return_path : bool, default=False\n Whether to return every value of the nonzero coefficients along the\n forward path. 
Useful for cross-validation.\n\n return_n_iter : bool, default=False\n Whether or not to return the number of iterations.\n\n Returns\n -------\n coef : ndarray of shape (n_features,) or (n_features, n_targets)\n Coefficients of the OMP solution. If `return_path=True`, this contains\n the whole coefficient path. In this case its shape is\n (n_features, n_features) or (n_features, n_targets, n_features) and\n iterating over the last axis yields coefficients in increasing order\n of active features.\n\n n_iters : array-like or int\n Number of active features across every target. Returned only if\n `return_n_iter` is set to True.\n\n See Also\n --------\n OrthogonalMatchingPursuit\n orthogonal_mp\n lars_path\n sklearn.decomposition.sparse_encode\n\n Notes\n -----\n Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,\n Matching pursuits with time-frequency dictionaries, IEEE Transactions on\n Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.\n (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)\n\n This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,\n M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal\n Matching Pursuit Technical Report - CS Technion, April 2008.\n https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf\n\n \"\"\"\n Gram = check_array(Gram, order=\"F\", copy=copy_Gram)\n Xy = np.asarray(Xy)\n if Xy.ndim > 1 and Xy.shape[1] > 1:\n # or subsequent target will be affected\n copy_Gram = True\n if Xy.ndim == 1:\n Xy = Xy[:, np.newaxis]\n if tol is not None:\n norms_squared = [norms_squared]\n if copy_Xy or not Xy.flags.writeable:\n # Make the copy once instead of many times in _gram_omp itself.\n Xy = Xy.copy()\n\n if n_nonzero_coefs is None and tol is None:\n n_nonzero_coefs = int(0.1 * len(Gram))\n if tol is not None and norms_squared is None:\n raise ValueError(\n \"Gram OMP needs the precomputed norms in order \"\n \"to evaluate the error sum of squares.\"\n )\n if tol is not None and tol < 0:\n raise ValueError(\"Epsilon cannot be negative\")\n if tol is None and n_nonzero_coefs <= 0:\n raise ValueError(\"The number of atoms must be positive\")\n if tol is None and n_nonzero_coefs > len(Gram):\n raise ValueError(\n \"The number of atoms cannot be more than the number of features\"\n )\n\n if return_path:\n coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))\n else:\n coef = np.zeros((len(Gram), Xy.shape[1]))\n\n n_iters = []\n for k in range(Xy.shape[1]):\n out = _gram_omp(\n Gram,\n Xy[:, k],\n n_nonzero_coefs,\n norms_squared[k] if tol is not None else None,\n tol,\n copy_Gram=copy_Gram,\n copy_Xy=False,\n return_path=return_path,\n )\n if return_path:\n _, idx, coefs, n_iter = out\n coef = coef[:, :, : len(idx)]\n for n_active, x in enumerate(coefs.T):\n coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1]\n else:\n x, idx, n_iter = out\n coef[idx, k] = x\n n_iters.append(n_iter)\n\n if Xy.shape[1] == 1:\n n_iters = n_iters[0]\n\n if return_n_iter:\n return np.squeeze(coef), n_iters\n else:\n return np.squeeze(coef)\n\n\nclass OrthogonalMatchingPursuit(MultiOutputMixin, RegressorMixin, LinearModel):\n \"\"\"Orthogonal Matching Pursuit model (OMP).\n\n Read more in the :ref:`User Guide <omp>`.\n\n Parameters\n ----------\n n_nonzero_coefs : int, default=None\n Desired number of non-zero entries in the solution. If None (by\n default) this value is set to 10% of n_features.\n\n tol : float, default=None\n Maximum norm of the residual. 
If not None, overrides n_nonzero_coefs.\n\n fit_intercept : bool, default=True\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (i.e. data is expected to be centered).\n\n normalize : bool, default=True\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n .. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4.\n\n precompute : 'auto' or bool, default='auto'\n Whether to use a precomputed Gram and Xy matrix to speed up\n calculations. Improves performance when :term:`n_targets` or\n :term:`n_samples` is very large. Note that if you already have such\n matrices, you can pass them directly to the fit method.\n\n Attributes\n ----------\n coef_ : ndarray of shape (n_features,) or (n_targets, n_features)\n Parameter vector (w in the formula).\n\n intercept_ : float or ndarray of shape (n_targets,)\n Independent term in decision function.\n\n n_iter_ : int or array-like\n Number of active features across every target.\n\n n_nonzero_coefs_ : int\n The number of non-zero coefficients in the solution. If\n `n_nonzero_coefs` is None and `tol` is None this value is either set\n to 10% of `n_features` or 1, whichever is greater.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n Examples\n --------\n >>> from sklearn.linear_model import OrthogonalMatchingPursuit\n >>> from sklearn.datasets import make_regression\n >>> X, y = make_regression(noise=4, random_state=0)\n >>> reg = OrthogonalMatchingPursuit(normalize=False).fit(X, y)\n >>> reg.score(X, y)\n 0.9991...\n >>> reg.predict(X[:1,])\n array([-78.3854...])\n\n Notes\n -----\n Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,\n Matching pursuits with time-frequency dictionaries, IEEE Transactions on\n Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.\n (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)\n\n This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,\n M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal\n Matching Pursuit Technical Report - CS Technion, April 2008.\n https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf\n\n See Also\n --------\n orthogonal_mp\n orthogonal_mp_gram\n lars_path\n Lars\n LassoLars\n sklearn.decomposition.sparse_encode\n OrthogonalMatchingPursuitCV\n \"\"\"\n\n def __init__(\n self,\n *,\n n_nonzero_coefs=None,\n tol=None,\n fit_intercept=True,\n normalize=\"deprecated\",\n precompute=\"auto\",\n ):\n self.n_nonzero_coefs = n_nonzero_coefs\n self.tol = tol\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.precompute = precompute\n\n def fit(self, X, y):\n \"\"\"Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values. 
Will be cast to X's dtype if necessary\n\n\n Returns\n -------\n self : object\n returns an instance of self.\n \"\"\"\n _normalize = _deprecate_normalize(\n self.normalize, default=True, estimator_name=self.__class__.__name__\n )\n\n X, y = self._validate_data(X, y, multi_output=True, y_numeric=True)\n n_features = X.shape[1]\n\n X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit(\n X, y, None, self.precompute, _normalize, self.fit_intercept, copy=True\n )\n\n if y.ndim == 1:\n y = y[:, np.newaxis]\n\n if self.n_nonzero_coefs is None and self.tol is None:\n # default for n_nonzero_coefs is 0.1 * n_features\n # but at least one.\n self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)\n else:\n self.n_nonzero_coefs_ = self.n_nonzero_coefs\n\n if Gram is False:\n coef_, self.n_iter_ = orthogonal_mp(\n X,\n y,\n n_nonzero_coefs=self.n_nonzero_coefs_,\n tol=self.tol,\n precompute=False,\n copy_X=True,\n return_n_iter=True,\n )\n else:\n norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None\n\n coef_, self.n_iter_ = orthogonal_mp_gram(\n Gram,\n Xy=Xy,\n n_nonzero_coefs=self.n_nonzero_coefs_,\n tol=self.tol,\n norms_squared=norms_sq,\n copy_Gram=True,\n copy_Xy=True,\n return_n_iter=True,\n )\n self.coef_ = coef_.T\n self._set_intercept(X_offset, y_offset, X_scale)\n return self\n\n\ndef _omp_path_residues(\n X_train,\n y_train,\n X_test,\n y_test,\n copy=True,\n fit_intercept=True,\n normalize=True,\n max_iter=100,\n):\n \"\"\"Compute the residues on left-out data for a full LARS path.\n\n Parameters\n ----------\n X_train : ndarray of shape (n_samples, n_features)\n The data to fit the LARS on.\n\n y_train : ndarray of shape (n_samples)\n The target variable to fit LARS on.\n\n X_test : ndarray of shape (n_samples, n_features)\n The data to compute the residues on.\n\n y_test : ndarray of shape (n_samples)\n The target variable to compute the residues on.\n\n copy : bool, default=True\n Whether X_train, X_test, y_train and y_test should be copied. If\n False, they may be overwritten.\n\n fit_intercept : bool, default=True\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (i.e. data is expected to be centered).\n\n normalize : bool, default=True\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n .. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4.\n\n max_iter : int, default=100\n Maximum numbers of iterations to perform, therefore maximum features\n to include. 
100 by default.\n\n Returns\n -------\n residues : ndarray of shape (n_samples, max_features)\n Residues of the prediction on the test data.\n \"\"\"\n\n if copy:\n X_train = X_train.copy()\n y_train = y_train.copy()\n X_test = X_test.copy()\n y_test = y_test.copy()\n\n if fit_intercept:\n X_mean = X_train.mean(axis=0)\n X_train -= X_mean\n X_test -= X_mean\n y_mean = y_train.mean(axis=0)\n y_train = as_float_array(y_train, copy=False)\n y_train -= y_mean\n y_test = as_float_array(y_test, copy=False)\n y_test -= y_mean\n\n if normalize:\n norms = np.sqrt(np.sum(X_train ** 2, axis=0))\n nonzeros = np.flatnonzero(norms)\n X_train[:, nonzeros] /= norms[nonzeros]\n\n coefs = orthogonal_mp(\n X_train,\n y_train,\n n_nonzero_coefs=max_iter,\n tol=None,\n precompute=False,\n copy_X=False,\n return_path=True,\n )\n if coefs.ndim == 1:\n coefs = coefs[:, np.newaxis]\n if normalize:\n coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]\n\n return np.dot(coefs.T, X_test.T) - y_test\n\n\nclass OrthogonalMatchingPursuitCV(RegressorMixin, LinearModel):\n \"\"\"Cross-validated Orthogonal Matching Pursuit model (OMP).\n\n See glossary entry for :term:`cross-validation estimator`.\n\n Read more in the :ref:`User Guide <omp>`.\n\n Parameters\n ----------\n copy : bool, default=True\n Whether the design matrix X must be copied by the algorithm. A false\n value is only helpful if X is already Fortran-ordered, otherwise a\n copy is made anyway.\n\n fit_intercept : bool, default=True\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (i.e. data is expected to be centered).\n\n normalize : bool, default=True\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n .. deprecated:: 1.0\n ``normalize`` was deprecated in version 1.0. It will default\n to False in 1.2 and be removed in 1.4.\n\n max_iter : int, default=None\n Maximum numbers of iterations to perform, therefore maximum features\n to include. 10% of ``n_features`` but at least 5 if available.\n\n cv : int, cross-validation generator or iterable, default=None\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 5-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold.\n\n n_jobs : int, default=None\n Number of CPUs to use during the cross validation.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n verbose : bool or int, default=False\n Sets the verbosity amount.\n\n Attributes\n ----------\n intercept_ : float or ndarray of shape (n_targets,)\n Independent term in decision function.\n\n coef_ : ndarray of shape (n_features,) or (n_targets, n_features)\n Parameter vector (w in the problem formulation).\n\n n_nonzero_coefs_ : int\n Estimated number of non-zero coefficients giving the best mean squared\n error over the cross-validation folds.\n\n n_iter_ : int or array-like\n Number of active features across every target for the model refit with\n the best hyperparameters got by cross-validating across all folds.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n Examples\n --------\n >>> from sklearn.linear_model import OrthogonalMatchingPursuitCV\n >>> from sklearn.datasets import make_regression\n >>> X, y = make_regression(n_features=100, n_informative=10,\n ... noise=4, random_state=0)\n >>> reg = OrthogonalMatchingPursuitCV(cv=5, normalize=False).fit(X, y)\n >>> reg.score(X, y)\n 0.9991...\n >>> reg.n_nonzero_coefs_\n 10\n >>> reg.predict(X[:1,])\n array([-78.3854...])\n\n See Also\n --------\n orthogonal_mp\n orthogonal_mp_gram\n lars_path\n Lars\n LassoLars\n OrthogonalMatchingPursuit\n LarsCV\n LassoLarsCV\n sklearn.decomposition.sparse_encode\n\n \"\"\"\n\n def __init__(\n self,\n *,\n copy=True,\n fit_intercept=True,\n normalize=\"deprecated\",\n max_iter=None,\n cv=None,\n n_jobs=None,\n verbose=False,\n ):\n self.copy = copy\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.max_iter = max_iter\n self.cv = cv\n self.n_jobs = n_jobs\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values. Will be cast to X's dtype if necessary.\n\n Returns\n -------\n self : object\n returns an instance of self.\n \"\"\"\n\n _normalize = _deprecate_normalize(\n self.normalize, default=True, estimator_name=self.__class__.__name__\n )\n\n X, y = self._validate_data(\n X, y, y_numeric=True, ensure_min_features=2, estimator=self\n )\n X = as_float_array(X, copy=False, force_all_finite=False)\n cv = check_cv(self.cv, classifier=False)\n max_iter = (\n min(max(int(0.1 * X.shape[1]), 5), X.shape[1])\n if not self.max_iter\n else self.max_iter\n )\n cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(\n delayed(_omp_path_residues)(\n X[train],\n y[train],\n X[test],\n y[test],\n self.copy,\n self.fit_intercept,\n _normalize,\n max_iter,\n )\n for train, test in cv.split(X)\n )\n\n min_early_stop = min(fold.shape[0] for fold in cv_paths)\n mse_folds = np.array(\n [(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]\n )\n best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1\n self.n_nonzero_coefs_ = best_n_nonzero_coefs\n omp = OrthogonalMatchingPursuit(\n n_nonzero_coefs=best_n_nonzero_coefs,\n fit_intercept=self.fit_intercept,\n normalize=_normalize,\n )\n omp.fit(X, y)\n self.coef_ = omp.coef_\n self.intercept_ = omp.intercept_\n self.n_iter_ = omp.n_iter_\n return self\n",
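A hedged usage sketch for the estimator defined above: recovering the support of a synthetic sparse coefficient vector. The problem sizes and noise level are arbitrary, normalize=False mirrors the class docstring example, and exact support recovery is typical on this kind of well-conditioned random design but not guaranteed.

import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

rng = np.random.RandomState(0)
n_samples, n_features, n_nonzero = 60, 100, 8

X = rng.randn(n_samples, n_features)
w = np.zeros(n_features)
support = rng.choice(n_features, n_nonzero, replace=False)
w[support] = rng.randn(n_nonzero)
y = X @ w + 0.01 * rng.randn(n_samples)  # sparse signal plus small noise

omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero, normalize=False)
omp.fit(X, y)

print("true support     :", np.sort(support))
print("recovered support:", np.flatnonzero(omp.coef_))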
"\"\"\"Stacking classifier and regressor.\"\"\"\n\n# Authors: Guillaume Lemaitre <[email protected]>\n# License: BSD 3 clause\n\nfrom abc import ABCMeta, abstractmethod\nfrom copy import deepcopy\n\nimport numpy as np\nfrom joblib import Parallel\nimport scipy.sparse as sparse\n\nfrom ..base import clone\nfrom ..base import ClassifierMixin, RegressorMixin, TransformerMixin\nfrom ..base import is_classifier, is_regressor\nfrom ..exceptions import NotFittedError\nfrom ..utils._estimator_html_repr import _VisualBlock\n\nfrom ._base import _fit_single_estimator\nfrom ._base import _BaseHeterogeneousEnsemble\n\nfrom ..linear_model import LogisticRegression\nfrom ..linear_model import RidgeCV\n\nfrom ..model_selection import cross_val_predict\nfrom ..model_selection import check_cv\n\nfrom ..preprocessing import LabelEncoder\n\nfrom ..utils import Bunch\nfrom ..utils.metaestimators import if_delegate_has_method\nfrom ..utils.multiclass import check_classification_targets\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import column_or_1d\nfrom ..utils.fixes import delayed\n\n\nclass _BaseStacking(TransformerMixin, _BaseHeterogeneousEnsemble, metaclass=ABCMeta):\n \"\"\"Base class for stacking method.\"\"\"\n\n @abstractmethod\n def __init__(\n self,\n estimators,\n final_estimator=None,\n *,\n cv=None,\n stack_method=\"auto\",\n n_jobs=None,\n verbose=0,\n passthrough=False,\n ):\n super().__init__(estimators=estimators)\n self.final_estimator = final_estimator\n self.cv = cv\n self.stack_method = stack_method\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.passthrough = passthrough\n\n def _clone_final_estimator(self, default):\n if self.final_estimator is not None:\n self.final_estimator_ = clone(self.final_estimator)\n else:\n self.final_estimator_ = clone(default)\n\n def _concatenate_predictions(self, X, predictions):\n \"\"\"Concatenate the predictions of each first layer learner and\n possibly the input dataset `X`.\n\n If `X` is sparse and `self.passthrough` is False, the output of\n `transform` will be dense (the predictions). If `X` is sparse\n and `self.passthrough` is True, the output of `transform` will\n be sparse.\n\n This helper is in charge of ensuring the predictions are 2D arrays and\n it will drop one of the probability column when using probabilities\n in the binary case. 
Indeed, the p(y|c=0) = 1 - p(y|c=1)\n \"\"\"\n X_meta = []\n for est_idx, preds in enumerate(predictions):\n # case where the the estimator returned a 1D array\n if preds.ndim == 1:\n X_meta.append(preds.reshape(-1, 1))\n else:\n if (\n self.stack_method_[est_idx] == \"predict_proba\"\n and len(self.classes_) == 2\n ):\n # Remove the first column when using probabilities in\n # binary classification because both features are perfectly\n # collinear.\n X_meta.append(preds[:, 1:])\n else:\n X_meta.append(preds)\n if self.passthrough:\n X_meta.append(X)\n if sparse.issparse(X):\n return sparse.hstack(X_meta, format=X.format)\n\n return np.hstack(X_meta)\n\n @staticmethod\n def _method_name(name, estimator, method):\n if estimator == \"drop\":\n return None\n if method == \"auto\":\n if getattr(estimator, \"predict_proba\", None):\n return \"predict_proba\"\n elif getattr(estimator, \"decision_function\", None):\n return \"decision_function\"\n else:\n return \"predict\"\n else:\n if not hasattr(estimator, method):\n raise ValueError(\n \"Underlying estimator {} does not implement the method {}.\".format(\n name, method\n )\n )\n return method\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the estimators.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,) or default=None\n Sample weights. If None, then samples are equally weighted.\n Note that this is supported only if all underlying estimators\n support sample weights.\n\n .. versionchanged:: 0.23\n when not None, `sample_weight` is passed to all underlying\n estimators\n\n Returns\n -------\n self : object\n \"\"\"\n # all_estimators contains all estimators, the one to be fitted and the\n # 'drop' string.\n names, all_estimators = self._validate_estimators()\n self._validate_final_estimator()\n\n stack_method = [self.stack_method] * len(all_estimators)\n\n # Fit the base estimators on the whole training data. Those\n # base estimators will be used in transform, predict, and\n # predict_proba. 
They are exposed publicly.\n self.estimators_ = Parallel(n_jobs=self.n_jobs)(\n delayed(_fit_single_estimator)(clone(est), X, y, sample_weight)\n for est in all_estimators\n if est != \"drop\"\n )\n\n self.named_estimators_ = Bunch()\n est_fitted_idx = 0\n for name_est, org_est in zip(names, all_estimators):\n if org_est != \"drop\":\n current_estimator = self.estimators_[est_fitted_idx]\n self.named_estimators_[name_est] = current_estimator\n est_fitted_idx += 1\n if hasattr(current_estimator, \"feature_names_in_\"):\n self.feature_names_in_ = current_estimator.feature_names_in_\n else:\n self.named_estimators_[name_est] = \"drop\"\n\n # To train the meta-classifier using the most data as possible, we use\n # a cross-validation to obtain the output of the stacked estimators.\n\n # To ensure that the data provided to each estimator are the same, we\n # need to set the random state of the cv if there is one and we need to\n # take a copy.\n cv = check_cv(self.cv, y=y, classifier=is_classifier(self))\n if hasattr(cv, \"random_state\") and cv.random_state is None:\n cv.random_state = np.random.RandomState()\n\n self.stack_method_ = [\n self._method_name(name, est, meth)\n for name, est, meth in zip(names, all_estimators, stack_method)\n ]\n fit_params = (\n {\"sample_weight\": sample_weight} if sample_weight is not None else None\n )\n predictions = Parallel(n_jobs=self.n_jobs)(\n delayed(cross_val_predict)(\n clone(est),\n X,\n y,\n cv=deepcopy(cv),\n method=meth,\n n_jobs=self.n_jobs,\n fit_params=fit_params,\n verbose=self.verbose,\n )\n for est, meth in zip(all_estimators, self.stack_method_)\n if est != \"drop\"\n )\n\n # Only not None or not 'drop' estimators will be used in transform.\n # Remove the None from the method as well.\n self.stack_method_ = [\n meth\n for (meth, est) in zip(self.stack_method_, all_estimators)\n if est != \"drop\"\n ]\n\n X_meta = self._concatenate_predictions(X, predictions)\n _fit_single_estimator(\n self.final_estimator_, X_meta, y, sample_weight=sample_weight\n )\n\n return self\n\n @property\n def n_features_in_(self):\n \"\"\"Number of features seen during :term:`fit`.\"\"\"\n try:\n check_is_fitted(self)\n except NotFittedError as nfe:\n raise AttributeError(\n f\"{self.__class__.__name__} object has no attribute n_features_in_\"\n ) from nfe\n return self.estimators_[0].n_features_in_\n\n def _transform(self, X):\n \"\"\"Concatenate and return the predictions of the estimators.\"\"\"\n check_is_fitted(self)\n predictions = [\n getattr(est, meth)(X)\n for est, meth in zip(self.estimators_, self.stack_method_)\n if est != \"drop\"\n ]\n return self._concatenate_predictions(X, predictions)\n\n @if_delegate_has_method(delegate=\"final_estimator_\")\n def predict(self, X, **predict_params):\n \"\"\"Predict target for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n **predict_params : dict of str -> obj\n Parameters to the `predict` called by the `final_estimator`. Note\n that this may be used to return uncertainties from some estimators\n with `return_std` or `return_cov`. 
Be aware that it will only\n accounts for uncertainty in the final estimator.\n\n Returns\n -------\n y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)\n Predicted targets.\n \"\"\"\n\n check_is_fitted(self)\n return self.final_estimator_.predict(self.transform(X), **predict_params)\n\n def _sk_visual_block_(self, final_estimator):\n names, estimators = zip(*self.estimators)\n parallel = _VisualBlock(\"parallel\", estimators, names=names, dash_wrapped=False)\n\n # final estimator is wrapped in a parallel block to show the label:\n # 'final_estimator' in the html repr\n final_block = _VisualBlock(\n \"parallel\", [final_estimator], names=[\"final_estimator\"], dash_wrapped=False\n )\n return _VisualBlock(\"serial\", (parallel, final_block), dash_wrapped=False)\n\n\nclass StackingClassifier(ClassifierMixin, _BaseStacking):\n \"\"\"Stack of estimators with a final classifier.\n\n Stacked generalization consists in stacking the output of individual\n estimator and use a classifier to compute the final prediction. Stacking\n allows to use the strength of each individual estimator by using their\n output as input of a final estimator.\n\n Note that `estimators_` are fitted on the full `X` while `final_estimator_`\n is trained using cross-validated predictions of the base estimators using\n `cross_val_predict`.\n\n Read more in the :ref:`User Guide <stacking>`.\n\n .. versionadded:: 0.22\n\n Parameters\n ----------\n estimators : list of (str, estimator)\n Base estimators which will be stacked together. Each element of the\n list is defined as a tuple of string (i.e. name) and an estimator\n instance. An estimator can be set to 'drop' using `set_params`.\n\n final_estimator : estimator, default=None\n A classifier which will be used to combine the base estimators.\n The default classifier is a\n :class:`~sklearn.linear_model.LogisticRegression`.\n\n cv : int, cross-validation generator or an iterable, default=None\n Determines the cross-validation splitting strategy used in\n `cross_val_predict` to train `final_estimator`. Possible inputs for\n cv are:\n\n * None, to use the default 5-fold cross validation,\n * integer, to specify the number of folds in a (Stratified) KFold,\n * An object to be used as a cross-validation generator,\n * An iterable yielding train, test splits.\n\n For integer/None inputs, if the estimator is a classifier and y is\n either binary or multiclass,\n :class:`~sklearn.model_selection.StratifiedKFold` is used.\n In all other cases, :class:`~sklearn.model_selection.KFold` is used.\n These splitters are instantiated with `shuffle=False` so the splits\n will be the same across calls.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. note::\n A larger number of split will provide no benefits if the number\n of training samples is large enough. Indeed, the training time\n will increase. ``cv`` is not used for model evaluation but for\n prediction.\n\n stack_method : {'auto', 'predict_proba', 'decision_function', 'predict'}, \\\n default='auto'\n Methods called for each base estimator. It can be:\n\n * if 'auto', it will try to invoke, for each estimator,\n `'predict_proba'`, `'decision_function'` or `'predict'` in that\n order.\n * otherwise, one of `'predict_proba'`, `'decision_function'` or\n `'predict'`. 
If the method is not implemented by the estimator, it\n will raise an error.\n\n n_jobs : int, default=None\n The number of jobs to run in parallel all `estimators` `fit`.\n `None` means 1 unless in a `joblib.parallel_backend` context. -1 means\n using all processors. See Glossary for more details.\n\n passthrough : bool, default=False\n When False, only the predictions of estimators will be used as\n training data for `final_estimator`. When True, the\n `final_estimator` is trained on the predictions as well as the\n original training data.\n\n verbose : int, default=0\n Verbosity level.\n\n Attributes\n ----------\n classes_ : ndarray of shape (n_classes,)\n Class labels.\n\n estimators_ : list of estimators\n The elements of the estimators parameter, having been fitted on the\n training data. If an estimator has been set to `'drop'`, it\n will not appear in `estimators_`.\n\n named_estimators_ : :class:`~sklearn.utils.Bunch`\n Attribute to access any fitted sub-estimators by name.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`. Only defined if the\n underlying classifier exposes such an attribute when fit.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Only defined if the\n underlying estimators expose such an attribute when fit.\n .. versionadded:: 1.0\n\n final_estimator_ : estimator\n The classifier which predicts given the output of `estimators_`.\n\n stack_method_ : list of str\n The method used by each base estimator.\n\n See Also\n --------\n StackingRegressor : Stack of estimators with a final regressor.\n\n Notes\n -----\n When `predict_proba` is used by each estimator (i.e. most of the time for\n `stack_method='auto'` or specifically for `stack_method='predict_proba'`),\n The first column predicted by each estimator will be dropped in the case\n of a binary classification problem. Indeed, both feature will be perfectly\n collinear.\n\n References\n ----------\n .. [1] Wolpert, David H. \"Stacked generalization.\" Neural networks 5.2\n (1992): 241-259.\n\n Examples\n --------\n >>> from sklearn.datasets import load_iris\n >>> from sklearn.ensemble import RandomForestClassifier\n >>> from sklearn.svm import LinearSVC\n >>> from sklearn.linear_model import LogisticRegression\n >>> from sklearn.preprocessing import StandardScaler\n >>> from sklearn.pipeline import make_pipeline\n >>> from sklearn.ensemble import StackingClassifier\n >>> X, y = load_iris(return_X_y=True)\n >>> estimators = [\n ... ('rf', RandomForestClassifier(n_estimators=10, random_state=42)),\n ... ('svr', make_pipeline(StandardScaler(),\n ... LinearSVC(random_state=42)))\n ... ]\n >>> clf = StackingClassifier(\n ... estimators=estimators, final_estimator=LogisticRegression()\n ... )\n >>> from sklearn.model_selection import train_test_split\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, stratify=y, random_state=42\n ... 
)\n >>> clf.fit(X_train, y_train).score(X_test, y_test)\n 0.9...\n \"\"\"\n\n def __init__(\n self,\n estimators,\n final_estimator=None,\n *,\n cv=None,\n stack_method=\"auto\",\n n_jobs=None,\n passthrough=False,\n verbose=0,\n ):\n super().__init__(\n estimators=estimators,\n final_estimator=final_estimator,\n cv=cv,\n stack_method=stack_method,\n n_jobs=n_jobs,\n passthrough=passthrough,\n verbose=verbose,\n )\n\n def _validate_final_estimator(self):\n self._clone_final_estimator(default=LogisticRegression())\n if not is_classifier(self.final_estimator_):\n raise ValueError(\n \"'final_estimator' parameter should be a classifier. Got {}\".format(\n self.final_estimator_\n )\n )\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the estimators.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n Note that this is supported only if all underlying estimators\n support sample weights.\n\n Returns\n -------\n self : object\n Returns a fitted instance of estimator.\n \"\"\"\n check_classification_targets(y)\n self._le = LabelEncoder().fit(y)\n self.classes_ = self._le.classes_\n return super().fit(X, self._le.transform(y), sample_weight)\n\n @if_delegate_has_method(delegate=\"final_estimator_\")\n def predict(self, X, **predict_params):\n \"\"\"Predict target for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n **predict_params : dict of str -> obj\n Parameters to the `predict` called by the `final_estimator`. Note\n that this may be used to return uncertainties from some estimators\n with `return_std` or `return_cov`. 
Be aware that it will only\n accounts for uncertainty in the final estimator.\n\n Returns\n -------\n y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)\n Predicted targets.\n \"\"\"\n y_pred = super().predict(X, **predict_params)\n return self._le.inverse_transform(y_pred)\n\n @if_delegate_has_method(delegate=\"final_estimator_\")\n def predict_proba(self, X):\n \"\"\"Predict class probabilities for `X` using the final estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n probabilities : ndarray of shape (n_samples, n_classes) or \\\n list of ndarray of shape (n_output,)\n The class probabilities of the input samples.\n \"\"\"\n check_is_fitted(self)\n return self.final_estimator_.predict_proba(self.transform(X))\n\n @if_delegate_has_method(delegate=\"final_estimator_\")\n def decision_function(self, X):\n \"\"\"Decision function for samples in `X` using the final estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n decisions : ndarray of shape (n_samples,), (n_samples, n_classes), \\\n or (n_samples, n_classes * (n_classes-1) / 2)\n The decision function computed the final estimator.\n \"\"\"\n check_is_fitted(self)\n return self.final_estimator_.decision_function(self.transform(X))\n\n def transform(self, X):\n \"\"\"Return class labels or probabilities for X for each estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n y_preds : ndarray of shape (n_samples, n_estimators) or \\\n (n_samples, n_classes * n_estimators)\n Prediction outputs for each estimator.\n \"\"\"\n return self._transform(X)\n\n def _sk_visual_block_(self):\n # If final_estimator's default changes then this should be\n # updated.\n if self.final_estimator is None:\n final_estimator = LogisticRegression()\n else:\n final_estimator = self.final_estimator\n return super()._sk_visual_block_(final_estimator)\n\n\nclass StackingRegressor(RegressorMixin, _BaseStacking):\n \"\"\"Stack of estimators with a final regressor.\n\n Stacked generalization consists in stacking the output of individual\n estimator and use a regressor to compute the final prediction. Stacking\n allows to use the strength of each individual estimator by using their\n output as input of a final estimator.\n\n Note that `estimators_` are fitted on the full `X` while `final_estimator_`\n is trained using cross-validated predictions of the base estimators using\n `cross_val_predict`.\n\n Read more in the :ref:`User Guide <stacking>`.\n\n .. versionadded:: 0.22\n\n Parameters\n ----------\n estimators : list of (str, estimator)\n Base estimators which will be stacked together. Each element of the\n list is defined as a tuple of string (i.e. name) and an estimator\n instance. 
An estimator can be set to 'drop' using `set_params`.\n\n final_estimator : estimator, default=None\n A regressor which will be used to combine the base estimators.\n The default regressor is a :class:`~sklearn.linear_model.RidgeCV`.\n\n cv : int, cross-validation generator or an iterable, default=None\n Determines the cross-validation splitting strategy used in\n `cross_val_predict` to train `final_estimator`. Possible inputs for\n cv are:\n\n * None, to use the default 5-fold cross validation,\n * integer, to specify the number of folds in a (Stratified) KFold,\n * An object to be used as a cross-validation generator,\n * An iterable yielding train, test splits.\n\n For integer/None inputs, if the estimator is a classifier and y is\n either binary or multiclass,\n :class:`~sklearn.model_selection.StratifiedKFold` is used.\n In all other cases, :class:`~sklearn.model_selection.KFold` is used.\n These splitters are instantiated with `shuffle=False` so the splits\n will be the same across calls.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. note::\n A larger number of split will provide no benefits if the number\n of training samples is large enough. Indeed, the training time\n will increase. ``cv`` is not used for model evaluation but for\n prediction.\n\n n_jobs : int, default=None\n The number of jobs to run in parallel for `fit` of all `estimators`.\n `None` means 1 unless in a `joblib.parallel_backend` context. -1 means\n using all processors. See Glossary for more details.\n\n passthrough : bool, default=False\n When False, only the predictions of estimators will be used as\n training data for `final_estimator`. When True, the\n `final_estimator` is trained on the predictions as well as the\n original training data.\n\n verbose : int, default=0\n Verbosity level.\n\n Attributes\n ----------\n estimators_ : list of estimator\n The elements of the estimators parameter, having been fitted on the\n training data. If an estimator has been set to `'drop'`, it\n will not appear in `estimators_`.\n\n named_estimators_ : :class:`~sklearn.utils.Bunch`\n Attribute to access any fitted sub-estimators by name.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`. Only defined if the\n underlying regressor exposes such an attribute when fit.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Only defined if the\n underlying estimators expose such an attribute when fit.\n .. versionadded:: 1.0\n\n final_estimator_ : estimator\n The regressor to stacked the base estimators fitted.\n\n stack_method_ : list of str\n The method used by each base estimator.\n\n References\n ----------\n .. [1] Wolpert, David H. \"Stacked generalization.\" Neural networks 5.2\n (1992): 241-259.\n\n Examples\n --------\n >>> from sklearn.datasets import load_diabetes\n >>> from sklearn.linear_model import RidgeCV\n >>> from sklearn.svm import LinearSVR\n >>> from sklearn.ensemble import RandomForestRegressor\n >>> from sklearn.ensemble import StackingRegressor\n >>> X, y = load_diabetes(return_X_y=True)\n >>> estimators = [\n ... ('lr', RidgeCV()),\n ... ('svr', LinearSVR(random_state=42))\n ... ]\n >>> reg = StackingRegressor(\n ... estimators=estimators,\n ... final_estimator=RandomForestRegressor(n_estimators=10,\n ... random_state=42)\n ... 
)\n >>> from sklearn.model_selection import train_test_split\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... X, y, random_state=42\n ... )\n >>> reg.fit(X_train, y_train).score(X_test, y_test)\n 0.3...\n\n \"\"\"\n\n def __init__(\n self,\n estimators,\n final_estimator=None,\n *,\n cv=None,\n n_jobs=None,\n passthrough=False,\n verbose=0,\n ):\n super().__init__(\n estimators=estimators,\n final_estimator=final_estimator,\n cv=cv,\n stack_method=\"predict\",\n n_jobs=n_jobs,\n passthrough=passthrough,\n verbose=verbose,\n )\n\n def _validate_final_estimator(self):\n self._clone_final_estimator(default=RidgeCV())\n if not is_regressor(self.final_estimator_):\n raise ValueError(\n \"'final_estimator' parameter should be a regressor. Got {}\".format(\n self.final_estimator_\n )\n )\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the estimators.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n Note that this is supported only if all underlying estimators\n support sample weights.\n\n Returns\n -------\n self : object\n \"\"\"\n y = column_or_1d(y, warn=True)\n return super().fit(X, y, sample_weight)\n\n def transform(self, X):\n \"\"\"Return the predictions for X for each estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n y_preds : ndarray of shape (n_samples, n_estimators)\n Prediction outputs for each estimator.\n \"\"\"\n return self._transform(X)\n\n def _sk_visual_block_(self):\n # If final_estimator's default changes then this should be\n # updated.\n if self.final_estimator is None:\n final_estimator = RidgeCV()\n else:\n final_estimator = self.final_estimator\n return super()._sk_visual_block_(final_estimator)\n",
"\"\"\"\n================================\nNearest Neighbors Classification\n================================\n\nSample usage of Nearest Neighbors classification.\nIt will plot the decision boundaries for each class.\n\"\"\"\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.colors import ListedColormap\nfrom sklearn import neighbors, datasets\n\nn_neighbors = 15\n\n# import some data to play with\niris = datasets.load_iris()\n\n# we only take the first two features. We could avoid this ugly\n# slicing by using a two-dim dataset\nX = iris.data[:, :2]\ny = iris.target\n\nh = 0.02 # step size in the mesh\n\n# Create color maps\ncmap_light = ListedColormap([\"orange\", \"cyan\", \"cornflowerblue\"])\ncmap_bold = [\"darkorange\", \"c\", \"darkblue\"]\n\nfor weights in [\"uniform\", \"distance\"]:\n # we create an instance of Neighbours Classifier and fit the data.\n clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)\n clf.fit(X, y)\n\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, x_max]x[y_min, y_max].\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.figure(figsize=(8, 6))\n plt.contourf(xx, yy, Z, cmap=cmap_light)\n\n # Plot also the training points\n sns.scatterplot(\n x=X[:, 0],\n y=X[:, 1],\n hue=iris.target_names[y],\n palette=cmap_bold,\n alpha=1.0,\n edgecolor=\"black\",\n )\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n plt.title(\n \"3-Class classification (k = %i, weights = '%s')\" % (n_neighbors, weights)\n )\n plt.xlabel(iris.feature_names[0])\n plt.ylabel(iris.feature_names[1])\n\nplt.show()\n"
] | [
[
"sklearn.isotonic.IsotonicRegression",
"numpy.linspace",
"matplotlib.collections.LineCollection",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.full",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show",
"sklearn.utils.check_random_state"
],
[
"matplotlib.pyplot.legend",
"sklearn.tree.DecisionTreeRegressor",
"numpy.random.seed",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"numpy.sort",
"matplotlib.pyplot.plot",
"numpy.exp",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"numpy.mean",
"numpy.random.rand",
"numpy.random.normal",
"matplotlib.pyplot.subplots_adjust",
"numpy.var",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.clabel",
"numpy.abs",
"numpy.linspace",
"matplotlib.pyplot.contour",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"sklearn.cluster.AffinityPropagation",
"matplotlib.pyplot.title",
"sklearn.metrics.silhouette_score",
"sklearn.metrics.v_measure_score",
"sklearn.metrics.homogeneity_score",
"sklearn.metrics.completeness_score",
"matplotlib.pyplot.plot",
"sklearn.metrics.adjusted_mutual_info_score",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"sklearn.metrics.adjusted_rand_score",
"matplotlib.pyplot.show",
"sklearn.datasets.make_blobs",
"matplotlib.pyplot.figure"
],
[
"numpy.abs",
"numpy.linspace",
"numpy.clip",
"numpy.ediff1d",
"numpy.arange",
"numpy.percentile",
"numpy.full",
"numpy.int_",
"numpy.digitize",
"numpy.array",
"numpy.where",
"numpy.zeros"
],
[
"matplotlib.pyplot.gca",
"sklearn.neighbors.NeighborhoodComponentsAnalysis",
"matplotlib.pyplot.scatter",
"numpy.arange",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.pcolormesh",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"sklearn.model_selection.GridSearchCV",
"sklearn.model_selection.cross_val_score",
"matplotlib.pyplot.title",
"sklearn.datasets.load_iris",
"sklearn.model_selection.KFold",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"sklearn.svm.SVC",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.dot",
"scipy.linalg.get_blas_funcs",
"numpy.sum",
"numpy.abs",
"numpy.inner",
"numpy.asarray",
"numpy.arange",
"numpy.asfortranarray",
"numpy.empty_like",
"numpy.squeeze",
"numpy.finfo",
"numpy.flatnonzero",
"scipy.linalg.norm",
"scipy.linalg.lapack.get_lapack_funcs",
"numpy.zeros",
"scipy.linalg.solve_triangular",
"numpy.empty"
],
[
"scipy.sparse.hstack",
"numpy.hstack",
"numpy.random.RandomState",
"scipy.sparse.issparse"
],
[
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.title",
"numpy.arange",
"sklearn.datasets.load_iris",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.ylabel",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
weihaoxie/FaceX-Zoo | [
"db0b087e4f4d28152e172d6c8d3767a8870733b4",
"db0b087e4f4d28152e172d6c8d3767a8870733b4"
] | [
"addition_module/DSDG/DUM/utils.py",
"test_protocol/remove_noises.py"
] | [
"import os\nimport numpy as np\nimport torch\nimport shutil\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport sklearn\nfrom sklearn import metrics\nfrom sklearn.metrics import roc_curve, auc\nimport pdb\n\n\nclass AvgrageMeter(object):\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef get_threshold(score_file):\n with open(score_file, 'r') as file:\n lines = file.readlines()\n\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n angle = float(tokens[0])\n # pdb.set_trace()\n type = int(tokens[1])\n data.append({'map_score': angle, 'label': type})\n if type == 1:\n num_real += 1\n else:\n num_fake += 1\n\n min_error = count # account ACER (or ACC)\n min_threshold = 0.0\n min_ACC = 0.0\n min_ACER = 0.0\n min_APCER = 0.0\n min_BPCER = 0.0\n\n for d in data:\n threshold = d['map_score']\n\n type1 = len([s for s in data if s['map_score'] <= threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > threshold and s['label'] == 0])\n\n ACC = 1 - (type1 + type2) / count\n APCER = type2 / num_fake\n BPCER = type1 / num_real\n ACER = (APCER + BPCER) / 2.0\n\n if ACER < min_error:\n min_error = ACER\n min_threshold = threshold\n min_ACC = ACC\n min_ACER = ACER\n min_APCER = APCER\n min_BPCER = min_BPCER\n\n # print(min_error, min_threshold)\n return min_threshold, min_ACC, min_APCER, min_BPCER, min_ACER\n\n\ndef test_threshold_based(threshold, score_file):\n with open(score_file, 'r') as file:\n lines = file.readlines()\n\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n angle = float(tokens[0])\n type = int(tokens[1])\n data.append({'map_score': angle, 'label': type})\n if type == 1:\n num_real += 1\n else:\n num_fake += 1\n\n type1 = len([s for s in data if s['map_score'] <= threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > threshold and s['label'] == 0])\n\n ACC = 1 - (type1 + type2) / count\n APCER = type2 / num_fake\n BPCER = type1 / num_real\n ACER = (APCER + BPCER) / 2.0\n\n return ACC, APCER, BPCER, ACER\n\n\ndef get_err_threhold(fpr, tpr, threshold):\n RightIndex = (tpr + (1 - fpr) - 1)\n right_index = np.argmax(RightIndex)\n best_th = threshold[right_index]\n err = fpr[right_index]\n\n differ_tpr_fpr_1 = tpr + fpr - 1.0\n\n right_index = np.argmin(np.abs(differ_tpr_fpr_1))\n best_th = threshold[right_index]\n err = fpr[right_index]\n\n # print(err, best_th)\n return err, best_th\n\n\n# def performances(dev_scores, dev_labels, test_scores, test_labels):\ndef performances(map_score_val_filename, map_score_test_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = float(tokens[1]) # label = int(tokens[1])\n 
val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n val_err, val_threshold = get_err_threhold(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n val_APCER = type2 / num_fake\n val_BPCER = type1 / num_real\n val_ACER = (val_APCER + val_BPCER) / 2.0\n\n # test \n with open(map_score_test_filename, 'r') as file2:\n lines = file2.readlines()\n test_scores = []\n test_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = float(tokens[1]) # label = int(tokens[1])\n test_scores.append(score)\n test_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n # test based on val_threshold \n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n print([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n print([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n test_ACC = 1 - (type1 + type2) / count\n test_APCER = type2 / num_fake\n test_BPCER = type1 / num_real\n test_ACER = (test_APCER + test_BPCER) / 2.0\n\n # test based on test_threshold \n fpr_test, tpr_test, threshold_test = roc_curve(test_labels, test_scores, pos_label=1)\n err_test, best_test_threshold = get_err_threhold(fpr_test, tpr_test, threshold_test)\n\n type1 = len([s for s in data if s['map_score'] <= best_test_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > best_test_threshold and s['label'] == 0])\n\n test_threshold_ACC = 1 - (type1 + type2) / count\n test_threshold_APCER = type2 / num_fake\n test_threshold_BPCER = type1 / num_real\n test_threshold_ACER = (test_threshold_APCER + test_threshold_BPCER) / 2.0\n\n return val_threshold, best_test_threshold, val_ACC, val_ACER, test_ACC, test_APCER, test_BPCER, test_ACER, test_threshold_ACER\n\n\ndef performances_SiW_EER(map_score_val_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = int(tokens[1])\n val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n val_err, val_threshold = get_err_threhold(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n val_APCER = type2 / num_fake\n val_BPCER = type1 / num_real\n val_ACER = (val_APCER + val_BPCER) / 2.0\n\n return val_threshold, val_ACC, val_APCER, val_BPCER, val_ACER\n\n\ndef performances_SiWM_EER(map_score_val_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = 
file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = int(tokens[1])\n val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n val_err, val_threshold = get_err_threhold(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n val_APCER = type2 / num_fake\n val_BPCER = type1 / num_real\n val_ACER = (val_APCER + val_BPCER) / 2.0\n\n return val_threshold, val_err, val_ACC, val_APCER, val_BPCER, val_ACER\n\n\ndef get_err_threhold_CASIA_Replay(fpr, tpr, threshold):\n RightIndex = (tpr + (1 - fpr) - 1)\n right_index = np.argmax(RightIndex)\n best_th = threshold[right_index]\n err = fpr[right_index]\n\n differ_tpr_fpr_1 = tpr + fpr - 1.0\n\n right_index = np.argmin(np.abs(differ_tpr_fpr_1))\n best_th = threshold[right_index]\n err = fpr[right_index]\n\n # print(err, best_th)\n return err, best_th, right_index\n\n\ndef performances_CASIA_Replay(map_score_val_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = float(tokens[1]) # int(tokens[1])\n val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n val_err, val_threshold, right_index = get_err_threhold_CASIA_Replay(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n print([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n print([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n\n FRR = 1 - tpr # FRR = 1 - TPR\n\n HTER = (fpr + FRR) / 2.0 # error recognition rate & reject recognition rate\n\n return val_ACC, fpr[right_index], FRR[right_index], HTER[right_index], val_threshold\n\n\ndef performances_ZeroShot(map_score_val_filename):\n # val\n with open(map_score_val_filename, 'r') as file:\n lines = file.readlines()\n val_scores = []\n val_labels = []\n data = []\n count = 0.0\n num_real = 0.0\n num_fake = 0.0\n for line in lines:\n count += 1\n tokens = line.split()\n score = float(tokens[0])\n label = int(tokens[1])\n val_scores.append(score)\n val_labels.append(label)\n data.append({'map_score': score, 'label': label})\n if label == 1:\n num_real += 1\n else:\n num_fake += 1\n\n fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)\n auc_val = metrics.auc(fpr, tpr)\n\n val_err, val_threshold, right_index = get_err_threhold_CASIA_Replay(fpr, tpr, threshold)\n\n type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])\n type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])\n\n val_ACC = 1 - (type1 + type2) / count\n\n FRR = 
1 - tpr # FRR = 1 - TPR\n\n HTER = (fpr + FRR) / 2.0 # error recognition rate & reject recognition rate\n\n return val_ACC, auc_val, HTER[right_index]\n\n\ndef count_parameters_in_MB(model):\n return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if \"auxiliary\" not in name) / 1e6\n\n\ndef save_checkpoint(state, is_best, save):\n filename = os.path.join(save, 'checkpoint.pth.tar')\n torch.save(state, filename)\n if is_best:\n best_filename = os.path.join(save, 'model_best.pth.tar')\n shutil.copyfile(filename, best_filename)\n\n\ndef save(model, model_path):\n torch.save(model.state_dict(), model_path)\n\n\ndef load(model, model_path):\n model.load_state_dict(torch.load(model_path))\n\n\ndef drop_path(x, drop_prob):\n if drop_prob > 0.:\n keep_prob = 1. - drop_prob\n mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1, 1).bernoulli_(keep_prob))\n x.div_(keep_prob)\n x.mul_(mask)\n return x\n\n\ndef create_exp_dir(path, scripts_to_save=None):\n if not os.path.exists(path):\n os.mkdir(path)\n print('Experiment dir : {}'.format(path))\n\n if scripts_to_save is not None:\n os.mkdir(os.path.join(path, 'scripts'))\n for script in scripts_to_save:\n dst_file = os.path.join(path, 'scripts', os.path.basename(script))\n shutil.copyfile(script, dst_file)\n",
"\"\"\"\n@author: JiXuan Xu, Jun Wang\n@date: 20201012\n@contact: [email protected] \n\"\"\"\n\n# based on:\n# https://github.com/deepinsight/insightface/blob/master/Evaluation/Megaface/remove_noises.py\n# for 1M test, remove facescrub noise 25, megaface noise 686.\n\nimport os\nimport yaml\nimport numpy as np\nimport argparse\nimport shutil\nimport logging as logger\n\nlogger.basicConfig(level = logger.INFO, \n format = '%(levelname)s %(asctime)s %(filename)s: %(lineno)d] %(message)s',\n datefmt = '%Y-%m-%d %H:%M:%S')\n\nfeature_dim = 512\n\ndef remove_facescrub_noises(facescrub_noises_file, facescrub_feature_dir, facescrub_feature_outdir):\n \"\"\"Remove the noise in facescrub.\n We use the class center of certain id as the feature of noise faces.\n\n Args:\n facescrub_noises_file(str): the path of facescrub noise list provided by deepglint.\n facescrub_feature_dir(str): thpe directory which contains the features of facescrub.\n facescrub_feature_outdir(str): the directory to save the clean features of facescrub.\n \"\"\"\n noise_image2person_name = {}\n for line in open(facescrub_noises_file, 'r'):\n if line.startswith('#'):\n continue\n noise_image = line.strip()\n fname = noise_image.split('.')[0]\n person_name = fname[0 : fname.rfind('_')]\n noise_image2person_name[noise_image] = person_name\n logger.info('Total noise images in facescrub: %d.' % len(noise_image2person_name))\n person_name2center = {}\n noises = []\n for root, dirs, files in os.walk(facescrub_feature_dir):\n for feat_name in files:\n feat_name_ext = os.path.splitext(feat_name)[-1]\n if feat_name_ext == '.npy':\n feat_path = os.path.join(root, feat_name)\n assert(os.path.exists(feat_path))\n person_name = feat_path.split('/')[-2]\n image_name = feat_name[:-4] + '.jpg'\n cur_feature_outdir = os.path.join(facescrub_feature_outdir, person_name)\n if not os.path.exists(cur_feature_outdir):\n os.makedirs(cur_feature_outdir)\n cur_feature_outpath = os.path.join(cur_feature_outdir, feat_name)\n if not image_name in noise_image2person_name:\n cur_feature = np.load(feat_path)\n np.save(cur_feature_outpath, cur_feature)\n if not person_name in person_name2center:\n person_name2center[person_name] = np.zeros((feature_dim,), dtype=np.float32)\n person_name2center[person_name] += cur_feature\n else:\n noises.append((person_name, feat_name))\n logger.info('Total noise images in current facescrub dir: %d.' % len(noises))\n for (person_name, feat_name) in noises:\n assert person_name in person_name2center\n center = person_name2center[person_name]\n center /= np.linalg.norm(center)\n cur_feature_outpath = os.path.join(facescrub_feature_outdir, person_name, feat_name)\n np.save(cur_feature_outpath, center)\n\ndef remove_megaface_noises(megaface_noises_file, megaface_feature_dir, megaface_feature_outdir):\n \"\"\"Remove the noise in megaface.\n We set the feature of noise faces to zero vector, \n since we use cos similarity as the distance metric.\n\n Args:\n megaface_noises_file(str): the path of megaface noise list provided by deepglint.\n megaface_feature_dir(str): thpe directory which contains the features of megaface.\n megaface_feature_outdir(str): the directory to save the clean features of megaface.\n \"\"\"\n noise_image_set = set()\n for line in open(megaface_noises_file, 'r'):\n if line.startswith('#'):\n continue\n line = line.strip()\n _vec = line.split(\"\\t\")\n if len(_vec)>1:\n line = _vec[1]\n noise_image_set.add(line)\n logger.info('Total noise images in megaface: %d.' 
% len(noise_image_set))\n\n count_noises = 0\n for root, dirs, files in os.walk(megaface_feature_dir):\n for feat_name in files:\n feat_name_ext = os.path.splitext(feat_name)[-1]\n if feat_name_ext == '.npy':\n feat_path = os.path.join(root, feat_name)\n assert(os.path.exists(feat_path))\n id1 = feat_path.split('/')[-3]\n id2 = feat_path.split('/')[-2]\n image_name = feat_name[:-4] + '.jpg'\n cur_feature_outdir = os.path.join(megaface_feature_outdir, id1, id2)\n if not os.path.exists(cur_feature_outdir):\n os.makedirs(cur_feature_outdir)\n cur_feature_outpath = os.path.join(cur_feature_outdir, feat_name)\n short_image_path = os.path.join(id1, id2, image_name)\n if not short_image_path in noise_image_set:\n shutil.copyfile(feat_path, cur_feature_outpath)\n else:\n cur_feature = np.zeros((feature_dim,), dtype=np.float32)\n np.save(cur_feature_outpath, cur_feature)\n count_noises += 1\n logger.info('Total noise images in current megaface dir: %d.' % count_noises)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_conf_file', type=str,\n help = \"The path of data_conf.yaml.\")\n parser.add_argument('--remove_facescrub_noise', type=int, \n help = \"Remove facescrub noise or not, 1 for remove.\")\n parser.add_argument('--remove_megaface_noise', type=int,\n help = \"Remove megaface noise or not, 1 for remove.\")\n parser.add_argument('--facescrub_feature_dir', type=str, default='')\n parser.add_argument('--facescrub_feature_outdir', type=str, default='')\n parser.add_argument('--megaface_feature_dir', type=str, default='')\n parser.add_argument('--megaface_feature_outdir', type=str, default='')\n parser.add_argument('--masked_facescrub_feature_dir', type=str, default='')\n parser.add_argument('--masked_facescrub_feature_outdir', type=str, default='')\n args = parser.parse_args()\n with open(args.data_conf_file) as f:\n data_conf = yaml.load(f)['MegaFace']\n facescrub_noises_file = data_conf['facescrub_noises_file']\n megaface_noises_file = data_conf['megaface_noises_file']\n megaface_mask = data_conf['megaface-mask']\n if args.remove_facescrub_noise == 1:\n remove_facescrub_noises(\n facescrub_noises_file, args.facescrub_feature_dir, args.facescrub_feature_outdir)\n if args.remove_megaface_noise == 1:\n remove_megaface_noises(\n megaface_noises_file, args.megaface_feature_dir, args.megaface_feature_outdir)\n if megaface_mask == 1:\n remove_facescrub_noises(\n facescrub_noises_file, args.masked_facescrub_feature_dir, args.masked_facescrub_feature_outdir)\n"
] | [
[
"numpy.abs",
"torch.load",
"sklearn.metrics.roc_curve",
"numpy.argmax",
"sklearn.metrics.auc",
"torch.save"
],
[
"numpy.load",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fancyerii/voicebook | [
"def82da8577086d0361643a05fec2463006533a9"
] | [
"chapter_2_collection/diarize.py"
] | [
"'''\n================================================ \n## VOICEBOOK REPOSITORY ## \n================================================ \n\nrepository name: voicebook \nrepository version: 1.0 \nrepository link: https://github.com/jim-schwoebel/voicebook \nauthor: Jim Schwoebel \nauthor contact: [email protected] \ndescription: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts. \nlicense category: opensource \nlicense: Apache 2.0 license \norganization name: NeuroLex Laboratories, Inc. \nlocation: Seattle, WA \nwebsite: https://neurolex.ai \nrelease date: 2018-09-28 \n\nThis code (voicebook) is hereby released under a Apache 2.0 license license. \n\nFor more information, check out the license terms below. \n\n================================================ \n## LICENSE TERMS ## \n================================================ \n\nCopyright 2018 NeuroLex Laboratories, Inc. \n\nLicensed under the Apache License, Version 2.0 (the \"License\"); \nyou may not use this file except in compliance with the License. \nYou may obtain a copy of the License at \n\n http://www.apache.org/licenses/LICENSE-2.0 \n\nUnless required by applicable law or agreed to in writing, software \ndistributed under the License is distributed on an \"AS IS\" BASIS, \nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \nSee the License for the specific language governing permissions and \nlimitations under the License. \n\n================================================ \n## SERVICE STATEMENT ## \n================================================ \n\nIf you are using the code written for a larger project, we are \nhappy to consult with you and help you with deployment. Our team \nhas >10 world experts in Kafka distributed architectures, microservices \nbuilt on top of Node.js / Python / Docker, and applying machine learning to \nmodel speech and text data. \n\nWe have helped a wide variety of enterprises - small businesses, \nresearchers, enterprises, and/or independent developers. \n\nIf you would like to work with us let us know @ [email protected]. 
\n\n================================================ \n## DIARIZE.PY ## \n================================================ \n\nThis function takes in a speech sample and diarizes it for 2 speakers.\n\nThe output files are stored in a folder structure with Speaker A and Speaker B.\n\nIt is assumed to be a 2 speaker diarization problem.\n\nThe output .zip file is named filename[0:-4]+'diarization.zip' and contains:\n\n--->filename[0:-4]+'.json'\n--> speaker 1 folder\n --> speaker 1 sections (multiple .wav files)\n --> speaker 1 stiched togetehr (single .wav file)\n--> speaker 2 folder\n --> speaker 2 sections (multiple .wav files)\n --> speaker 2 stich (single .wav file)\n\nDiarization is done with the pyaudioanalysis3 library.\n'''\n\nimport os, json, importlib, scipy, shutil, ffmpy, time, sys, getpass, zipfile\nimport speech_recognition as sr_audio\nfrom pydub import AudioSegment\nimport numpy as np \n\nif 'pyAudioAnalysis3' not in os.listdir():\n os.system(\"git clone [email protected]:NeuroLexDiagnostics/pyAudioAnalysis3.git\")\n \nsys.path.append(os.getcwd()+'/pyAudioAnalysis3')\n\nimport audioTrainTest as aT\nimport audioBasicIO \nimport audioFeatureExtraction as aF\nimport audioSegmentation as aS\n\n##INITIALIZE FUNCTIONS FOR DIARIZATION\n####################################################################################\n\ndef exportfile(newAudio,time1,time2,filename,i,speaknum):\n #Exports to a wav file in the current path.\n newAudio2 = newAudio[time1:time2]\n print('making '+filename[0:-4]+'_'+str(speaknum)+'_'+str(i)+'_'+str(time1/1000)+'_'+str(time2/1000)+'.wav')\n newAudio2.export(filename[0:-4]+'_'+str(speaknum)+'_'+str(i)+'_'+str(time1/1000)+'_'+str(time2/1000)+'.wav', format=\"wav\")\n\n return filename[0:-4]+'_'+str(speaknum)+'_'+str(i)+'_'+str(time1/1000)+'_'+str(time2/1000)+'.wav'\n\ndef stitchtogether(dirlist,dirloc,filename):\n try:\n #assumes already in proper directory \n for i in range(len(dirlist)):\n if i ==0:\n sound=AudioSegment.from_wav(dirloc+'/'+str(dirlist[i]))\n else:\n sound=sound+AudioSegment.from_wav(dirloc+'/'+str(dirlist[i]))\n sound.export(dirloc+'/'+filename, format=\"wav\")\n\n except:\n print('error stitching...')\n\ndef stereo2mono(audiodata,filename):\n newaudiodata = list()\n \n for i in range(len(audiodata)):\n d = audiodata[i][0]/2 + audiodata[i][1]/2\n newaudiodata.append(d)\n \n return np.array(newaudiodata, dtype='int16')\n #to apply this function, SR=sample rate usually 44100\n #wavfile.write(newfilename, sr, newaudiodata)\n\ndef convertformat(filename):\n newfilename=filename[0:-4]+'.wav'\n ff = ffmpy.FFmpeg(\n inputs={filename:None},\n outputs={newfilename: None}\n )\n ff.run()\n\n return newfilename\n\ndef zipdir(path, ziph):\n # ziph is zipfile handle\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file))\n\ndef transcribe_audio_google(filename):\n # transcribe the audio (note this is only done if a voice sample)\n r=sr_audio.Recognizer()\n with sr_audio.AudioFile(filename) as source:\n audio = r.record(source) \n text=r.recognize_google_cloud(audio)\n\n return text \n\ndef transcribe_audio_sphinx(filename):\n # transcribe the audio (note this is only done if a voice sample)\n r=sr_audio.Recognizer()\n with sr_audio.AudioFile(filename) as source:\n audio = r.record(source) \n text=r.recognize_sphinx(audio)\n print('transcript: '+text)\n \n return text\n\n##GO TO HOST DIRECTORY AND BEGIN BULK PROCESSING 
\n####################################################################################\n\n#host directory in app is likely /usr/app/...\nhostdir=os.getcwd()\ncurdir=os.listdir()\n\n#now create some folders if they have not already been created \nincoming_dir=hostdir+'/diarize-incoming/'\nprocessed_dir=hostdir+'/diarize-processed/'\n\ntry:\n os.chdir(incoming_dir)\n curdir=os.listdir()\n if 'data' not in curdir:\n #this is necessary for diarnization\n shutil.copytree(hostdir+'/pyaudioanalysis3/data/',os.getcwd()+'/data/')\nexcept:\n os.mkdir(incoming_dir)\n os.chdir(incoming_dir)\n curdir=os.listdir()\n if 'data' not in curdir:\n #this is necessary for diarization \n shutil.copytree(hostdir+'/pyaudioanalysis3/data/',os.getcwd()+'/data/')\n\ntry:\n os.chdir(processed_dir)\nexcept:\n os.mkdir(processed_dir)\n\n#change to incoming directory to look for samples\nos.chdir(incoming_dir)\n\n#initialize sleep time for worker (default is 1 second)\nsleeptime=1\n\n# now initialize process list with files already in the directory\nprocesslist=os.listdir()\nconvertformat_list=list()\n\n#error counts will help us debug later\nerrorcount=0\nprocesscount=0\n\n#initialize t for infinite loop\nt=1\n\n#infinite loop for worker now begins with while loop...\n\nwhile t>0:\n\n #go to incoming directory\n os.chdir(incoming_dir)\n listdir=os.listdir()\n print(listdir)\n\n #try statement to avoid errors\n try:\n if listdir==['.DS_Store'] or listdir == ['data'] or listdir==['data','.DS_Store'] or listdir==[]:\n #pass if no files are processible\n print('no files found...')\n \n else:\n #look for any files that have not been previously in the directory\n for i in range(len(listdir)):\n if listdir[i]=='.DS_Store' or listdir[i]=='data':\n pass\n \n else:\n #convert format if not .wav\n if listdir[i][-4:] != '.wav':\n filename=convertformat(listdir[i])\n os.remove(listdir[i])\n else:\n filename=listdir[i]\n \n #log start time for later \n start_time=time.time()\n \n if filename not in processlist:\n print('processing '+filename)\n processlist.append(listdir[i])\n filesize=os.path.getsize(filename)\n \n if filesize > int(500):\n #if over 20 minute of audio collected (10.580MB), assume 2 speakers \n\n shutil.copy(incoming_dir+filename,hostdir+'/pyaudioanalysis3/data/'+filename)\n \n g=aS.speakerDiarization(filename,2,mtSize=2.0,mtStep=0.2,stWin=0.05,LDAdim=35, PLOT=False)\n\n s0seg=list()\n s1seg=list()\n allseg=list()\n\n for i in range(len(g)-1):\n if i==0:\n start=i/5.0\n else:\n if g[i]==g[i+1]:\n pass\n #continue where left off to find start length, 20 milliseconds \n else:\n if g[i+1]==0:\n end=i/5.0\n s1seg.append([start,end])\n allseg.append([0,[start,end]])\n start=(i+1)/5.0\n \n elif g[i+1]==1:\n end=i/5.0\n s0seg.append([start,end])\n allseg.append([1, [start,end]])\n start=(i+1)/5.0\n \n else:\n print('error')\n\n #now save this data in individual segments\n newAudio = AudioSegment.from_wav(filename)\n diarizedir=os.getcwd()+'/'+filename[0:-4]+'_diarization'\n\n try:\n os.mkdir(diarizedir)\n os.chdir(diarizedir)\n except:\n os.chdir(diarizedir)\n\n #copy file to this directory and delete from other directory\n shutil.move(incoming_dir+filename,os.getcwd()+'/'+filename)\n\n #diarize speaker 1 \n print('diarizing speaker 1')\n curdir=os.getcwd()\n newdir1=curdir+'/1'\n\n try:\n os.mkdir(newdir1)\n os.chdir(newdir1)\n except:\n os.chdir(newdir1)\n \n for i in range(len(s0seg)):\n filename2=filename[0:-4]+'_speaker_1'+str(i)+'.wav'\n print(('making file @ %s to %s')%(str(s0seg[i][0]),str(s0seg[i][1])))\n 
exportfile(newAudio,s0seg[i][0]*1000,s0seg[i][1]*1000,filename,i,1)\n\n curdir=os.getcwd()\n listdir=os.listdir(curdir)\n removedfilelist1=list()\n keptfilelist1=list()\n\n for i in range(len(listdir)):\n if os.path.getsize(listdir[i]) < 300000:\n removedfile=[listdir[i], os.path.getsize(listdir[i])]\n removedfilelist1.append(removedfile)\n os.remove(listdir[i])\n else:\n keptfile=[listdir[i],os.path.getsize(listdir[i])]\n keptfilelist1.append(keptfile)\n\n #speaker 1 stitched size\n s1stitchedsize=0\n for i in range(len(keptfilelist1)):\n s1stitchedsize=s1stitchedsize+int(keptfilelist1[i][1])\n \n #speaker 2 \n os.chdir(diarizedir)\n curdir=os.getcwd()\n newdir2=curdir+'/2'\n\n try:\n os.mkdir(newdir2)\n os.chdir(newdir2)\n except:\n os.chdir(newdir2)\n \n print('diarizing speaker 2')\n for i in range(len(s1seg)):\n filename2=filename[0:-4]+'_speaker_2'+str(i)+'.wav'\n print(('making file @ %s to %s')%(str(s1seg[i][0]),str(s1seg[i][1])))\n exportfile(newAudio,s1seg[i][0]*1000,s1seg[i][1]*1000,filename,i,2)\n\n curdir=os.getcwd()\n listdir=os.listdir(curdir)\n removedfilelist2=list()\n keptfilelist2=list()\n\n ##now delete files that are less than 300 KB \n for i in range(len(listdir)):\n if os.path.getsize(listdir[i]) < 300000:\n removedfile=[listdir[i], os.path.getsize(listdir[i])]\n removedfilelist2.append(removedfile)\n os.remove(listdir[i])\n else:\n keptfile=[listdir[i],os.path.getsize(listdir[i])]\n keptfilelist2.append(keptfile)\n\n #speaker 2 stitched size\n s2stitchedsize=0\n for i in range(len(keptfilelist2)):\n s2stitchedsize=s2stitchedsize+int(keptfilelist2[i][1])\n\n # all segments \n os.chdir(diarizedir)\n curdir=os.getcwd()\n newdir3=curdir+'/all'\n\n try:\n os.mkdir(newdir3)\n os.chdir(newdir3)\n except:\n os.chdir(newdir3)\n\n print('transcribing session')\n master_transcript=open('transcript.txt','w')\n\n for i in range(len(allseg)):\n print(('making file @ %s to %s')%(str(allseg[i][1][0]),str(allseg[i][1][1])))\n filename2=str(i)+'_'+str(allseg[i][0])+'.wav'\n filename2=exportfile(newAudio,allseg[i][1][0]*1000,allseg[i][1][1]*1000,filename,i,2)\n new_filename=str(i)+'_'+str(allseg[i][0])+'.wav'\n os.rename(filename2,new_filename)\n os.system('ffmpeg -i %s -ac 1 -acodec pcm_s16le -ar 16000 %s -y'%(new_filename,new_filename))\n\n if i == 0:\n speaker='102334'\n\n try:\n try:\n transcript=transcribe_audio_google(new_filename)\n except:\n transcript=transcribe_audio_sphinx(new_filename)\n\n if str(allseg[i][0]) != speaker:\n speaker=str(allseg[i][0])\n master_transcript.write('\\n\\nspeaker %s: %s '%(str(allseg[i][0]), transcript))\n print('\\n\\nspeaker %s: %s '%(str(allseg[i][0]), transcript))\n else:\n speaker=str(allseg[i][0])\n master_transcript.write('%s'%(transcript))\n print(transcript)\n \n except:\n print('failed transcript')\n\n master_transcript.close()\n transcript=open('transcript.txt').read()\n\n #calculate processing time\n end_time=time.time()\n processtime=end_time-start_time \n\n #this is the .json serializable diarization\n os.chdir(diarizedir)\n \n data={\n 'filename':filename,\n 'file location':diarizedir,\n 'file size':filesize,\n 'processing time':processtime,\n 'processcount':processcount,\n 'errorcount':errorcount,\n 'data':list(g),\n 'master transcript': transcript,\n 'allseg': allseg,\n 'speaker 1':s0seg,\n 'speaker 2':s1seg,\n 'speaker 1 kept segments':keptfilelist1,\n 'speaker 1 stitched size':s1stitchedsize,\n 'speaker 1 folder location':newdir1,\n 'speaker 2 kept segments':keptfilelist2,\n 'speaker 2 stitched size':s2stitchedsize,\n 'speaker 
2 folder location':newdir2,\n 'speaker 1 deleted segments':removedfilelist1,\n 'speaker 2 deleted segments':removedfilelist2,\n }\n\n #write to json \n os.chdir(diarizedir)\n with open(filename[0:-4]+'.json', 'w') as f:\n json.dump(data, f)\n f.close()\n\n #read the db\n g=json.loads(open(filename[0:-4]+'.json').read())\n keptlist1=g['speaker 1 kept segments']\n keptloc1=g['speaker 1 folder location']\n filelist1=list()\n for i in range(len(keptlist1)):\n filelist1.append(str(keptlist1[i][0]))\n\n keptlist2=g['speaker 2 kept segments']\n keptloc2=g['speaker 2 folder location']\n filelist2=list()\n for i in range(len(keptlist2)):\n filelist2.append(str(keptlist2[i][0]))\n\n #save stitch to locations where segments are \n os.chdir(keptloc1)\n try:\n print('stitching to location 1: ' + keptloc1)\n print(filelist1)\n stitchtogether(filelist1,keptloc1,'stitched_1.wav')\n except:\n print('error stitching 1')\n\n #save stitch to locations where segments are\n os.chdir(keptloc2)\n try:\n print('stiching to location 2: ' + keptloc2)\n print(filelist2)\n stitchtogether(filelist2,keptloc2,'stitched_2.wav')\n except:\n print('error stitching 2')\n \n #go back to the incoming dir folder for further processing \n os.chdir(incoming_dir)\n\n #zip the entire directory into a .zip file and move to processed_dir folder\n shutil.make_archive(filename[0:-4]+'_diarization','zip',filename[0:-4]+'_diarization/') \n shutil.move(incoming_dir+filename[0:-4]+'_diarization.zip',processed_dir+filename[0:-4]+'_diarization.zip')\n\n #delete the directory using shutil\n shutil.rmtree(filename[0:-4]+'_diarization')\n\n #update processcount\n processcount=processcount+1\n\n else:\n errorcount=errorcount+1\n os.remove(filename)\n print('skipping file, need to resample (too small size)')\n \n #sleep to avoid server overhead\n print('sleeping...')\n time.sleep(sleeptime)\n except:\n print('error')\n print('sleeping...')\n errorcount=errorcount+1\n time.sleep(sleeptime)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
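
The diarization script above turns the frame-level speaker labels returned by pyAudioAnalysis speakerDiarization (one label per 0.2 s step, hence the divisions by 5.0) into per-speaker [start, end] segments with a fairly involved index-juggling loop. As a reading aid only, here is a minimal, self-contained sketch of the same grouping idea; the function name, the plain label list and the fixed 0.2 s step are illustrative assumptions, not part of the original code.

    import numpy as np

    def labels_to_segments(labels, step=0.2):
        # Group a 1-D array of integer speaker ids (one per analysis step) into
        # contiguous (speaker, start_s, end_s) segments, mirroring the loop above.
        labels = np.asarray(labels)
        segments = []
        if len(labels) == 0:
            return segments
        start = 0
        for i in range(1, len(labels)):
            if labels[i] != labels[i - 1]:  # speaker change boundary
                segments.append((int(labels[i - 1]), start * step, i * step))
                start = i
        segments.append((int(labels[-1]), start * step, len(labels) * step))
        return segments

    # e.g. [0, 0, 0, 1, 1, 0] groups into three segments: speaker 0, speaker 1, speaker 0
    print(labels_to_segments([0, 0, 0, 1, 1, 0]))
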
yliu1229/CPCTR | [
"66fcd336ee69fd18b322853f195c5b65b4a046b7"
] | [
"CPCTrans/main.py"
] | [
"import os\nimport sys\nimport time\nimport re\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom tensorboardX import SummaryWriter\nimport matplotlib.pyplot as plt\n\nplt.switch_backend('agg')\n\nsys.path.append('../Utils')\nfrom CPCTrans.dataset_3d import *\nfrom CPCTrans.model_3d import *\nfrom Backbone.resnet import neq_load_customized\nfrom Utils.augmentation import *\nfrom Utils.utils import AverageMeter, save_checkpoint, denorm, calc_topk_accuracy\n\nimport torch\nimport torch.optim as optim\nfrom torch.utils import data\nfrom torchvision import datasets, models, transforms\nimport torchvision.utils as vutils\n\ntorch.backends.cudnn.benchmark = True\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--net', default='resnet18', type=str)\nparser.add_argument('--model', default='cpc-trans', type=str)\nparser.add_argument('--dataset', default='ucf101', type=str)\nparser.add_argument('--num_seq', default=8, type=int, help='number of video blocks')\nparser.add_argument('--pred_step', default=3, type=int)\nparser.add_argument('--ds', default=3, type=int, help='frame downsampling rate')\nparser.add_argument('--batch_size', default=16, type=int)\nparser.add_argument('--lr', default=1e-3, type=float, help='learning rate')\nparser.add_argument('--wd', default=1e-5, type=float, help='weight decay')\nparser.add_argument('--resume', default='', type=str, help='path of model to resume')\nparser.add_argument('--pretrain', default='', type=str, help='path of pretrained model')\nparser.add_argument('--epochs', default=300, type=int, help='number of total epochs to run')\nparser.add_argument('--start_epoch', default=1, type=int, help='manual epoch number (useful on restarts)')\nparser.add_argument('--gpu', default='0', type=str)\nparser.add_argument('--print_freq', default=200, type=int, help='frequency of printing output during training')\nparser.add_argument('--reset_lr', action='store_true', help='Reset learning rate when resume training?')\nparser.add_argument('--prefix', default='tmp', type=str, help='prefix of checkpoint filename')\nparser.add_argument('--train_what', default='all', type=str)\nparser.add_argument('--img_dim', default=128, type=int)\n\n\ndef main():\n torch.manual_seed(0)\n np.random.seed(0)\n global args;\n\n args = parser.parse_args()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu)\n global cuda;\n cuda = torch.device('cuda')\n\n ### CPC with TransformerEncoder model ###\n if args.model == 'cpc-trans':\n model = CPC_Trans(sample_size=args.img_dim,\n num_seq=args.num_seq,\n network=args.net,\n pred_step=args.pred_step)\n else:\n raise ValueError('wrong model!')\n\n model = model.to(cuda)\n global criterion;\n criterion = nn.CrossEntropyLoss()\n\n params = model.parameters()\n optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.wd)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)\n args.old_lr = None\n\n best_acc = 0\n global iteration;\n iteration = 0\n\n ### restart training ###\n if args.resume:\n if os.path.isfile(args.resume):\n args.old_lr = float(re.search('_lr(.+?)_', args.resume).group(1))\n print(\"=> loading resumed checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))\n args.start_epoch = checkpoint['epoch']\n iteration = checkpoint['iteration']\n best_acc = checkpoint['best_acc']\n model.load_state_dict(checkpoint['state_dict'])\n if not args.reset_lr: # if didn't reset lr, load old optimizer\n 
optimizer.load_state_dict(checkpoint['optimizer'])\n else:\n print('==== Change lr from %f to %f ====' % (args.old_lr, args.lr))\n print(\"=> loaded resumed checkpoint '{}' (epoch {}) with best_acc {}\".format(args.resume, checkpoint['epoch'], best_acc))\n else:\n print(\"[Warning] no checkpoint found at '{}'\".format(args.resume))\n\n if args.pretrain:\n if os.path.isfile(args.pretrain):\n print(\"=> loading pretrained checkpoint '{}'\".format(args.pretrain))\n checkpoint = torch.load(args.pretrain, map_location=torch.device('cpu'))\n model = neq_load_customized(model, checkpoint['state_dict'])\n print(\"=> loaded pretrained checkpoint '{}' (epoch {})\"\n .format(args.pretrain, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.pretrain))\n\n ### load data ###\n if args.dataset == 'ucf101': # designed for ucf101, short size=256, rand crop to 224x224 then scale to 128x128\n transform = transforms.Compose([\n RandomHorizontalFlip(consistent=True),\n RandomCrop(size=224, consistent=True),\n Scale(size=(args.img_dim, args.img_dim)),\n RandomGray(consistent=False, p=0.5),\n ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=1.0),\n ToTensor(),\n Normalize()\n ])\n elif args.dataset == 'k400': # designed for kinetics400, short size=150, rand crop to 128x128\n transform = transforms.Compose([\n RandomSizedCrop(size=args.img_dim, consistent=True, p=1.0),\n RandomHorizontalFlip(consistent=True),\n RandomGray(consistent=False, p=0.5),\n ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=1.0),\n ToTensor(),\n Normalize()\n ])\n\n train_loader = get_data(transform, 'train')\n val_loader = get_data(transform, 'val')\n\n # setup tools\n global de_normalize;\n de_normalize = denorm()\n global img_path;\n img_path, model_path = set_path(args)\n global writer_train\n try: # old version\n writer_val = SummaryWriter(log_dir=os.path.join(img_path, 'val'))\n writer_train = SummaryWriter(log_dir=os.path.join(img_path, 'train'))\n except: # v1.7\n writer_val = SummaryWriter(logdir=os.path.join(img_path, 'val'))\n writer_train = SummaryWriter(logdir=os.path.join(img_path, 'train'))\n\n print('-- start main loop --')\n\n ### main loop ###\n for epoch in range(args.start_epoch, args.epochs):\n train_loss, train_acc, train_accuracy_list = train(train_loader, model, optimizer, epoch)\n val_loss, val_acc, val_accuracy_list = validate(val_loader, model, epoch)\n scheduler.step()\n print('\\t Epoch: ', epoch, 'with lr: ', scheduler.get_last_lr())\n\n # save curve\n writer_train.add_scalar('global/loss', train_loss, epoch)\n writer_train.add_scalar('global/accuracy', train_acc, epoch)\n writer_val.add_scalar('global/loss', val_loss, epoch)\n writer_val.add_scalar('global/accuracy', val_acc, epoch)\n writer_train.add_scalar('accuracy/top1', train_accuracy_list[0], epoch)\n writer_train.add_scalar('accuracy/top3', train_accuracy_list[1], epoch)\n writer_train.add_scalar('accuracy/top5', train_accuracy_list[2], epoch)\n writer_val.add_scalar('accuracy/top1', val_accuracy_list[0], epoch)\n writer_val.add_scalar('accuracy/top3', val_accuracy_list[1], epoch)\n writer_val.add_scalar('accuracy/top5', val_accuracy_list[2], epoch)\n\n # save check_point\n is_best = val_acc > best_acc;\n best_acc = max(val_acc, best_acc)\n save_checkpoint({'epoch': epoch + 1,\n 'net': args.net,\n 'state_dict': model.state_dict(),\n 'best_acc': best_acc,\n 'optimizer': optimizer.state_dict(),\n 'iteration': iteration},\n is_best, filename=os.path.join(model_path, 
'epoch%s.pth.tar' % str(epoch + 1)), keep_all=False)\n\n print('Training from ep %d to ep %d finished' % (args.start_epoch, args.epochs))\n\n\n\ndef process_output(mask):\n '''task mask as input, compute the target for contrastive loss'''\n (B, NP, SQ, B2, NS, _) = mask.size() # [B, P, SQ, B, N, SQ]\n target = mask == 1\n target = target * 1\n target.requires_grad = False\n return target, (B, B2, NS, NP, SQ)\n\n\ndef train(data_loader, model, optimizer, epoch):\n losses = AverageMeter()\n accuracy = AverageMeter()\n accuracy_list = [AverageMeter(), AverageMeter(), AverageMeter()]\n model.train()\n global iteration\n\n for idx, input_seq in enumerate(data_loader):\n tic = time.time()\n input_seq = input_seq.to(cuda)\n B = input_seq.size(0)\n [score_, mask_] = model(input_seq)\n # visualize\n if (iteration == 0) or (iteration == args.print_freq):\n if B > 2: input_seq = input_seq[0:2, :]\n writer_train.add_image('input_seq',\n de_normalize(vutils.make_grid(\n input_seq.view(-1, 3, args.img_dim, args.img_dim),\n nrow=args.num_seq)),\n iteration)\n del input_seq\n\n if idx == 0: target_, (_, B2, NS, NP, SQ) = process_output(mask_)\n\n score_flattened = score_.view(B * NP * SQ, B2 * NS * SQ)\n target_flattened = target_.contiguous().view(B * NP * SQ, B2 * NS * SQ)\n target_flattened = target_flattened.argmax(dim=1)\n\n loss = criterion(score_flattened, target_flattened)\n top1, top3, top5 = calc_topk_accuracy(score_flattened, target_flattened, (1, 3, 5))\n\n accuracy_list[0].update(top1.item(), B)\n accuracy_list[1].update(top3.item(), B)\n accuracy_list[2].update(top5.item(), B)\n\n losses.update(loss.item(), B)\n accuracy.update(top1.item(), B)\n\n del score_\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n del loss\n\n if idx % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Loss {loss.val:.6f} ({loss.local_avg:.4f})\\t'\n 'Acc: top1 {3:.4f}; top3 {4:.4f}; top5 {5:.4f} T:{6:.2f}\\t'.format(\n epoch, idx, len(data_loader), top1, top3, top5, time.time() - tic, loss=losses))\n\n writer_train.add_scalar('local/loss', losses.val, iteration)\n writer_train.add_scalar('local/accuracy', accuracy.val, iteration)\n\n iteration += 1\n\n return losses.local_avg, accuracy.local_avg, [i.local_avg for i in accuracy_list]\n\n\ndef validate(data_loader, model, epoch):\n losses = AverageMeter()\n accuracy = AverageMeter()\n accuracy_list = [AverageMeter(), AverageMeter(), AverageMeter()]\n model.eval()\n\n with torch.no_grad():\n for idx, input_seq in tqdm(enumerate(data_loader), total=len(data_loader)):\n input_seq = input_seq.to(cuda)\n B = input_seq.size(0)\n [score_, mask_] = model(input_seq)\n del input_seq\n\n if idx == 0: target_, (_, B2, NS, NP, SQ) = process_output(mask_)\n\n score_flattened = score_.view(B * NP * SQ, B2 * NS * SQ)\n target_flattened = target_.contiguous().view(B * NP * SQ, B2 * NS * SQ)\n target_flattened = target_flattened.argmax(dim=1)\n\n loss = criterion(score_flattened, target_flattened)\n top1, top3, top5 = calc_topk_accuracy(score_flattened, target_flattened, (1, 3, 5))\n\n losses.update(loss.item(), B)\n accuracy.update(top1.item(), B)\n\n accuracy_list[0].update(top1.item(), B)\n accuracy_list[1].update(top3.item(), B)\n accuracy_list[2].update(top5.item(), B)\n\n print('[{0}/{1}] Loss {loss.local_avg:.4f}\\t'\n 'Acc: top1 {2:.4f}; top3 {3:.4f}; top5 {4:.4f} \\t'.format(\n epoch, args.epochs, *[i.avg for i in accuracy_list], loss=losses))\n return losses.local_avg, accuracy.local_avg, [i.local_avg for i in accuracy_list]\n\n\ndef 
get_data(transform, mode='train'):\n print('Loading data for \"%s\" ...' % mode)\n if args.dataset == 'k400':\n pass\n elif args.dataset == 'ucf101':\n dataset = UCF101_3d(mode=mode,\n transform=transform,\n num_seq=args.num_seq,\n downsample=args.ds,\n which_split=3)\n else:\n raise ValueError('dataset not supported')\n\n sampler = data.RandomSampler(dataset)\n\n if mode == 'train':\n data_loader = data.DataLoader(dataset,\n batch_size=args.batch_size,\n sampler=sampler,\n shuffle=False,\n num_workers=2,\n pin_memory=True,\n drop_last=True)\n elif mode == 'val':\n data_loader = data.DataLoader(dataset,\n batch_size=args.batch_size,\n sampler=sampler,\n shuffle=False,\n num_workers=2,\n pin_memory=True,\n drop_last=True)\n print('\"%s\" dataset size: %d' % (mode, len(dataset)))\n return data_loader\n\n\ndef set_path(args):\n if args.resume:\n exp_path = os.path.dirname(os.path.dirname(args.resume))\n else:\n exp_path = 'log_{args.prefix}/{args.dataset}-{args.img_dim}_{0}_{args.model}_\\\nbs{args.batch_size}_lr{1}_seq{args.num_seq}_pred{args.pred_step}_ds{args.ds}_\\\ntrain-{args.train_what}{2}'.format(\n 'r%s' % args.net[6::], \\\n args.old_lr if args.old_lr is not None else args.lr, \\\n '_pt=%s' % args.pretrain.replace('/', '-') if args.pretrain else '', \\\n args=args)\n img_path = os.path.join(exp_path, 'img')\n model_path = os.path.join(exp_path, 'model')\n if not os.path.exists(img_path): os.makedirs(img_path)\n if not os.path.exists(model_path): os.makedirs(model_path)\n return img_path, model_path\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.optim.Adam",
"numpy.random.seed",
"matplotlib.pyplot.switch_backend",
"torch.manual_seed",
"torch.utils.data.RandomSampler",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.device",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
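
In the training loop above, the contrastive loss is computed by flattening the score tensor to a [queries, candidates] matrix and reducing the binary positive-pair mask to integer class indices with argmax before handing both to nn.CrossEntropyLoss. A stripped-down illustration of that pattern follows; the shapes and tensors here are made up for the example and are not the repository's actual dimensions.

    import torch
    import torch.nn as nn

    scores = torch.randn(4, 4)        # [num_queries, num_candidates], higher = more similar
    mask = torch.eye(4)               # 1 marks each query's positive candidate

    targets = mask.argmax(dim=1)      # one-hot rows -> integer class indices
    loss = nn.CrossEntropyLoss()(scores, targets)
    print(loss.item())
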
AgnesYichenFeng/Attention | [
"b70b8882871cb74f1533265fc13ccfad7bea3550",
"b70b8882871cb74f1533265fc13ccfad7bea3550"
] | [
"text_eval.py",
"pegasus_app.py"
] | [
"import numpy as np\n\ndef ids2str(encoder, ids, num_reserved):\n if num_reserved:\n if np.any(np.where(ids==1)[0]):\n eos = np.where(ids==1)[0]\n ids = ids[:eos[0]] \n reserved_tokens = np.where(ids < num_reserved)[0]\n \n if reserved_totkens.size > 0:\n split_locations = np.unioj1d(reserved_tokens, reserved_tokens + 1)\n ids_list = np.split(ids, split_locations)\n text_list = [\n \"<%d>\" &\n i if len(i) == 1 and i < num_reserved else encoder.decode(i.tolist())\n for i in ids_list\n ]\n return \" \".join(test_list)\n \n return encoder.decode(ids.flatten().tolist())\n \n \n \n ",
"import numpy as np\nimport public_parsing_ops\nimport tensorflow as tf\nimport text_eval\n\n_MODEL_FILE = 'ckpt/c4.unigram.newline.10pct.96000.model'\n\nshapes = {\n 'cnn_dailymail': (1024,128),\n}\n\nencoder = public_parsing_ops.create_text_encoder(\"sentencepiece\", _MODEL_FILE)\n\nif __name__ == '__main__':\n import argparse\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--article\", help=\"Your article file location\", default = \"example_article\")\n parser.add_argument(\"--model_dir\", help=\"Your model directory\", default = \"model/\")\n parser.add_argument(\"--model_name\", help=\"Name of your model\", default = \"cnn_dailymail\")\n args = parser.parse_args()\n \n text = open(args.article, \"r\", encoding = \"utf-8\"). read()\n \n shape,_ = shapes[args.model_name]\n \n input_ids = encoder.encode(text)\n inputs = np.zeros(shape)\n input_len = len(input_ids)\n if input_len > shape: input_len = shape\n inputs[:input_len] = input_ids[:input_len]\n \n loaded = tf.saved_model.load(args.model_dir, tags = \"serve\")\n \n example = tf.train.Example()\n example.features.feature[\"inputs\"].int64_list.value.extend(inputs.astype(int))\n \n output = loaded.signatures[\"serving_default\"](examples = tf.constant([example.SerializeToString()]))\n \n print(\"\\nAbstract: \", text_eval.ids2str(encoder, output[\"outputs\"].numpy(), None))"
] | [
[
"numpy.split",
"numpy.unioj1d",
"numpy.where"
],
[
"numpy.zeros",
"tensorflow.saved_model.load",
"tensorflow.train.Example"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
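
For reference, the np.union1d / np.split combination in ids2str above isolates every reserved id into its own chunk by cutting the sequence both before and after each reserved position. A small standalone illustration with made-up ids and num_reserved:

    import numpy as np

    ids = np.array([5, 17, 42, 2, 99, 100])
    num_reserved = 10
    reserved = np.where(ids < num_reserved)[0]      # positions 0 and 3
    splits = np.union1d(reserved, reserved + 1)     # cut points: [0, 1, 3, 4]
    print(np.split(ids, splits))
    # -> [array([], ...), array([5]), array([17, 42]), array([2]), array([99, 100])]
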
JoshuaEbenezer/cwgan | [
"5f6a9a0bb8760bf85a9a28e25b29a149f3a4a7ca"
] | [
"util/visualizer.py"
] | [
"import numpy as np\nimport os\nimport sys\nimport ntpath\nimport time\nfrom . import util, html\nfrom subprocess import Popen, PIPE\nfrom scipy.misc import imresize\n\nif sys.version_info[0] == 2:\n VisdomExceptionBase = Exception\nelse:\n VisdomExceptionBase = ConnectionError\n\n\ndef save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):\n \"\"\"Save images to the disk.\n\n Parameters:\n webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details)\n visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs\n image_path (str) -- the string is used to create image paths\n aspect_ratio (float) -- the aspect ratio of saved images\n width (int) -- the images will be resized to width x width\n\n This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.\n \"\"\"\n image_dir = webpage.get_image_dir()\n short_path = ntpath.basename(image_path[0])\n name = os.path.splitext(short_path)[0]\n\n webpage.add_header(name)\n ims, txts, links = [], [], []\n\n for label, im_data in visuals.items():\n im = util.tensor2im(im_data)\n image_name = '%s_%s.png' % (name, label)\n save_path = os.path.join(image_dir, image_name)\n h, w, _ = im.shape\n if aspect_ratio > 1.0:\n im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')\n if aspect_ratio < 1.0:\n im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')\n util.save_image(im, save_path)\n\n ims.append(image_name)\n txts.append(label)\n links.append(image_name)\n webpage.add_images(ims, txts, links, width=width)\n\n\nclass Visualizer():\n \"\"\"This class includes several functions that can display/save images and print/save logging information.\n\n It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the Visualizer class\n\n Parameters:\n opt -- stores all the experiment flags; needs to be a subclass of BaseOptions\n Step 1: Cache the training/test options\n Step 2: connect to a visdom server\n Step 3: create an HTML object for saveing HTML filters\n Step 4: create a logging file to store training losses\n \"\"\"\n self.opt = opt # cache the option\n self.display_id = opt.display_id\n self.use_html = opt.isTrain and not opt.no_html\n self.win_size = opt.display_winsize\n self.name = opt.name\n self.port = opt.display_port\n self.saved = False\n if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>\n import visdom\n self.ncols = opt.display_ncols\n self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env,raise_exceptions=True,proxies={'http': '172.16.2.30:8080', 'https': '172.16.2.30:8080'})\n\n\n if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/\n self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')\n self.img_dir = os.path.join(self.web_dir, 'images')\n print('create web directory %s...' 
% self.web_dir)\n util.mkdirs([self.web_dir, self.img_dir])\n # create a logging file to store training losses\n self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write('================ Training Loss (%s) ================\\n' % now)\n\n def reset(self):\n \"\"\"Reset the self.saved status\"\"\"\n self.saved = False\n\n def create_visdom_connections(self):\n \"\"\"If the program could not connect to Visdom server, this function will start a new server at port < self.port > \"\"\"\n cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port\n print('\\n\\nCould not connect to Visdom server. \\n Trying to start a server....')\n print('Command: %s' % cmd)\n Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\n def display_current_results(self, visuals, epoch, save_result):\n \"\"\"Display current results on visdom; save current results to an HTML file.\n\n Parameters:\n visuals (OrderedDict) - - dictionary of images to display or save\n epoch (int) - - the current epoch\n save_result (bool) - - if save the current results to an HTML file\n \"\"\"\n if self.display_id > 0: # show images in the browser using visdom\n ncols = self.ncols\n if ncols > 0: # show all the images in one visdom panel\n ncols = min(ncols, len(visuals))\n h, w = next(iter(visuals.values())).shape[:2]\n table_css = \"\"\"<style>\n table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}\n table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}\n </style>\"\"\" % (w, h) # create a table css\n # create a table of images.\n title = self.name\n label_html = ''\n label_html_row = ''\n images = []\n idx = 0\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n label_html_row += '<td>%s</td>' % label\n images.append(image_numpy.transpose([2, 0, 1]))\n idx += 1\n if idx % ncols == 0:\n label_html += '<tr>%s</tr>' % label_html_row\n label_html_row = ''\n white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255\n while idx % ncols != 0:\n images.append(white_image)\n label_html_row += '<td></td>'\n idx += 1\n if label_html_row != '':\n label_html += '<tr>%s</tr>' % label_html_row\n try:\n self.vis.images(images, nrow=ncols, win=self.display_id + 1,\n padding=2, opts=dict(title=title + ' images'))\n label_html = '<table>%s</table>' % label_html\n self.vis.text(table_css + label_html, win=self.display_id + 2,\n opts=dict(title=title + ' labels'))\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n else: # show each image in a separate visdom panel;\n idx = 1\n try:\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),\n win=self.display_id + idx)\n idx += 1\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.\n self.saved = True\n # save images to the disk\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))\n util.save_image(image_numpy, img_path)\n\n # update website\n webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)\n for n in range(epoch, 0, -1):\n webpage.add_header('epoch [%d]' % n)\n ims, txts, links = [], [], []\n\n for label, 
image_numpy in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = 'epoch%.3d_%s.png' % (n, label)\n ims.append(img_path)\n txts.append(label)\n links.append(img_path)\n webpage.add_images(ims, txts, links, width=self.win_size)\n webpage.save()\n\n def plot_current_losses(self, epoch, counter_ratio, losses):\n \"\"\"display the current losses on visdom display: dictionary of error labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n \"\"\"\n if not hasattr(self, 'plot_data'):\n self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}\n self.plot_data['X'].append(epoch + counter_ratio)\n self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])\n try:\n self.vis.line(\n X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),\n Y=np.array(self.plot_data['Y']),\n opts={\n 'title': self.name + ' loss over time',\n 'legend': self.plot_data['legend'],\n 'xlabel': 'epoch',\n 'ylabel': 'loss'},\n win=self.display_id)\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n # losses: same format as |losses| of plot_current_losses\n def print_current_losses(self, epoch, iters, losses, t_comp, t_data):\n \"\"\"print current losses on console; also save the losses to the disk\n\n Parameters:\n epoch (int) -- current epoch\n iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n t_comp (float) -- computational time per data point (normalized by batch_size)\n t_data (float) -- data loading time per data point (normalized by batch_size)\n \"\"\"\n message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)\n for k, v in losses.items():\n message += '%s: %.3f ' % (k, v)\n\n print(message) # print the message\n with open(self.log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message) # save the message\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
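
In plot_current_losses above, the shared x-axis (epoch plus progress fraction) is replicated once per loss curve with np.stack so that X and Y reach visdom's line plot with matching [num_points, num_curves] shapes. A toy illustration of that shape trick; the loss names and values below are invented for the example.

    import numpy as np

    epochs = [0.0, 0.5, 1.0]                                  # shared x-axis values
    losses = {'G_loss': [1.0, 0.8, 0.6], 'D_loss': [0.7, 0.6, 0.55]}
    legend = list(losses.keys())

    Y = np.array([[losses[k][i] for k in legend] for i in range(len(epochs))])
    X = np.stack([np.array(epochs)] * len(legend), 1)         # replicate x once per curve
    print(X.shape, Y.shape)                                   # (3, 2) (3, 2)
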
pavlin-policar/ALRA | [
"fab8d7661bf2a2179b40e68fb4f022015c700252"
] | [
"ALRA/ALRA.py"
] | [
"import logging\nimport numpy as np\nfrom fbpca import pca\nfrom scipy.stats import norm\n\nfrom .sparseutils import nonzero_mean, nonzero_std, find_zeroed_indices\n\nlog = logging.getLogger(\"ALRA\")\n\n\ndef choose_k(X, k=100, pval_thresh=1e-10, noise_start=80, n_iter=2):\n if k > min(X.shape):\n raise ValueError(\n f\"`k` must be smaller than `min(N, M)`. Maximum value \"\n f\"can be {min(X.shape)} but `{k}` given\"\n )\n\n if noise_start > k - 5:\n raise ValueError(\"At least 5 singular values must be considered noise.\")\n\n U, s, Va = pca(X, k=k, n_iter=n_iter, raw=True)\n\n differences = np.diff(s)\n\n mean = np.mean(differences[noise_start - 1 :])\n std = np.std(differences[noise_start - 1 :], ddof=1)\n\n probabilities = norm.pdf(differences, loc=mean, scale=std)\n\n k = np.max(np.argwhere(probabilities < pval_thresh)) + 1\n\n return k\n\n\ndef ALRA(X, k=None, n_iter=10):\n \"\"\"Adaptively-thresholded Low Rank Approximation.\n\n Parameters\n ----------\n X: array_like\n k: int\n n_iter: int\n\n Returns\n -------\n np.array\n\n \"\"\"\n if k is None:\n k = choose_k(X)\n log.info(f\"No `k` given. Automatically determined `k={k}`.\")\n\n # Compute the SVD and compute the rank-k reconstruction\n U, s, Va = pca(X, k=k, n_iter=n_iter, raw=True)\n X_rank_k = U * s @ Va\n\n X_rank_k = np.ma.masked_array(X_rank_k)\n\n # Find the absolute values of the minimum expression levels for each gene\n minimum_expressions = np.abs(np.min(X_rank_k, axis=0))\n # Zero out all expressions with values below the gene minimum value\n X_rank_k[X_rank_k <= minimum_expressions] = np.ma.masked\n\n # Rescale the expressions so the first two moments match the original matrix\n X_mean, X_std = nonzero_mean(X, axis=0), nonzero_std(X, axis=0, ddof=1)\n X_rk_mean, X_rk_std = X_rank_k.mean(axis=0), X_rank_k.std(axis=0, ddof=1)\n\n scale = X_std / X_rk_std\n translate = -X_rk_mean * scale + X_mean\n\n scale_columns = ~np.isnan(X_std) & ~np.isnan(X_rk_std)\n X_rank_k[:, scale_columns] *= scale[scale_columns]\n X_rank_k[:, scale_columns] += translate[scale_columns]\n\n # Values can become negative during rescaling, so we zero those out\n X_rank_k[X_rank_k < 0] = np.ma.masked\n\n # Restore potentially zeroed out expression values which appeared in the\n # original expression matrix. Where both values are non-zero, prefer the\n # rank-k approximation\n zeroed_out_indices = find_zeroed_indices(X_rank_k, X)\n X_rank_k[zeroed_out_indices] = X[zeroed_out_indices]\n\n log.info(\n f\"{len(zeroed_out_indices[0])} original expression values were \"\n f\"zeroed out during imputation and restored to original values.\"\n )\n\n X_rank_k = X_rank_k.filled(0)\n\n return X_rank_k\n"
] | [
[
"scipy.stats.norm.pdf",
"numpy.min",
"numpy.isnan",
"numpy.argwhere",
"numpy.std",
"numpy.diff",
"numpy.mean",
"numpy.ma.masked_array"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
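
ALRA's rescaling step above matches the first two moments of each imputed column to the original column via scale = X_std / X_rk_std and translate = -X_rk_mean * scale + X_mean. A tiny numerical check of that identity on synthetic columns (illustrative only, not the package's API):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.normal(5.0, 2.0, size=1000)   # stands in for an original expression column
    y = rng.normal(0.0, 1.0, size=1000)   # stands in for its rank-k approximation

    scale = x.std(ddof=1) / y.std(ddof=1)
    translate = -y.mean() * scale + x.mean()
    y2 = y * scale + translate

    # After rescaling, mean and (sample) std agree with the original column.
    print(np.allclose([y2.mean(), y2.std(ddof=1)], [x.mean(), x.std(ddof=1)]))  # True
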
ChriPo92/tensorpack | [
"45d2155850d3870bbf110c94c73508c707e1ae42",
"d7a13cb74c9066bc791d7aafc3b744b60ee79a9f"
] | [
"examples/FasterRCNN/eval.py",
"tensorpack/dataflow/imgaug/convert.py"
] | [
"# -*- coding: utf-8 -*-\n# File: eval.py\n\nimport itertools\nimport sys\nimport os\nimport json\nimport numpy as np\nfrom collections import namedtuple\nfrom concurrent.futures import ThreadPoolExecutor\nfrom contextlib import ExitStack\nimport cv2\nimport pycocotools.mask as cocomask\nimport tqdm\nimport tensorflow as tf\n\nfrom tensorpack.callbacks import Callback\nfrom tensorpack.tfutils.common import get_tf_version_tuple\nfrom tensorpack.utils import logger\nfrom tensorpack.utils.utils import get_tqdm\n\nfrom common import CustomResize, clip_boxes\nfrom data import get_eval_dataflow, get_eval_dataflow_YCBV\nfrom dataset import DetectionDataset\nfrom config import config as cfg\n\ntry:\n import horovod.tensorflow as hvd\nexcept ImportError:\n pass\n\n\nDetectionResult = namedtuple(\n 'DetectionResult',\n ['box', 'score', 'class_id', 'mask'])\n\"\"\"\nbox: 4 float\nscore: float\nclass_id: int, 1~NUM_CLASS\nmask: None, or a binary image of the original image shape\n\"\"\"\n\n\ndef _paste_mask(box, mask, shape):\n \"\"\"\n Args:\n box: 4 float\n mask: MxM floats\n shape: h,w\n Returns:\n A uint8 binary image of hxw.\n \"\"\"\n # int() is floor\n # box fpcoor=0.0 -> intcoor=0.0\n x0, y0 = list(map(int, box[:2] + 0.5))\n # box fpcoor=h -> intcoor=h-1, inclusive\n x1, y1 = list(map(int, box[2:] - 0.5)) # inclusive\n x1 = max(x0, x1) # require at least 1x1\n y1 = max(y0, y1)\n\n w = x1 + 1 - x0\n h = y1 + 1 - y0\n\n # rounding errors could happen here, because masks were not originally computed for this shape.\n # but it's hard to do better, because the network does not know the \"original\" scale\n mask = (cv2.resize(mask, (w, h)) > 0.5).astype('uint8')\n ret = np.zeros(shape, dtype='uint8')\n ret[y0:y1 + 1, x0:x1 + 1] = mask\n return ret\n\n\ndef predict_image(img, model_func):\n \"\"\"\n Run detection on one image, using the TF callable.\n This function should handle the preprocessing internally.\n\n Args:\n img: an image\n model_func: a callable from the TF model.\n It takes image and returns (boxes, probs, labels, [masks])\n\n Returns:\n [DetectionResult]\n \"\"\"\n\n orig_shape = img.shape[:2]\n resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)\n resized_img = resizer.augment(img)\n scale = np.sqrt(resized_img.shape[0] * 1.0 / img.shape[0] * resized_img.shape[1] / img.shape[1])\n boxes, probs, labels, *masks = model_func(resized_img)\n boxes = boxes / scale\n # boxes are already clipped inside the graph, but after the floating point scaling, this may not be true any more.\n boxes = clip_boxes(boxes, orig_shape)\n\n if masks:\n # has mask\n full_masks = [_paste_mask(box, mask, orig_shape)\n for box, mask in zip(boxes, masks[0])]\n masks = full_masks\n else:\n # fill with none\n masks = [None] * len(boxes)\n\n results = [DetectionResult(*args) for args in zip(boxes, probs, labels.tolist(), masks)]\n return results\n\n\ndef predict_dataflow(df, model_func, tqdm_bar=None):\n \"\"\"\n Args:\n df: a DataFlow which produces (image, image_id)\n model_func: a callable from the TF model.\n It takes image and returns (boxes, probs, labels, [masks])\n tqdm_bar: a tqdm object to be shared among multiple evaluation instances. 
If None,\n will create a new one.\n\n Returns:\n list of dict, in the format used by\n `DetectionDataset.eval_or_save_inference_results`\n \"\"\"\n df.reset_state()\n all_results = []\n with ExitStack() as stack:\n # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323\n if tqdm_bar is None:\n tqdm_bar = stack.enter_context(get_tqdm(total=df.size()))\n for img, img_id in df:\n results = predict_image(img, model_func)\n for r in results:\n # int()/float() to make it json-serializable\n res = {\n 'image_id': img_id,\n 'category_id': int(r.class_id),\n 'bbox': [round(float(x), 4) for x in r.box],\n 'score': round(float(r.score), 4),\n }\n\n # also append segmentation to results\n if r.mask is not None:\n rle = cocomask.encode(\n np.array(r.mask[:, :, None], order='F'))[0]\n rle['counts'] = rle['counts'].decode('ascii')\n res['segmentation'] = rle\n all_results.append(res)\n tqdm_bar.update(1)\n return all_results\n\n\ndef multithread_predict_dataflow(dataflows, model_funcs):\n \"\"\"\n Running multiple `predict_dataflow` in multiple threads, and aggregate the results.\n\n Args:\n dataflows: a list of DataFlow to be used in :func:`predict_dataflow`\n model_funcs: a list of callable to be used in :func:`predict_dataflow`\n\n Returns:\n list of dict, in the format used by\n `DetectionDataset.eval_or_save_inference_results`\n \"\"\"\n num_worker = len(model_funcs)\n assert len(dataflows) == num_worker\n if num_worker == 1:\n return predict_dataflow(dataflows[0], model_funcs[0])\n kwargs = {'thread_name_prefix': 'EvalWorker'} if sys.version_info.minor >= 6 else {}\n with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, \\\n tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar:\n futures = []\n for dataflow, pred in zip(dataflows, model_funcs):\n futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar))\n all_results = list(itertools.chain(*[fut.result() for fut in futures]))\n return all_results\n\n# TODO: Make this runnable with YCBV\nclass EvalCallback(Callback):\n \"\"\"\n A callback that runs evaluation once a while.\n It supports multi-gpu evaluation.\n \"\"\"\n\n _chief_only = False\n\n def __init__(self, eval_dataset, in_names, out_names, output_dir):\n self._eval_dataset = eval_dataset\n self._in_names, self._out_names = in_names, out_names\n self._output_dir = output_dir\n\n def _setup_graph(self):\n num_gpu = cfg.TRAIN.NUM_GPUS\n if cfg.TRAINER == 'replicated':\n # TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750\n buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]\n\n # Use two predictor threads per GPU to get better throughput\n self.num_predictor = num_gpu if buggy_tf else num_gpu * 2\n self.predictors = [self._build_predictor(k % num_gpu) for k in range(self.num_predictor)]\n self.dataflows = [get_eval_dataflow_YCBV(self._eval_dataset,\n shard=k, num_shards=self.num_predictor)\n for k in range(self.num_predictor)]\n else:\n # Only eval on the first machine.\n # Alternatively, can eval on all ranks and use allgather, but allgather sometimes hangs\n self._horovod_run_eval = hvd.rank() == hvd.local_rank()\n if self._horovod_run_eval:\n self.predictor = self._build_predictor(0)\n self.dataflow = get_eval_dataflow_YCBV(self._eval_dataset,\n shard=hvd.local_rank(), num_shards=hvd.local_size())\n\n self.barrier = hvd.allreduce(tf.random_normal(shape=[1]))\n\n def _build_predictor(self, idx):\n return self.trainer.get_predictor(self._in_names, self._out_names, device=idx)\n\n def 
_before_train(self):\n eval_period = cfg.TRAIN.EVAL_PERIOD\n self.epochs_to_eval = set()\n for k in itertools.count(1):\n if k * eval_period > self.trainer.max_epoch:\n break\n self.epochs_to_eval.add(k * eval_period)\n self.epochs_to_eval.add(self.trainer.max_epoch)\n logger.info(\"[EvalCallback] Will evaluate every {} epochs\".format(eval_period))\n\n def _eval(self):\n logdir = self._output_dir\n if cfg.TRAINER == 'replicated':\n all_results = multithread_predict_dataflow(self.dataflows, self.predictors)\n else:\n filenames = [os.path.join(\n logdir, 'outputs{}-part{}.json'.format(self.global_step, rank)\n ) for rank in range(hvd.local_size())]\n\n if self._horovod_run_eval:\n local_results = predict_dataflow(self.dataflow, self.predictor)\n fname = filenames[hvd.local_rank()]\n with open(fname, 'w') as f:\n json.dump(local_results, f)\n self.barrier.eval()\n if hvd.rank() > 0:\n return\n all_results = []\n for fname in filenames:\n with open(fname, 'r') as f:\n obj = json.load(f)\n all_results.extend(obj)\n os.unlink(fname)\n\n output_file = os.path.join(\n logdir, '{}-outputs{}.json'.format(self._eval_dataset, self.global_step))\n\n scores = DetectionDataset().eval_or_save_inference_results(\n all_results, self._eval_dataset, output_file)\n for k, v in scores.items():\n self.trainer.monitors.put_scalar(k, v)\n\n def _trigger_epoch(self):\n if self.epoch_num in self.epochs_to_eval:\n logger.info(\"Running evaluation ...\")\n self._eval()\n",
"# -*- coding: utf-8 -*-\n# File: convert.py\n\nimport numpy as np\nimport cv2\n\nfrom .base import ImageAugmentor\nfrom .meta import MapImage\n\n__all__ = ['ColorSpace', 'Grayscale', 'ToUint8', 'ToFloat32']\n\n\nclass ColorSpace(ImageAugmentor):\n \"\"\" Convert into another color space. \"\"\"\n\n def __init__(self, mode, keepdims=True):\n \"\"\"\n Args:\n mode: OpenCV color space conversion code (e.g., ``cv2.COLOR_BGR2HSV``)\n keepdims (bool): keep the dimension of image unchanged if OpenCV\n changes it.\n \"\"\"\n self._init(locals())\n\n def _augment(self, img, _):\n transf = cv2.cvtColor(img, self.mode)\n if self.keepdims:\n if len(transf.shape) is not len(img.shape):\n transf = transf[..., None]\n return transf\n\n\nclass Grayscale(ColorSpace):\n \"\"\" Convert image to grayscale. \"\"\"\n\n def __init__(self, keepdims=True, rgb=False):\n \"\"\"\n Args:\n keepdims (bool): return image of shape [H, W, 1] instead of [H, W]\n rgb (bool): interpret input as RGB instead of the default BGR\n \"\"\"\n mode = cv2.COLOR_RGB2GRAY if rgb else cv2.COLOR_BGR2GRAY\n super(Grayscale, self).__init__(mode, keepdims)\n\n\nclass ToUint8(MapImage):\n \"\"\" Convert image to uint8. Useful to reduce communication overhead. \"\"\"\n def __init__(self):\n super(ToUint8, self).__init__(lambda x: np.clip(x, 0, 255).astype(np.uint8), lambda x: x)\n\n\nclass ToFloat32(MapImage):\n \"\"\" Convert image to float32, may increase quality of the augmentor. \"\"\"\n def __init__(self):\n super(ToFloat32, self).__init__(lambda x: x.astype(np.float32), lambda x: x)\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.sqrt",
"tensorflow.random_normal"
],
[
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
netoferraz/oeuanalitico-posts | [
"fc5799a2c77de1133f4f3f6b9f048b0fb3de6ba7"
] | [
"oeuanalitico-posts/nfe/preprocessing/functions.py"
] | [
"import pandas as pd\nfrom pathlib import Path\nfrom collections import Counter\nimport datetime\nfrom collections import defaultdict\nfrom faker import Factory\nimport faker\nfrom preprocessing.nfeProvider import Invoice\nimport csv\n\n\ndef convert_to_numeric(num):\n \"\"\"\n Converte strings que representam valores monetários em Reais (R$) para\n o padrão americano.\n \"\"\"\n num = num.strip()\n if num != \"\":\n num = num.replace(',', '.')\n count_dot = num.count('.')\n if count_dot >= 2:\n while count_dot >= 2:\n # armazena o index da primeira ocorrência da string ponto\n slice_index = num.index('.')\n # faz um slice baseado na localizacao desse index\n new_str = num[0:slice_index] + num[slice_index + 1:]\n num = new_str\n count_dot = num.count('.')\n return float(num)\n else:\n return float(num)\n else:\n return 0.0\n\n\ndef identify_encoding(filename: str) -> str:\n \"\"\"\n Identifica o encoding do arquivo filename retornando uma string com o nome do encoding.\n\n Atributos:\n filename: é o path (full ou relativo) do arquivo a ser analisado.\n \"\"\"\n try:\n encoding = 'utf8'\n with open(filename, \"r\", encoding=encoding) as file:\n _ = file.readlines()\n except UnicodeDecodeError:\n encoding = 'latin1'\n with open(filename, \"r\", encoding=encoding) as file:\n _ = file.readlines()\n finally:\n return encoding\n\n\ndef report_pkl_into_csv(filename, foldername, logger):\n \"\"\"\n Produz um relatório do status do arquivo tabular gerado a partir dos arquivos .pkl\n\n Atributos:\n filename: é o nome do arquivo .csv que será analisado.\n foldername: é o nome da sub pasta dos arquivos pkl dentro de ./data-storage/validacao/\n \"\"\"\n # VERIFICA SE ALGUM ARQUIVO .pkl NÃO FORAM PROCESSADOS.\n df = pd.read_csv(f\"./tabular-data/{filename}.csv\", sep=';', encoding='latin1')\n lista_chaves_processadas = set(df['nf_chave'].unique())\n pkl_folder = Path(f\"./data-storage/validacao/{foldername}\")\n pkl_folder = set(pkl_folder.rglob(\"*.pkl\"))\n pkl_folder = set([f.name[:-4][-44:] for f in pkl_folder])\n num_arquivos_diff = lista_chaves_processadas.difference(pkl_folder)\n if len(num_arquivos_diff) == 0:\n logger.debug(f\"Todos os arquivos .pkl foram processados. 
Ao todo foram processados {df['nf_chave'].nunique()} notas fiscais.\\n\")\n else:\n logger.critical(f\"Não foram processados {len(num_arquivos_diff)} arquivos.\\n\")\n for f in num_arquivos_diff:\n logger.critical(f\"Arquivo {f} não foi processado.\\n\")\n # VALIDAÇÃO SE HÁ ARQUIVOS DUPLICADOS\n files_check = Path(f\"./data-storage/validacao/{foldername}\")\n files_check = list(files_check.rglob(\"*.pkl\"))\n files_check = [f.name[:-4][-44:] for f in files_check]\n a = Counter()\n for f in files_check:\n a[f] += 1\n for chave, count in a.items():\n if count > 1:\n logger.critical(f\"CHAVE REPETIDA: {chave} # {count}\")\n # VERIFICA SE HÁ ALGUMA INCONSISTÊNCIA NOS VALORES DOS PRODUTOS E DA NOTA FISCAL\n df['prod_valor_liquido'] = df.apply(lambda x: x['prod_valor'] - x['prod_valor_desconto'], axis='columns')\n check_valor_nota_valores = df.groupby(\"nf_chave\")['prod_valor_liquido'].sum().sort_values(ascending=False)\n inconsistencia_count = 0\n container = {}\n for chave, valor in zip(check_valor_nota_valores.index, check_valor_nota_valores.values):\n validacao = df.loc[df['nf_chave'] == chave, 'nf_valor'].values[0]\n valor = round(valor, 2)\n chave = chave.replace(\"-\", \"\").replace(\".\", \"\").replace(\"/\", \"\")\n if validacao != valor:\n inconsistencia_count += 1\n diff_produtos = round(valor - validacao, 2)\n container[chave] = diff_produtos\n logger.critical(f\"{chave} => Valor Nota: R${validacao} @ Valor Produtos: R${valor} @ Diferença: R${diff_produtos}\\n\")\n\n\ndef normalize_ncm(ncm: str) -> str:\n \"\"\"\n Normaliza a string que representa o código NCM\n\n Atributos:\n ncm : string que representa o código NCM\n \"\"\"\n if len(ncm) != 8:\n ncm = \"0\" + ncm\n return ncm\n\n\ndef get_ncm_values():\n \"\"\"\n Função que retorna um dicionário contendo uma lista de macro categorias e os codigos\n NCM associados a cada um deles.\n \"\"\"\n sheet_names = [\n 'CARNES E OVOS',\n 'HORTIFRUTI',\n 'LIMPEZA',\n 'HIGIENE',\n 'LATICINIOS E DERIVADOS',\n 'BEBIDAS',\n 'PET',\n 'PADARIA',\n 'CEREAIS_GRAOS_SEMENTES',\n 'DOCES',\n 'VESTUARIO',\n 'FARINACEOS',\n 'MASSAS',\n 'TEMPEROS_MOLHOS',\n 'OUTROS'\n ]\n categorias_ncm = {}\n for sheet in sheet_names:\n df = pd.read_excel(\"./data/others/compilado_ncm_mercado_mod.xlsx\", sheet_name=sheet, dtype={'cod_ncm': str})\n df['cod_ncm'] = df['cod_ncm'].astype(str)\n df['cod_ncm'] = df['cod_ncm'].apply(normalize_ncm)\n categorias_ncm[sheet] = df['cod_ncm'].unique().tolist()\n return categorias_ncm\n\n\ndef get_weekday(value: int):\n \"\"\"\n Recebe um INT representando um datetime e retorna uma string com o dia da semana.\n\n Atributos:\n value: Inteiro representando um timestamp\n \"\"\"\n convert_int_to_day = {\n 0: 'Segunda-Feira',\n 1: 'Terça-Feira',\n 2: 'Quarta-Feira',\n 3: 'Quinta-Feira',\n 4: 'Sexta-Feira',\n 5: 'Sábado',\n 6: 'Domingo'\n }\n weekday = datetime.datetime.utcfromtimestamp(value / 1e9).weekday()\n return convert_int_to_day[weekday]\n\n\ndef logging_report(report, list_required_fields, logger):\n f = Path(report['tables'][0]['source'])\n map_columns_to_number = report['tables'][0]['headers']\n map_columns_to_number = {i: col for col, i in zip(map_columns_to_number, range(1, len(map_columns_to_number)))}\n fields_required_error = {f: False for f in list_required_fields}\n num_errors = report['error-count']\n if report['valid']:\n logger.debug(f\"Arquivo: {f.name} válido pelo schema.\\n\")\n return True, num_errors\n else:\n lista_errors = report['tables'][0]['errors']\n if 0 < num_errors < 1000:\n logger.debug(f\"Arquivo {f.name} 
não validado com {num_errors} erros.\")\n for erro in lista_errors:\n for feature, valor in erro.items():\n if feature == 'code':\n if valor == 'required-constraint':\n # identify which column is null\n col = map_columns_to_number[erro['column-number']]\n # change validation status of this feature\n if not fields_required_error[col]:\n fields_required_error[col] = True\n line = erro['row-number']\n logger.critical(f\"{f.name} @ Linha {line} possui {col} sem valor atribuído.\")\n elif valor == 'enumerable-constraint':\n col = map_columns_to_number[erro['column-number']]\n line = erro['row-number']\n logger.critical(f\"{f.name} @ Linha {line} e Coluna {col} erro: {erro['message']} \")\n else:\n try:\n col = map_columns_to_number[erro['column-number']]\n except: # o erro associado não é referente a uma coluna\n try:\n line = erro['row-number']\n logger.critical(f\"{f.name} @ Linha {line} : {erro['message']}\")\n except KeyError:\n logger.critical(f\"{f.name} @ {erro['message']}\")\n return False, num_errors\n\n\ndef anonymize_rows(rows):\n \"\"\"\n Rows is an iterable of dictionaries that contain name and\n email fields that need to be anonymized.\n\n \"\"\"\n # Load the faker and its providers\n faker = Factory.create(\"pt_BR\")\n faker.add_provider(Invoice)\n\n # Create mappings of names & emails to faked names & emails.\n # https://stackoverflow.com/questions/18066837/passing-a-parameter-to-objects-created-by-defaultdict\n nfecod = defaultdict(lambda: faker.nfce(**{'uf_code': 'DF'}))\n cpf = defaultdict(faker.cpf)\n nome = defaultdict(faker.name)\n endereco = defaultdict(faker.address)\n bairro = defaultdict(faker.bairro)\n municipio = defaultdict(faker.city)\n telefone = defaultdict(faker.phone_number)\n uf = defaultdict(faker.state_abbr)\n pais = defaultdict(faker.country)\n email = defaultdict(faker.email)\n\n # Iterate over the rows and yield anonymized rows.\n for row in rows:\n # Replace the name and email fields with faked fields.\n row['nf_chave'] = nfecod[row['nf_chave']]\n row['dest_cpf'] = cpf[row['dest_cpf']]\n row['dest_rz'] = nome[row['dest_rz']]\n row['dest_endereco'] = endereco[row['dest_endereco']]\n row['dest_bairro'] = bairro[row['dest_bairro']]\n row['dest_municipio'] = municipio[row['dest_municipio']]\n row['dest_telefone'] = telefone[row['dest_telefone']]\n row['dest_uf'] = uf[row['dest_uf']]\n row['dest_pais'] = pais[row['dest_pais']]\n row['dest_email'] = email[row['dest_email']]\n\n # Yield the row back to the caller\n yield row\n\n\ndef anonymize(source, target):\n \"\"\"\n The source argument is a path to a CSV file containing data to anonymize,\n while target is a path to write the anonymized CSV data to.\n \"\"\"\n # https://pymotw.com/2/csv/\n PARTIAL_SOURCE_DATA = Path(\"./tabular-data/\") / f\"{source}\"\n PARTIAL_DEST_DATA = Path(\"./tabular-data/\") / f\"{target}\"\n csv.register_dialect('semicolon', delimiter=';')\n with open(PARTIAL_SOURCE_DATA, 'r') as f:\n with open(PARTIAL_DEST_DATA, 'w') as o:\n # Use the DictReader to easily extract fields\n reader = csv.DictReader(f, dialect='semicolon')\n writer = csv.DictWriter(o, reader.fieldnames, dialect='semicolon')\n # write col names\n writer.writeheader()\n # Read and anonymize data, writing to target file.\n for row in anonymize_rows(reader):\n writer.writerow(row)\n\n\ndef subseting_data(dataframe: pd.core.frame.DataFrame, rootname: str):\n \"\"\"\n Salva um arquivo .csv com um subset das features originais\n \"\"\"\n dataframe = dataframe[['nf_dia_semana', 'nf_chave', 'nf_valor', 'em_rz',\n 
'em_nomeFantasia', 'em_cnpj', 'em_endereco', 'em_bairro', 'em_cep', 'em_municipio',\n 'em_telefone', 'em_uf', 'em_pais', 'em_inscricao_estadual', 'em_inscricao_municipal',\n 'em_cnae_fiscal', 'dest_rz', 'dest_cpf', 'dest_endereco', 'dest_bairro', 'dest_municipio',\n 'dest_telefone', 'dest_uf', 'dest_pais', 'dest_inscricao_estadual', 'dest_email', 'prod_nome',\n 'prod_quantidade', 'prod_unidade', 'prod_valor', 'prod_codigo_produto', 'prod_codigo_ncm',\n 'prod_categoria_ncm', 'prod_cfop', 'prod_valor_desconto', 'prod_valor_tributos',\n 'prod_codigo_ean_cmc', 'prod_valor_unitario_cmc', 'prod_valor_unitario_trib', 'prod_unidade_trib']]\n dataframe.to_csv(f\"./tabular-data/PRE_ANONY_{rootname}.csv\", sep=';', encoding='latin1', index=True)\n"
] | [
[
"pandas.read_excel",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
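
convert_to_numeric above normalises Brazilian-formatted number strings ("1.234,56") by first turning the comma into a dot and then stripping thousands separators until a single decimal point remains. A compact, self-contained sketch of the same idea; the helper name here is made up for illustration.

    def to_float_brl(num: str) -> float:
        # '1.234,56'-style string -> 1234.56; empty string -> 0.0
        num = num.strip()
        if not num:
            return 0.0
        num = num.replace(",", ".")
        while num.count(".") >= 2:
            num = num.replace(".", "", 1)   # drop the left-most (thousands) dot
        return float(num)

    print(to_float_brl("1.234,56"))  # 1234.56
    print(to_float_brl("12,90"))     # 12.9
    print(to_float_brl(""))          # 0.0
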
turnergarrow/galpy | [
"7132eddbf2dab491fe137790e31eacdc604b0534",
"cabb42bef3b4f88a2f593cdb123452cd41451db3",
"7132eddbf2dab491fe137790e31eacdc604b0534"
] | [
"tests/test_actionAngleTorus.py",
"galpy/potential/IsothermalDiskPotential.py",
"galpy/actionAngle/actionAngleIsochroneApprox.py"
] | [
"from __future__ import print_function, division\nimport os\nimport sys\nimport pytest\nimport warnings\nimport numpy\nfrom galpy.util import galpyWarning\nfrom test_actionAngle import reset_warning_registry\n_TRAVIS= bool(os.getenv('TRAVIS'))\nPY2= sys.version < '3'\n# Print all galpyWarnings always for tests of warnings\nwarnings.simplefilter(\"always\",galpyWarning)\n\n#Basic sanity checking: circular orbit should have constant R, zero vR, vT=vc\ndef test_actionAngleTorus_basic():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import MWPotential, rl, vcirc, \\\n FlattenedPowerPotential, PlummerPotential\n tol= -4.\n jr= 10.**-10.\n jz= 10.**-10.\n aAT= actionAngleTorus(pot=MWPotential)\n # at R=1, Lz=1\n jphi= 1.\n angler= numpy.linspace(0.,2.*numpy.pi,101)\n anglephi= numpy.linspace(0.,2.*numpy.pi,101)+1.\n anglez= numpy.linspace(0.,2.*numpy.pi,101)+2.\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n assert numpy.all(numpy.fabs(RvR[0]-rl(MWPotential,jphi)) < 10.**tol), \\\n 'circular orbit does not have constant radius for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \\\n 'circular orbit does not have zero radial velocity for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[2]-vcirc(MWPotential,rl(MWPotential,jphi))) < 10.**tol), \\\n 'circular orbit does not have constant vT=vc for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \\\n 'circular orbit does not have zero vertical height for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \\\n 'circular orbit does not have zero vertical velocity for actionAngleTorus'\n # at Lz=1.5, using Plummer\n tol= -3.25\n pp= PlummerPotential(normalize=1.)\n aAT= actionAngleTorus(pot=pp)\n jphi= 1.5\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n assert numpy.all(numpy.fabs(RvR[0]-rl(pp,jphi)) < 10.**tol), \\\n 'circular orbit does not have constant radius for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \\\n 'circular orbit does not have zero radial velocity for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[2]-vcirc(pp,rl(pp,jphi))) < 10.**tol), \\\n 'circular orbit does not have constant vT=vc for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \\\n 'circular orbit does not have zero vertical height for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \\\n 'circular orbit does not have zero vertical velocity for actionAngleTorus'\n # at Lz=0.5, using FlattenedPowerPotential\n tol= -4.\n fp= FlattenedPowerPotential(normalize=1.)\n aAT= actionAngleTorus(pot=fp)\n jphi= 0.5\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n assert numpy.all(numpy.fabs(RvR[0]-rl(fp,jphi)) < 10.**tol), \\\n 'circular orbit does not have constant radius for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \\\n 'circular orbit does not have zero radial velocity for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[2]-vcirc(fp,rl(fp,jphi))) < 10.**tol), \\\n 'circular orbit does not have constant vT=vc for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \\\n 'circular orbit does not have zero vertical height for actionAngleTorus'\n assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \\\n 'circular orbit does not have zero vertical velocity for actionAngleTorus'\n return None\n\n#Basic sanity checking: close-to-circular orbit should have freq. 
= epicycle freq.\ndef test_actionAngleTorus_basic_freqs():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import epifreq, omegac, verticalfreq, rl, \\\n JaffePotential, PowerSphericalPotential, HernquistPotential\n tol= -3.\n jr= 10.**-6.\n jz= 10.**-6.\n jp= JaffePotential(normalize=1.)\n aAT= actionAngleTorus(pot=jp)\n # at Lz=1\n jphi= 1.\n om= aAT.Freqs(jr,jphi,jz)\n assert numpy.fabs((om[0]-epifreq(jp,rl(jp,jphi)))/om[0]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'\n assert numpy.fabs((om[1]-omegac(jp,rl(jp,jphi)))/om[1]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'\n assert numpy.fabs((om[2]-verticalfreq(jp,rl(jp,jphi)))/om[2]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'\n # at Lz=1.5, w/ different potential\n pp= PowerSphericalPotential(normalize=1.)\n aAT= actionAngleTorus(pot=pp)\n jphi= 1.5\n om= aAT.Freqs(jr,jphi,jz)\n assert numpy.fabs((om[0]-epifreq(pp,rl(pp,jphi)))/om[0]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'\n assert numpy.fabs((om[1]-omegac(pp,rl(pp,jphi)))/om[1]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'\n assert numpy.fabs((om[2]-verticalfreq(pp,rl(pp,jphi)))/om[2]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'\n # at Lz=0.5, w/ different potential\n tol= -2.5 # appears more difficult\n hp= HernquistPotential(normalize=1.)\n aAT= actionAngleTorus(pot=hp)\n jphi= 0.5\n om= aAT.Freqs(jr,jphi,jz)\n assert numpy.fabs((om[0]-epifreq(hp,rl(hp,jphi)))/om[0]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'\n assert numpy.fabs((om[1]-omegac(hp,rl(hp,jphi)))/om[1]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'\n assert numpy.fabs((om[2]-verticalfreq(hp,rl(hp,jphi)))/om[2]) < 10.**tol, \\\n 'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'\n return None\n\n#Test that orbit from actionAngleTorus is the same as an integrated orbit\ndef test_actionAngleTorus_orbit():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import MWPotential2014\n from galpy.orbit import Orbit\n # Set up instance\n aAT= actionAngleTorus(pot=MWPotential2014,tol=10.**-5.)\n jr,jphi,jz= 0.05,1.1,0.025\n # First calculate frequencies and the initial RvR\n RvRom= aAT.xvFreqs(jr,jphi,jz,\n numpy.array([0.]),\n numpy.array([1.]),\n numpy.array([2.]))\n om= RvRom[1:]\n # Angles along an orbit\n ts= numpy.linspace(0.,100.,1001)\n angler= ts*om[0]\n anglephi= 1.+ts*om[1]\n anglez= 2.+ts*om[2]\n # Calculate the orbit using actionAngleTorus\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n # Calculate the orbit using orbit integration\n orb= Orbit([RvRom[0][0,0],RvRom[0][0,1],RvRom[0][0,2],\n RvRom[0][0,3],RvRom[0][0,4],RvRom[0][0,5]])\n orb.integrate(ts,MWPotential2014)\n # Compare\n tol= -3.\n assert numpy.all(numpy.fabs(orb.R(ts)-RvR[0]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in R'\n assert numpy.all(numpy.fabs(orb.vR(ts)-RvR[1]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in vR'\n assert numpy.all(numpy.fabs(orb.vT(ts)-RvR[2]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in vT'\n assert numpy.all(numpy.fabs(orb.z(ts)-RvR[3]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in z'\n assert 
numpy.all(numpy.fabs(orb.vz(ts)-RvR[4]) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in vz'\n assert numpy.all(numpy.fabs((orb.phi(ts)-RvR[5]+numpy.pi) % (2.*numpy.pi) -numpy.pi) < 10.**tol), \\\n 'Integrated orbit does not agree with torus orbit in phi'\n return None\n\n# Test that actionAngleTorus w/ interp pot gives same freqs as regular pot\n# Doesn't work well: TM aborts because our interpolated forces aren't\n# consistent enough with the potential for TM's taste, but we test that it at\n# at least works somewhat\ndef test_actionAngleTorus_interppot_freqs():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import LogarithmicHaloPotential, interpRZPotential\n lp= LogarithmicHaloPotential(normalize=1.)\n ip= interpRZPotential(RZPot=lp,\n interpPot=True,\n interpDens=True,interpRforce=True,interpzforce=True,\n enable_c=True)\n aAT= actionAngleTorus(pot=lp)\n aATi= actionAngleTorus(pot=ip)\n jr,jphi,jz= 0.05,1.1,0.02\n om= aAT.Freqs(jr,jphi,jz)\n omi= aATi.Freqs(jr,jphi,jz)\n assert numpy.fabs((om[0]-omi[0])/om[0]) < 0.2, 'Radial frequency computed using the torus machine does not agree between potential and interpolated potential'\n assert numpy.fabs((om[1]-omi[1])/om[1]) < 0.2, 'Azimuthal frequency computed using the torus machine does not agree between potential and interpolated potential'\n assert numpy.fabs((om[2]-omi[2])/om[2]) < 0.8, 'Vertical frequency computed using the torus machine does not agree between potential and interpolated potential'\n return None\n\n#Test the actionAngleTorus against an isochrone potential: actions\ndef test_actionAngleTorus_Isochrone_actions():\n from galpy.potential import IsochronePotential\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleIsochrone\n ip= IsochronePotential(normalize=1.,b=1.2)\n aAI= actionAngleIsochrone(ip=ip)\n tol= -6.\n aAT= actionAngleTorus(pot=ip,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.])\n anglephi= numpy.array([numpy.pi])\n anglez= numpy.array([numpy.pi/2.])\n # Calculate position from aAT\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n # Calculate actions from aAI\n ji= aAI(*RvR)\n djr= numpy.fabs((ji[0]-jr)/jr)\n dlz= numpy.fabs((ji[1]-jphi)/jphi)\n djz= numpy.fabs((ji[2]-jz)/jz)\n assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djr*100.)\n assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (dlz*100.) 
\n assert djz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djz*100.)\n return None\n\n#Test the actionAngleTorus against an isochrone potential: frequencies and angles\ndef test_actionAngleTorus_Isochrone_freqsAngles():\n from galpy.potential import IsochronePotential\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleIsochrone\n ip= IsochronePotential(normalize=1.,b=1.2)\n aAI= actionAngleIsochrone(ip=ip)\n tol= -6.\n aAT= actionAngleTorus(pot=ip,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)\n angler= angler % (2.*numpy.pi)\n anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)\n anglephi= anglephi % (2.*numpy.pi)\n anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)\n anglez= anglez % (2.*numpy.pi)\n # Calculate position from aAT\n RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)\n # Calculate actions, frequencies, and angles from aAI\n ws= aAI.actionsFreqsAngles(*RvRom[0].T)\n dOr= numpy.fabs((ws[3]-RvRom[1]))\n dOp= numpy.fabs((ws[4]-RvRom[2]))\n dOz= numpy.fabs((ws[5]-RvRom[3]))\n dar= numpy.fabs((ws[6]-angler))\n dap= numpy.fabs((ws[7]-anglephi))\n daz= numpy.fabs((ws[8]-anglez))\n dar[dar > numpy.pi]-= 2.*numpy.pi\n dar[dar < -numpy.pi]+= 2.*numpy.pi\n dap[dap > numpy.pi]-= 2.*numpy.pi\n dap[dap < -numpy.pi]+= 2.*numpy.pi\n daz[daz > numpy.pi]-= 2.*numpy.pi\n daz[daz < -numpy.pi]+= 2.*numpy.pi\n assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)\n assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.) \n assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)\n assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for ar at %f' % (numpy.nanmax(dar))\n assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for aphi at %f' % (numpy.nanmax(dap))\n assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for az at %f' % (numpy.nanmax(daz))\n return None\n\n#Test the actionAngleTorus against a Staeckel potential: actions\ndef test_actionAngleTorus_Staeckel_actions():\n from galpy.potential import KuzminKutuzovStaeckelPotential\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleStaeckel\n delta= 1.2\n kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)\n aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)\n tol= -3.\n aAT= actionAngleTorus(pot=kp,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.])\n anglephi= numpy.array([numpy.pi])\n anglez= numpy.array([numpy.pi/2.])\n # Calculate position from aAT\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n # Calculate actions from aAI\n ji= aAS(*RvR)\n djr= numpy.fabs((ji[0]-jr)/jr)\n dlz= numpy.fabs((ji[1]-jphi)/jphi)\n djz= numpy.fabs((ji[2]-jz)/jz)\n assert djr < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (djr*100.)\n assert dlz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (dlz*100.) 
\n assert djz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (djz*100.)\n return None\n\n#Test the actionAngleTorus against an isochrone potential: frequencies and angles\ndef test_actionAngleTorus_Staeckel_freqsAngles():\n from galpy.potential import KuzminKutuzovStaeckelPotential\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleStaeckel\n delta= 1.2\n kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)\n aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)\n tol= -3.\n aAT= actionAngleTorus(pot=kp,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)\n angler= angler % (2.*numpy.pi)\n anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)\n anglephi= anglephi % (2.*numpy.pi)\n anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)\n anglez= anglez % (2.*numpy.pi)\n # Calculate position from aAT\n RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)\n # Calculate actions, frequencies, and angles from aAI\n ws= aAS.actionsFreqsAngles(*RvRom[0].T)\n dOr= numpy.fabs((ws[3]-RvRom[1]))\n dOp= numpy.fabs((ws[4]-RvRom[2]))\n dOz= numpy.fabs((ws[5]-RvRom[3]))\n dar= numpy.fabs((ws[6]-angler))\n dap= numpy.fabs((ws[7]-anglephi))\n daz= numpy.fabs((ws[8]-anglez))\n dar[dar > numpy.pi]-= 2.*numpy.pi\n dar[dar < -numpy.pi]+= 2.*numpy.pi\n dap[dap > numpy.pi]-= 2.*numpy.pi\n dap[dap < -numpy.pi]+= 2.*numpy.pi\n daz[daz > numpy.pi]-= 2.*numpy.pi\n daz[daz < -numpy.pi]+= 2.*numpy.pi\n assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)\n assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.) 
\n assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)\n assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for ar at %f' % (numpy.nanmax(dar))\n assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for aphi at %f' % (numpy.nanmax(dap))\n assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for az at %f' % (numpy.nanmax(daz))\n return None\n\n#Test the actionAngleTorus against a general potential w/ actionAngleIsochroneApprox: actions\ndef test_actionAngleTorus_isochroneApprox_actions():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleIsochroneApprox\n aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)\n tol= -2.5\n aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.])\n anglephi= numpy.array([numpy.pi])\n anglez= numpy.array([numpy.pi/2.])\n # Calculate position from aAT\n RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T\n # Calculate actions from aAIA\n ji= aAIA(*RvR)\n djr= numpy.fabs((ji[0]-jr)/jr)\n dlz= numpy.fabs((ji[1]-jphi)/jphi)\n djz= numpy.fabs((ji[2]-jz)/jz)\n assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (djr*100.)\n assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (dlz*100.) \n assert djz < 10.**tol, 'actionAngleTorus and actionAngleMWPotential2014 applied to MWPotential2014 potential disagree for Jr at %f%%' % (djz*100.)\n return None\n\n#Test the actionAngleTorus against a general potential w/ actionAngleIsochrone: frequencies and angles\ndef test_actionAngleTorus_isochroneApprox_freqsAngles():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus, \\\n actionAngleIsochroneApprox\n aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)\n tol= -3.5\n aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,21)\n angler= angler % (2.*numpy.pi)\n anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,21)\n anglephi= anglephi % (2.*numpy.pi)\n anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,21)\n anglez= anglez % (2.*numpy.pi)\n # Calculate position from aAT\n RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)\n # Calculate actions, frequencies, and angles from aAI\n ws= aAIA.actionsFreqsAngles(*RvRom[0].T)\n dOr= numpy.fabs((ws[3]-RvRom[1]))\n dOp= numpy.fabs((ws[4]-RvRom[2]))\n dOz= numpy.fabs((ws[5]-RvRom[3]))\n dar= numpy.fabs((ws[6]-angler))\n dap= numpy.fabs((ws[7]-anglephi))\n daz= numpy.fabs((ws[8]-anglez))\n dar[dar > numpy.pi]-= 2.*numpy.pi\n dar[dar < -numpy.pi]+= 2.*numpy.pi\n dap[dap > numpy.pi]-= 2.*numpy.pi\n dap[dap < -numpy.pi]+= 2.*numpy.pi\n daz[daz > numpy.pi]-= 2.*numpy.pi\n daz[daz < -numpy.pi]+= 2.*numpy.pi\n assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)\n assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Ophi at %f%%' % 
(numpy.nanmax(dOp)*100.) \n assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)\n assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for ar at %f' % (numpy.nanmax(dar))\n assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for aphi at %f' % (numpy.nanmax(dap))\n assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for az at %f' % (numpy.nanmax(daz))\n return None\n\n# Test that the frequencies returned by hessianFreqs are the same as those returned by Freqs\ndef test_actionAngleTorus_hessian_freqs():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n fO= aAT.Freqs(jr,jphi,jz)[:3]\n hO= aAT.hessianFreqs(jr,jphi,jz)[1:4]\n assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods Freqs and hessianFreqs return different frequencies'\n return None\n\n# Test that the Hessian is approximately symmetric\ndef test_actionAngleTorus_hessian_symm():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n h= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]\n assert numpy.all(numpy.fabs((h-h.T)/h) < 0.03), 'actionAngleTorus Hessian is not symmetric'\n return None\n\n# Test that the Hessian is approximately correct\ndef test_actionAngleTorus_hessian_linear():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n h= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]\n dj= numpy.array([0.02,0.005,-0.01])\n do_fromhessian= numpy.dot(h,dj)\n O= numpy.array(aAT.Freqs(jr,jphi,jz)[:3])\n do= numpy.array(aAT.Freqs(jr+dj[0],jphi+dj[1],jz+dj[2])[:3])-O\n assert numpy.all(numpy.fabs((do_fromhessian-do)/O)< 0.001), 'actionAngleTorus Hessian does not return good approximation to dO/dJ'\n return None\n\n# Test that the frequencies returned by xvJacobianFreqs are the same as those returned by Freqs\ndef test_actionAngleTorus_jacobian_freqs():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n fO= aAT.Freqs(jr,jphi,jz)[:3]\n hO= aAT.xvJacobianFreqs(jr,jphi,jz,\n numpy.array([0.]),numpy.array([1.]),\n numpy.array([2.]))[3:6]\n assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods Freqs and xvJacobianFreqs return different frequencies'\n return None\n\n# Test that the Hessian returned by xvJacobianFreqs are the same as those returned by hessianFreqs\ndef test_actionAngleTorus_jacobian_hessian():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n fO= aAT.hessianFreqs(jr,jphi,jz)[0]\n hO= aAT.xvJacobianFreqs(jr,jphi,jz,\n numpy.array([0.]),numpy.array([1.]),\n numpy.array([2.]))[2]\n assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods hessianFreqs and 
xvJacobianFreqs return different Hessians'\n return None\n\n# Test that the xv returned by xvJacobianFreqs are the same as those returned by __call__\ndef test_actionAngleTorus_jacobian_xv():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.,1.])\n anglephi= numpy.array([1.,2.])\n anglez= numpy.array([2.,3.])\n fO= aAT(jr,jphi,jz,angler,anglephi,anglez)\n hO= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)[0]\n assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods __call__ and xvJacobianFreqs return different xv'\n return None\n\n# Test that the determinant of the Jacobian returned by xvJacobianFreqs is close to 1/R (should be 1 for rectangular coordinates, 1/R for cylindrical\ndef test_actionAngleTorus_jacobian_detone():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014,dJ=0.0001)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.,1.])\n anglephi= numpy.array([1.,2.])\n anglez= numpy.array([2.,3.])\n jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)\n assert numpy.fabs(jf[0][0,0]*numpy.fabs(numpy.linalg.det(jf[1][0]))-1) < 0.01, 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not have the expected determinant'\n assert numpy.fabs(jf[0][1,0]*numpy.fabs(numpy.linalg.det(jf[1][1]))-1) < 0.01, 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not have the expected determinant'\n return None\n\n# Test that Jacobian returned by xvJacobianFreqs is approximately correct\ndef test_actionAngleTorus_jacobian_linear():\n from galpy.potential import MWPotential2014\n from galpy.actionAngle import actionAngleTorus\n aAT= actionAngleTorus(pot=MWPotential2014)\n jr,jphi,jz= 0.075,1.1,0.05\n angler= numpy.array([0.5])\n anglephi= numpy.array([1.])\n anglez= numpy.array([2.])\n jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)\n xv= aAT(jr,jphi,jz,angler,anglephi,anglez)\n dja= 2.*numpy.array([0.001,0.002,0.003,-0.002,0.004,0.002])\n xv_direct= aAT(jr+dja[0],jphi+dja[1],jz+dja[2],\n angler+dja[3],anglephi+dja[4],anglez+dja[5])\n xv_fromjac= xv+numpy.dot(jf[1],dja)\n assert numpy.all(numpy.fabs((xv_fromjac-xv_direct)/xv_direct) < 0.01), 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not appear to be correct'\n return None\n\n#Test error when potential is not implemented in C\ndef test_actionAngleTorus_nocerr():\n from galpy.actionAngle import actionAngleTorus\n from test_potential import BurkertPotentialNoC\n bp= BurkertPotentialNoC()\n try:\n aAT= actionAngleTorus(pot=bp)\n except RuntimeError: pass\n else:\n raise AssertionError(\"actionAngleTorus initialization with potential w/o C should have given a RuntimeError, but didn't\")\n return None\n\n#Test error when potential is not axisymmetric\ndef test_actionAngleTorus_nonaxierr():\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import TriaxialNFWPotential\n np= TriaxialNFWPotential(normalize=1.,b=0.9)\n try:\n aAT= actionAngleTorus(pot=np)\n except RuntimeError: pass\n else:\n raise AssertionError(\"actionAngleTorus initialization with non-axisymmetric potential should have given a RuntimeError, but didn't\")\n return None\n\n# Test the Autofit torus warnings\ndef test_actionAngleTorus_AutoFitWarning():\n from galpy.potential import LogarithmicHaloPotential\n from 
galpy.actionAngle import actionAngleTorus\n lp= LogarithmicHaloPotential(normalize=1.,q=0.9)\n aAT= actionAngleTorus(pot=lp,tol=10.**-8.)\n # These should give warnings\n jr, jp, jz= 0.27209033, 1.80253892, 0.6078445\n ar, ap, az= numpy.array([1.95732492]), numpy.array([6.16753224]), \\\n numpy.array([4.08233059])\n #Turn warnings into errors to test for them\n import warnings\n with warnings.catch_warnings(record=True) as w:\n if PY2: reset_warning_registry('galpy')\n warnings.simplefilter(\"always\",galpyWarning)\n aAT(jr,jp,jz,ar,ap,az)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\",galpyWarning)\n aAT.xvFreqs(jr,jp,jz,ar,ap,az)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\",galpyWarning)\n aAT.Freqs(jr,jp,jz)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\",galpyWarning)\n aAT.hessianFreqs(jr,jp,jz)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\",galpyWarning)\n aAT.xvJacobianFreqs(jr,jp,jz,ar,ap,az)\n # Should raise warning bc of Autofit, might raise others\n raisedWarning= False\n for wa in w:\n raisedWarning= (str(wa.message) == \"actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2\")\n if raisedWarning: break\n assert raisedWarning, \"actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't\"\n return None\n\ndef test_MWPotential_warning_torus():\n # Test that using MWPotential throws a warning, see #229\n from galpy.actionAngle import actionAngleTorus\n from galpy.potential import MWPotential\n if PY2: reset_warning_registry('galpy')\n warnings.simplefilter(\"error\",galpyWarning)\n try:\n aAA= actionAngleTorus(pot=MWPotential)\n except: pass\n else:\n raise AssertionError(\"actionAngleTorus with 
MWPotential should have thrown a warning, but didn't\")\n #Turn warnings back into warnings\n warnings.simplefilter(\"always\",galpyWarning)\n return None\n\n",
"###############################################################################\n# IsothermalDiskPotential.py: class that implements the one-dimensional\n# self-gravitating isothermal disk\n###############################################################################\nimport numpy\nfrom .linearPotential import linearPotential, _APY_LOADED\nif _APY_LOADED:\n from astropy import units\nclass IsothermalDiskPotential(linearPotential):\n \"\"\"Class representing the one-dimensional self-gravitating isothermal disk\n\n .. math::\n\n \\\\rho(x) = \\\\mathrm{amp}\\\\,\\\\mathrm{sech}^2\\\\left(\\\\frac{x}{2H}\\\\right)\n\n where the scale height :math:`H^2 = \\\\sigma^2/[8\\\\pi G \\\\,\\\\mathrm{amp}]`. The parameter to setup the disk is the velocity dispersion :math:`\\\\sigma`.\n\n \"\"\"\n def __init__(self,amp=1.,sigma=0.1,ro=None,vo=None):\n \"\"\"\n NAME:\n\n __init__\n\n PURPOSE:\n\n Initialize an IsothermalDiskPotential\n\n INPUT:\n\n amp - an overall amplitude\n\n sigma - velocity dispersion (can be a Quantity)\n\n OUTPUT:\n\n instance\n\n HISTORY:\n\n 2018-04-11 - Written - Bovy (UofT)\n\n \"\"\"\n linearPotential.__init__(self,amp=amp,ro=ro,vo=vo)\n if _APY_LOADED and isinstance(sigma,units.Quantity):\n sigma= sigma.to(units.km/units.s).value/self._vo\n self._sigma2= sigma**2.\n self._H= sigma/numpy.sqrt(8.*numpy.pi*self._amp)\n self.hasC= True\n \n def _evaluate(self,x,t=0.):\n return 2.*self._sigma2*numpy.log(numpy.cosh(0.5*x/self._H))\n\n def _force(self,x,t=0.):\n return -self._sigma2*numpy.tanh(0.5*x/self._H)/self._H\n",
"###############################################################################\n# actionAngle: a Python module to calculate actions, angles, and frequencies\n#\n# class: actionAngleIsochroneApprox\n#\n# Calculate actions-angle coordinates for any potential by using \n# an isochrone potential as an approximate potential and using \n# a Fox & Binney (2013?) + torus machinery-like algorithm \n# (angle-fit) (Bovy 2014)\n#\n# methods:\n# __call__: returns (jr,lz,jz)\n# actionsFreqs: returns (jr,lz,jz,Or,Op,Oz)\n# actionsFreqsAngles: returns (jr,lz,jz,Or,Op,Oz,ar,ap,az)\n#\n###############################################################################\nimport math\nimport warnings\nimport numpy as nu\nimport numpy.linalg as linalg\nfrom scipy import optimize\nfrom galpy.potential import dvcircdR, vcirc, _isNonAxi\nfrom galpy.potential.Potential import flatten as flatten_potential\nfrom .actionAngleIsochrone import actionAngleIsochrone\nfrom .actionAngle import actionAngle\nfrom galpy.potential import IsochronePotential, MWPotential\nfrom galpy.util import bovy_plot, galpyWarning\nfrom galpy.util.bovy_conversion import physical_conversion, \\\n potential_physical_input, time_in_Gyr\n_TWOPI= 2.*nu.pi\n_ANGLETOL= 0.02 #tolerance for deciding whether full angle range is covered\n_APY_LOADED= True\ntry:\n from astropy import units\nexcept ImportError:\n _APY_LOADED= False\nclass actionAngleIsochroneApprox(actionAngle):\n \"\"\"Action-angle formalism using an isochrone potential as an approximate potential and using a Fox & Binney (2014?) like algorithm to calculate the actions using orbit integrations and a torus-machinery-like angle-fit to get the angles and frequencies (Bovy 2014)\"\"\"\n def __init__(self,*args,**kwargs):\n \"\"\"\n NAME:\n __init__\n PURPOSE:\n initialize an actionAngleIsochroneApprox object\n INPUT:\n\n Either:\n\n b= scale parameter of the isochrone parameter (can be Quantity)\n\n ip= instance of a IsochronePotential\n\n aAI= instance of an actionAngleIsochrone\n\n pot= potential to calculate action-angle variables for\n\n tintJ= (default: 100) time to integrate orbits for to estimate actions (can be Quantity)\n\n ntintJ= (default: 10000) number of time-integration points\n\n integrate_method= (default: 'dopr54_c') integration method to use\n\n dt= (None) orbit.integrate dt keyword (for fixed stepsize integration)\n\n maxn= (default: 3) Default value for all methods when using a grid in vec(n) up to this n (zero-based)\n\n ro= distance from vantage point to GC (kpc; can be Quantity)\n\n vo= circular velocity at ro (km/s; can be Quantity)\n\n OUTPUT:\n\n instance\n\n HISTORY:\n 2013-09-10 - Written - Bovy (IAS)\n \"\"\"\n actionAngle.__init__(self,\n ro=kwargs.get('ro',None),vo=kwargs.get('vo',None))\n if not 'pot' in kwargs: #pragma: no cover\n raise IOError(\"Must specify pot= for actionAngleIsochroneApprox\")\n self._pot= flatten_potential(kwargs['pot'])\n if self._pot == MWPotential:\n warnings.warn(\"Use of MWPotential as a Milky-Way-like potential is deprecated; galpy.potential.MWPotential2014, a potential fit to a large variety of dynamical constraints (see Bovy 2015), is the preferred Milky-Way-like potential in galpy\",\n galpyWarning)\n if not 'b' in kwargs and not 'ip' in kwargs \\\n and not 'aAI' in kwargs: #pragma: no cover\n raise IOError(\"Must specify b=, ip=, or aAI= for actionAngleIsochroneApprox\")\n if 'aAI' in kwargs:\n if not isinstance(kwargs['aAI'],actionAngleIsochrone): #pragma: no cover\n raise IOError(\"'Provided aAI= does not appear to be an 
instance of an actionAngleIsochrone\")\n self._aAI= kwargs['aAI']\n elif 'ip' in kwargs:\n ip= kwargs['ip']\n if not isinstance(ip,IsochronePotential): #pragma: no cover\n raise IOError(\"'Provided ip= does not appear to be an instance of an IsochronePotential\")\n self._aAI= actionAngleIsochrone(ip=ip)\n else:\n if _APY_LOADED and isinstance(kwargs['b'],units.Quantity):\n b= kwargs['b'].to(units.kpc).value/self._ro\n else:\n b= kwargs['b']\n self._aAI= actionAngleIsochrone(ip=IsochronePotential(b=b,\n normalize=1.))\n self._tintJ= kwargs.get('tintJ',100.)\n if _APY_LOADED and isinstance(self._tintJ,units.Quantity):\n self._tintJ= self._tintJ.to(units.Gyr).value\\\n /time_in_Gyr(self._vo,self._ro)\n self._ntintJ= kwargs.get('ntintJ',10000)\n self._integrate_dt= kwargs.get('dt',None)\n self._tsJ= nu.linspace(0.,self._tintJ,self._ntintJ)\n self._integrate_method= kwargs.get('integrate_method','dopr54_c')\n self._maxn= kwargs.get('maxn',3)\n self._c= False\n ext_loaded= False\n if ext_loaded and (('c' in kwargs and kwargs['c'])\n or not 'c' in kwargs): #pragma: no cover\n self._c= True\n else:\n self._c= False\n # Check the units\n self._check_consistent_units()\n return None\n \n def _evaluate(self,*args,**kwargs):\n \"\"\"\n NAME:\n __call__ (_evaluate)\n PURPOSE:\n evaluate the actions (jr,lz,jz)\n INPUT:\n Either:\n a) R,vR,vT,z,vz[,phi]:\n 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)\n 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)\n b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument\n cumul= if True, return the cumulative average actions (to look \n at convergence)\n OUTPUT:\n (jr,lz,jz)\n HISTORY:\n 2013-09-10 - Written - Bovy (IAS)\n \"\"\"\n R,vR,vT,z,vz,phi= self._parse_args(False,False,*args)\n if self._c: #pragma: no cover\n pass\n else:\n #Use self._aAI to calculate the actions and angles in the isochrone potential\n acfs= self._aAI._actionsFreqsAngles(R.flatten(),\n vR.flatten(),\n vT.flatten(),\n z.flatten(),\n vz.flatten(),\n phi.flatten())\n jrI= nu.reshape(acfs[0],R.shape)[:,:-1]\n jzI= nu.reshape(acfs[2],R.shape)[:,:-1]\n anglerI= nu.reshape(acfs[6],R.shape)\n anglezI= nu.reshape(acfs[8],R.shape)\n if nu.any((nu.fabs(nu.amax(anglerI,axis=1)-_TWOPI) > _ANGLETOL)\\\n *(nu.fabs(nu.amin(anglerI,axis=1)) > _ANGLETOL)): #pragma: no cover\n warnings.warn(\"Full radial angle range not covered for at least one object; actions are likely not reliable\",galpyWarning)\n if nu.any((nu.fabs(nu.amax(anglezI,axis=1)-_TWOPI) > _ANGLETOL)\\\n *(nu.fabs(nu.amin(anglezI,axis=1)) > _ANGLETOL)): #pragma: no cover\n warnings.warn(\"Full vertical angle range not covered for at least one object; actions are likely not reliable\",galpyWarning)\n danglerI= ((nu.roll(anglerI,-1,axis=1)-anglerI) % _TWOPI)[:,:-1]\n danglezI= ((nu.roll(anglezI,-1,axis=1)-anglezI) % _TWOPI)[:,:-1]\n if kwargs.get('cumul',False):\n sumFunc= nu.cumsum\n else:\n sumFunc= nu.sum\n jr= sumFunc(jrI*danglerI,axis=1)/sumFunc(danglerI,axis=1)\n jz= sumFunc(jzI*danglezI,axis=1)/sumFunc(danglezI,axis=1)\n if _isNonAxi(self._pot):\n lzI= nu.reshape(acfs[1],R.shape)[:,:-1]\n anglephiI= nu.reshape(acfs[7],R.shape)\n danglephiI= ((nu.roll(anglephiI,-1,axis=1)-anglephiI) % _TWOPI)[:,:-1]\n if nu.any((nu.fabs(nu.amax(anglephiI,axis=1)-_TWOPI) > _ANGLETOL)\\\n *(nu.fabs(nu.amin(anglephiI,axis=1)) > _ANGLETOL)): #pragma: no cover\n warnings.warn(\"Full azimuthal angle range not covered for at 
least one object; actions are likely not reliable\",galpyWarning)\n lz= sumFunc(lzI*danglephiI,axis=1)/sumFunc(danglephiI,axis=1)\n else:\n lz= R[:,0]*vT[:,0]\n return (jr,lz,jz)\n\n def _actionsFreqs(self,*args,**kwargs):\n \"\"\"\n NAME:\n actionsFreqs (_actionsFreqs)\n PURPOSE:\n evaluate the actions and frequencies (jr,lz,jz,Omegar,Omegaphi,Omegaz)\n INPUT:\n Either:\n a) R,vR,vT,z,vz[,phi]:\n 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)\n 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)\n b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument\n maxn= (default: object-wide default) Use a grid in vec(n) up to this n (zero-based)\n ts= if set, the phase-space points correspond to these times (IF NOT SET, WE ASSUME THAT ts IS THAT THAT IS ASSOCIATED WITH THIS OBJECT)\n _firstFlip= (False) if True and Orbits are given, the backward part of the orbit is integrated first and stored in the Orbit object\n OUTPUT:\n (jr,lz,jz,Omegar,Omegaphi,Omegaz)\n HISTORY:\n 2013-09-10 - Written - Bovy (IAS)\n \"\"\"\n acfs= self._actionsFreqsAngles(*args,**kwargs)\n return (acfs[0],acfs[1],acfs[2],acfs[3],acfs[4],acfs[5])\n\n def _actionsFreqsAngles(self,*args,**kwargs):\n \"\"\"\n NAME:\n actionsFreqsAngles (_actionsFreqsAngles)\n PURPOSE:\n evaluate the actions, frequencies, and angles (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)\n INPUT:\n Either:\n a) R,vR,vT,z,vz[,phi]:\n 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)\n 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)\n b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument\n maxn= (default: object-wide default) Use a grid in vec(n) up to this n (zero-based)\n ts= if set, the phase-space points correspond to these times (IF NOT SET, WE ASSUME THAT ts IS THAT THAT IS ASSOCIATED WITH THIS OBJECT)\n _firstFlip= (False) if True and Orbits are given, the backward part of the orbit is integrated first and stored in the Orbit object\n OUTPUT:\n (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)\n HISTORY:\n 2013-09-10 - Written - Bovy (IAS)\n \"\"\"\n from galpy.orbit import Orbit\n _firstFlip= kwargs.get('_firstFlip',False)\n #If the orbit was already integrated, set ts to the integration times\n if isinstance(args[0],Orbit) and hasattr(args[0],'orbit') \\\n and not 'ts' in kwargs:\n kwargs['ts']= args[0].t\n elif (isinstance(args[0],list) and isinstance(args[0][0],Orbit)) \\\n and hasattr(args[0][0],'orbit') \\\n and not 'ts' in kwargs:\n kwargs['ts']= args[0][0].t\n R,vR,vT,z,vz,phi= self._parse_args(True,_firstFlip,*args)\n if 'ts' in kwargs and not kwargs['ts'] is None:\n ts= kwargs['ts']\n if _APY_LOADED and isinstance(ts,units.Quantity):\n ts= ts.to(units.Gyr).value\\\n /time_in_Gyr(self._vo,self._ro)\n else:\n ts= nu.empty(R.shape[1])\n ts[self._ntintJ-1:]= self._tsJ\n ts[:self._ntintJ-1]= -self._tsJ[1:][::-1]\n maxn= kwargs.get('maxn',self._maxn)\n if self._c: #pragma: no cover\n pass\n else:\n #Use self._aAI to calculate the actions and angles in the isochrone potential\n if '_acfs' in kwargs: acfs= kwargs['_acfs']\n else:\n acfs= self._aAI._actionsFreqsAngles(R.flatten(),\n vR.flatten(),\n vT.flatten(),\n z.flatten(),\n vz.flatten(),\n phi.flatten())\n jrI= nu.reshape(acfs[0],R.shape)[:,:-1]\n jzI= nu.reshape(acfs[2],R.shape)[:,:-1]\n anglerI= 
nu.reshape(acfs[6],R.shape)\n anglezI= nu.reshape(acfs[8],R.shape)\n if nu.any((nu.fabs(nu.amax(anglerI,axis=1)-_TWOPI) > _ANGLETOL)\\\n *(nu.fabs(nu.amin(anglerI,axis=1)) > _ANGLETOL)): #pragma: no cover\n warnings.warn(\"Full radial angle range not covered for at least one object; actions are likely not reliable\",galpyWarning)\n if nu.any((nu.fabs(nu.amax(anglezI,axis=1)-_TWOPI) > _ANGLETOL)\\\n *(nu.fabs(nu.amin(anglezI,axis=1)) > _ANGLETOL)): #pragma: no cover\n warnings.warn(\"Full vertical angle range not covered for at least one object; actions are likely not reliable\",galpyWarning)\n danglerI= ((nu.roll(anglerI,-1,axis=1)-anglerI) % _TWOPI)[:,:-1]\n danglezI= ((nu.roll(anglezI,-1,axis=1)-anglezI) % _TWOPI)[:,:-1]\n jr= nu.sum(jrI*danglerI,axis=1)/nu.sum(danglerI,axis=1)\n jz= nu.sum(jzI*danglezI,axis=1)/nu.sum(danglezI,axis=1)\n if _isNonAxi(self._pot): #pragma: no cover\n lzI= nu.reshape(acfs[1],R.shape)[:,:-1]\n anglephiI= nu.reshape(acfs[7],R.shape)\n if nu.any((nu.fabs(nu.amax(anglephiI,axis=1)-_TWOPI) > _ANGLETOL)\\\n *(nu.fabs(nu.amin(anglephiI,axis=1)) > _ANGLETOL)): #pragma: no cover\n warnings.warn(\"Full azimuthal angle range not covered for at least one object; actions are likely not reliable\",galpyWarning)\n danglephiI= ((nu.roll(anglephiI,-1,axis=1)-anglephiI) % _TWOPI)[:,:-1]\n lz= nu.sum(lzI*danglephiI,axis=1)/nu.sum(danglephiI,axis=1)\n else:\n lz= R[:,len(ts)//2]*vT[:,len(ts)//2]\n #Now do an 'angle-fit'\n angleRT= dePeriod(nu.reshape(acfs[6],R.shape))\n acfs7= nu.reshape(acfs[7],R.shape)\n negFreqIndx= nu.median(acfs7-nu.roll(acfs7,1,axis=1),axis=1) < 0. #anglephi is decreasing\n anglephiT= nu.empty(acfs7.shape)\n anglephiT[negFreqIndx,:]= dePeriod(_TWOPI-acfs7[negFreqIndx,:])\n negFreqPhi= nu.zeros(R.shape[0],dtype='bool')\n negFreqPhi[negFreqIndx]= True\n anglephiT[True^negFreqIndx,:]= dePeriod(acfs7[True^negFreqIndx,:])\n angleZT= dePeriod(nu.reshape(acfs[8],R.shape))\n #Write the angle-fit as Y=AX, build A and Y\n nt= len(ts)\n no= R.shape[0]\n #remove 0,0,0 and half-plane\n if _isNonAxi(self._pot):\n nn= (2*maxn-1)**2*maxn-(maxn-1)*(2*maxn-1)-maxn\n else:\n nn= maxn*(2*maxn-1)-maxn \n A= nu.zeros((no,nt,2+nn))\n A[:,:,0]= 1.\n A[:,:,1]= ts\n #sorting the phi and Z grids this way makes it easy to exclude the origin\n phig= list(nu.arange(-maxn+1,maxn,1))\n phig.sort(key = lambda x: abs(x))\n phig= nu.array(phig,dtype='int')\n if _isNonAxi(self._pot):\n grid= nu.meshgrid(nu.arange(maxn),phig,phig)\n else:\n grid= nu.meshgrid(nu.arange(maxn),phig)\n gridR= grid[0].T.flatten()[1:] #remove 0,0,0\n gridZ= grid[1].T.flatten()[1:]\n mask = nu.ones(len(gridR),dtype=bool)\n # excludes axis that is not in half-space\n if _isNonAxi(self._pot):\n gridphi= grid[2].T.flatten()[1:]\n mask= True\\\n ^(gridR == 0)*((gridphi < 0)+((gridphi==0)*(gridZ < 0)))\n else:\n mask[:2*maxn-3:2]= False\n gridR= gridR[mask]\n gridZ= gridZ[mask]\n tangleR= nu.tile(angleRT.T,(nn,1,1)).T\n tgridR= nu.tile(gridR,(no,nt,1))\n tangleZ= nu.tile(angleZT.T,(nn,1,1)).T\n tgridZ= nu.tile(gridZ,(no,nt,1))\n if _isNonAxi(self._pot):\n gridphi= gridphi[mask]\n tgridphi= nu.tile(gridphi,(no,nt,1))\n tanglephi= nu.tile(anglephiT.T,(nn,1,1)).T\n sinnR= nu.sin(tgridR*tangleR+tgridphi*tanglephi+tgridZ*tangleZ)\n else:\n sinnR= nu.sin(tgridR*tangleR+tgridZ*tangleZ)\n A[:,:,2:]= sinnR\n #Matrix magic\n atainv= nu.empty((no,2+nn,2+nn))\n AT= nu.transpose(A,axes=(0,2,1))\n for ii in range(no):\n atainv[ii,:,:,]= linalg.inv(nu.dot(AT[ii,:,:],A[ii,:,:]))\n ATAR= 
nu.sum(AT*nu.transpose(nu.tile(angleRT,(2+nn,1,1)),axes=(1,0,2)),axis=2)\n ATAT= nu.sum(AT*nu.transpose(nu.tile(anglephiT,(2+nn,1,1)),axes=(1,0,2)),axis=2)\n ATAZ= nu.sum(AT*nu.transpose(nu.tile(angleZT,(2+nn,1,1)),axes=(1,0,2)),axis=2)\n angleR= nu.sum(atainv[:,0,:]*ATAR,axis=1)\n OmegaR= nu.sum(atainv[:,1,:]*ATAR,axis=1)\n anglephi= nu.sum(atainv[:,0,:]*ATAT,axis=1)\n Omegaphi= nu.sum(atainv[:,1,:]*ATAT,axis=1)\n angleZ= nu.sum(atainv[:,0,:]*ATAZ,axis=1)\n OmegaZ= nu.sum(atainv[:,1,:]*ATAZ,axis=1)\n Omegaphi[negFreqIndx]= -Omegaphi[negFreqIndx]\n anglephi[negFreqIndx]= _TWOPI-anglephi[negFreqIndx]\n if kwargs.get('_retacfs',False):\n return (jr,lz,jz,OmegaR,Omegaphi,OmegaZ, #pragma: no cover\n angleR % _TWOPI,\n anglephi % _TWOPI,\n angleZ % _TWOPI,acfs)\n else:\n return (jr,lz,jz,OmegaR,Omegaphi,OmegaZ,\n angleR % _TWOPI,\n anglephi % _TWOPI,\n angleZ % _TWOPI)\n\n def plot(self,*args,**kwargs):\n \"\"\"\n NAME:\n plot\n PURPOSE:\n plot the angles vs. each other, to check whether the isochrone\n approximation is good\n INPUT:\n Either:\n a) R,vR,vT,z,vz:\n floats: phase-space value for single object\n b) Orbit instance\n type= ('araz') type of plot to make\n a) 'araz': az vs. ar, with color-coded aphi\n b) 'araphi': aphi vs. ar, with color-coded az\n c) 'azaphi': aphi vs. az, with color-coded ar\n d) 'jr': cumulative average of jr with time, to assess convergence\n e) 'lz': same as 'jr' but for lz\n f) 'jz': same as 'jr' but for jz\n deperiod= (False), if True, de-period the angles\n downsample= (False) if True, downsample what's plotted to 400 points\n +plot kwargs\n OUTPUT:\n plot to output\n HISTORY:\n 2013-09-10 - Written - Bovy (IAS)\n \"\"\"\n #Kwargs\n type= kwargs.pop('type','araz')\n deperiod= kwargs.pop('deperiod',False)\n downsample= kwargs.pop('downsample',False)\n #Parse input\n R,vR,vT,z,vz,phi= self._parse_args('a' in type,False,*args)\n #Use self._aAI to calculate the actions and angles in the isochrone potential\n acfs= self._aAI._actionsFreqsAngles(R.flatten(),\n vR.flatten(),\n vT.flatten(),\n z.flatten(),\n vz.flatten(),\n phi.flatten())\n if type == 'jr' or type == 'lz' or type == 'jz':\n jrI= nu.reshape(acfs[0],R.shape)[:,:-1]\n jzI= nu.reshape(acfs[2],R.shape)[:,:-1]\n anglerI= nu.reshape(acfs[6],R.shape)\n anglezI= nu.reshape(acfs[8],R.shape)\n danglerI= ((nu.roll(anglerI,-1,axis=1)-anglerI) % _TWOPI)[:,:-1]\n danglezI= ((nu.roll(anglezI,-1,axis=1)-anglezI) % _TWOPI)[:,:-1]\n if True:\n sumFunc= nu.cumsum\n jr= sumFunc(jrI*danglerI,axis=1)/sumFunc(danglerI,axis=1)\n jz= sumFunc(jzI*danglezI,axis=1)/sumFunc(danglezI,axis=1)\n lzI= nu.reshape(acfs[1],R.shape)[:,:-1]\n anglephiI= nu.reshape(acfs[7],R.shape)\n danglephiI= ((nu.roll(anglephiI,-1,axis=1)-anglephiI) % _TWOPI)[:,:-1]\n lz= sumFunc(lzI*danglephiI,axis=1)/sumFunc(danglephiI,axis=1)\n from galpy.orbit import Orbit\n if isinstance(args[0],Orbit) and hasattr(args[0],'t'):\n ts= args[0].t[:-1]\n else:\n ts= self._tsJ[:-1]\n if type == 'jr':\n if downsample:\n plotx= ts[::int(round(self._ntintJ//400))]\n ploty= jr[0,::int(round(self._ntintJ//400))]/jr[0,-1]\n plotz= anglerI[0,:-1:int(round(self._ntintJ//400))]\n else:\n plotx= ts\n ploty= jr[0,:]/jr[0,-1]\n plotz= anglerI[0,:-1]\n bovy_plot.bovy_plot(plotx,ploty,\n c=plotz,\n s=20.,\n scatter=True,\n edgecolor='none',\n xlabel=r'$t$',\n ylabel=r'$J^A_R / \\langle J^A_R \\rangle$',\n clabel=r'$\\theta^A_R$',\n vmin=0.,vmax=2.*nu.pi,\n crange=[0.,2.*nu.pi],\n colorbar=True,\n **kwargs)\n elif type == 'lz':\n if downsample:\n plotx= 
ts[::int(round(self._ntintJ//400))]\n ploty= lz[0,::int(round(self._ntintJ//400))]/lz[0,-1]\n plotz= anglephiI[0,:-1:int(round(self._ntintJ//400))]\n else:\n plotx= ts\n ploty= lz[0,:]/lz[0,-1]\n plotz= anglephiI[0,:-1]\n bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,\n scatter=True,\n edgecolor='none',\n xlabel=r'$t$',\n ylabel=r'$L^A_Z / \\langle L^A_Z \\rangle$',\n clabel=r'$\\theta^A_\\phi$',\n vmin=0.,vmax=2.*nu.pi,\n crange=[0.,2.*nu.pi],\n colorbar=True,\n **kwargs)\n elif type == 'jz':\n if downsample:\n plotx= ts[::int(round(self._ntintJ//400))]\n ploty= jz[0,::int(round(self._ntintJ//400))]/jz[0,-1]\n plotz= anglezI[0,:-1:int(round(self._ntintJ//400))]\n else:\n plotx= ts\n ploty= jz[0,:]/jz[0,-1]\n plotz= anglezI[0,:-1]\n bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,\n scatter=True,\n edgecolor='none',\n xlabel=r'$t$',\n ylabel=r'$J^A_Z / \\langle J^A_Z \\rangle$',\n clabel=r'$\\theta^A_Z$',\n vmin=0.,vmax=2.*nu.pi,\n crange=[0.,2.*nu.pi],\n colorbar=True,\n **kwargs)\n else:\n if deperiod:\n if 'ar' in type:\n angleRT= dePeriod(nu.reshape(acfs[6],R.shape))\n else:\n angleRT= nu.reshape(acfs[6],R.shape)\n if 'aphi' in type:\n acfs7= nu.reshape(acfs[7],R.shape)\n negFreqIndx= nu.median(acfs7-nu.roll(acfs7,1,axis=1),axis=1) < 0. #anglephi is decreasing\n anglephiT= nu.empty(acfs7.shape)\n anglephiT[negFreqIndx,:]= dePeriod(_TWOPI-acfs7[negFreqIndx,:])\n negFreqPhi= nu.zeros(R.shape[0],dtype='bool')\n negFreqPhi[negFreqIndx]= True\n anglephiT[True^negFreqIndx,:]= dePeriod(acfs7[True^negFreqIndx,:])\n else:\n anglephiT= nu.reshape(acfs[7],R.shape)\n if 'az' in type:\n angleZT= dePeriod(nu.reshape(acfs[8],R.shape))\n else:\n angleZT= nu.reshape(acfs[8],R.shape)\n xrange= None\n yrange= None\n else:\n angleRT= nu.reshape(acfs[6],R.shape)\n anglephiT= nu.reshape(acfs[7],R.shape)\n angleZT= nu.reshape(acfs[8],R.shape)\n xrange= [-0.5,2.*nu.pi+0.5]\n yrange= [-0.5,2.*nu.pi+0.5]\n vmin, vmax= 0.,2.*nu.pi\n crange= [vmin,vmax]\n if type == 'araz':\n if downsample:\n plotx= angleRT[0,::int(round(self._ntintJ//400))]\n ploty= angleZT[0,::int(round(self._ntintJ//400))]\n plotz= anglephiT[0,::int(round(self._ntintJ//400))]\n else:\n plotx= angleRT[0,:]\n ploty= angleZT[0,:]\n plotz= anglephiT[0,:]\n bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,\n scatter=True,\n edgecolor='none',\n xlabel=r'$\\theta^A_R$',\n ylabel=r'$\\theta^A_Z$',\n clabel=r'$\\theta^A_\\phi$',\n xrange=xrange,yrange=yrange,\n vmin=vmin,vmax=vmax,\n crange=crange,\n colorbar=True,\n **kwargs) \n elif type == 'araphi':\n if downsample:\n plotx= angleRT[0,::int(round(self._ntintJ//400))]\n ploty= anglephiT[0,::int(round(self._ntintJ//400))]\n plotz= angleZT[0,::int(round(self._ntintJ//400))]\n else:\n plotx= angleRT[0,:]\n ploty= anglephiT[0,:]\n plotz= angleZT[0,:]\n bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,\n scatter=True,\n edgecolor='none',\n xlabel=r'$\\theta^A_R$',\n clabel=r'$\\theta^A_Z$',\n ylabel=r'$\\theta^A_\\phi$',\n xrange=xrange,yrange=yrange,\n vmin=vmin,vmax=vmax,\n crange=crange,\n colorbar=True,\n **kwargs) \n elif type == 'azaphi':\n if downsample:\n plotx= angleZT[0,::int(round(self._ntintJ//400))]\n ploty= anglephiT[0,::int(round(self._ntintJ//400))]\n plotz= angleRT[0,::int(round(self._ntintJ//400))]\n else:\n plotx= angleZT[0,:]\n ploty= anglephiT[0,:]\n plotz= angleRT[0,:]\n bovy_plot.bovy_plot(plotx,ploty,c=plotz,s=20.,\n scatter=True,\n edgecolor='none',\n clabel=r'$\\theta^A_R$',\n xlabel=r'$\\theta^A_Z$',\n ylabel=r'$\\theta^A_\\phi$',\n xrange=xrange,yrange=yrange,\n vmin=vmin,vmax=vmax,\n 
crange=crange,\n colorbar=True,\n **kwargs) \n return None\n\n def _parse_args(self,freqsAngles=True,_firstFlip=False,*args):\n \"\"\"Helper function to parse the arguments to the __call__ and actionsFreqsAngles functions\"\"\"\n from galpy.orbit import Orbit\n RasOrbit= False\n integrated= True #whether the orbit was already integrated when given\n if len(args) == 5 or len(args) == 3: #pragma: no cover\n raise IOError(\"Must specify phi for actionAngleIsochroneApprox\")\n if len(args) == 6 or len(args) == 4:\n if len(args) == 6:\n R,vR,vT, z, vz, phi= args\n else:\n R,vR,vT, phi= args\n z, vz= 0., 0.\n if isinstance(R,float):\n os= [Orbit([R,vR,vT,z,vz,phi])]\n RasOrbit= True\n integrated= False\n elif len(R.shape) == 1: #not integrated yet\n os= [Orbit([R[ii],vR[ii],vT[ii],z[ii],vz[ii],phi[ii]]) for ii in range(R.shape[0])]\n RasOrbit= True\n integrated= False\n if isinstance(args[0],Orbit) \\\n or (isinstance(args[0],list) and isinstance(args[0][0],Orbit)) \\\n or RasOrbit:\n if RasOrbit:\n pass\n elif not isinstance(args[0],list):\n os= [args[0]]\n if os[0].phasedim() == 3 or os[0].phasedim() == 5: #pragma: no cover\n raise IOError(\"Must specify phi for actionAngleIsochroneApprox\")\n else:\n os= args[0]\n if os[0].phasedim() == 3 or os[0].phasedim() == 5: #pragma: no cover\n raise IOError(\"Must specify phi for actionAngleIsochroneApprox\")\n self._check_consistent_units_orbitInput(os[0])\n if not hasattr(os[0],'orbit'): #not integrated yet\n if _firstFlip:\n for o in os:\n o.vxvv[...,1]= -o.vxvv[...,1]\n o.vxvv[...,2]= -o.vxvv[...,2]\n o.vxvv[...,4]= -o.vxvv[...,4]\n [o.integrate(self._tsJ,pot=self._pot,\n method=self._integrate_method,\n dt=self._integrate_dt) for o in os]\n if _firstFlip:\n for o in os:\n o.vxvv[...,1]= -o.vxvv[...,1]\n o.vxvv[...,2]= -o.vxvv[...,2]\n o.vxvv[...,4]= -o.vxvv[...,4]\n o.orbit[...,1]= -o.orbit[...,1]\n o.orbit[...,2]= -o.orbit[...,2]\n o.orbit[...,4]= -o.orbit[...,4]\n integrated= False\n ntJ= os[0].getOrbit().shape[0]\n no= len(os)\n R= nu.empty((no,ntJ))\n vR= nu.empty((no,ntJ))\n vT= nu.empty((no,ntJ))\n z= nu.zeros((no,ntJ))+10.**-7. #To avoid numpy warnings for\n vz= nu.zeros((no,ntJ))+10.**-7. #planarOrbits\n phi= nu.empty((no,ntJ))\n for ii in range(len(os)):\n this_orbit= os[ii].getOrbit()\n R[ii,:]= this_orbit[:,0]\n vR[ii,:]= this_orbit[:,1]\n vT[ii,:]= this_orbit[:,2]\n if this_orbit.shape[1] == 6:\n z[ii,:]= this_orbit[:,3]\n vz[ii,:]= this_orbit[:,4]\n phi[ii,:]= this_orbit[:,5]\n else:\n phi[ii,:]= this_orbit[:,3]\n if freqsAngles and not integrated: #also integrate backwards in time, such that the requested point is not at the edge\n no= R.shape[0]\n nt= R.shape[1]\n oR= nu.empty((no,2*nt-1))\n ovR= nu.empty((no,2*nt-1))\n ovT= nu.empty((no,2*nt-1))\n oz= nu.zeros((no,2*nt-1))+10.**-7. #To avoid numpy warnings for\n ovz= nu.zeros((no,2*nt-1))+10.**-7. 
#planarOrbits\n ophi= nu.empty((no,2*nt-1))\n if _firstFlip:\n oR[:,:nt]= R[:,::-1]\n ovR[:,:nt]= vR[:,::-1]\n ovT[:,:nt]= vT[:,::-1]\n oz[:,:nt]= z[:,::-1]\n ovz[:,:nt]= vz[:,::-1]\n ophi[:,:nt]= phi[:,::-1]\n else:\n oR[:,nt-1:]= R\n ovR[:,nt-1:]= vR\n ovT[:,nt-1:]= vT\n oz[:,nt-1:]= z\n ovz[:,nt-1:]= vz\n ophi[:,nt-1:]= phi\n #load orbits\n if _firstFlip:\n os= [Orbit([R[ii,0],vR[ii,0],vT[ii,0],z[ii,0],vz[ii,0],phi[ii,0]]) for ii in range(R.shape[0])]\n else:\n os= [Orbit([R[ii,0],-vR[ii,0],-vT[ii,0],z[ii,0],-vz[ii,0],phi[ii,0]]) for ii in range(R.shape[0])]\n #integrate orbits\n [o.integrate(self._tsJ,pot=self._pot,\n method=self._integrate_method,\n dt=self._integrate_dt) for o in os]\n #extract phase-space points along the orbit\n ts= self._tsJ\n if _firstFlip:\n for ii in range(no):\n oR[ii,nt:]= os[ii].R(ts[1:]) #drop t=0, which we have\n ovR[ii,nt:]= os[ii].vR(ts[1:]) #already\n ovT[ii,nt:]= os[ii].vT(ts[1:]) # reverse, such that \n if os[ii].getOrbit().shape[1] == 6:\n oz[ii,nt:]= os[ii].z(ts[1:]) #everything is in the \n ovz[ii,nt:]= os[ii].vz(ts[1:]) #right order\n ophi[ii,nt:]= os[ii].phi(ts[1:]) #!\n else:\n for ii in range(no):\n oR[ii,:nt-1]= os[ii].R(ts[1:])[::-1] #drop t=0, which we have\n ovR[ii,:nt-1]= -os[ii].vR(ts[1:])[::-1] #already\n ovT[ii,:nt-1]= -os[ii].vT(ts[1:])[::-1] # reverse, such that \n if os[ii].getOrbit().shape[1] == 6:\n oz[ii,:nt-1]= os[ii].z(ts[1:])[::-1] #everything is in the \n ovz[ii,:nt-1]= -os[ii].vz(ts[1:])[::-1] #right order\n ophi[ii,:nt-1]= os[ii].phi(ts[1:])[::-1] #!\n return (oR,ovR,ovT,oz,ovz,ophi)\n else:\n return (R,vR,vT,z,vz,phi)\n\n@potential_physical_input\n@physical_conversion('position',pop=True)\ndef estimateBIsochrone(pot,R,z,phi=None):\n \"\"\"\n NAME:\n\n estimateBIsochrone\n\n PURPOSE:\n\n Estimate a good value for the scale of the isochrone potential by matching the slope of the rotation curve\n\n INPUT:\n\n pot- Potential instance or list thereof\n\n R,z - coordinates (if these are arrays, the median estimated delta is returned, i.e., if this is an orbit)\n\n phi= (None) azimuth to use for non-axisymmetric potentials (array if R and z are arrays)\n\n OUTPUT:\n\n b if 1 R,Z given\n\n bmin,bmedian,bmax if multiple R given \n\n HISTORY:\n\n 2013-09-12 - Written - Bovy (IAS)\n\n 2016-02-20 - Changed input order to allow physical conversions - Bovy (UofT)\n\n 2016-06-28 - Added phi= keyword for non-axisymmetric potential - Bovy (UofT)\n\n \"\"\"\n if pot is None: #pragma: no cover\n raise IOError(\"pot= needs to be set to a Potential instance or list thereof\")\n if isinstance(R,nu.ndarray):\n if phi is None: phi= [None for r in R]\n bs= nu.array([estimateBIsochrone(pot,R[ii],z[ii],phi=phi[ii],\n use_physical=False)\n for ii in range(len(R))])\n return nu.array([nu.amin(bs[True^nu.isnan(bs)]),\n nu.median(bs[True^nu.isnan(bs)]),\n nu.amax(bs[True^nu.isnan(bs)])])\n else:\n r2= R**2.+z**2\n r= math.sqrt(r2)\n dlvcdlr= dvcircdR(pot,r,phi=phi,use_physical=False)/vcirc(pot,r,phi=phi,use_physical=False)*r\n try:\n b= optimize.brentq(lambda x: dlvcdlr-(x/math.sqrt(r2+x**2.)-0.5*r2/(r2+x**2.)),\n 0.01,100.)\n except: #pragma: no cover\n b= nu.nan\n return b\n\ndef dePeriod(arr):\n \"\"\"make an array of periodic angles increase linearly\"\"\"\n diff= arr-nu.roll(arr,1,axis=1)\n w= diff < -6.\n addto= nu.cumsum(w.astype(int),axis=1)\n return arr+_TWOPI*addto\n"
] | [
[
"numpy.nanmax",
"numpy.dot",
"numpy.linspace",
"numpy.all",
"numpy.linalg.det",
"numpy.array",
"numpy.fabs"
],
[
"numpy.cosh",
"numpy.tanh",
"numpy.sqrt"
],
[
"numpy.dot",
"numpy.amax",
"numpy.sum",
"numpy.linspace",
"numpy.reshape",
"numpy.arange",
"numpy.amin",
"numpy.isnan",
"numpy.tile",
"numpy.sin",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"numpy.roll",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
abefukasawa/datascience_course | [
"ee0a505134383034e09020d9b1de18904d9b2665"
] | [
"03-machine-learning-tabular-crossection/05 - Data_Compression/05_Data_Compression/solutions/solution_01.py"
] | [
"# performing preprocessing part \nfrom sklearn.preprocessing import StandardScaler \nsc = StandardScaler() \n \nX_train = sc.fit_transform(X_train) \nX_test = sc.transform(X_test) "
] | [
[
"sklearn.preprocessing.StandardScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Rupii/Machine-Learning | [
"2b00698815efb04346d5cb980b68af76f27a5ca6"
] | [
"Regression/multiple_linear_regression.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 24 23:18:54 2018\n\n@author: Rupesh\n\"\"\"\n\n\n\n# Multiple Linear Regression\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nplt.style.use(\"ggplot\")\n# loading dependies\n\ndf = pd.read_csv(\"50_Startups.csv\")\ndf.head()\nX = df.iloc[:, :-1].values\ny = df.iloc[:, 4].values\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nX_cat = LabelEncoder()\nX[:, 3] = X_cat.fit_transform(X[:, 3])\n\nonehot = OneHotEncoder(categorical_features = [3])\nX = onehot.fit_transform(X).toarray()\n# avoiding the dummy variable trap\nX = X[:, 1:]\n\n\n# train test split\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)\n\n# model\nfrom sklearn.linear_model import LinearRegression\n\nreg = LinearRegression()\nreg.fit(X_train, y_train)\n\n# predict\n\ny_pred = reg.predict(X_test)\nimport skl"
] | [
[
"pandas.read_csv",
"sklearn.cross_validation.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.style.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
argonde/codeSkills | [
"939feb29102a2e47a8c2c3047d3f77dd75b1465d"
] | [
"py/webScrapper/getSyntax.py"
] | [
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# auth: Ruben López Vázquez\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom IPython.core.display import clear_output\nfrom random import randint\nimport pandas as pd\nimport csv\nimport time as t\nimport sqlite3\nimport sys\nimport os\n\n\ndef is_digit(check_input):\n \"\"\"\n function to check whether the input is an integer digit\n returns : bool\n \"\"\"\n if check_input.isdigit():\n return True\n return False\n\n\ndef create_table():\n \"\"\"\n pass an SQL statement to the database, to create a new table with a Q&A (two fields) schema\n in case that table does not exist yet\n \"\"\"\n c.execute('CREATE TABLE IF NOT EXISTS SqlSyntax (Inquiry TEXT, Code TEXT)')\n conn.commit()\n\n\ndef data_entry(data):\n \"\"\"\n pass an SQL statement to the database, to insert new values to the table\n values are always in pairs, to write a line in compliance with the relational data scheme\n \"\"\"\n insert = \"INSERT INTO SqlSyntax (Inquiry, Code) VALUES (?, ?)\"\n c.executemany(insert, [data, ])\n conn.commit()\n\n\ndef web_data_html_scrap(page, lang):\n # Output lists\n title = []\n value = []\n exception_dict = {}\n\n # User requests to get a response from a page\n r = requests.get(page)\n # Use bs to parse the response\n soup = BeautifulSoup(r.text, 'html.parser')\n\n # Create a list of links, i.e. urls to scrap\n url_links = []\n a = 1\n try:\n # Select all links in 'div' objects from the index page\n for link in soup.findAll('a'):\n url_links.append(url + str(link.get('href')))\n # log all url links to file for inspection, then hash the BREAK command\n a += 1\n if a < len(soup.findAll('a')):\n continue\n else:\n with open(wd + f\"all_{lang}_url_links.csv\", \"w\") as f:\n write_file = csv.writer(f)\n write_file.writerow(url_links)\n # Customise the url_links slice and then hash the line below\n break\n print(\"all links found: \" + str(len(url_links)))\n re = 1\n\n def try_despite_exceptions(links, req):\n print(\"Looking into list of urls ...\")\n urls = links[req:len(links)]\n print('There are ' + str(len(urls)) + ' requests left')\n # Preparing the monitoring of the loop\n start_time = t.time()\n # Make a get request\n for u in urls:\n try:\n r2 = requests.get(u)\n # Pause the loop\n t.sleep(randint(1, 3))\n\n # Monitor the requests\n elapsed_time = t.time() - start_time\n print('Request:{}; Frequency: {} requests/s'.format(req, req / elapsed_time))\n clear_output(wait=True)\n\n # parse the data\n html_soup = BeautifulSoup(r2.text, 'html.parser')\n container = html_soup.find('div', class_='answer_info_holder_outer')\n title.append(container.find('div', class_='answer_info_title').text)\n value.append(container.find('textarea', class_='code_mirror_code').text)\n req += 1\n # in case any of the attribute references on previous lines above is not available\n except AttributeError as e:\n print(\"Error while fetching data, resuming ...\")\n # catch the exception and log it on the dictionary opened for exceptions\n exception_dict[u] = e\n req += 1\n # as long as there are links left to visit: da Capo\n if req < len(links):\n try_despite_exceptions(links, req)\n # no links left, then do nothing\n else:\n pass\n\n url_links = url_links[10:-15]\n if re < len(url_links):\n return try_despite_exceptions(url_links, re)\n else:\n pass\n\n except AttributeError as err:\n print(\"Error while fetching urls in blocks:\", err)\n\n finally:\n # log files, always useful for monitoring operations\n with open(wd + f\"log_{lang}_code_column.csv\", \"a\") as 
f:\n write_file = csv.writer(f)\n write_file.writerow(value)\n\n with open(wd + f\"log_{lang}_title_column.csv\", \"a\") as g:\n write_file = csv.writer(g)\n write_file.writerow(title)\n\n with open(wd + f\"log_{lang}_exceptions.csv\", \"a\") as h:\n write_file = csv.writer(h)\n write_file.writerow(exception_dict)\n\n # collect raw data into a data.frame\n py_syntax = {'Inquiry': title,\n 'Code': value\n }\n df = pd.DataFrame(py_syntax, columns=['Inquiry', 'Code'])\n\n # Create table and populate it\n create_table()\n print(\"Database created on SQLite\")\n for idx, rows in df.iterrows():\n # create a table\n row = [rows['Inquiry'], rows['Code']]\n data_entry(row)\n if conn:\n # close cursor\n c.close()\n # close connection\n conn.close()\n print(\"The SQLite connection is closed\")\n\n\n#####################################################################################################################\n# Gather input data from user\nprint(\"\\nThis script will collect data from public posts on well-known Q&A sites like stackoverflow.com\\n\"\n \" These are the available programming languages:\\n\")\n\n# variables on the contents of the menu:\nlangList = ['sql\\t\\t', 'javascript', 'python\\t', 'r\\t\\t', 'matlab\\t', 'shell\\t']\nlangShort = ['SQL', 'JS', 'PY', 'R', 'ML', 'SH']\nlanguage = ['SQL: a domain-specific declarative language for managing data in relational databases',\n 'JS: an imperative, high-level, event-driven programming language used in front end development',\n 'PY: an interpreted, imperative, high-level programming language, often used for scripting',\n 'R: a programming language used for numeric/statistical computing, graphics and data analysis',\n 'ML: a programming language used for numeric computing, plotting of functions and implementing algorithms',\n 'SH: a scripting language or program that automates the execution of tasks on a runtime system']\nn = len(langList)\noptions = {}\n\n# create a dictionary of options to later ensure the user inputs a valid choice\nfor index, item in enumerate(langList):\n print(f'{index} : {item} \\t--> {language[index]}.')\n options[str(item)] = str(language[index])\n\n# create variables to extract input from the while loop\noption = ''\noptDir = ''\noptName = ''\n# while the input is not within the range of acceptable answers keep looping\nwhile option not in options.keys():\n # ask the user for the programming language she/he is interested\n option = input(\"Choose a language to collect syntax and usage examples on a DB.\\n\"\n \"Select an index number from the menu above, else quit with 'q': \")\n # if case the user wants to quit\n if option == 'q':\n print('\\nTransaction cancelled\\n')\n print(f'O data downloaded')\n sys.exit()\n\n # if not 'q', the input from the user must be a digit (integer number)\n elif is_digit(option):\n # catch an error is the integer number provided is out of the range of available options\n try:\n # turn the index into short and long names for the chosen language\n m = int(option)\n langShort = langShort[m]\n optName = str(langList[m]).strip()\n # confirm the user wants to download the data, as well as the language option\n usr_input = input(f'You chose {optName}, do you want to continue? 
[y/n]: ').lower()\n\n # if the choice is confirm, ask for a subdirectory where to save the database (none: present dir)\n if usr_input == 'y':\n optDir = input('Enter a subdirectory where to save the data: ').strip(\"/\")\n break\n\n # If the user does not confirm his/her choice, then quit\n else:\n print('\\nTransaction cancelled\\n')\n print(f'O data downloaded')\n sys.exit()\n\n # index error: the digit provided is out of the range provided by the index\n except IndexError:\n print(f'\\nThere are {n} choices. Please choose only one of them.\\n')\n # If the user does not want to quit 'q', but has not supplied a digit, remind the correct input\n else:\n print(f'\\nYou must please enter one of the integer numbers indexing your choice.\\n')\n\n# Local variables\nurl = \"https://www.codegrepper.com/code-examples/\"+optName\nwd = os.path.dirname(os.path.abspath(__file__))+'/'+optDir+'/'\nos.chdir(wd)\n# prepare to establish a connection to an sqlite database\nconn = sqlite3.connect(f'SQLite_{langShort}_Syntax.db')\n# create a cursor to operate on the database\nc = conn.cursor()\nprint(\"Successfully Connected to SQLite\")\n\n# send a query to confirm, that the connection was successful\nsqlite_select_Query = \"select sqlite_version();\"\nc.execute(sqlite_select_Query)\nrecord = c.fetchall()\n# here is proof that it was successful\nprint(\"SQLite Database Version is: \", record, '\\n')\n\n# begin data collection by calling the python function web_data_html_scrap()\nprint(f'Collecting list of links to urls ...\\n')\nprint(f'Getting data from the web on {optName}!\\n')\n\nweb_data_html_scrap(url, langShort)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
byronyi/tensor2tensor | [
"b93fc036fdbacfddcadad8fb781f5b670533384e"
] | [
"tensor2tensor/trax/layers/core.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trax layers library.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport operator as op\n\nfrom jax import lax\n\nimport numpy as onp\nfrom six.moves import reduce\nfrom tensor2tensor.trax import backend\nfrom tensor2tensor.trax.backend import numpy as np\nfrom tensor2tensor.trax.layers import base\n\n# Following the convention used in Keras and tf.layers, we use CamelCase for the\n# names of layer constructors, like Conv and Relu, while using snake_case for\n# other functions, like lax.conv and relu. To allow this, we disable below.\n# pylint: disable=invalid-name\n\n\n# Initializers.\n\n\ndef RandomNormalInitializer(stddev=1e-2):\n \"\"\"An initializer function for random normal coefficients.\"\"\"\n def init(shape, rng):\n return (stddev * backend.random.normal(rng, shape)).astype('float32')\n return init\n\n\ndef GlorotNormalInitializer(out_dim=0, in_dim=1, scale=onp.sqrt(2)):\n \"\"\"An initializer function for random Glorot-scaled coefficients.\"\"\"\n def init(shape, rng):\n fan_in, fan_out = shape[in_dim], shape[out_dim]\n size = onp.prod(onp.delete(shape, [in_dim, out_dim]))\n std = scale / np.sqrt((fan_in + fan_out) / 2. 
* size)\n return (std * backend.random.normal(rng, shape)).astype('float32')\n return init\n\n\ndef XavierUniformInitializer(out_dim=0, in_dim=1):\n \"\"\"An initializer function for random uniform xavier-scaled coefficients.\"\"\"\n def init(shape, rng):\n fan_in, fan_out = shape[in_dim], shape[out_dim]\n std = np.sqrt(2.0 / (fan_in + fan_out))\n a = np.sqrt(3.0) * std\n return backend.random.uniform(rng, shape, minval=-a, maxval=a)\n return init\n\n\ndef one_hot(x, size, dtype=np.float32):\n \"\"\"Make a n+1 dim one-hot array from n dim int-categorical array.\"\"\"\n return np.array(x[..., np.newaxis] == np.arange(size), dtype)\n\n\n# Layers.\n\n\[email protected]()\ndef Relu(x, **unused_kwargs):\n return np.maximum(x, 0.)\n\n\[email protected]()\ndef Tanh(x, **unused_kwargs):\n return np.tanh(x)\n\n\[email protected]()\ndef Exp(x, **unused_kwargs):\n return np.exp(x)\n\n\[email protected]()\ndef LogSoftmax(x, params, axis=-1, **kwargs):\n \"\"\"Apply log softmax to x: log-normalize along the given axis.\"\"\"\n del params, kwargs\n return x - backend.logsumexp(x, axis, keepdims=True)\n\n\[email protected]()\ndef Softmax(x, params, axis=-1, **kwargs):\n \"\"\"Apply softmax to x: exponentiate and normalize along the given axis.\"\"\"\n del params, kwargs\n return np.exp(x - backend.logsumexp(x, axis, keepdims=True))\n\n\[email protected]()\ndef Softplus(x, **unused_kwargs):\n return np.logaddexp(x, 0.)\n\n\nclass Dense(base.Layer):\n \"\"\"Layer constructor function for a dense (fully-connected) layer.\"\"\"\n\n def __init__(self, units,\n kernel_initializer=GlorotNormalInitializer(),\n bias_initializer=RandomNormalInitializer(1e-6)):\n super(Dense, self).__init__()\n self._units = units\n self._kernel_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n\n def call(self, x, params, **kwargs):\n del kwargs\n w, b = params\n return np.dot(x, w) + b\n\n def output_shape(self, input_shape):\n return tuple(input_shape[:-1]) + (self._units,)\n\n def new_parameters(self, input_shape, rng):\n w = self._kernel_initializer((input_shape[-1], self._units), rng)\n b = self._bias_initializer((self._units,), rng)\n return (w, b)\n\n\nclass Embedding(base.Layer):\n \"\"\"Layer constructor function for an embedding layer.\"\"\"\n\n def __init__(self, feature_depth, vocab_size,\n kernel_initializer=XavierUniformInitializer()):\n super(Embedding, self).__init__()\n self._feature_depth = feature_depth\n self._vocab_size = vocab_size\n self._kernel_initializer = kernel_initializer\n\n def call(self, x, params, **kwargs):\n del kwargs\n return np.take(params, x, axis=0)\n\n def output_shape(self, input_shape):\n return tuple(input_shape) + (self._feature_depth,)\n\n def new_parameters(self, input_shape, rng):\n return self._kernel_initializer(\n (self._vocab_size, self._feature_depth), rng)\n\n\ndef padtype_to_pads(in_shape, window_shape, window_strides, padding):\n \"\"\"Convert padding string to list of pairs of pad values.\"\"\"\n padding = padding.upper()\n if padding == 'SAME':\n out_shape = onp.ceil(\n onp.true_divide(in_shape, window_strides)).astype(int)\n pad_sizes = [max((out_size - 1) * stride + window_shape - in_size, 0)\n for out_size, stride, window_shape, in_size\n in zip(out_shape, window_strides, window_shape, in_shape)]\n return [(pad_size // 2, pad_size - pad_size // 2)\n for pad_size in pad_sizes]\n elif padding == 'VALID':\n return [(0, 0)] * len(in_shape)\n else:\n msg = 'Unknown padding type: {}.'\n raise TypeError(msg.format(padding))\n\n\nclass 
Conv(base.Layer):\n \"\"\"Layer constructor function for a general convolution layer.\"\"\"\n\n def __init__(self, filters, kernel_size, strides=None, padding='VALID',\n dimension_numbers=('NHWC', 'HWIO', 'NHWC'),\n kernel_initializer=None,\n bias_initializer=RandomNormalInitializer(1e-6)):\n super(Conv, self).__init__()\n self._filters = filters\n self._kernel_size = kernel_size\n self._padding = padding\n self._dimension_numbers = dimension_numbers\n self._lhs_spec, self._rhs_spec, self._out_spec = dimension_numbers\n self._one = (1,) * len(kernel_size)\n self._strides = strides or self._one\n self._bias_initializer = bias_initializer\n rhs_spec = self._rhs_spec\n self._kernel_initializer = kernel_initializer or GlorotNormalInitializer(\n rhs_spec.index('O'), rhs_spec.index('I'))\n\n def call(self, x, params=(), **kwargs):\n del kwargs\n w, b = params\n return lax.conv_general_dilated(\n x, w, self._strides, self._padding, self._one, self._one,\n self._dimension_numbers) + b\n\n def _kernel_shape(self, input_shape):\n \"\"\"Helper to calculate the kernel shape.\"\"\"\n kernel_size_iter = iter(self._kernel_size)\n return [self._filters if c == 'O' else\n input_shape[self._lhs_spec.index('C')] if c == 'I' else\n next(kernel_size_iter) for c in self._rhs_spec]\n\n def _conv_shape_tuple(self, lhs_shape, rhs_shape, strides, pads):\n \"\"\"Compute the shape of a conv given input shapes in canonical order.\"\"\"\n if isinstance(pads, str):\n pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)\n if len(pads) != len(lhs_shape) - 2:\n msg = 'Wrong number of explicit pads for conv: expected {}, got {}.'\n raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))\n lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads)))\n out_space = onp.floor_divide(\n onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1\n out_space = onp.maximum(0, out_space)\n out_shape = (lhs_shape[0], rhs_shape[0]) + tuple(out_space)\n return tuple(out_shape)\n\n def _conv_general_permutations(self, dimension_numbers):\n \"\"\"Utility for convolution dimension permutations relative to Conv HLO.\"\"\"\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n lhs_char, rhs_char, out_char = ('N', 'C'), ('O', 'I'), ('N', 'C')\n charpairs = (lhs_char, rhs_char, out_char)\n for i, (a, b) in enumerate(charpairs):\n if not (dimension_numbers[i].count(a) == 1 and\n dimension_numbers[i].count(b) == 1):\n msg = ('convolution dimension_numbers[{}] must contain the characters '\n '\"{}\" and \"{}\" exatly once, got {}.')\n raise TypeError(msg.format(i, a, b, dimension_numbers[i]))\n if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):\n msg = ('convolution dimension_numbers[{}] cannot have duplicate '\n 'characters, got {}.')\n raise TypeError(msg.format(i, dimension_numbers[i]))\n if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==\n set(out_spec) - set(out_char)):\n msg = ('convolution dimension_numbers elements must each have the same '\n 'set of spatial characters, got {}.')\n raise TypeError(msg.format(dimension_numbers))\n\n def getperm(spec, charpair):\n spatial = (i for i, c in enumerate(spec) if c not in charpair)\n if spec is not rhs_spec:\n spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))\n return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)\n\n lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)\n return lhs_perm, rhs_perm, out_perm\n\n def _conv_general_shape_tuple(self, lhs_shape, rhs_shape, window_strides,\n padding, 
dimension_numbers):\n \"\"\"Generalized computation of conv shape.\"\"\"\n lhs_perm, rhs_perm, out_perm = self._conv_general_permutations(\n dimension_numbers)\n lhs_trans = onp.take(lhs_shape, lhs_perm)\n rhs_trans = onp.take(rhs_shape, rhs_perm)\n out_trans = self._conv_shape_tuple(\n lhs_trans, rhs_trans, window_strides, padding)\n return tuple(onp.take(out_trans, onp.argsort(out_perm)))\n\n def output_shape(self, input_shape):\n kernel_shape = self._kernel_shape(input_shape)\n return self._conv_general_shape_tuple(\n input_shape, kernel_shape,\n self._strides, self._padding, self._dimension_numbers)\n\n def new_parameters(self, input_shape, rng):\n kernel_shape = self._kernel_shape(input_shape)\n bias_shape = [self._filters if c == 'C' else 1 for c in self._out_spec]\n bias_shape = tuple(itertools.dropwhile(lambda x: x == 1, bias_shape))\n w = self._kernel_initializer(kernel_shape, rng)\n b = self._bias_initializer(bias_shape, rng)\n return (w, b)\n\n\n# Flatten.\ndef _flatten_output_shape(input_shape, num_axis_to_keep=1):\n \"\"\"Output shape of a flatten layer.\"\"\"\n if num_axis_to_keep >= len(input_shape):\n raise ValueError(\n \"num_axis_to_keep[%d] should be less than input's rank[%d]\" %\n (num_axis_to_keep, len(input_shape)))\n return tuple(input_shape[:num_axis_to_keep]) + (\n reduce(op.mul, input_shape[num_axis_to_keep:], 1),)\n\n\[email protected](output_shape=_flatten_output_shape)\ndef Flatten(x, params, num_axis_to_keep=1, **kwargs):\n del params, kwargs\n return np.reshape(x, (x.shape[:num_axis_to_keep] + (-1,)))\n\n\n# Batch normalization.\ndef _batch_norm_new_params(input_shape, rng, axis=(0, 1, 2),\n center=True, scale=True, **kwargs):\n \"\"\"Helper to initialize batch norm params.\"\"\"\n del rng, kwargs\n axis = (axis,) if np.isscalar(axis) else axis\n shape = tuple(d for i, d in enumerate(input_shape) if i not in axis)\n beta = np.zeros(shape, dtype='float32') if center else ()\n gamma = np.ones(shape, dtype='float32') if scale else ()\n return (beta, gamma)\n\n\[email protected](new_parameters=_batch_norm_new_params)\ndef BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5,\n center=True, scale=True, **unused_kwargs):\n \"\"\"Layer construction function for a batch normalization layer.\"\"\"\n mean = np.mean(x, axis, keepdims=True)\n # Fast but less numerically-stable variance calculation than np.var.\n m1 = np.mean(x**2, axis, keepdims=True)\n var = m1 - mean**2\n z = (x - mean) / np.sqrt(var + epsilon)\n\n # Expand the parameters to have the right axes.\n beta, gamma = params\n # TODO(phawkins): np.expand_dims should accept an axis tuple.\n # (https://github.com/numpy/numpy/issues/12290)\n ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x)))\n beta = beta[ed]\n gamma = gamma[ed]\n\n # Return the z rescaled by the parameters if requested.\n if center and scale:\n return gamma * z + beta\n if center:\n return z + beta\n if scale:\n return gamma * z\n return z\n\n\n# Pooling.\ndef _pooling_output_shape(input_shape, pool_size=(2, 2),\n strides=None, padding='VALID'):\n \"\"\"Helper: compute the output shape for the pooling layer.\"\"\"\n dims = (1,) + pool_size + (1,) # NHWC\n spatial_strides = strides or (1,) * len(pool_size)\n strides = (1,) + spatial_strides + (1,)\n pads = padtype_to_pads(input_shape, dims, strides, padding)\n operand_padded = onp.add(input_shape, onp.add(*zip(*pads)))\n t = onp.floor_divide(onp.subtract(operand_padded, dims), strides) + 1\n return tuple(t)\n\n\ndef _pooling_general(inputs, reducer, init_val, 
rescaler=None,\n pool_size=(2, 2), strides=None, padding='VALID'):\n \"\"\"Helper: general pooling computation used in pooling layers later.\"\"\"\n spatial_strides = strides or (1,) * len(pool_size)\n rescale = rescaler(pool_size, spatial_strides, padding) if rescaler else None\n dims = (1,) + pool_size + (1,) # NHWC\n strides = (1,) + spatial_strides + (1,)\n out = lax.reduce_window(inputs, init_val, reducer, dims, strides, padding)\n return rescale(out, inputs) if rescale else out\n\n\[email protected](output_shape=_pooling_output_shape)\ndef MaxPool(x, params, pool_size=(2, 2), strides=None, padding='VALID', **kw):\n del params, kw\n return _pooling_general(x, lax.max, -np.inf, pool_size=pool_size,\n strides=strides, padding=padding)\n\n\[email protected](output_shape=_pooling_output_shape)\ndef SumPool(x, params, pool_size=(2, 2), strides=None, padding='VALID', **kw):\n del params, kw\n return _pooling_general(x, lax.add, 0., pool_size=pool_size,\n strides=strides, padding=padding)\n\n\ndef _normalize_by_window_size(dims, spatial_strides, padding):\n def rescale(outputs, inputs):\n one = np.ones(inputs.shape[1:-1], dtype=inputs.dtype)\n window_sizes = lax.reduce_window(\n one, 0., lax.add, dims, spatial_strides, padding)\n return outputs / window_sizes[..., np.newaxis]\n return rescale\n\n\[email protected](output_shape=_pooling_output_shape)\ndef AvgPool(x, params, pool_size=(2, 2), strides=None, padding='VALID', **kw):\n del params, kw\n return _pooling_general(x, lax.add, 0., _normalize_by_window_size,\n pool_size, strides=strides, padding=padding)\n\n\[email protected]()\ndef Dropout(x, params, rate=0.0, mode='train', rng=None, **kwargs):\n \"\"\"Layer construction function for a dropout layer with given rate.\"\"\"\n del params, kwargs\n if rng is None:\n msg = ('Dropout layer requires apply_fun to be called with a rng keyword '\n 'argument. That is, instead of `Dropout(params, inputs)`, call '\n 'it like `Dropout(params, inputs, rng=key)`.')\n raise ValueError(msg)\n if rate >= 1.0:\n raise ValueError('Dropout rate (%f) must be lower than 1.' % rate)\n if mode == 'train' and rate > 0.0:\n keep = backend.random.bernoulli(rng, 1.0 - rate, x.shape)\n return np.where(keep, x / (1.0 - rate), 0)\n else:\n return x\n\n\[email protected]()\ndef Div(x, params, divisor=1.0, **kwargs):\n del params, kwargs\n return x / divisor\n\n\n# Mean.\ndef _mean_output_shape(input_shape, axis=-1, keepdims=False):\n shape1 = list(input_shape)[:axis] # Shape before axis.\n shape2 = list(input_shape)[axis:][1:] # Shape after axis.\n mid_shape = [1] if keepdims else []\n return tuple(shape1 + mid_shape + shape2)\n\n\[email protected](output_shape=_mean_output_shape)\ndef Mean(x, params, axis=-1, keepdims=False, **kwargs):\n del params, kwargs\n return np.mean(x, axis=axis, keepdims=keepdims)\n\n\[email protected]()\ndef ShiftRight(x, **unused_kwargs):\n \"\"\"Layer to shift the tensor to the right by padding on axis 1.\"\"\"\n pad_widths = [(0, 0), (1, 0)]\n pad_widths += [(0, 0) for _ in range(len(x.shape) - 2)]\n padded = np.pad(x, pad_widths, mode='constant')\n return padded[:, :-1, ...]\n"
] | [
[
"numpy.true_divide",
"numpy.maximum",
"numpy.take",
"numpy.sqrt",
"numpy.subtract",
"numpy.delete",
"numpy.argsort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PawelRosikiewicz/SkinDiagnosticAI | [
"7cc7b7a9ccd4103095a7548e7b99de4988858356"
] | [
"src/utils/image_augmentation.py"
] | [
"# ********************************************************************************** #\n# #\n# Project: FastClassAI workbecnch # \n# # \n# Author: Pawel Rosikiewicz #\n# Contact: prosikiewicz_gmail.com #\n# #\n# This notebook is a part of Skin AanaliticAI development kit, created #\n# for evaluation of public datasets used for skin cancer detection with #\n# large number of AI models and data preparation pipelines. #\n# # \n# License: MIT #\n# Copyright (C) 2021.01.30 Pawel Rosikiewicz #\n# https://opensource.org/licenses/MIT # \n# #\n# ********************************************************************************** #\n\n\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os # allow changing, and navigating files and folders, \nimport sys\nimport re # module to use regular expressions, \nimport glob # lists names in folders that match Unix shell patterns\nimport random # functions that use and generate random numbers\n\nimport numpy as np # support for multi-dimensional arrays and matrices\nimport pandas as pd # library for data manipulation and analysis\nimport seaborn as sns # advance plots, for statistics, \nimport matplotlib.pyplot as plt # for making plots, \nimport matplotlib as mpl # to get some basif functions, heping with plot mnaking \nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport scipy.stats as stats # library for statistics and technical programming, \nimport tensorflow.keras as keras \n\nfrom PIL import Image, ImageDraw\nfrom IPython.display import display\nfrom tensorflow.keras import backend as K # used for housekeeping of tf models,\n\nimport matplotlib.patches as mpatches\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\n\n\n# Function ................................................................................\n\ndef create_augmented_images(*, external_generator, augm_img_nr=10, paramsforgenerator=\"\"):\n \"\"\" \n Function that takes pictures in a batch, provided with keras generators\n and uses another generator.\n Secondarly, this function can be used to create dataframe with data on images in image batch\n if, augm_img_nr is set 0, \n \n external_generator : iterator, based on keras image generator\n the function was designed to work with all images in a given dataset\n provided as one batch,\n \n augm_img_nr : the number of augment images that will be created \n for each image, if augm_img_nr=0, no augmented images will be created, \n but both array, and dataframe will be returned, \n \n paramsforgenerator : dictionary, with parameters for image generator,\n used for image augmentation, \n \n Returns : numpy array with img batch, [?, pixel_size, pixel_size, 3]\n pandas dataframe, with rows corresponding to each image in the batch, \n and following columns: \n class = foldername in data directory, imagename= original image name, \n imgtype={'raw', 'aug'}, imgidnumber=0, foir raw, >=1 for augmented images\n \"\"\"\n\n # extract one batch with all images in a given dataset\n img_batch, batch_labels = next(external_generator)\n\n #.. 
create df, with class, image and image type names\n \"\"\" I will use this df, to create, new file with subdirectories, \n and save raw and augmented images with proper names\n \"\"\"\n img_filenames = pd.Series(external_generator.filenames).str.split(pat=\"/\", expand=True)\n img_filenames = pd.concat([img_filenames, pd.Series([\"raw\"]*img_filenames.shape[0]), pd.Series([0]*img_filenames.shape[0])], axis=1)\n img_filenames.columns = [\"classname\", \"imgname\", \"imgtype\", \"imgidnumber\" ]\n\n # in case, I just wish to use that function to get everythign in the same format, but not to generate augmented images\n if augm_img_nr==0: \n pass\n \n if augm_img_nr>0:\n \n # Create generator for image augmentation\n datagen = ImageDataGenerator(**paramsforgenerator)\n datagen.fit(img_batch)\n\n #.. prepare iterator, that will return all figures in a batch, one by one, \n # augm_datagen.fit(img_batch)\n datagen_iter = datagen.flow(img_batch, batch_size=1, shuffle=False) \n\n\n # Create n augmented figures for each image in gthe batch, \n aug_img_filenames = list()\n for i in range(augm_img_nr):\n for j in range(img_batch.shape[0]):\n # create augmented figure, and add to new batch\n one_img = datagen_iter.next()\n if i+j==0: \n batch_img_augm = one_img\n else: \n batch_img_augm = np.r_[batch_img_augm, one_img]\n\n # save name and id for that image\n aug_img_filenames.append({\n \"classname\" : img_filenames.iloc[j,0],\n \"imgname\": img_filenames.iloc[j,1], \n \"imgtype\": \"aug\",\n \"imgidnumber\": i+1}) \n \n # create new batch and df with labels and filenames to return,\n img_filenames = pd.concat([img_filenames,pd.DataFrame(aug_img_filenames)], axis=0, sort=False).reset_index(drop=True)\n img_batch = np.r_[img_batch, batch_img_augm]\n \n #print(img_filenames.shape, img_batch.shape)\n return img_batch, img_filenames\n \n \n \n \n \n# Function ................................................................................\n \ndef save_augmented_images(*,\n datasetname, img_batch, batch_info, savedir, verbose=False):\n\n \"\"\"\n 1) creates save directory, with subdirectories for saving classified images\n 2) saves images as png, that were stored in img_batch\n \n datasetname : str, eg {\"test\", \"train\"}\n img_batch. 
: numpy array [?, pixel_nr, pixel_nr, 3], contains rgb pictures \n on scale [0-255]\n batch_info : data frame with info on each image in img_batch\n created with create_augmented_images()\n savedir : full path to directory, where all classes should be stored, \n verbose : default = False, \n \"\"\"\n\n # check if savedir exist, if not create it\n try: os.chdir(savedir)\n except: os.mkdir(savedir)\n\n # create directories with provided datasetname\n os.chdir(savedir)\n try: os.mkdir(datasetname)\n except: pass\n\n # create directories for each class\n os.chdir(os.path.join(savedir, datasetname))\n for dirname in list(batch_info.classname.unique()):\n try: os.mkdir(dirname)\n except: pass \n\n # save each images in img_batch with proper name in corresponing class/directory\n for i in range(img_batch.shape[0]):\n img_info = batch_info.iloc[i,:]\n\n # img name\n if img_info.imgtype==\"raw\":\n img_name = f\"{img_info.imgtype}_{img_info.imgname}\"\n if img_info.imgtype!=\"raw\":\n img_name = f\"{img_info.imgtype}{img_info.imgidnumber}_{img_info.imgname}\"\n\n # saving, \n try:\n mpl.image.imsave(os.path.join(savedir, datasetname, img_info.classname, img_name), \n np.array(img_batch[i], dtype=int)\n ) # [0-255] must be int, \n except: \n pass\n\n # info,\n if verbose==True:\n print(f\"{img_batch.shape[0]} images were saved\")\n print(f\"in {savedir}\")\n print(f\"in following files for each classe: {list(batch_info.classname.unique())}\")\n\n"
] | [
[
"numpy.array",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
Xlinford/ContrastiveSeg | [
"79eec700d2efdaad4da8cf0c07674107e72078da"
] | [
"lib/models/backbones/resnet/wide_resnet_models.py"
] | [
"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport math\r\nimport torch.nn as nn\r\nfrom collections import OrderedDict\r\nfrom functools import partial\r\n\r\nfrom lib.models.tools.module_helper import ModuleHelper \r\n\r\n\r\n\r\nclass GlobalAvgPool2d(nn.Module):\r\n def __init__(self):\r\n \"\"\"Global average pooling over the input's spatial dimensions\"\"\"\r\n super(GlobalAvgPool2d, self).__init__()\r\n\r\n def forward(self, inputs):\r\n in_size = inputs.size()\r\n return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)\r\n\r\n\r\nclass IdentityResidualBlock(nn.Module):\r\n def __init__(self,\r\n in_channels,\r\n channels,\r\n stride=1,\r\n dilation=1,\r\n groups=1,\r\n bn_type=None,\r\n dropout=None):\r\n \"\"\"Configurable identity-mapping residual block\r\n\r\n Parameters\r\n ----------\r\n in_channels : int\r\n Number of input channels.\r\n channels : list of int\r\n Number of channels in the internal feature maps. Can either have two or three elements: if three construct\r\n a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then\r\n `3 x 3` then `1 x 1` convolutions.\r\n stride : int\r\n Stride of the first `3 x 3` convolution\r\n dilation : int\r\n Dilation to apply to the `3 x 3` convolutions.\r\n groups : int\r\n Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with\r\n bottleneck blocks.\r\n bn_type : callable\r\n Function to create normalization / activation Module.\r\n dropout: callable\r\n Function to create Dropout Module.\r\n \"\"\"\r\n super(IdentityResidualBlock, self).__init__()\r\n\r\n # Check parameters for inconsistencies\r\n if len(channels) != 2 and len(channels) != 3:\r\n raise ValueError(\"channels must contain either two or three values\")\r\n if len(channels) == 2 and groups != 1:\r\n raise ValueError(\"groups > 1 are only valid if len(channels) == 3\")\r\n\r\n is_bottleneck = len(channels) == 3\r\n need_proj_conv = stride != 1 or in_channels != channels[-1]\r\n\r\n self.bn1 = ModuleHelper.BNReLU(in_channels, bn_type=bn_type)\r\n if not is_bottleneck:\r\n layers = [\r\n (\"conv1\", nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False,\r\n dilation=dilation)),\r\n (\"bn2\", ModuleHelper.BNReLU(channels[0], bn_type=bn_type)),\r\n (\"conv2\", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False,\r\n dilation=dilation))\r\n ]\r\n if dropout is not None:\r\n layers = layers[0:2] + [(\"dropout\", dropout())] + layers[2:]\r\n else:\r\n layers = [\r\n (\"conv1\", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)),\r\n (\"bn2\", ModuleHelper.BNReLU(channels[0], bn_type=bn_type)),\r\n (\"conv2\", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False,\r\n groups=groups, dilation=dilation)),\r\n (\"bn3\", ModuleHelper.BNReLU(channels[1], bn_type=bn_type)),\r\n (\"conv3\", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False))\r\n ]\r\n if dropout is not None:\r\n layers = layers[0:4] + [(\"dropout\", dropout())] + layers[4:]\r\n self.convs = nn.Sequential(OrderedDict(layers))\r\n\r\n if need_proj_conv:\r\n self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False)\r\n\r\n def forward(self, x):\r\n if hasattr(self, \"proj_conv\"):\r\n bn1 = self.bn1(x)\r\n shortcut = self.proj_conv(bn1)\r\n else:\r\n shortcut = x.clone()\r\n bn1 = 
self.bn1(x)\r\n\r\n out = self.convs(bn1)\r\n out.add_(shortcut)\r\n\r\n return out\r\n\r\n\r\nclass WiderResNetA2(nn.Module):\r\n def __init__(self,\r\n structure=[3, 3, 6, 3, 1, 1],\r\n bn_type=None,\r\n classes=0,\r\n dilation=True):\r\n \"\"\"Wider ResNet with pre-activation (identity mapping) blocks\r\n\r\n This variant uses down-sampling by max-pooling in the first two blocks and by strided convolution in the others.\r\n\r\n Parameters\r\n ----------\r\n structure : list of int\r\n Number of residual blocks in each of the six modules of the network.\r\n bn_type : callable\r\n Function to create normalization / activation Module.\r\n classes : int\r\n If not `0` also include global average pooling and a fully-connected layer with `classes` outputs at the end\r\n of the network.\r\n dilation : bool\r\n If `True` apply dilation to the last three modules and change the down-sampling factor from 32 to 8.\r\n \"\"\"\r\n super(WiderResNetA2, self).__init__()\r\n self.structure = structure\r\n self.dilation = dilation\r\n\r\n if len(structure) != 6:\r\n raise ValueError(\"Expected a structure with six values\")\r\n\r\n # Initial layers\r\n self.mod1 = nn.Sequential(OrderedDict([\r\n (\"conv1\", nn.Conv2d(3, 64, 3, stride=1, padding=1, bias=False))\r\n ]))\r\n\r\n # Groups of residual blocks\r\n in_channels = 64\r\n channels = [(128, 128), (256, 256), (512, 512), (512, 1024), (512, 1024, 2048), (1024, 2048, 4096)]\r\n for mod_id, num in enumerate(structure):\r\n # Create blocks for module\r\n blocks = []\r\n for block_id in range(num):\r\n if not dilation:\r\n dil = 1\r\n stride = 2 if block_id == 0 and 2 <= mod_id <= 4 else 1\r\n else:\r\n if mod_id == 3:\r\n dil = 2\r\n elif mod_id > 3:\r\n dil = 4\r\n else:\r\n dil = 1\r\n stride = 2 if block_id == 0 and mod_id == 2 else 1\r\n\r\n if mod_id == 4:\r\n drop = None\r\n elif mod_id == 5:\r\n drop = None\r\n else:\r\n drop = None\r\n\r\n blocks.append((\r\n \"block%d\" % (block_id + 1),\r\n IdentityResidualBlock(in_channels, channels[mod_id], bn_type=bn_type, stride=stride, dilation=dil,\r\n dropout=drop)\r\n ))\r\n\r\n # Update channels and p_keep\r\n in_channels = channels[mod_id][-1]\r\n\r\n # Create module\r\n if mod_id < 2:\r\n self.add_module(\"pool%d\" % (mod_id + 2), nn.MaxPool2d(3, stride=2, padding=1, ceil_mode=True))\r\n self.add_module(\"mod%d\" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))\r\n\r\n self.bn_out = ModuleHelper.BNReLU(in_channels, bn_type=bn_type)\r\n\r\n\r\n def forward(self, img):\r\n tuple_features = list()\r\n out = self.mod1(img)\r\n out = self.mod2(self.pool2(out))\r\n out = self.mod3(self.pool3(out))\r\n out = self.mod4(out)\r\n tuple_features.append(out)\r\n out = self.mod5(out)\r\n tuple_features.append(out)\r\n out = self.mod6(out)\r\n tuple_features.append(out)\r\n out = self.mod7(out)\r\n out = self.bn_out(out)\r\n tuple_features.append(out)\r\n return tuple_features\r\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.Conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bbreton3/glove_tf_21 | [
"16b18bdb2d41c104dcd9159c0a760336bb5fd4d1",
"16b18bdb2d41c104dcd9159c0a760336bb5fd4d1"
] | [
"tests/test_preprocessing_glove.py",
"src/glove_tf_21/callbacks/lr_tensorboard_callback.py"
] | [
"from glove_tf_21.utils.file_utils import save_labels\n\nimport numpy as np\nimport os\n\n\ndef test_cooc_count(preprocessing_glove, ix_sequences_full, cooc_dict):\n output_cooc = dict()\n for ix_seq in ix_sequences_full:\n output_cooc = preprocessing_glove.cooc_count(output_cooc, ix_seq)\n\n assert len(output_cooc) == len(cooc_dict)\n\n for key, val in cooc_dict.items():\n assert np.allclose(output_cooc[key], val)\n\n\ndef test_cooc_dict_to_sparse(preprocessing_glove_fit, cooc_dict, cooc_matrix_sparse):\n sparse_cooc_mat = preprocessing_glove_fit.cooc_dict_to_sparse(cooc_dict)\n assert np.sum(sparse_cooc_mat != cooc_matrix_sparse) == 0.0\n\n\ndef test_glove_formatter(preprocessing_glove, cooc_matrix_sparse, cooc_rows, cooc_cols, cooc_data):\n test_cooc_rows, test_cooc_cols, test_cooc_data = preprocessing_glove.glove_formatter(cooc_matrix_sparse)\n\n assert np.allclose(test_cooc_rows, cooc_rows)\n assert np.allclose(test_cooc_cols, cooc_cols)\n assert np.allclose(test_cooc_data, cooc_data)\n\n\ndef test_get_labels(preprocessing_glove_fit, vocab):\n assert preprocessing_glove_fit.get_labels() == vocab\n\n\ndef test_get_cooc_mat(preprocessing_glove_fit, corpus_file_path, cooc_matrix_sparse, temp_folder_path):\n test_cooc_matrix_sparse = preprocessing_glove_fit.get_cooc_mat(corpus_file_path)\n assert np.sum(test_cooc_matrix_sparse != cooc_matrix_sparse) == 0.0\n\n empty_file_path = os.path.join(temp_folder_path, \"empty_file.txt\")\n save_labels([\"\"], empty_file_path)\n assert np.sum(preprocessing_glove_fit.get_cooc_mat(empty_file_path)) == 0.0\n\n os.remove(empty_file_path)\n\n\ndef test_call(preprocessing_glove_fit):\n\n cooc_rows, cooc_cols, cooc_data, cooc = preprocessing_glove_fit()\n\n assert len(cooc_rows) == 40\n assert len(cooc_cols) == 40\n assert len(cooc_data) == 40\n",
"import tensorflow as tf\n\n\nclass LrTensorboardCallback(tf.keras.callbacks.TensorBoard):\n \"\"\"\n Inspired from : https://stackoverflow.com/questions/49127214/keras-how-to-output-learning-rate-onto-tensorboard\n\n This class adds the learning rate to the tensorboard\n \"\"\"\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n logs.update({'lr': tf.keras.backend.eval(self.model.optimizer.lr)})\n super().on_epoch_end(epoch, logs)\n"
] | [
[
"numpy.sum",
"numpy.allclose"
],
[
"tensorflow.keras.backend.eval"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
CitizenScienceInAstronomyWorkshop/pyIBCC | [
"35215648f3361689e374780182f39182eddda64f"
] | [
"python/tests/ibcc_test.py"
] | [
"'''\nCreated on 8 Apr 2015\n\n@author: edwin\n'''\nimport unittest\nimport ibcc\nimport logging\nimport numpy as np\nfrom dynibcc import DynIBCC\nfrom ibcc_balanced import BalancedIBCC\n\ndef check_accuracy(pT, target_acc, goldfile='./data/gold_verify.csv'):\n # check values are in tolerance range\n gold = np.genfromtxt(goldfile)\n decisions = np.round(pT[:,1]) \n errors = np.abs(gold-decisions)\n nerrors = np.nansum(errors)\n acc = np.round(1 - (nerrors/float(np.sum(np.isfinite(gold)))), decimals=5)\n logging.info( \"accuracy is %f, nerrors=%i\" % (acc, nerrors))\n assert acc==target_acc\n\ndef check_accuracy_multi(pT, target_acc, goldfile='./data/gold5_verify.csv'):\n # check values are in tolerance range\n gold = np.genfromtxt(goldfile)\n nerrors = 0\n for j in range(pT.shape[1]):\n decisions = np.round(pT[:,j])\n goldj = gold==j \n errors = np.abs(goldj-decisions)\n errors = errors[goldj]\n nerrors += np.nansum(errors)\n acc = np.round(1 - (nerrors/float(np.sum(np.isfinite(gold)))), decimals=5)\n logging.info( \"accuracy is %f, nerrors=%i\" % (acc, nerrors))\n assert acc==target_acc\n \ndef check_outputsize(pT, combiner, shape=(2,2,5), ptlength=100):\n # check output has right number of data points\n assert pT.shape[0]==ptlength\n logging.info(\"Alpha shape: \" + str(combiner.alpha.shape))\n assert combiner.alpha.shape == shape\n \nclass Test(unittest.TestCase):\n def testSparseList_noGold(self):\n configFile = './config/sparse_nogold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.82, goldfile='./data/gold_mixed_verify.csv')\n \n def testTable_noGold(self):\n # Crowdlabels contains some NaNs and some -1s.\n configFile = './config/table_nogold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.82) \n \n def testSparseList_withGold(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95)\n \n def testTable_withGold(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95) \n \n def testSparseList_shortGold(self):\n #Gold labels is shorter than the no. crowd-labelled data points\n configFile = './config/sparse_shortgold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95)\n \n def testTable_shortGold(self):\n #Gold labels is shorter than the no. crowd-labelled data points\n configFile = './config/table_shortgold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95) \n \n def testSparseList_shortGoldMatrix(self):\n #Gold labels is shorter than the no. crowd-labelled data points\n configFile = './config/sparse_shortgoldmat.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.94, goldfile='./data/gold_mixed_verify.csv')\n \n def testTable_shortGoldMatrix(self):\n #Gold labels is shorter than the no. 
crowd-labelled data points\n configFile = './config/table_shortgoldmat.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.94, goldfile='./data/gold_mixed_verify.csv') \n \n def testSparseList_withGold_5classes(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1)\n \n def testTable_withGold_5classes(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1) \n \n def testSparseList_lowerbound(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold_lowerbound.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner)\n check_accuracy(pT, 0.95)\n \n def testTable_lowerbound_5classes(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5_lowerbound.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1) \n \n# Dynamic IBCC---------------------------------------------------------------------------------------------------------\n \n def test_SparseList_withGold_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner, (2,2,375))\n check_accuracy(pT, 0.93)\n \n def test_Table_withGold_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner,(2,2,500))\n check_accuracy(pT, 0.94) \n \n def test_SparseList_withGold_5classes_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner,(5,5,500))\n check_accuracy_multi(pT, 1)\n \n def test_Table_withGold_5classes_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner, (5,5,500))\n check_accuracy_multi(pT, 1) \n \n def test_SparseList_lowerbound_dyn(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/sparse_gold_lowerbound.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner, (2,2,375))\n check_accuracy(pT, 0.93)\n \n def test_Table_lowerbound_5classes_dyn(self):\n #Gold labels is longer than the no. 
crowd-labelled data points\n configFile = './config/table_gold5_lowerbound.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=DynIBCC)\n check_outputsize(pT, combiner, (5,5,500))\n check_accuracy_multi(pT, 1) \n \n# BALANCED IBCC -------------------------------------------------------------------------------------------------------\n \n def testSparseList_balanced(self):\n configFile = './config/sparse_nogold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=BalancedIBCC)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.82, goldfile='./data/gold_mixed_verify.csv')\n \n def testTable_withGold_5classes_balanced(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=BalancedIBCC)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1) \n \n# OPTIMIZATION --------------------------------------------------------------------------------------------------------\n\n def testSparseList_opt(self):\n configFile = './config/sparse_nogold.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None, optimise_hyperparams=True)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.82, goldfile='./data/gold_mixed_verify.csv')\n \n def testTable_withGold_5classes_opt(self):\n #Gold labels is longer than the no. crowd-labelled data points\n configFile = './config/table_gold5.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None, optimise_hyperparams=True)\n check_outputsize(pT, combiner, (5,5,5))\n check_accuracy_multi(pT, 1) \n \n# SCORES NOT FROM 0 ---------------------------------------------------------------------------------------------------\n \n def testSparseList_scores(self):\n configFile = './config/sparse_nogold_mixscores.py'\n pT, combiner = ibcc.load_and_run_ibcc(configFile, ibcc_class=None)\n check_outputsize(pT, combiner, ptlength=199)\n check_accuracy(pT, 0.82, goldfile='./data/gold_mixed_verify.csv')\n\n# SETUP ETC. ----------------------------------------------------------------------------------------------------------\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n logging.info(\"TEST: \"+self._testMethodName)\n\n# TODO list -----------------------------------------------------------------------------------------------------------\n# Add tests for when scores are not consecutive from 0\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testSparseList']\n unittest.main()"
] | [
[
"numpy.abs",
"numpy.isfinite",
"numpy.genfromtxt",
"numpy.round",
"numpy.nansum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
klarman-cell-observatory/PowerAnalysisForSpatialOmics | [
"257e5663bb5476c7d9a22230741b5507fd621352"
] | [
"scripts/random_self_pref_cluster.py"
] | [
"from glob import glob\nimport numpy as np\nimport scipy.sparse as sparse\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport operator\nfrom spatialpower.tissue_generation import assign_labels\nfrom spatialpower.tissue_generation import visualization\n\nresults_dir = './results/motif_detection/'\nadj_mat_list = np.sort(glob(results_dir + 'blank_graph_network*.npy'))\npos_mat_list = np.sort(glob(results_dir + 'blank_graph_positions*.npy'))\n\ndim = 300\n\n##RANDOM##\ncell_type_probabilities = np.ones(10) * 0.1\nneighborhood_probabilities = np.ones((10,10)) * 0.1 \nn_cell_types = len(cell_type_probabilities)\n\nfor ii in range(0, len(adj_mat_list)):\n A = np.load(adj_mat_list[ii])\n C = np.load(pos_mat_list[ii])\n \n j = adj_mat_list[ii].split('_')[-1].split('.')[0]\n # Blank assignment structure\n n_cell_types = len(cell_type_probabilities)\n position_dict = dict()\n for i in range(0, C.shape[0]):\n position_dict[i] = C[i, :]\n\n graph = nx.from_numpy_matrix(A)\n node_id_list = list(graph.nodes)\n attribute_dict = dict(zip(node_id_list, [-1 for i in graph.nodes]))\n \n attribute_dict = assign_labels.heuristic_assignment(graph, cell_type_probabilities, neighborhood_probabilities, 'region', dim, position_dict)\n observed_cell_type_dist, kl = assign_labels.check_cell_type_dist(n_cell_types, attribute_dict, cell_type_probabilities)\n observed_neighborhood_dist, kl_neighbor = assign_labels.check_neighborhood_dist(n_cell_types, attribute_dict, neighborhood_probabilities, graph, 1)\n B = assign_labels.build_assignment_matrix(attribute_dict, n_cell_types)\n np.save(results_dir + 'random_B_' + str(j) + '.npy', B)\n\n visualization.make_vor(dim, attribute_dict, position_dict, n_cell_types, results_dir, 'random_B_' + str(j), node_id_list)\n\n## High Self Preference ##\n'''cell_type_probabilities = [0.03, 0.11, 0.11, 0.11, 0.11, 0.11, 0.11, 0.10, 0.11, 0.10]\nneighborhood_probabilities = np.array([[0.50, 0.06, 0.06, 0.06, 0.06, 0.06, 0.05, 0.05, 0.05, 0.05],\n [0.06, 0.11, 0.11, 0.11, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.06, 0.11, 0.11, 0.11, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.06, 0.11, 0.11, 0.11, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.06, 0.11, 0.11, 0.11, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10],\n [0.06, 0.10, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11],\n [0.05, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11, 0.11],\n [0.05, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11, 0.11],\n [0.05, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11, 0.11],\n [0.05, 0.10, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.11, 0.11]])\nn_cell_types = len(cell_type_probabilities)\n\nfor ii in range(0, len(adj_mat_list)):\n A = np.load(adj_mat_list[ii])\n C = np.load(pos_mat_list[ii])\n j = adj_mat_list[ii].split('_')[-1].split('.')[0]\n\n # Blank assignment structure\n n_cell_types = len(cell_type_probabilities)\n position_dict = dict()\n for i in range(0, C.shape[0]):\n position_dict[i] = C[i, :]\n\n graph = nx.from_numpy_matrix(A)\n node_id_list = list(graph.nodes)\n attribute_dict = dict(zip(node_id_list, [-1 for i in graph.nodes]))\n \n attribute_dict = assign_labels.heuristic_assignment(graph, cell_type_probabilities, neighborhood_probabilities, 'region', dim, position_dict)\n\n preferred_node_type = 0 \n for i in list(graph.nodes):\n if attribute_dict[i] == preferred_node_type:\n #print(i)\n graph_distance = 1\n neighborhood = nx.ego_graph(graph, i, radius = graph_distance)\n neighborhood_nodes = list(neighborhood.nodes)\n\n # Now set the remaining probabilities in the region. 
\n\n for node in neighborhood_nodes:\n if node != i:\n attribute_dict[node] = assign_labels.sample_cell_type(neighborhood_probabilities[preferred_node_type])\n else:\n continue\n\n observed_cell_type_dist, kl = assign_labels.check_cell_type_dist(n_cell_types, attribute_dict, cell_type_probabilities)\n observed_neighborhood_dist, kl_neighbor = assign_labels.check_neighborhood_dist(n_cell_types, attribute_dict, neighborhood_probabilities, graph, 1)\n B = assign_labels.build_assignment_matrix(attribute_dict, n_cell_types)\n np.save(results_dir + 'selfpref_B_' + str(j) + '.npy', B)\n\n visualization.make_vor(dim, attribute_dict, position_dict, n_cell_types, results_dir, 'selfpref_B_' + str(j), node_id_list)'''\n\n## 3 Cell Motif ##\ncell_type_probabilities = [0.04, 0.04, 0.04, 0.13, 0.13, 0.13, 0.12, 0.12, 0.13, 0.12]\nneighborhood_probabilities = np.array([[0.15, 0.40, 0.15, 0.05, 0.05, 0.04, 0.04, 0.04, 0.04, 0.04],\n [0.40, 0.06, 0.40, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],\n [0.15, 0.40, 0.15, 0.05, 0.05, 0.04, 0.04, 0.04, 0.04, 0.04],\n [0.05, 0.02, 0.05, 0.13, 0.12, 0.13, 0.13, 0.13, 0.12, 0.12],\n [0.05, 0.02, 0.05, 0.12, 0.13, 0.13, 0.12, 0.12, 0.13, 0.13],\n [0.04, 0.02, 0.04, 0.13, 0.13, 0.13, 0.12, 0.13, 0.13, 0.13],\n [0.04, 0.02, 0.04, 0.13, 0.12, 0.12, 0.13, 0.13, 0.14, 0.13],\n [0.04, 0.02, 0.04, 0.13, 0.12, 0.13, 0.13, 0.12, 0.14, 0.13],\n [0.04, 0.02, 0.04, 0.12, 0.13, 0.13, 0.14, 0.14, 0.12, 0.12],\n [0.04, 0.02, 0.04, 0.12, 0.13, 0.13, 0.13, 0.13, 0.12, 0.14]])\nn_cell_types = len(cell_type_probabilities)\n\nfor ii in range(0, len(adj_mat_list)):\n A = np.load(adj_mat_list[ii])\n C = np.load(pos_mat_list[ii])\n j = adj_mat_list[ii].split('_')[-1].split('.')[0]\n\n # Blank assignment structure\n n_cell_types = len(cell_type_probabilities)\n position_dict = dict()\n for i in range(0, C.shape[0]):\n position_dict[i] = C[i, :]\n\n graph = nx.from_numpy_matrix(A)\n node_id_list = list(graph.nodes)\n attribute_dict = dict(zip(node_id_list, [-1 for i in graph.nodes]))\n \n attribute_dict = assign_labels.heuristic_assignment(graph, cell_type_probabilities, neighborhood_probabilities, 'region', dim, position_dict)\n\n #preferred_node_type = 0 \n for i in list(graph.nodes):\n if ((attribute_dict[i] == 0) or (attribute_dict[i] == 1) or (attribute_dict[i] == 2)):\n #print(i)\n graph_distance = 1\n neighborhood = nx.ego_graph(graph, i, radius = graph_distance)\n neighborhood_nodes = list(neighborhood.nodes)\n\n # Now set the remaining probabilities in the region. \n\n for node in neighborhood_nodes:\n if node != i:\n attribute_dict[node] = assign_labels.sample_cell_type(neighborhood_probabilities[attribute_dict[i]])\n else:\n continue\n\n observed_cell_type_dist, kl = assign_labels.check_cell_type_dist(n_cell_types, attribute_dict, cell_type_probabilities)\n observed_neighborhood_dist, kl_neighbor = assign_labels.check_neighborhood_dist(n_cell_types, attribute_dict, neighborhood_probabilities, graph, 1)\n B = assign_labels.build_assignment_matrix(attribute_dict, n_cell_types)\n np.save(results_dir + '3cellmotif_B_' + str(j) + '.npy', B)\n\n visualization.make_vor(dim, attribute_dict, position_dict, n_cell_types, results_dir, '3cellmotif_B_' + str(j), node_id_list)"
] | [
[
"numpy.load",
"numpy.array",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JiachengLi1995/UCTopic | [
"3875f2afbf6b99dfce2d5b5cd930976049746d41"
] | [
"topic_modeling/utils.py"
] | [
"import json\nimport torch\nfrom tqdm import tqdm\nfrom .consts import ARGS, DEVICE, TOKENIZER\n\n\ndef read_data(path):\n\tdata = []\n\twith open(path, encoding='utf8') as f:\n\t\tfor line in f:\n\t\t\tline = json.loads(line)\n\t\t\tdata.append(line)\n\n\treturn data\n\ndef batchify(sentence_dict, phrase_list_sampled, batch_size=32):\n\n\tbatches = []\n\tpointer = 0\n\ttotal_num = len(phrase_list_sampled)\n\twhile pointer < total_num:\n\t\ttext_batch = []\n\t\tspan_batch = []\n\n\t\tfor data_line in phrase_list_sampled[pointer:pointer+batch_size]:\n\n\t\t\tsent_id, start, end, phrase_lemma = data_line\n\t\t\ttext = sentence_dict[sent_id]\n\n\t\t\ttext_batch.append(text)\n\t\t\tspan_batch.append([(start, end)])\n\n\t\tbatches.append((text_batch, span_batch))\n\t\tpointer += batch_size\n\n\treturn batches\n\n\ndef get_features(sentence_dict, phrase_list, model, return_prob=False):\n\n\tall_features = []\n\n\tif return_prob:\n\t\tall_probs = []\n\n\tfor batch in tqdm(batchify(sentence_dict, phrase_list, ARGS.batch_size), ncols=100, desc='Generate all features...'):\n\n\t\ttext_batch, span_batch = batch\n\n\t\tinputs = TOKENIZER(text_batch, entity_spans=span_batch, padding=True, add_prefix_space=True, return_tensors=\"pt\")\n\n\t\tfor k,v in inputs.items():\n\t\t\tinputs[k] = v.to(DEVICE)\n\n\t\twith torch.no_grad():\n\t\t\tluke_outputs, entity_pooling = model(**inputs)\n\n\t\tif return_prob:\n\t\t\tmodel_prob = model.get_cluster_prob(entity_pooling)\n\n\t\t\tall_probs.append(model_prob.detach().cpu())\n\n\t\t\n\t\tall_features.append(entity_pooling.detach().cpu())\n\n\tall_features = torch.cat(all_features, dim=0)\n\tif return_prob:\n\t\tall_probs = torch.cat(all_probs, dim=0)\n\t\treturn all_features, all_probs\n\n\treturn all_features\n\n\ndef get_probs(sentence_dict, phrase_list, model):\n\n\tall_probs = []\n\n\tfor batch in tqdm(batchify(sentence_dict, phrase_list, ARGS.batch_size), ncols=100, desc='Generate all features...'):\n\n\t\ttext_batch, span_batch = batch\n\n\t\tinputs = TOKENIZER(text_batch, entity_spans=span_batch, padding=True, add_prefix_space=True, return_tensors=\"pt\")\n\n\t\tfor k,v in inputs.items():\n\t\t\tinputs[k] = v.to(DEVICE)\n\n\t\twith torch.no_grad():\n\t\t\tluke_outputs, entity_pooling = model(**inputs)\n\n\t\tmodel_prob = model.get_cluster_prob(entity_pooling)\n\n\t\tall_probs.append(model_prob.detach().cpu())\n\n\tall_probs = torch.cat(all_probs, dim=0)\n\treturn all_probs\n\n\n\n\ndef get_all_phrase_bert_features(sentence_dict, phrase_list, model):\n\n\tall_features = []\n\n\twith torch.no_grad():\n\n\t\tfor batch in tqdm(batchify(sentence_dict, phrase_list, ARGS.batch_size), ncols=100, desc='Generate all features...'):\n\n\t\t\ttext_batch, span_batch = batch\n\n\t\t\tphrase_list = []\n\t\t\tfor text, span in zip(text_batch, span_batch):\n\n\t\t\t\tspan = span[0]\n\t\t\t\tstart, end = span\n\t\t\t\tphrase_list.append(text[start:end])\n\n\t\t\trepr_list = model.encode(phrase_list)\n\n\t\t\tall_features+=list(repr_list)\n\n\tall_features = torch.FloatTensor(all_features)\n\t\n\treturn all_features"
] | [
[
"torch.no_grad",
"torch.FloatTensor",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dquigley-warwick/matador | [
"729e97efb0865c4fff50af87555730ff4b7b6d91",
"729e97efb0865c4fff50af87555730ff4b7b6d91",
"729e97efb0865c4fff50af87555730ff4b7b6d91",
"729e97efb0865c4fff50af87555730ff4b7b6d91"
] | [
"matador/hull/phase_diagram.py",
"tests/test_crystal.py",
"matador/plotting/pxrd_plotting.py",
"tests/test_file_io.py"
] | [
"# coding: utf-8\n# Distributed under the terms of the MIT License.\n\n\"\"\" This submodule implements the base PhaseDiagram creator that interfaces\nwith QueryConvexHull and EnsembleHull.\n\n\"\"\"\n\n\nfrom traceback import print_exc\nimport bisect\n\nimport scipy.spatial\nimport numpy as np\n\nfrom matador.utils.hull_utils import (\n barycentric2cart, vertices2plane, vertices2line, FakeHull, is_point_in_triangle\n)\nfrom matador.utils.chem_utils import get_formula_from_stoich\nfrom matador.utils.cursor_utils import get_array_from_cursor, display_results, set_cursor_from_array\n\nEPS = 1e-12\n\n\nclass PhaseDiagram:\n \"\"\" This class encapsulates the actual phase data, e.g. the actual\n energy and compositions found to be stable.\n\n Attributes:\n structures (numpy.ndarray): the array passed to init used to\n make the hull, with the first (num_species-1) columns\n containing normalised concentrations, and the final column\n containing formation energy.\n convex_hull (scipy.spatial.ConvexHull): the actual convex hull\n returned by SciPy.\n formation_key (list): index/key specification of formation energy\n per atom from top level of each document.\n\n \"\"\"\n def __init__(self, cursor, formation_key, dimension):\n \"\"\" Compute the convex hull of data passed, to retrieve hull\n distances and thus stable structures.\n\n Parameters:\n cursor (list[dict]): list of matador documents to make\n phase diagram from.\n formation_key (str or list): location of the formation energy\n inside each document, either a single key or iterable of\n keys to use with `recursive_get`.\n\n \"\"\"\n self._dimension = dimension\n self.cursor = cursor\n self.formation_key = formation_key\n\n structures = np.hstack((\n get_array_from_cursor(cursor, 'concentration').reshape(len(cursor), dimension-1),\n get_array_from_cursor(cursor, self.formation_key).reshape(len(cursor), 1)))\n\n # define self._structure_slice as the filtered array of points actually used to create the convex hull\n # which can include/exclude points from the passed structures. This array is the one indexed by\n # vertices/simplices in ConvexHull\n\n if self._dimension == 3:\n # add a point \"above\" the hull\n # for simple removal of extraneous vertices (e.g. top of 2D hull)\n dummy_point = [0.333, 0.333, 1e5]\n # if ternary, use all structures, not just those with negative eform for compatibility reasons\n self._structure_slice = np.vstack((structures, dummy_point))\n else:\n # filter out those with positive formation energy, to reduce expense computing hull\n self._structure_slice = structures[np.where(structures[:, -1] <= 0 + EPS)]\n\n # filter out \"duplicates\" in _structure_slice\n # this prevents breakages if no structures are on the hull and chempots are duplicated\n # but it might be faster to hardcode this case individually\n self._structure_slice = np.unique(self._structure_slice, axis=0)\n\n # if we only have the chempots (or worse) with negative formation energy, don't even make the hull\n if len(self._structure_slice) <= dimension:\n if len(self._structure_slice) < dimension:\n raise RuntimeError('No chemical potentials on hull... 
either mysterious use of custom chempots, or worry!')\n self.convex_hull = FakeHull()\n else:\n try:\n self.convex_hull = scipy.spatial.ConvexHull(self._structure_slice)\n except scipy.spatial.qhull.QhullError:\n print(self._structure_slice)\n print('Error with QHull, plotting formation energies only...')\n print_exc()\n self.convex_hull = FakeHull()\n\n # remove vertices that have positive formation energy\n filtered_vertices = [vertex for vertex in self.convex_hull.vertices if self._structure_slice[vertex, -1] <= 0 + EPS]\n bad_simplices = set()\n for ind, simplex in enumerate(self.convex_hull.simplices):\n for vertex in simplex:\n if vertex not in filtered_vertices:\n bad_simplices.add(ind)\n\n filtered_simplices = [simplex for ind, simplex in enumerate(self.convex_hull.simplices) if ind not in bad_simplices]\n\n self.convex_hull = FakeHull()\n self.convex_hull.points = self._structure_slice\n self.convex_hull.vertices = list(filtered_vertices)\n self.convex_hull.simplices = list(filtered_simplices)\n\n self.hull_dist = self.get_hull_distances(structures, precompute=True)\n set_cursor_from_array(self.cursor, self.hull_dist, 'hull_distance')\n self.structures = structures\n self.stable_structures = [doc for doc in self.cursor if doc['hull_distance'] < EPS]\n\n def __str__(self):\n \"\"\" Print underlying phase diagram. \"\"\"\n return display_results(self.cursor,\n hull=True,\n colour=False,\n energy_key=self.formation_key,\n sort=False,\n return_str=True)\n\n def get_hull_distances(self, structures, precompute=False, **kwargs):\n \"\"\" Returns array of distances to pre-computed binary or ternary\n hull, from array containing concentrations and energies.\n\n Parameters:\n structures (numpy.ndarray): N x n array of concentrations and\n enthalpies for N structures, with up to 2 columns of\n concentrations and the last column containing the\n structure's formation enthalpy.\n\n Keyword arguments:\n precompute (bool): whether or not to bootstrap hull\n distances from previously computed values at the same\n stoichiometry.\n\n Returns:\n numpy.ndarray: N-dim array storing distances to\n the hull for N structures,\n\n \"\"\"\n\n if precompute:\n # dict with formula keys, containing tuple of pre-computed enthalpy/atom and hull distance\n cached_formula_dists = dict()\n cache_hits = 0\n cache_misses = 0\n\n if isinstance(structures, list):\n structures = np.asarray(structures)\n\n # if only chem pots on hull, dist = energy\n if len(self._structure_slice) == self._dimension:\n hull_dist = np.ones((len(structures)))\n hull_dist = structures[:, -1]\n\n # if binary hull, do binary search\n elif self._dimension == 2:\n tie_line_comp = self._structure_slice[self.convex_hull.vertices, 0]\n tie_line_energy = self._structure_slice[self.convex_hull.vertices, -1]\n tie_line_comp = np.asarray(tie_line_comp)\n tie_line_energy = tie_line_energy[np.argsort(tie_line_comp)]\n tie_line_comp = tie_line_comp[np.argsort(tie_line_comp)]\n\n hull_dist = np.empty((len(structures)))\n hull_dist.fill(np.nan)\n if precompute:\n for ind, _ in enumerate(structures):\n formula = get_formula_from_stoich(self.cursor[ind]['stoichiometry'], sort=True, tex=False)\n if formula in cached_formula_dists:\n hull_dist[ind] = (structures[ind, -1] - cached_formula_dists[formula][0] +\n cached_formula_dists[formula][1])\n cache_hits += 1\n else:\n i = bisect.bisect_left(tie_line_comp, structures[ind, 0])\n gradient, intercept = vertices2line([[tie_line_comp[i-1], tie_line_energy[i-1]],\n [tie_line_comp[i], tie_line_energy[i]]])\n # 
calculate hull_dist\n hull_dist[ind] = structures[ind, -1] - (gradient * structures[ind, 0] + intercept)\n cached_formula_dists[formula] = (structures[ind, -1], hull_dist[ind])\n cache_misses += 1\n else:\n for ind, _ in enumerate(structures):\n i = bisect.bisect_left(tie_line_comp, structures[ind, 0])\n gradient, intercept = vertices2line([[tie_line_comp[i-1], tie_line_energy[i-1]],\n [tie_line_comp[i], tie_line_energy[i]]])\n # calculate hull_dist\n hull_dist[ind] = structures[ind, -1] - (gradient * structures[ind, 0] + intercept)\n\n # if ternary, use barycentric coords\n elif self._dimension == 3:\n # loop through structures and find which plane they correspond to\n # using barycentric coordinates, if a formula has already been\n # computed then calculate delta relative to that and skip\n self.convex_hull.planes = [[self._structure_slice[vertex] for vertex in simplex]\n for simplex in self.convex_hull.simplices]\n structures_finished = [False] * len(structures)\n hull_dist = np.empty(len(structures))\n hull_dist.fill(np.nan)\n cart_planes_inv = []\n planes_height_fn = []\n for ind, plane in enumerate(self.convex_hull.planes):\n cart_planes = barycentric2cart(plane).T\n cart_planes[-1, :] = 1\n # if projection of triangle in 2D is a line, do binary search\n if np.linalg.det(cart_planes) == 0:\n cart_planes_inv.append(None)\n planes_height_fn.append(None)\n else:\n cart_planes_inv.append(np.linalg.inv(cart_planes))\n planes_height_fn.append(vertices2plane(plane))\n for idx, structure in enumerate(structures):\n for ind, plane in enumerate(self.convex_hull.planes):\n if cart_planes_inv[ind] is None:\n continue\n if precompute and get_formula_from_stoich(self.cursor[idx]['stoichiometry'], sort=True,\n tex=False) in cached_formula_dists:\n formula = get_formula_from_stoich(self.cursor[idx]['stoichiometry'], sort=True, tex=False)\n if formula in cached_formula_dists:\n cache_hits += 1\n hull_dist[idx] = (structures[idx, -1] - cached_formula_dists[formula][0] +\n cached_formula_dists[formula][1])\n structures_finished[idx] = True\n\n elif is_point_in_triangle(structure, cart_planes_inv[ind], preprocessed_triangle=True):\n structures_finished[idx] = True\n hull_dist[idx] = planes_height_fn[ind](structure)\n if precompute:\n cached_formula_dists[\n get_formula_from_stoich(self.cursor[idx]['stoichiometry'], sort=True,\n tex=False)] = (structure[-1], hull_dist[idx])\n cache_misses += 1\n break\n\n # mask values very close to 0 with 0\n hull_dist[np.where(np.abs(hull_dist) < EPS)] = 0\n\n failed_structures = []\n for ind, structure in enumerate(structures_finished):\n if not structure:\n failed_structures.append(ind)\n\n if failed_structures:\n raise RuntimeError('There were issues calculating the hull distance for {} structures.'\n .format(len(failed_structures)))\n\n # otherwise, set to zero until proper N-d distance can be implemented\n else:\n raise NotImplementedError(\n \"Unable to compute {dimension}-dimensional hull distances (yet) \"\n \"consider breaking your phase diagram into a pseudo-ternary or pseudo-binary system.\"\n )\n\n if np.isnan(hull_dist).any():\n raise RuntimeError(f\"Some hull distances failed, found NaNs at {np.isnan(hull_dist, where=True)}\")\n\n return hull_dist\n",
"#!/usr/bin/env python\n# standard library\nimport unittest\nimport copy\nfrom os.path import realpath\n\nimport numpy as np\n\n# matador modules\nfrom matador.crystal.crystal import Crystal, UnitCell\nfrom matador.crystal.crystal_site import Site\nfrom matador.scrapers.castep_scrapers import castep2dict, res2dict\nfrom matador.utils.cell_utils import frac2cart\nfrom matador.scrapers.magres_scrapers import magres2dict\n\n# grab abs path for accessing test data\nREAL_PATH = \"/\".join(realpath(__file__).split(\"/\")[:-1]) + \"/\"\n\ntry:\n import networkx # noqa\n\n imported_networkx = True\nexcept ImportError:\n imported_networkx = False\n\nimported_vornet = False\n\n\nclass UnitCellTest(unittest.TestCase):\n def test_cart_init(self):\n lattice_cart = [[3, 0, 0], [0, 3, 0], [0, 0, 3]]\n lat_tup = tuple(tuple(vec) for vec in lattice_cart)\n cell = UnitCell(lattice_cart)\n self.assertEqual(cell.lattice_cart, lat_tup)\n self.assertEqual(cell.lattice_abc, ((3, 3, 3), (90, 90, 90)))\n self.assertEqual(cell.volume, 27)\n self.assertEqual(cell.lengths, (3, 3, 3))\n self.assertEqual(cell.angles, (90, 90, 90))\n\n lattice_cart = np.asarray([[3, 0, 0], [0, 3, 0], [0, 0, 3]])\n cell_2 = UnitCell(lattice_cart)\n self.assertAlmostEqual(cell_2.lattice_cart, lat_tup)\n self.assertAlmostEqual(cell_2.lattice_abc, ((3, 3, 3), (90, 90, 90)))\n self.assertEqual(cell_2.volume, 27)\n self.assertAlmostEqual(cell_2.lengths, (3, 3, 3))\n self.assertAlmostEqual(cell_2.angles, (90, 90, 90))\n self.assertEqual(cell.lattice_cart, lat_tup)\n self.assertEqual(cell.lattice_abc, ((3, 3, 3), (90, 90, 90)))\n self.assertEqual(cell.volume, 27)\n self.assertEqual(cell.lengths, (3, 3, 3))\n\n lattice_cart = [[10, 0, 0], [0, 10, 0], [0, 0, 10]]\n cell.lattice_cart = lattice_cart\n lat_tup = tuple(tuple(vec) for vec in lattice_cart)\n self.assertEqual(cell.lattice_cart, lat_tup)\n lattice_cart = \"aadsfadsf\"\n self.assertEqual(cell.lattice_cart, lat_tup)\n self.assertEqual(cell.lattice_abc, ((10, 10, 10), (90, 90, 90)))\n self.assertEqual(cell.volume, 1000)\n\n def test_abc_init(self):\n lattice_abc = [[2, 3, 4], [60, 60, 60]]\n lat_tup = tuple(tuple(elem) for elem in lattice_abc)\n cell = UnitCell(lattice_abc)\n self.assertAlmostEqual(cell.lattice_abc, lat_tup)\n cell.lengths = [10, 10, 10]\n self.assertEqual(cell.lattice_abc, ((10, 10, 10), (60, 60, 60)))\n cell.angles = [90, 90, 90]\n self.assertEqual(cell.lattice_abc, ((10, 10, 10), (90, 90, 90)))\n lattice_cart = ((10, 0, 0), (0, 10, 0), (0, 0, 10))\n self.assertEqual(cell.lattice_cart, lattice_cart)\n\n\nclass CrystalTest(unittest.TestCase):\n def test_getters_setters(self):\n doc, s = castep2dict(REAL_PATH + \"data/Na3Zn4-swap-ReOs-OQMD_759599.castep\")\n crystal = Crystal(doc)\n self.assertEqual(\n list(crystal.lattice_cart[0]), [9.0397727, 0.0081202, 0.0000000]\n )\n self.assertEqual(crystal.num_atoms, 14)\n with self.assertRaises(AttributeError):\n crystal[\"positions_frac\"] = [[0, 1, 2]]\n\n # check we can set fields to the same value\n crystal[\"new_field\"] = [1, 2, 3]\n crystal[\"new_field\"] = [1, 2, 3]\n\n crystal[\"new_field_2\"] = np.nan\n crystal[\"new_field_2\"] = np.nan\n\n crystal[\"new_field_3\"] = [1, 2, 4]\n with self.assertRaises(AttributeError):\n crystal[\"new_field_3\"] = [1, 2, 5]\n\n crystal[\"new_field_4\"] = [1, 2, np.nan]\n crystal[\"new_field_4\"] = [1, 2, np.nan]\n\n crystal[\"new_field_5\"] = [1, np.nan, 2]\n with self.assertRaises(AttributeError):\n crystal[\"new_field_5\"] = [1, 2, np.nan]\n\n crystal[\"new_field_6\"] = np.linspace(0, 1, 
1000).tolist()\n crystal[\"new_field_6\"] = np.array(crystal[\"new_field_6\"], copy=True).tolist()\n\n def test_set_positions(self):\n doc, s = castep2dict(REAL_PATH + \"data/Na3Zn4-swap-ReOs-OQMD_759599.castep\")\n doc = Crystal(doc)\n\n copydoc = copy.deepcopy(doc)\n old_pos = np.asarray(doc.positions_frac)\n copydoc.set_positions(np.zeros_like(old_pos), fractional=True)\n\n np.testing.assert_array_almost_equal(\n np.asarray(copydoc.positions_frac), np.zeros_like(old_pos)\n )\n np.testing.assert_array_almost_equal(\n np.asarray(copydoc.positions_abs), np.zeros_like(old_pos)\n )\n\n self.assertNotAlmostEqual(doc.positions_frac[-1][0], 0.0)\n\n def test_convert_positions(self):\n doc = res2dict(REAL_PATH + \"data/structures/Li7Sn-Fmmm.res\")[0]\n crystal = res2dict(REAL_PATH + \"data/structures/Li7Sn-Fmmm.res\", as_model=True)[\n 0\n ]\n\n doc[\"positions_abs\"] = frac2cart(doc[\"lattice_cart\"], doc[\"positions_frac\"])\n\n np.testing.assert_array_almost_equal(doc[\"positions_abs\"], crystal.positions_abs)\n for ind, site in enumerate(crystal):\n np.testing.assert_array_almost_equal(doc[\"positions_abs\"][ind], site.coords_cartesian)\n\n crystal.cell.lengths = np.asarray(crystal.cell.lengths) * 10\n\n rescaled_pos = frac2cart(np.asarray(doc[\"lattice_cart\"]) * 10, doc[\"positions_frac\"])\n\n for ind, site in enumerate(crystal):\n np.testing.assert_array_almost_equal(doc[\"positions_frac\"][ind], site.coords)\n np.testing.assert_array_almost_equal(rescaled_pos[ind], site.coords_cartesian)\n\n def test_minimal_init(self):\n doc = Crystal(\n dict(\n lattice_abc=np.asarray([[3, 3, 3], [90, 90, 90]]),\n atom_types=[\"Na\", \"Cl\"],\n positions_frac=[[0, 0, 0], [0.5, 0.5, 0.5]],\n )\n )\n self.assertEqual(doc.stoichiometry, [[\"Cl\", 1.0], [\"Na\", 1.0]])\n self.assertEqual(doc.lattice_abc, ((3.0, 3.0, 3.0), (90.0, 90.0, 90.0)))\n self.assertEqual(\n doc.lattice_cart, ((3.0, 0.0, 0.0), (0.0, 3.0, 0.0), (0.0, 0.0, 3.0))\n )\n self.assertEqual(len(doc.sites), 2)\n self.assertEqual(doc.num_atoms, 2)\n self.assertEqual(doc.concentration, [0.5, 0.5])\n self.assertEqual(doc.positions_abs, [[0, 0, 0], [1.5, 1.5, 1.5]])\n self.assertEqual(doc.positions_frac, [[0, 0, 0], [0.5, 0.5, 0.5]])\n self.assertEqual(doc.formula, \"NaCl\")\n self.assertEqual(doc.cell_volume, 27.0)\n self.assertEqual(doc.space_group, \"Pm-3m\")\n self.assertEqual(doc.space_group_tex, \"$Pm\\\\bar{3}m$\")\n\n doc = Crystal(\n dict(\n lattice_cart=((3.0, 0.0, 0.0), (0.0, 3.0, 0.0), (0.0, 0.0, 3.0)),\n atom_types=[\"Na\", \"Cl\"],\n positions_abs=[[0, 0, 0], [1.5, 1.5, 1.5]],\n )\n )\n self.assertEqual(doc.lattice_abc, ((3.0, 3.0, 3.0), (90.0, 90.0, 90.0)))\n self.assertEqual(\n doc.lattice_cart, ((3.0, 0.0, 0.0), (0.0, 3.0, 0.0), (0.0, 0.0, 3.0))\n )\n self.assertEqual(doc.stoichiometry, [[\"Cl\", 1.0], [\"Na\", 1.0]])\n self.assertEqual(len(doc.sites), 2)\n self.assertEqual(doc.num_atoms, 2)\n self.assertEqual(doc.concentration, [0.5, 0.5])\n self.assertEqual(doc.positions_abs, [[0.0, 0.0, 0.0], [1.5, 1.5, 1.5]])\n self.assertEqual(doc.positions_frac, [[0, 0, 0], [0.5, 0.5, 0.5]])\n self.assertEqual(doc.formula, \"NaCl\")\n self.assertEqual(doc.cell_volume, 27.0)\n self.assertEqual(doc.space_group, \"Pm-3m\")\n\n def testSites(self):\n doc, s = castep2dict(REAL_PATH + \"data/Na3Zn4-swap-ReOs-OQMD_759599.castep\")\n del doc[\"lattice_cart\"]\n crystal = Crystal(doc)\n np.testing.assert_array_almost_equal(crystal[0].coords, [0.776467, 0.466319, 0.0])\n\n with self.assertRaises(RuntimeError):\n crystal[0].set_position([0.5, 0.6, 
0.7, 0.8], \"fractional\")\n with self.assertRaises(RuntimeError):\n crystal[0].set_position([[1, 2, 3], [4, 5, 6], [7, 8, 9]], \"fractional\")\n self.assertEqual(\n [atom for atom in crystal], [atom[1] for atom in enumerate(crystal)]\n )\n\n atom = Site(\n species=\"Cl\",\n position=[0.2, 0.5, 0.2],\n lattice=[[10, 0, 0], [0, 10, 0], [0, 0, 10]],\n )\n atom2 = copy.deepcopy(atom)\n atom2.species = \"Br\"\n\n self.assertEqual(atom.species, \"Cl\")\n self.assertEqual(atom2.species, \"Br\")\n\n atom2.set_position([1.2, -0.5, 0.2], \"fractional\")\n np.testing.assert_array_almost_equal(\n atom2.displacement_between_sites(atom), [0.0, 0.0, 0.0], decimal=10\n )\n self.assertAlmostEqual(atom2.distance_between_sites(atom), 0.0, places=10)\n atom2.set_position([1.3, -0.5, 0.2], \"fractional\")\n np.testing.assert_array_almost_equal(\n atom2.displacement_between_sites(atom), [1.0, 0.0, 0.0], decimal=10\n )\n self.assertAlmostEqual(atom2.distance_between_sites(atom), 1.0, places=10)\n atom2.set_position([1.3, -0.5, 0.3], \"fractional\")\n np.testing.assert_array_almost_equal(\n atom2.displacement_between_sites(atom), [1.0, 0.0, 1.0], decimal=10\n )\n self.assertAlmostEqual(\n atom2.distance_between_sites(atom), np.sqrt(2), places=10\n )\n\n def testSpg(self):\n doc, s = castep2dict(REAL_PATH + \"data/Na3Zn4-swap-ReOs-OQMD_759599.castep\")\n crystal = Crystal(doc)\n print(crystal.get_space_group(symprec=0.01))\n print(crystal.get_space_group(symprec=0.001))\n self.assertEqual(crystal.get_space_group(symprec=0.0000001), \"Pm\")\n\n def testFromMagres(self):\n doc, s = magres2dict(REAL_PATH + \"data/magres_files/NaP_QE6.magres\")\n crystal = Crystal(doc)\n for atom in crystal:\n print(atom, atom[\"chemical_shielding_iso\"], atom[\"chemical_shift_asymmetry\"])\n\n @unittest.skipIf(not imported_vornet, \"Voronoi code not found in this distribution\")\n def testCoordination(self):\n doc, s = magres2dict(REAL_PATH + \"data/magres_files/NaP_QE6.magres\")\n crystal = Crystal(doc, voronoi=True)\n for atom in crystal:\n print(atom, atom.coordination)\n print(crystal.coordination_lists)\n print(crystal.coordination_stats)\n\n @unittest.skipIf(not imported_vornet, \"Voronoi code not found in this distribution\")\n def testVoronoi(self):\n doc, s = magres2dict(REAL_PATH + \"data/magres_files/NaP_QE6.magres\")\n crystal = Crystal(doc)\n print(crystal.unique_sites)\n\n @unittest.skipIf(not imported_networkx, \"NetworkX missing\")\n def testBondLengths(self):\n doc, s = magres2dict(REAL_PATH + \"data/magres_files/NaP_QE6.magres\")\n crystal = Crystal(doc)\n print(crystal.bond_lengths)\n\n @unittest.skipIf(not imported_networkx, \"NetworkX missing\")\n def testBondStats(self):\n doc, s = magres2dict(REAL_PATH + \"data/magres_files/NaP_QE6.magres\")\n crystal = Crystal(doc)\n print(crystal.bonding_stats)\n\n\nclass ElasticCrystalTest(unittest.TestCase):\n \"\"\" Test the elastic functionality of the Crystal module. 
\"\"\"\n\n def testKBulkModulus(self):\n from matador.crystal.elastic import get_equation_of_state\n\n results = get_equation_of_state(\n REAL_PATH + \"/data/bulk_modulus/K-bulk_modulus\", plot=False\n )\n self.assertTrue(\"eos\" in results)\n self.assertEqual(len(results[\"eos\"]), 3)\n self.assertAlmostEqual(results[\"eos\"][0].bulk_modulus, 3.696117355)\n self.assertAlmostEqual(results[\"eos\"][1].bulk_modulus, 3.699072676)\n self.assertAlmostEqual(results[\"eos\"][2].bulk_modulus, 3.691406442)\n self.assertAlmostEqual(results[\"eos\"][0].bulk_modulus_err, 3e-6, places=1)\n self.assertAlmostEqual(results[\"eos\"][1].bulk_modulus_err, 2e-6, places=1)\n self.assertAlmostEqual(results[\"eos\"][2].bulk_modulus_err, 2e-6, places=1)\n\n\nif __name__ == \"__main__\":\n unittest.main(buffer=False, verbosity=2)\n",
"# coding: utf-8\n# Distributed under the terms of the MIT License.\n\n\"\"\" This file implements plotting routines specifically\nfor the PXRD objects defined in the\nmatador.fingerprints.pxrd module.\n\n\"\"\"\n\n\nfrom matador.plotting.plotting import plotting_function\nfrom matador.utils.cell_utils import get_space_group_label_latex\nfrom matador.crystal import Crystal\n\n\n__all__ = ['plot_pxrd']\n\n\n@plotting_function\ndef plot_pxrd(\n pxrds, two_theta_range=None, rug=False, rug_height=0.05, rug_offset=0.04, offset=None,\n ax=None, labels=None, figsize=None, text_offset=0.1, filename=None, **kwargs\n):\n \"\"\" Plot PXRD or PXRDs.\n\n Parameters:\n pxrds (list or matador.fingerprints.pxrd.PXRD): the PXRD\n or list of PXRDs to plot.\n\n Keyword arguments:\n two_theta_range (tuple): plotting limits for 2theta\n rug (bool): whether to provide a rug plot of all peaks.\n rug_height (float): size of rug ticks.\n rug_offset (float): offset of rug ticks.\n offset (float): extra space added between patterns (as fraction\n of max intensity). Default 0.1.\n labels (list of str): list of labels to plot alongside pattern.\n figsize (tuple): specify a figure size, the default\n scales with the number of PXRDs to be plotted.\n ax (matplotlib.Axis): optional axis object to plot on.\n text_offset (float): amount by which to offset the labels.\n filename (str): optional filename for saving.\n\n\n \"\"\"\n\n import matplotlib.pyplot as plt\n\n if not isinstance(pxrds, list):\n pxrds = [pxrds]\n if labels is not None and not isinstance(labels, list):\n labels = [labels]\n\n if figsize is None:\n _user_default_figsize = plt.rcParams.get('figure.figsize', (8, 6))\n height = len(pxrds) * max(0.5, _user_default_figsize[1] / 1.5 / len(pxrds))\n figsize = (_user_default_figsize[0], height)\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n if offset is None and rug:\n offset = 0.2\n elif offset is None:\n offset = 0.1\n\n colour_cycle = ax._get_lines.prop_cycler\n\n for ind, pxrd in enumerate(pxrds):\n if isinstance(pxrd, Crystal):\n pxrd = pxrd.pxrd\n elif isinstance(pxrd, dict) and 'pxrd' in pxrd:\n pxrd = pxrd['pxrd']\n\n c = next(colour_cycle).get('color')\n\n if labels:\n label = labels[ind]\n else:\n label = get_space_group_label_latex(pxrd.spg) + '-' + pxrd.formula\n\n ax.plot(pxrd.two_thetas, (1 - offset) * pxrd.pattern + ind, c=c)\n\n ax.text(0.95, ind+text_offset, label,\n transform=ax.get_yaxis_transform(),\n horizontalalignment='right')\n\n if rug:\n import numpy as np\n peaks = np.unique(pxrd.peak_positions)\n for peak in peaks:\n ax.plot([peak, peak], [ind-rug_height-rug_offset, ind-rug_offset], c=c, alpha=0.5)\n\n if len(pxrds) > 1:\n ax.set_yticks([])\n else:\n import numpy as np\n ax.set_yticks(np.linspace(0, 1, 5, endpoint=True))\n ax.set_ylim(-0.2, len(pxrds)+0.1)\n if two_theta_range is not None:\n ax.set_xlim(*two_theta_range)\n ax.set_ylabel('Relative intensity')\n ax.set_xlabel('$2\\\\theta$ (degrees)')\n\n if any([kwargs.get('pdf'), kwargs.get('svg'), kwargs.get('png')]):\n bbox_extra_artists = None\n if filename is None:\n filename = '-'.join([pxrd.formula for pxrd in pxrds]) + '_pxrd'\n\n if kwargs.get('pdf'):\n plt.savefig('{}.pdf'.format(filename),\n bbox_inches='tight', transparent=True, bbox_extra_artists=bbox_extra_artists)\n if kwargs.get('svg'):\n plt.savefig('{}.svg'.format(filename),\n bbox_inches='tight', transparent=True, bbox_extra_artists=bbox_extra_artists)\n if kwargs.get('png'):\n plt.savefig('{}.png'.format(filename),\n bbox_inches='tight', 
transparent=True, bbox_extra_artists=bbox_extra_artists)\n",
"#!/usr/bin/env python\n\"\"\" Test file scraping and writing functionality. \"\"\"\n\nimport json\nimport os\nimport glob\nimport itertools\nimport numpy as np\n\nfrom matador.scrapers import castep2dict, res2dict, cell2dict\nfrom matador.scrapers import (\n cif2dict,\n param2dict,\n phonon2dict,\n optados2dict,\n phonon_dos2dict,\n)\nfrom matador.scrapers import arbitrary2dict, bands2dict, pwout2dict, magres2dict\nfrom matador.scrapers.castep_scrapers import usp2dict, get_seed_metadata\nfrom matador.export import doc2res, doc2param, doc2cell, query2files\nfrom matador.orm.spectral import (\n ElectronicDispersion,\n ElectronicDOS,\n VibrationalDispersion,\n VibrationalDOS,\n)\nfrom matador.utils.chem_utils import INVERSE_CM_TO_EV\nfrom .utils import REAL_PATH, MatadorUnitTest\n\nVERBOSITY = 10\n\n\nclass CellScraperTests(MatadorUnitTest):\n \"\"\" Test cell scraper functions. \"\"\"\n\n def test_standard_cell_scraper(self):\n cell_fname = REAL_PATH + \"data/LiP2Zn-0bm995-a_9-out.cell\"\n self.assertTrue(\n os.path.isfile(cell_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n cell_fname\n ),\n )\n test_dict, s = cell2dict(\n cell_fname, db=False, lattice=True, verbosity=VERBOSITY\n )\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(\n test_dict[\"lattice_cart\"][0][0],\n 9.83262140721165,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"lattice_cart\"][1][1],\n 5.96357780025648,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"lattice_cart\"][2][2],\n 4.39895761828278,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"lattice_cart\"][1][0],\n -0.115688800302997,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"symmetry_tol\"], 0.001, msg=\"Failed to read symmetry tolerance.\"\n )\n self.assertEqual(\n test_dict[\"kpoints_mp_grid\"],\n [2, 3, 4],\n msg=\"Failed to read kpoint grid {}\".format(test_dict[\"kpoints_mp_grid\"]),\n )\n self.assertEqual(\n test_dict[\"species_pot\"][\"Li\"], \"Li_00PBE.usp\", msg=\"Failed to read pspots.\"\n )\n self.assertEqual(\n test_dict[\"species_pot\"][\"P\"], \"P_00PBE.usp\", msg=\"Failed to read pspots.\"\n )\n self.assertEqual(\n test_dict[\"species_pot\"][\"Zn\"], \"Zn_00PBE.usp\", msg=\"Failed to read pspots.\"\n )\n # test that lattice_vec only read when outcell is true\n test_dict, s = cell2dict(\n cell_fname, db=False, lattice=False, verbosity=VERBOSITY\n )\n self.assertTrue(test_dict.get(\"lattice_cart\") is None)\n\n def test_cell_outcell(self):\n cell_fname = REAL_PATH + \"data/Li2C2-out.cell\"\n self.assertTrue(\n os.path.isfile(cell_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n cell_fname\n ),\n )\n test_dict, s = cell2dict(\n cell_fname, db=False, lattice=True, verbosity=VERBOSITY\n )\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(test_dict[\"cell_constraints\"], [[1, 1, 3], [4, 4, 6]])\n self.assertEqual(\n test_dict[\"external_pressure\"],\n [[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]],\n )\n tmp_name = \"tmp.cell\"\n doc2cell(test_dict, tmp_name)\n new_test_dict, s = cell2dict(\n tmp_name, db=False, lattice=True, verbosity=VERBOSITY\n )\n new_test_dict[\"source\"] = test_dict[\"source\"]\n self.assertEqual(\n test_dict[\"external_pressure\"], new_test_dict[\"external_pressure\"]\n )\n\n def test_cell_phonon(self):\n cell_fname = 
REAL_PATH + \"data/K5P4-phonon.cell\"\n self.assertTrue(\n os.path.isfile(cell_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n cell_fname\n ),\n )\n test_dict, s = cell2dict(\n cell_fname, db=False, lattice=True, verbosity=VERBOSITY\n )\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(\n test_dict[\"lattice_cart\"][0][0],\n 11.4518745146637,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"lattice_cart\"][1][1],\n 5.09448137301246,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"lattice_cart\"][2][2],\n 9.18378851243459,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"lattice_cart\"][1][0], 0.0, msg=\"Failed to read lattice vectors.\"\n )\n self.assertEqual(\n test_dict[\"symmetry_tol\"], 0.0001, msg=\"Failed to read symmetry tolerance.\"\n )\n self.assertEqual(\n test_dict[\"kpoints_mp_spacing\"],\n 0.03,\n msg=\"Failed to read kpoint grid {}\".format(test_dict[\"kpoints_mp_spacing\"]),\n )\n self.assertEqual(\n test_dict[\"phonon_kpoint_mp_grid\"],\n [2, 2, 2],\n msg=\"Failed to read kpoint grid {}\".format(\n test_dict[\"phonon_kpoint_mp_grid\"]\n ),\n )\n\n self.assertEqual(\n test_dict[\"phonon_kpoint_mp_offset\"],\n [0.25, 0.25, 0.25],\n msg=\"Failed to read kpoint grid {}\".format(\n test_dict[\"phonon_kpoint_mp_offset\"]\n ),\n )\n self.assertEqual(\n test_dict[\"phonon_fine_kpoint_mp_spacing\"],\n 0.02,\n msg=\"Failed to read kpoint {}\".format(\n test_dict[\"phonon_fine_kpoint_mp_spacing\"]\n ),\n )\n self.assertEqual(\n test_dict[\"phonon_fine_kpoint_path_spacing\"],\n 0.01,\n msg=\"Failed to read kpoint {}\".format(\n test_dict[\"phonon_fine_kpoint_path_spacing\"]\n ),\n )\n self.assertEqual(\n test_dict[\"species_pot\"][\"K\"],\n \"2|1.5|9|10|11|30U:40:31(qc=6)\",\n msg=\"Failed to read pspots.\",\n )\n self.assertEqual(\n test_dict[\"species_pot\"][\"P\"],\n \"3|1.8|4|4|5|30:31:32\",\n msg=\"Failed to read pspots.\",\n )\n self.assertEqual(\n test_dict[\"hubbard_u\"][\"K\"][\"s\"], 2, msg=\"Failed to read Hubbard U block.\"\n )\n self.assertEqual(\n test_dict[\"hubbard_u\"][\"P\"][\"p\"], 3, msg=\"Failed to read Hubbard U block.\"\n )\n self.assertEqual(\n test_dict[\"hubbard_u\"][\"U\"][\"d\"],\n 10.101,\n msg=\"Failed to read Hubbard U block.\",\n )\n self.assertTrue(test_dict[\"snap_to_symmetry\"])\n self.assertTrue(test_dict[\"symmetry_generate\"])\n self.assertEqual(test_dict[\"phonon_supercell_matrix\"][0], [3, 0, 1])\n self.assertEqual(test_dict[\"phonon_supercell_matrix\"][1], [0, 3, 0])\n self.assertEqual(test_dict[\"phonon_supercell_matrix\"][2], [0, 0, 9])\n np.testing.assert_array_equal(\n test_dict[\"external_efield\"], np.array([0.5, 0, 0]),\n )\n\n def test_cell_failure(self):\n cell_fname = REAL_PATH + \"data/K5P4-phonon_bodged.cell\"\n self.assertTrue(\n os.path.isfile(cell_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n cell_fname\n ),\n )\n test_dict, s = cell2dict(cell_fname, db=True, lattice=True, verbosity=VERBOSITY)\n self.assertFalse(s, msg=test_dict)\n\n def test_cell_spin(self):\n cell_fname = REAL_PATH + \"data/cell_files/spin_test.cell\"\n self.assertTrue(\n os.path.isfile(cell_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n cell_fname\n ),\n )\n test_dict, s = cell2dict(\n cell_fname, db=False, lattice=True, positions=True, verbosity=VERBOSITY\n )\n 
self.assertEqual(test_dict[\"species_pot\"][\"library\"], \"QC5\")\n self.assertEqual(test_dict[\"atom_types\"], [\"H\", \"C\", \"H\", \"C\", \"H\", \"H\"])\n self.assertEqual(test_dict[\"lattice_cart\"][0], [10, 0, 0])\n self.assertEqual(test_dict[\"lattice_cart\"][1], [0, 10, 0])\n self.assertEqual(test_dict[\"lattice_cart\"][2], [0, 0, 10])\n self.assertEqual(test_dict[\"lattice_abc\"][0], [10, 10, 10])\n self.assertEqual(test_dict[\"lattice_abc\"][1], [90, 90, 90])\n self.assertEqual(\n test_dict[\"atomic_init_spins\"], [None, 0.32675521, None, -0.1234, None, 1.0]\n )\n\n def test_cell_kpoint_path(self):\n cell_name = REAL_PATH + \"data/cell_files/kpoint_path.cell\"\n cell, s = cell2dict(cell_name, db=False)\n\n self.assertTrue(s)\n self.assertEqual(\n cell[\"spectral_kpoints_path\"],\n [[0.0, 0.0, 0.0], [0.0, 0.0, 0.5], [0.0, 0.5, 0.0], [0.5, 0.0, 0.0]],\n )\n self.assertEqual(\n cell[\"spectral_kpoints_path_labels\"], [\"$\\\\Gamma$\", \"Z\", \"$Y$\", \"X\"]\n )\n self.assertEqual(cell[\"spectral_kpoints_path_spacing\"], 0.02)\n self.assertEqual(\n cell[\"phonon_fine_kpoint_path\"],\n [[0.0, 0.0, 0.0], [0.0, 0.0, 0.5], [0.0, 0.5, 0.0], [0.5, 0.0, 0.0]],\n )\n self.assertEqual(\n cell[\"phonon_fine_kpoint_path_labels\"], [\"$\\\\Gamma$\", \"Z\", \"$Y$\", \"X\"]\n )\n self.assertEqual(cell[\"phonon_fine_kpoint_path_spacing\"], 0.01)\n\n def test_cell_positions_abs(self):\n cell_name = REAL_PATH + \"data/cell_files/npm.cell\"\n cell, s = cell2dict(cell_name, db=False)\n\n self.assertTrue(s, msg=\"Failed entirely: {}\".format(cell))\n self.assertEqual(cell[\"lattice_cart\"][0], [21.84, 0, 0])\n self.assertEqual(cell[\"lattice_cart\"][1], [0, 16.38, 0])\n self.assertEqual(cell[\"lattice_cart\"][2], [0, 0, 40.46])\n\n self.assertEqual(cell[\"positions_abs\"][0], [0, 0, 0])\n self.assertEqual(cell[\"positions_abs\"][95], [17.745, 15.015, -4.095])\n self.assertEqual(cell[\"positions_abs\"][-1], [13.65, 13.65, 10.92])\n\n np.testing.assert_array_almost_equal(cell[\"positions_frac\"][0], [0, 0, 0])\n np.testing.assert_array_almost_equal(\n cell[\"positions_frac\"][95], [0.8125, 0.9166666, 0.8987889]\n )\n np.testing.assert_array_almost_equal(\n cell[\"positions_frac\"][-1], [0.625, 0.8333333, 0.26989619]\n )\n\n def test_cell_ionic_cell_constraints(self):\n cell_name = REAL_PATH + \"data/cell_files/ionic_constraints.cell\"\n cell, s = cell2dict(cell_name, db=False)\n\n self.assertTrue(s)\n self.assertEqual(cell[\"ionic_constraints\"][0], \"1 C 1 0 0 1\")\n self.assertEqual(cell[\"ionic_constraints\"][1], \"2 C 1 0 1 0\")\n self.assertEqual(cell[\"ionic_constraints\"][2], \"3 C 1 1 0 0\")\n self.assertEqual(cell[\"cell_constraints\"][0], [1, 1, 3])\n self.assertEqual(cell[\"cell_constraints\"][1], [0, 0, 0])\n\n\nclass CastepScraperTests(MatadorUnitTest):\n \"\"\" Test CASTEP scrapers. 
\"\"\"\n\n def test_castep16(self):\n castep_fname = REAL_PATH + \"data/Na3Zn4-swap-ReOs-OQMD_759599.castep\"\n failed_open = False\n try:\n f = open(castep_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n castep_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = castep2dict(castep_fname, timings=True, db=True, verbosity=VERBOSITY)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(\n test_dict[\"pressure\"], 0.0763, msg=\"Failed to read pressure!\"\n )\n self.assertEqual(\n test_dict[\"enthalpy\"], -2.15036930e4, msg=\"Failed to read enthalpy!\"\n )\n self.assertEqual(test_dict[\"num_atoms\"], 14, msg=\"Wrong number of atoms!\")\n self.assertTrue(\n [\"Na\", 3] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertTrue(\n [\"Zn\", 4] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertEqual(\n test_dict[\"cell_volume\"], 288.041941, msg=\"Wrong cell volume!\"\n )\n self.assertEqual(test_dict[\"space_group\"], \"Pm\", msg=\"Wrong space group!\")\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][0], 9.039776, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][1], 9.045651, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][2], 4.068682, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][0], 90, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][1], 90, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][2],\n 59.971185,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"geom_force_tol\"], 0.05, msg=\"Wrong geom force tol\"\n )\n self.assertEqual(test_dict[\"castep_version\"], \"16.11\")\n self.assertEqual(test_dict[\"_castep_commit\"], \"203e84763863+\")\n self.assertAlmostEqual(test_dict[\"total_time_secs\"], 1291.14, places=2)\n self.assertEqual(test_dict[\"geom_iter\"], 8)\n self.assertEqual(\n test_dict[\"external_pressure\"],\n [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],\n )\n self.assertEqual(test_dict[\"estimated_mem_per_process_MB\"], 345.1)\n self.assertEqual(test_dict[\"peak_mem_MB\"], int(675372 / 1024))\n self.assertEqual(test_dict[\"num_mpi_processes\"], 15)\n\n def test_castep17(self):\n castep_fname = REAL_PATH + \"data/KP-castep17.castep\"\n failed_open = False\n try:\n f = open(castep_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n castep_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = castep2dict(castep_fname, db=True, verbosity=VERBOSITY)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(\n test_dict[\"pressure\"], 0.0180, msg=\"Failed to read pressure!\"\n )\n self.assertEqual(\n test_dict[\"enthalpy\"], -5.98055077e3, msg=\"Failed to read enthalpy!\"\n )\n self.assertEqual(test_dict[\"num_atoms\"], 9, msg=\"Wrong number of atoms!\")\n self.assertTrue(\n [\"P\", 2] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertTrue(\n [\"K\", 7] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertEqual(\n test_dict[\"cell_volume\"], 522.226927, msg=\"Wrong cell volume!\"\n )\n 
self.assertEqual(test_dict[\"space_group\"], \"Pm\", msg=\"Wrong space group!\")\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][0],\n 10.231976,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][1], 5.024837, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][2],\n 10.186949,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][0],\n 90.000000,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][1],\n 94.373377,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][2],\n 90.000000,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"geom_force_tol\"], 0.01, msg=\"Wrong geom force tol\"\n )\n self.assertEqual(test_dict[\"castep_version\"], \"17.21\")\n self.assertEqual(\n test_dict[\"_compiler_architecture\"], \"linux_x86_64_ifort17\"\n )\n self.assertEqual(test_dict[\"_castep_commit\"], \"056e886bd5a1+\")\n self.assertEqual(test_dict[\"optimised\"], True)\n self.assertEqual(test_dict[\"estimated_mem_per_process_MB\"], 300.1)\n self.assertEqual(\n test_dict[\"species_pot\"][\"K\"],\n \"2|1.5|9|10|11|30U:40:31(qc=6)\",\n msg=\"Failed to scrape K_OTF.usp file\",\n )\n self.assertEqual(\n test_dict[\"species_pot\"][\"P\"],\n \"3|1.8|4|4|5|30:31:32\",\n msg=\"Failed to scrape P_OTF.usp file\",\n )\n\n def test_castep_single_atom_edgecase(self):\n castep_fname = REAL_PATH + \"data/castep_files/Na-edgecase-CollCode10101.castep\"\n failed_open = False\n try:\n f = open(castep_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n castep_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = castep2dict(castep_fname, db=True, verbosity=VERBOSITY)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(\n test_dict[\"pressure\"], -0.0966, msg=\"Failed to read pressure!\"\n )\n self.assertEqual(\n test_dict[\"enthalpy\"], -1.30423371e3, msg=\"Failed to read enthalpy!\"\n )\n self.assertEqual(test_dict[\"positions_frac\"], [[0, 0, 0]])\n self.assertEqual(test_dict[\"forces\"], [[0, 0, 0]])\n self.assertEqual(\n test_dict[\"enthalpy\"], -1.30423371e3, msg=\"Failed to read enthalpy!\"\n )\n self.assertEqual(\n test_dict[\"total_energy\"],\n -1304.223019263,\n msg=\"Failed to read total energy!\",\n )\n self.assertEqual(\n test_dict[\"total_energy_per_atom\"],\n -1304.223019263,\n msg=\"Failed to read total energy!\",\n )\n self.assertEqual(\n test_dict[\"smeared_free_energy\"],\n -1304.233706274,\n msg=\"Failed to read free energy!\",\n )\n self.assertEqual(test_dict[\"num_atoms\"], 1, msg=\"Wrong number of atoms!\")\n self.assertTrue(\n [\"Na\", 1] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertEqual(\n len(test_dict[\"stoichiometry\"]), 1, msg=\"Wrong stoichiometry!\"\n )\n self.assertEqual(\n test_dict[\"cell_volume\"], 36.761902, msg=\"Wrong cell volume!\"\n )\n self.assertEqual(\n test_dict[\"space_group\"], \"Im-3m\", msg=\"Wrong space group!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][0], 3.628050, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][1], 3.628050, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][2], 3.628050, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n 
test_dict[\"lattice_abc\"][1][0],\n 109.471221,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][1],\n 109.471221,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][2],\n 109.471221,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"geom_force_tol\"], 0.05, msg=\"Wrong geom force tol\"\n )\n self.assertEqual(test_dict[\"castep_version\"], \"16.1\")\n self.assertEqual(test_dict[\"species_pot\"][\"Na\"], \"Na_00PBE.usp\")\n self.assertEqual(test_dict[\"icsd\"], 10101)\n self.assertEqual(\n test_dict[\"_compiler_architecture\"], \"linux_x86_64_ifort14\"\n )\n self.assertEqual(test_dict[\"_castep_commit\"], \"2756eb6097bf+\")\n\n int_dict, s = castep2dict(\n castep_fname, db=False, intermediates=True, verbosity=VERBOSITY\n )\n for key in test_dict:\n self.assertEqual(test_dict[key], int_dict[key])\n\n self.assertEqual(len(int_dict[\"intermediates\"]), 51)\n\n energies = [\n -1304.222889926,\n -1304.222911722,\n -1304.222930541,\n -1304.222928920,\n -1304.222941837,\n -1304.222959187,\n -1304.222958028,\n -1304.222976388\n ]\n\n for i, energy in enumerate(energies):\n self.assertEqual(int_dict[\"intermediates\"][i][\"total_energy\"], energy)\n\n special_case = -8\n\n for i in range(len(int_dict[\"intermediates\"])):\n self.assertEqual(int_dict[\"intermediates\"][i][\"forces\"], [[0, 0, 0]])\n self.assertEqual(\n int_dict[\"intermediates\"][i][\"positions_frac\"], [[0, 0, 0]]\n )\n self.assertEqual(int_dict[\"intermediates\"][i][\"atom_types\"], [\"Na\"])\n self.assertEqual(\n int_dict[\"intermediates\"][-1][\"total_energy\"], -1304.223019263\n )\n self.assertEqual(\n int_dict[\"intermediates\"][-1][\"smeared_free_energy\"], -1304.233706274\n )\n self.assertEqual(\n int_dict[\"intermediates\"][special_case][\"total_energy\"], -1304.222982442\n )\n self.assertEqual(\n int_dict[\"intermediates\"][special_case][\"smeared_free_energy\"], -1304.233677344\n )\n self.assertEqual(\n int_dict[\"intermediates\"][-1][\"total_energy_per_atom\"], -1304.223019263\n )\n self.assertEqual(\n int_dict[\"intermediates\"][-1][\"smeared_free_energy_per_atom\"],\n -1304.233706274,\n )\n self.assertEqual(\n int_dict[\"intermediates\"][special_case][\"total_energy_per_atom\"], -1304.222982442\n )\n self.assertEqual(\n int_dict[\"intermediates\"][special_case][\"smeared_free_energy_per_atom\"],\n -1304.233677344,\n )\n self.assertEqual(int_dict[\"geom_iter\"], 44)\n\n def test_castep_unoptimised(self):\n castep_fname = REAL_PATH + \"data/castep_files/TiO2_unconverged-MP-10101.castep\"\n failed_open = False\n try:\n f = open(castep_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n castep_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = castep2dict(castep_fname, db=True, verbosity=VERBOSITY)\n self.assertFalse(s, msg=\"Should have failed with db=True, but didn't!\")\n self.assertTrue(\n isinstance(test_dict, Exception),\n msg=\"Should have returned error message!\",\n )\n test_dict, s = castep2dict(castep_fname, db=False, verbosity=VERBOSITY)\n self.assertTrue(s, msg=\"Should have succeeded with db=False, but didn't!\")\n self.assertTrue(\n isinstance(test_dict, dict), msg=\"Should have returned dict!\"\n )\n self.assertEqual(test_dict[\"total_energy\"], -12479.86611705)\n self.assertEqual(test_dict[\"num_atoms\"], 12)\n self.assertEqual(\n test_dict[\"pressure\"], 0.9455, 
msg=\"Failed to read pressure!\"\n )\n self.assertTrue(\n [\"Ti\", 1] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertTrue(\n [\"O\", 2] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertEqual(\n test_dict[\"cell_volume\"], 127.269750, msg=\"Wrong cell volume!\"\n )\n self.assertEqual(test_dict[\"space_group\"], \"Pmmm\")\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][0], 4.026041, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][1], 7.906524, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][2], 3.998172, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][0],\n 90.000000,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][1],\n 90.000000,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][2],\n 90.000000,\n msg=\"Wrong lattice constants!\",\n )\n self.assertEqual(test_dict[\"optimised\"], False)\n self.assertEqual(test_dict[\"geom_force_tol\"], 0.05)\n self.assertEqual(test_dict[\"castep_version\"], \"18.1\")\n self.assertEqual(\n test_dict[\"species_pot\"][\"Ti\"], \"3|1.9|8|9|10|30U:40:31:32(qc=5)\"\n )\n self.assertEqual(\n test_dict[\"species_pot\"][\"O\"], \"2|1.5|12|13|15|20:21(qc=5)\"\n )\n self.assertEqual(test_dict[\"mp_id\"], 10101)\n\n def test_file_not_found(self):\n \"\"\" Ensure that FileNotFound errors fail gracefully. \"\"\"\n error = False\n try:\n res, s = res2dict(\"___not_a_file\")\n except FileNotFoundError:\n error = True\n self.assertTrue(error)\n\n castep_fname = []\n castep_fname += [REAL_PATH + \"data/castep_files/NaP_intermediates.castep\"]\n castep_fname += [REAL_PATH + \"data/___not_a_file\"]\n castep_fname += [REAL_PATH + \"data/KP-castep17.castep\"]\n castep_fname += [REAL_PATH + \"data/Na3Zn4-swap-ReOs-OQMD_759599.castep\"]\n\n error = False\n try:\n cursor, failures = castep2dict(castep_fname, db=True)\n except FileNotFoundError:\n error = True\n\n def test_multiple_exts(self):\n castep_fname = REAL_PATH + \"data/castep_files/Na-edgecase-CollCode10101\"\n test_dict, s = castep2dict(castep_fname, db=True)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n\n castep_fname = REAL_PATH + \"data/castep_files/CuP-thermo-test\"\n test_dict, s = castep2dict(castep_fname, db=False)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n\n def test_history(self):\n castep_fname = (\n REAL_PATH + \"data/castep_files/Na3Zn4-swap-ReOs-OQMD_759599.history\"\n )\n test_dict, s = castep2dict(castep_fname, db=True)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(test_dict[\"source\"][0], castep_fname)\n self.assertEqual(test_dict[\"pressure\"], 0.0763, msg=\"Failed to read pressure!\")\n self.assertEqual(\n test_dict[\"enthalpy\"], -2.15036930e4, msg=\"Failed to read enthalpy!\"\n )\n self.assertEqual(test_dict[\"num_atoms\"], 14, msg=\"Wrong number of atoms!\")\n self.assertTrue(\n [\"Na\", 3] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertTrue(\n [\"Zn\", 4] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertEqual(test_dict[\"cell_volume\"], 288.041941, msg=\"Wrong cell volume!\")\n self.assertEqual(test_dict[\"space_group\"], \"Pm\", msg=\"Wrong space group!\")\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][0], 9.039776, msg=\"Wrong lattice 
constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][1], 9.045651, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][2], 4.068682, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][0], 90, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][1], 90, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][2], 59.971185, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(test_dict[\"geom_force_tol\"], 0.05, msg=\"Wrong geom force tol\")\n self.assertEqual(test_dict[\"castep_version\"], \"16.11\")\n self.assertEqual(test_dict[\"estimated_mem_per_process_MB\"], 345.1)\n\n def test_history_gz(self):\n castep_fname = (\n REAL_PATH + \"data/castep_files/Na3Zn4-swap-ReOs-OQMD_759599.history.gz\"\n )\n test_dict, s = castep2dict(castep_fname, db=True)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(test_dict[\"pressure\"], 0.0763, msg=\"Failed to read pressure!\")\n self.assertEqual(\n test_dict[\"enthalpy\"], -2.15036930e4, msg=\"Failed to read enthalpy!\"\n )\n self.assertEqual(test_dict[\"num_atoms\"], 14, msg=\"Wrong number of atoms!\")\n self.assertTrue(\n [\"Na\", 3] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertTrue(\n [\"Zn\", 4] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertEqual(test_dict[\"cell_volume\"], 288.041941, msg=\"Wrong cell volume!\")\n self.assertEqual(test_dict[\"space_group\"], \"Pm\", msg=\"Wrong space group!\")\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][0], 9.039776, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][1], 9.045651, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][0][2], 4.068682, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][0], 90, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][1], 90, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"][1][2], 59.971185, msg=\"Wrong lattice constants!\"\n )\n self.assertEqual(test_dict[\"geom_force_tol\"], 0.05, msg=\"Wrong geom force tol\")\n self.assertEqual(test_dict[\"castep_version\"], \"16.11\")\n\n def test_castep_intermediates(self):\n castep_fname = REAL_PATH + \"data/castep_files/NaP_intermediates.castep\"\n self.assertTrue(\n os.path.isfile(castep_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n castep_fname\n ),\n )\n test_dict, s = castep2dict(\n castep_fname, db=False, intermediates=True, verbosity=VERBOSITY\n )\n self.assertTrue(s, msg=\"Should have succeeded with db=False, but didn't!\")\n final_dict, s = castep2dict(\n castep_fname, db=True, intermediates=False, verbosity=VERBOSITY\n )\n self.assertTrue(s)\n for key in final_dict:\n self.assertEqual(\n final_dict[key], test_dict[key], msg=\"{} didn't match\".format(key)\n )\n self.assertEqual(test_dict[\"intermediates\"][0][\"total_energy\"], -8537.190779552)\n self.assertEqual(test_dict[\"intermediates\"][1][\"total_energy\"], -8538.161269966)\n self.assertEqual(\n test_dict[\"intermediates\"][-1][\"total_energy\"], -8546.922111847\n )\n self.assertEqual(\n test_dict[\"intermediates\"][0][\"smeared_free_energy\"], -8537.247551883\n )\n self.assertEqual(\n test_dict[\"intermediates\"][1][\"smeared_free_energy\"], -8538.215032441\n )\n 
self.assertEqual(\n test_dict[\"intermediates\"][-1][\"smeared_free_energy\"], -8546.922614706\n )\n self.assertEqual(test_dict[\"geom_iter\"], 70)\n self.assertEqual(len(test_dict[\"intermediates\"]), 148)\n self.assertEqual(test_dict[\"smeared_free_energy\"], -8546.922614706)\n self.assertEqual(final_dict[\"smeared_free_energy\"], -8546.922614706)\n\n def test_castep_parameter_change(self):\n castep_fname = REAL_PATH + \"data/castep_files/input-mzs7x1.castep\"\n self.assertTrue(\n os.path.isfile(castep_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n castep_fname\n ),\n )\n test_dict, s = castep2dict(castep_fname, db=True, verbosity=VERBOSITY)\n self.assertTrue(s)\n self.assertTrue(test_dict[\"optimised\"])\n self.assertEqual(test_dict[\"enthalpy\"], -6.16805339e003)\n self.assertEqual(test_dict[\"total_energy\"], -6168.053386094)\n\n def test_castep_mulliken_scraper(self):\n castep_fname = REAL_PATH + \"data/castep_files/Fe-spin.castep\"\n self.assertTrue(\n os.path.isfile(castep_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n castep_fname\n ),\n )\n test_dict, s = castep2dict(castep_fname, db=False, verbosity=VERBOSITY)\n self.assertTrue(s)\n self.assertEqual(test_dict[\"task\"], \"singlepointenergy\")\n self.assertEqual(test_dict[\"atom_types\"], [\"Fe\", \"Fe\"])\n self.assertAlmostEqual(test_dict[\"integrated_spin_density\"], 4.27207)\n self.assertAlmostEqual(test_dict[\"integrated_mod_spin_density\"], 4.44521)\n self.assertEqual(test_dict[\"mulliken_spins\"], [2.14, 2.14])\n self.assertEqual(test_dict[\"mulliken_net_spin\"], 4.28)\n self.assertEqual(test_dict[\"mulliken_abs_spin\"], 4.28)\n\n def test_castep_beef_scraper(self):\n from matador.utils.chem_utils import HARTREE_TO_EV\n\n castep_fname = REAL_PATH + \"data/beef_files/K3P_BEEF.castep\"\n self.assertTrue(\n os.path.isfile(castep_fname),\n msg=\"Failed to open test case {} - please check installation\".format(\n castep_fname\n ),\n )\n test_dict, s = castep2dict(castep_fname, db=False, verbosity=VERBOSITY)\n self.assertTrue(s)\n self.assertEqual(test_dict[\"task\"], \"singlepointenergy\")\n self.assertEqual(\n test_dict[\"atom_types\"], [\"P\", \"P\", \"K\", \"K\", \"K\", \"K\", \"K\", \"K\"]\n )\n self.assertEqual(len(test_dict[\"_beef\"][\"thetas\"]), 5000)\n self.assertEqual(len(test_dict[\"_beef\"][\"total_energy_per_atom\"]), 5000)\n self.assertAlmostEqual(\n test_dict[\"_beef\"][\"total_energy\"][-1], -1.9029640520e02 * HARTREE_TO_EV\n )\n self.assertAlmostEqual(\n test_dict[\"_beef\"][\"total_energy_per_atom\"][-1],\n -1.9029640520e02 * HARTREE_TO_EV / 8,\n )\n self.assertAlmostEqual(\n test_dict[\"_beef\"][\"mean_total_energy\"], -190.6571830577 * HARTREE_TO_EV\n )\n self.assertAlmostEqual(\n test_dict[\"_beef\"][\"std_dev_total_energy\"], 2.2674151843 * HARTREE_TO_EV\n )\n self.assertAlmostEqual(\n test_dict[\"_beef\"][\"mean_total_energy_per_atom\"],\n -190.6571830577 * HARTREE_TO_EV / 8,\n )\n self.assertAlmostEqual(\n test_dict[\"_beef\"][\"std_dev_total_energy_per_atom\"],\n 2.2674151843 * HARTREE_TO_EV / 8,\n )\n\n def test_castep_encap_scraper(self):\n castep_fname = REAL_PATH + \"data/encap_files/Se.castep\"\n self.assertTrue(\n os.path.isfile(castep_fname),\n msg=\"Failed to find test case {}, please check installation\".format(\n castep_fname\n ),\n )\n test_dict, s = castep2dict(castep_fname, db=False, verbosity=VERBOSITY)\n self.assertTrue(s)\n self.assertEqual(test_dict[\"task\"], \"geometryoptimization\")\n 
self.assertEqual(test_dict[\"atom_types\"], 12 * [\"Se\"])\n self.assertEqual(test_dict[\"encapsulated\"], True)\n self.assertEqual(test_dict[\"cnt_radius\"], 4.69825)\n self.assertEqual(len(test_dict[\"devel_code\"]), 195)\n\n def test_castep_fixed_cell_scraper(self):\n castep_fname = REAL_PATH + \"data/fix_cell_test/TiNb2O7-JVAa6LNI-0K-prim.castep\"\n self.assertTrue(\n os.path.isfile(castep_fname),\n msg=\"Failed to find test case {}, please check installation\".format(\n castep_fname\n ),\n )\n test_dict, s = castep2dict(castep_fname, db=False, verbosity=VERBOSITY)\n self.assertTrue(s)\n self.assertEqual(test_dict[\"positions_frac\"][0], [0.501184, 0.501184, 0.997063])\n self.assertEqual(\n test_dict[\"positions_frac\"][-1], [0.182069, 0.182069, 0.989013]\n )\n self.assertEqual(test_dict[\"lattice_abc\"][0], [10.360830, 10.360830, 11.883000])\n self.assertEqual(\n test_dict[\"lattice_abc\"][1], [119.631200, 119.631200, 21.144990]\n )\n self.assertTrue(test_dict[\"fix_all_cell\"])\n self.assertTrue(\"cell_constraints\" not in test_dict)\n\n\nclass ResScraperTests(MatadorUnitTest):\n def test_res(self):\n failed_open = False\n res_fname = REAL_PATH + \"data/LiPZn-r57des.res\"\n try:\n f = open(res_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n res_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = res2dict(res_fname)\n self.assertTrue(s, \"Failed entirely, oh dear!\")\n self.assertEqual(\n test_dict[\"pressure\"], 0.0106, msg=\"Failed to read pressure!\"\n )\n self.assertEqual(\n test_dict[\"enthalpy\"], -7600.06148, msg=\"Failed to read enthalpy!\"\n )\n self.assertEqual(test_dict[\"num_atoms\"], 8, msg=\"Wrong number of atoms!\")\n self.assertTrue(\n [\"Li\", 1] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertTrue(\n [\"Zn\", 1] in test_dict[\"stoichiometry\"], msg=\"Wrong stoichiometry!\"\n )\n self.assertTrue(\n sorted(test_dict[\"stoichiometry\"]) == test_dict[\"stoichiometry\"],\n msg=\"Wrong stoichiometry!\",\n )\n self.assertEqual(\n test_dict[\"cell_volume\"], 105.918342, msg=\"Wrong cell volume!\"\n )\n self.assertEqual(\n test_dict[\"space_group\"], \"Pmc2_1\", msg=\"Wrong space group!\"\n )\n self.assertEqual(\n test_dict[\"lattice_abc\"],\n [[5.057429, 4.93404, 4.244619], [90.0, 90.0, 90.0]],\n msg=\"Wrong lattice constants!\",\n )\n\n res_fname = (\n REAL_PATH\n + \"data/hull-NaFeP-afh41_new_Na+Fe+P/FeP2-OQMD_2958-CollCode15027-nospin.res\"\n )\n failed_open = False\n try:\n f = open(res_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n res_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = res2dict(res_fname)\n self.assertTrue(s)\n self.assertEqual(test_dict[\"icsd\"], 15027)\n\n res_fname = REAL_PATH + \"data/LiPZn-r57des_bodged.res\"\n failed_open = False\n try:\n f = open(res_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n res_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = res2dict(res_fname)\n self.assertFalse(s, \"This wasn't meant to succeed!\")\n\n def test_c2x_shelx_res(self):\n res_fname = REAL_PATH + \"data/structures/npm.res\"\n res, s = res2dict(res_fname, db=False)\n self.assertTrue(s, msg=\"Failed entirely: 
{}\".format(res))\n\n\nclass ParamScraperTests(MatadorUnitTest):\n \"\"\" Test CASTEP param scrapers. \"\"\"\n\n def test_param(self):\n param_fname = REAL_PATH + \"data/KX.param\"\n failed_open = False\n try:\n f = open(param_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n param_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = param2dict(param_fname, db=True)\n self.assertTrue(s, \"Failed entirely, oh dear!\")\n self.assertEqual(\n test_dict[\"source\"][0].split(\"/\")[-1], \"KX.param\", msg=\"Wrong source!\"\n )\n self.assertEqual(\n test_dict[\"task\"], \"geometryoptimization\", msg=\"Failed to read task!\"\n )\n self.assertEqual(\n test_dict[\"xc_functional\"], \"PBE\", msg=\"Failed to read xc!\"\n )\n self.assertEqual(\n test_dict[\"perc_extra_bands\"], 40.0, msg=\"Failed to read extra bands!\"\n )\n self.assertEqual(\n test_dict[\"cut_off_energy\"], 500, msg=\"Failed to read cut_off_energy\"\n )\n\n test_dict, s = param2dict(param_fname, db=False)\n self.assertTrue(s, \"Failed db=False test entirely, oh dear!\")\n self.assertEqual(\n test_dict[\"source\"][0].split(\"/\")[-1],\n \"KX.param\",\n msg=\"Wrong db=False source!\",\n )\n self.assertEqual(\n test_dict[\"task\"],\n \"geometryoptimization\",\n msg=\"Failed to read db=False task!\",\n )\n self.assertEqual(\n test_dict[\"xc_functional\"], \"PBE\", msg=\"Failed to read db=False xc!\"\n )\n self.assertEqual(\n test_dict[\"fix_occupancy\"],\n False,\n msg=\"Failed to read db=False occupancy!\",\n )\n self.assertEqual(\n test_dict[\"perc_extra_bands\"],\n 40.0,\n msg=\"Failed to read db=False extra bands!\",\n )\n self.assertEqual(\n test_dict[\"geom_max_iter\"], 200, msg=\"Wrong db=False geom_max_iter!\"\n )\n self.assertEqual(\n test_dict[\"fixed_npw\"], False, msg=\"Wrong db=False fixed_npw!\"\n )\n self.assertEqual(\n test_dict[\"write_checkpoint\"],\n \"none\",\n msg=\"Wrong db=False checkpointing!\",\n )\n self.assertEqual(\n test_dict[\"write_cell_structure\"],\n True,\n msg=\"Wrong db=False cell_structure!\",\n )\n\n def test_tricky_param(self):\n param_fname = REAL_PATH + \"data/tricky_param.param\"\n failed_open = False\n try:\n f = open(param_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n param_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = param2dict(param_fname, db=False, debug=True, verbosity=4)\n self.assertTrue(s, \"Failed entirely, oh dear!\")\n self.assertEqual(\n test_dict[\"source\"][0].split(\"/\")[-1],\n \"tricky_param.param\",\n msg=\"Wrong source!\",\n )\n self.assertEqual(\n test_dict[\"task\"],\n \"spectral\",\n msg=\"Failed to read non colon delimited field task!\",\n )\n self.assertEqual(\n test_dict[\"perc_extra_bands\"], 40.0, msg=\"Failed to read extra bands!\"\n )\n self.assertEqual(\n test_dict[\"fix_occupancy\"],\n True,\n msg=\"Failed to read lowercase bool fix_occupancy\",\n )\n self.assertEqual(\n test_dict[\"spin_polarized\"],\n True,\n msg=\"Failed to read Anglicised spelling of polarised\",\n )\n self.assertFalse(\"spin_polarised\" in test_dict)\n self.assertEqual(\n test_dict[\"write_cell_structure\"],\n True,\n msg=\"Failed to read = delimited field write_cell_structure\",\n )\n self.assertEqual(\n test_dict[\"cut_off_energy\"],\n \"50.0 ry\",\n msg=\"Failed to non-eV cut_off_energy.\",\n )\n self.assertEqual(\n 
test_dict[\"devel_code\"],\n \"xc_bee: true\\nxc_bee_rand_seed: 2\\n# including comment\\nxc_bee_num_trials: 100\\n\",\n msg=\"Failed to read devel code\",\n )\n self.assertEqual(len(test_dict), 14)\n\n\nclass ScraperMiscTest(MatadorUnitTest):\n \"\"\" Test miscellaneous other scrapers. \"\"\"\n\n def test_batch_loading(self):\n \"\"\" Test passing a list of files to scraper function, which\n should be handled by decorator.\n\n \"\"\"\n castep_fname = []\n castep_fname += [REAL_PATH + \"data/castep_files/NaP_intermediates.castep\"]\n castep_fname += [\n REAL_PATH + \"data/castep_files/Na-edgecase-CollCode10101.castep\"\n ]\n castep_fname += [REAL_PATH + \"data/castep_files/KP-castep17.castep\"]\n castep_fname += [\n REAL_PATH + \"data/castep_files/Na3Zn4-swap-ReOs-OQMD_759599.castep\"\n ]\n castep_fname += [\n REAL_PATH + \"data/castep_files/TiO2_unconverged-MP-10101.castep\"\n ]\n\n cursor, failures = castep2dict(castep_fname, db=True)\n self.assertEqual(len(cursor), 4)\n self.assertEqual(len(failures), 1)\n\n cursor, failures = castep2dict(\n REAL_PATH + \"data/castep_files/*.castep\", db=True\n )\n self.assertEqual(len(cursor), 5)\n self.assertEqual(len(failures), 3)\n\n res_fname = []\n res_fname += [REAL_PATH + \"data/LiPZn-r57des.res\"]\n res_fname += [REAL_PATH + \"data/LiPZn-r57des_bodged.res\"]\n cursor, failures = res2dict(res_fname, db=True)\n self.assertEqual(len(cursor), 1)\n self.assertEqual(len(failures), 1)\n\n res_fname = []\n res_fname += [REAL_PATH + \"data/LiPZn-r57des.res\"]\n res_fname += [REAL_PATH + \"data/LiPZn-r57des_bodged.res\"]\n with self.assertRaises(Exception):\n cursor, failures = res2dict(res_fname, db=True, fail_fast=True)\n\n def test_phonon_scraper(self):\n phonon_fname = REAL_PATH + \"data/phonon_dispersion/K3P.phonon\"\n self.assertTrue(\n os.path.isfile(phonon_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n phonon_fname\n ),\n )\n ph_dict, s = phonon2dict(phonon_fname, verbosity=VERBOSITY)\n self.assertTrue(s, msg=\"Failed to read phonon file\")\n self.assertEqual(ph_dict[\"num_atoms\"], 8)\n self.assertEqual(ph_dict[\"num_branches\"], 24)\n self.assertEqual(ph_dict[\"num_modes\"], 24)\n self.assertEqual(ph_dict[\"num_kpoints\"], 250)\n self.assertEqual(ph_dict[\"freq_unit\"], \"cm-1\")\n self.assertEqual(ph_dict[\"lattice_cart\"][0], [4.961529, 2.864318, -0.00000])\n self.assertEqual(ph_dict[\"lattice_cart\"][1], [-4.961529, 2.864318, 0.00000])\n self.assertEqual(ph_dict[\"lattice_cart\"][2], [0.000000, 0.000000, 10.127257])\n self.assertEqual(ph_dict[\"positions_frac\"][0], [0.666699, 0.333301, 0.750129])\n self.assertEqual(ph_dict[\"atom_types\"][0], \"P\")\n self.assertEqual(ph_dict[\"atom_types\"][2], \"K\")\n self.assertEqual(ph_dict[\"atom_masses\"][0], 30.97376)\n self.assertEqual(ph_dict[\"atom_masses\"][2], 39.0983)\n self.assertEqual(ph_dict[\"softest_mode_freq\"], -23.654487 * INVERSE_CM_TO_EV)\n\n disp, s = phonon2dict(phonon_fname, verbosity=VERBOSITY, as_model=True)\n self.assertTrue(isinstance(disp, VibrationalDispersion))\n ph_dict[\"kpoint_branches\"] = disp.kpoint_branches\n ph_dict[\"kpoint_path_spacing\"] = disp.kpoint_path_spacing\n self.assertAlmostEqual(ph_dict[\"kpoint_path_spacing\"], 0.021, places=2)\n self.assertEqual(ph_dict[\"kpoint_branches\"][0][0], 0)\n self.assertEqual(ph_dict[\"kpoint_branches\"][0][-1], 35)\n self.assertEqual(ph_dict[\"kpoint_branches\"][1][0], 36)\n self.assertEqual(ph_dict[\"kpoint_branches\"][1][-1], 134)\n 
self.assertEqual(ph_dict[\"kpoint_branches\"][-2][0], 135)\n self.assertEqual(ph_dict[\"kpoint_branches\"][-1][0], 185)\n self.assertEqual(ph_dict[\"kpoint_branches\"][-1][-1], 249)\n\n def test_phonon_scraper_ir(self):\n phonon_fname = REAL_PATH + \"data/phonon_ir/h-BN_IRR.phonon\"\n self.assertTrue(\n os.path.isfile(phonon_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n phonon_fname\n ),\n )\n data, s = phonon2dict(phonon_fname, VERBOSITY=VERBOSITY)\n self.assertTrue(s)\n self.assertTrue(\"infrared_intensity\" in data)\n self.assertTrue(\"raman_intensity\" in data)\n self.assertEqual(\n np.shape(data[\"eigenvalues_q\"]), np.shape(data[\"infrared_intensity\"])\n )\n self.assertEqual(\n np.shape(data[\"eigenvalues_q\"]), np.shape(data[\"raman_intensity\"])\n )\n\n def test_phonon_dos_scraper(self):\n phonon_fname = REAL_PATH + \"data/phonon_dispersion/K3P.phonon_dos\"\n self.assertTrue(\n os.path.isfile(phonon_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n phonon_fname\n ),\n )\n dos_data, s = phonon_dos2dict(phonon_fname)\n self.assertTrue(s)\n self.assertEqual(dos_data[\"source\"], [phonon_fname])\n self.assertEqual(len(dos_data[\"dos\"]), 10001)\n self.assertEqual(len(dos_data[\"energies\"]), 10001)\n self.assertEqual(len(dos_data[\"pdos\"][\"energies\"]), 10001)\n self.assertEqual(len(dos_data[\"pdos\"][\"pdos\"]), 2)\n self.assertEqual(len(dos_data[\"pdos\"][\"projectors\"]), 2)\n self.assertEqual(len(dos_data[\"pdos\"][\"pdos\"][(\"K\", None, None)]), 10001)\n self.assertEqual(len(dos_data[\"pdos\"][\"pdos\"][(\"P\", None, None)]), 10001)\n\n dos, s = phonon_dos2dict(phonon_fname, as_model=True)\n self.assertTrue(isinstance(dos, VibrationalDOS))\n\n def test_optados_dos_scraper(self):\n odo_fname = REAL_PATH + \"data/optados_files/K3P.adaptive.dat\"\n self.assertTrue(\n os.path.isfile(odo_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n odo_fname\n ),\n )\n od_dict, s = optados2dict(odo_fname)\n self.assertTrue(s)\n self.assertEqual(len(od_dict[\"dos\"]), 529)\n self.assertEqual(len(od_dict[\"energies\"]), 529)\n self.assertEqual(od_dict[\"dos_unit_label\"], \"DOS (electrons per eV/A^3)\")\n od, s = optados2dict(odo_fname, as_model=True)\n self.assertTrue(isinstance(od, ElectronicDOS))\n\n def test_optados_pdos_scraper(self):\n odo_fname = REAL_PATH + \"data/optados_files/KP.pdos.dat\"\n failed_open = False\n try:\n f = open(odo_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n odo_fname\n ),\n )\n if not failed_open:\n f.close()\n od_dict, s = optados2dict(odo_fname)\n self.assertTrue(s)\n self.assertEqual(len(od_dict[\"sum_pdos\"]), 53684)\n self.assertEqual(len(od_dict[\"energies\"]), 53684)\n self.assertEqual(od_dict[\"num_projectors\"], 4)\n self.assertEqual(len(od_dict[\"pdos\"][(\"K\", \"s\", None)]), 53684)\n self.assertEqual(len(od_dict[\"pdos\"][(\"K\", \"p\", None)]), 53684)\n self.assertEqual(len(od_dict[\"pdos\"][(\"P\", \"s\", None)]), 53684)\n self.assertEqual(len(od_dict[\"pdos\"][(\"P\", \"p\", None)]), 53684)\n\n def test_optados_spin_pdos_scraper(self):\n odo_fname = REAL_PATH + \"data/optados_files/EDASOS-Cr.pdos.dat\"\n failed_open = False\n try:\n f = open(odo_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n odo_fname\n 
),\n )\n if not failed_open:\n f.close()\n od_dict, s = optados2dict(odo_fname)\n self.assertTrue(s)\n self.assertEqual(len(od_dict[\"sum_pdos\"]), 17366)\n self.assertEqual(len(od_dict[\"energies\"]), 17366)\n self.assertEqual(od_dict[\"num_projectors\"], 10)\n self.assertEqual(len(od_dict[\"pdos\"][(\"H\", None, \"up\")]), 17366)\n self.assertEqual(len(od_dict[\"pdos\"][(\"C\", None, \"up\")]), 17366)\n self.assertEqual(len(od_dict[\"pdos\"][(\"N\", None, \"up\")]), 17366)\n self.assertEqual(len(od_dict[\"pdos\"][(\"Cl\", None, \"up\")]), 17366)\n self.assertEqual(len(od_dict[\"pdos\"][(\"Cr\", None, \"up\")]), 17366)\n self.assertEqual(len(od_dict[\"pdos\"][(\"H\", None, \"down\")]), 17366)\n self.assertEqual(len(od_dict[\"pdos\"][(\"C\", None, \"down\")]), 17366)\n self.assertEqual(len(od_dict[\"pdos\"][(\"N\", None, \"down\")]), 17366)\n self.assertEqual(len(od_dict[\"pdos\"][(\"Cl\", None, \"down\")]), 17366)\n self.assertEqual(len(od_dict[\"pdos\"][(\"Cr\", None, \"down\")]), 17366)\n\n def test_optados_pdis_scraper(self):\n odo_fname = REAL_PATH + \"data/optados_files/Si2.pdis.dat\"\n failed_open = False\n try:\n f = open(odo_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n odo_fname\n ),\n )\n if not failed_open:\n f.close()\n od_dict, s = optados2dict(odo_fname)\n self.assertTrue(s)\n self.assertEqual(len(od_dict[\"kpoints\"]), 166)\n self.assertEqual(od_dict[\"num_kpoints\"], 166)\n self.assertEqual(od_dict[\"num_bands\"], 23)\n self.assertEqual(od_dict[\"num_projectors\"], 4)\n self.assertEqual(np.shape(od_dict[\"projector_weights\"]), (166, 23, 4))\n self.assertEqual(np.shape(od_dict[\"eigenvalues\"]), (166, 23))\n self.assertEqual(od_dict[\"projectors\"][0], (\"Si\", \"s\", None))\n self.assertEqual(od_dict[\"projectors\"][1], (\"Si\", \"p\", None))\n self.assertEqual(od_dict[\"projectors\"][2], (\"Si\", \"d\", None))\n self.assertEqual(od_dict[\"projectors\"][3], (\"Si\", \"f\", None))\n self.assertEqual(od_dict[\"projector_weights\"][0][0][0], 0.99654675)\n self.assertEqual(od_dict[\"eigenvalues\"][0][0], -12.110537)\n self.assertEqual(od_dict[\"eigenvalues\"][0][-1], 24.862777)\n self.assertEqual(od_dict[\"eigenvalues\"][-1][-1], 24.771165)\n self.assertEqual(od_dict[\"projector_weights\"][0][0][-1], 0)\n self.assertEqual(od_dict[\"projector_weights\"][0][-1][1], 0.028667372)\n self.assertEqual(od_dict[\"projector_weights\"][-1][2][1], 0.99444594)\n\n odo_fname = REAL_PATH + \"data/optados_files/graphite.pdis.dat\"\n failed_open = False\n try:\n f = open(odo_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n odo_fname\n ),\n )\n if not failed_open:\n f.close()\n od_dict, s = optados2dict(odo_fname)\n self.assertTrue(s)\n self.assertEqual(len(od_dict[\"kpoints\"]), 942)\n self.assertEqual(od_dict[\"num_kpoints\"], 942)\n self.assertEqual(od_dict[\"num_bands\"], 30)\n self.assertEqual(od_dict[\"num_projectors\"], 4)\n self.assertEqual(np.shape(od_dict[\"projector_weights\"]), (942, 30, 4))\n self.assertEqual(np.shape(od_dict[\"eigenvalues\"]), (942, 30))\n self.assertEqual(od_dict[\"projectors\"][0], (\"C\", \"s\", None))\n self.assertEqual(od_dict[\"projectors\"][1], (\"C\", \"p\", None))\n self.assertEqual(od_dict[\"projectors\"][2], (\"C\", \"d\", None))\n self.assertEqual(od_dict[\"projectors\"][3], (\"C\", \"f\", None))\n 
self.assertEqual(od_dict[\"projector_weights\"][29][3][1], 0.85401752)\n self.assertEqual(od_dict[\"projector_weights\"][30][3][1], 0.84705066)\n self.assertEqual(od_dict[\"projector_weights\"][31][3][1], 0.84004878)\n self.assertEqual(od_dict[\"projector_weights\"][32][3][1], 0.83310338)\n self.assertEqual(od_dict[\"projector_weights\"][33][3][1], 0.82617687)\n self.assertEqual(od_dict[\"projector_weights\"][34][3][1], 0.81927189)\n self.assertEqual(od_dict[\"projector_weights\"][35][3][1], 0.81239121)\n self.assertEqual(od_dict[\"projector_weights\"][36][3][1], 0.80304369)\n self.assertEqual(od_dict[\"projector_weights\"][37][3][1], 0.79613539)\n\n def test_arbitrary_scraper(self):\n odi_fname = REAL_PATH + \"data/optados_files/testcase.odi\"\n od_dict, s = arbitrary2dict(odi_fname)\n self.assertEqual(od_dict[\"pdispersion\"], \"species\")\n self.assertEqual(od_dict[\"adaptive_smearing\"], \"1\")\n self.assertEqual(od_dict[\"set_efermi_zero\"], \"True\")\n self.assertEqual(od_dict[\"dos_per_volume\"], \"True\")\n self.assertEqual(od_dict[\"broadening\"], \"adaptive\")\n self.assertEqual(od_dict[\"dos_spacing\"], \"0.01\")\n self.assertEqual(od_dict[\"task\"], \"pdispersion\")\n self.assertTrue(od_dict[\"source\"][0].endswith(\"testcase.odi\"))\n self.assertEqual(len(od_dict[\"source\"]), 1)\n self.assertEqual(len(od_dict), 8)\n\n def test_bands(self):\n from matador.utils.chem_utils import HARTREE_TO_EV\n\n bands_fname = REAL_PATH + \"data/bands_files/KPSn.bands\"\n bs_dict, s = bands2dict(bands_fname)\n self.assertTrue(s, msg=bs_dict)\n self.assertEqual(len(bs_dict[\"kpoint_path\"]), 518)\n self.assertEqual(np.shape(bs_dict[\"eigs_s_k\"]), (1, 71, 518))\n self.assertEqual(bs_dict[\"num_kpoints\"], 518)\n self.assertEqual(bs_dict[\"num_bands\"], 71)\n self.assertAlmostEqual(bs_dict[\"fermi_energy\"], 4.0781, places=4)\n self.assertAlmostEqual(bs_dict[\"spin_fermi_energy\"][0], 4.0781, places=4)\n\n dispersion = ElectronicDispersion(bs_dict)\n self.assertLessEqual(dispersion.kpoint_path_spacing, 0.01)\n self.assertGreaterEqual(dispersion.kpoint_path_spacing, 0.009)\n self.assertAlmostEqual(dispersion.band_gap, 0.760001, places=4)\n self.assertAlmostEqual(dispersion.spin_band_gap[0], 0.760001, places=4)\n self.assertEqual(dispersion.band_gap_path_inds, [246, 235])\n\n bands_fname = REAL_PATH + \"data/bands_files/KPSn_2.bands\"\n bs_dict, s = bands2dict(bands_fname)\n self.assertTrue(s)\n self.assertEqual(len(bs_dict[\"kpoint_path\"]), 28)\n self.assertEqual(np.shape(bs_dict[\"eigs_s_k\"]), (1, 71, 28))\n self.assertEqual(bs_dict[\"num_kpoints\"], 28)\n self.assertEqual(bs_dict[\"num_bands\"], 71)\n\n dispersion = ElectronicDispersion(bs_dict)\n self.assertAlmostEqual(dispersion.fermi_energy, 4.0781, places=4)\n self.assertLessEqual(dispersion.kpoint_path_spacing, 0.3)\n self.assertGreaterEqual(dispersion.kpoint_path_spacing, 0.29)\n self.assertEqual(len(dispersion.kpoint_branches), 2)\n self.assertEqual(\n dispersion[\"band_gap_path_inds\"], dispersion[\"direct_gap_path_inds\"]\n )\n self.assertAlmostEqual(\n bs_dict[\"eigs_s_k\"][0][0][0], -0.99624287 * HARTREE_TO_EV, places=4\n )\n self.assertAlmostEqual(\n bs_dict[\"eigs_s_k\"][-1][-1][-1], 0.74794320 * HARTREE_TO_EV, places=4\n )\n\n bands_fname = REAL_PATH + \"data/bands_files/spin_polarised.bands\"\n bs_dict, s = bands2dict(bands_fname)\n self.assertTrue(s)\n self.assertEqual(len(bs_dict[\"kpoint_path\"]), 51)\n self.assertEqual(np.shape(bs_dict[\"eigs_s_k\"]), (2, 462, 51))\n self.assertEqual(bs_dict[\"num_kpoints\"], 51)\n 
self.assertEqual(bs_dict[\"num_bands\"], 462)\n dispersion = ElectronicDispersion(bs_dict)\n self.assertAlmostEqual(bs_dict[\"fermi_energy\"], 6.7507, places=4)\n self.assertAlmostEqual(dispersion.spin_fermi_energy[0], 6.7507, places=4)\n self.assertAlmostEqual(dispersion.spin_fermi_energy[1], 6.7507, places=4)\n self.assertLessEqual(dispersion.kpoint_path_spacing, 0.03)\n self.assertGreaterEqual(dispersion.kpoint_path_spacing, 0.01)\n self.assertEqual(len(dispersion[\"kpoint_branches\"]), 1)\n self.assertAlmostEqual(\n bs_dict[\"eigs_s_k\"][0][0][0], -1.84888124 * HARTREE_TO_EV, places=4\n )\n self.assertAlmostEqual(\n bs_dict[\"eigs_s_k\"][1][0][0], -1.84666287 * HARTREE_TO_EV, places=4\n )\n self.assertAlmostEqual(\n bs_dict[\"eigs_s_k\"][-1][-1][-1], 0.64283955 * HARTREE_TO_EV, places=4\n )\n self.assertAlmostEqual(\n bs_dict[\"eigs_s_k\"][0][-1][-1], 0.63571135 * HARTREE_TO_EV, places=4\n )\n bs, s = bands2dict(bands_fname, as_model=True)\n self.assertTrue(isinstance(bs, ElectronicDispersion))\n\n def test_qe_magres(self):\n magres_fname = REAL_PATH + \"data/magres_files/NaP_QE6.magres\"\n magres_dict, s = magres2dict(magres_fname, as_model=True)\n self.assertTrue(s)\n self.assertEqual(len(magres_dict[\"atom_types\"]), 4)\n np.testing.assert_array_equal(\n magres_dict[\"lattice_cart\"],\n np.array(\n [\n [-2.503686, 2.503686, 3.540961],\n [2.503686, -2.503686, 3.540961],\n [2.503686, 2.503686, -3.540961],\n ]\n ),\n )\n\n self.assertEqual(magres_dict[\"magres_units\"][\"ms\"], \"ppm\")\n self.assertEqual(magres_dict[\"magres_units\"][\"lattice\"], \"Angstrom\")\n self.assertEqual(magres_dict[\"magres_units\"][\"atom\"], \"Angstrom\")\n self.assertEqual(magres_dict[\"magres_units\"][\"sus\"], \"10^-6.cm^3.mol^-1\")\n\n np.testing.assert_almost_equal(\n magres_dict[\"susceptibility_tensor\"],\n [\n [-2.3100, 0.0000, -0.0000],\n [-0.0000, -2.3100, -0.0000],\n [0.0000, -0.0000, 1.4354],\n ],\n )\n np.testing.assert_almost_equal(\n magres_dict[\"chemical_shielding_isos\"],\n [518.15, 467.61, 467.61, 275.34],\n decimal=2,\n )\n\n self.assertEqual(magres_dict[\"calculator\"], \"QE-GIPAW\")\n\n def test_castep_magres(self):\n magres_fname = REAL_PATH + \"data/magres_files/LiP_CASTEP18.magres\"\n magres_crystal, s = magres2dict(magres_fname, as_model=True)\n self.assertTrue(s)\n self.assertEqual(len(magres_crystal[\"atom_types\"]), 20)\n np.testing.assert_array_equal(\n magres_crystal.lattice_cart,\n np.array(\n [\n [4.1332870000000002, 0.0000000000000000, 0.0000000000000000],\n [-8.9905292805212659e-4, 6.0637949333506347, 0.0000000000000000],\n [2.0677013018922552, 3.3924745014331725e-1, 12.368724395669441],\n ],\n ),\n )\n\n np.testing.assert_almost_equal(\n magres_crystal[\"chemical_shielding_isos\"],\n [\n 83.7,\n 84.3,\n 83.4,\n 86.6,\n 83.3,\n 85.1,\n 84.4,\n 83.8,\n 82.8,\n 83.6,\n 84.9,\n 84.9,\n 83.6,\n 82.7,\n 85.1,\n 350.0,\n 500.3,\n 353.3,\n 530.9,\n 531.2,\n ],\n decimal=1,\n )\n np.testing.assert_almost_equal(\n magres_crystal[\"chemical_shift_anisos\"],\n [\n 9.4,\n 4.4,\n 8.1,\n 2.9,\n 8.1,\n 3.4,\n 4.7,\n 9.1,\n 10.1,\n -9.5,\n 8.7,\n 8.8,\n -9.6,\n 10.4,\n 3.4,\n -393.0,\n 162.7,\n -391.2,\n 223.9,\n 224.0,\n ],\n decimal=1,\n )\n np.testing.assert_almost_equal(\n magres_crystal[\"chemical_shift_asymmetries\"],\n [\n 0.33,\n 0.76,\n 0.19,\n 0.46,\n 0.21,\n 0.84,\n 0.65,\n 0.32,\n 0.11,\n 0.92,\n 0.85,\n 0.86,\n 0.91,\n 0.11,\n 0.92,\n 0.48,\n 0.95,\n 0.47,\n 0.59,\n 0.61,\n ],\n decimal=2,\n )\n\n for ind, atom in enumerate(magres_crystal):\n self.assertEqual(\n 
atom[\"chemical_shielding_iso\"],\n magres_crystal[\"chemical_shielding_isos\"][ind],\n )\n self.assertEqual(\n atom[\"chemical_shift_aniso\"],\n magres_crystal[\"chemical_shift_anisos\"][ind],\n )\n self.assertEqual(\n atom[\"chemical_shift_asymmetry\"],\n magres_crystal[\"chemical_shift_asymmetries\"][ind],\n )\n\n self.assertEqual(magres_crystal[\"calculator\"], \"CASTEP\")\n self.assertEqual(magres_crystal[\"calculator_version\"], \"18.1\")\n\n self.assertEqual(magres_crystal[\"magres_units\"][\"ms\"], \"ppm\")\n self.assertEqual(magres_crystal[\"magres_units\"][\"lattice\"], \"Angstrom\")\n self.assertEqual(magres_crystal[\"magres_units\"][\"atom\"], \"Angstrom\")\n\n def test_castep_magres_efg(self):\n magres_fname = REAL_PATH + \"data/magres_files/Al2O3.magres\"\n magres_crystal, s = magres2dict(magres_fname, as_model=False, verbosity=5)\n self.assertTrue(s)\n # self.assertEqual(len(magres_crystal[\"atom_types\"]), 40)\n # np.testing.assert_array_equal(\n # magres_crystal.lattice_cart,\n # np.array(\n # [\n # [4.1332870000000002, 0.0000000000000000, 0.0000000000000000],\n # [-8.9905292805212659e-4, 6.0637949333506347, 0.0000000000000000],\n # [2.0677013018922552, 3.3924745014331725e-1, 12.368724395669441],\n # ],\n # ),\n # )\n\n np.testing.assert_almost_equal(\n magres_crystal[\"chemical_shielding_isos\"],\n [\n 161.853,\n 183.025,\n 172.432,\n 172.432,\n 167.275,\n 156.212,\n 156.212,\n 179.976,\n 182.155,\n 182.155,\n 182.817,\n 189.255,\n 189.255,\n 182.817,\n 182.155,\n 182.155,\n 179.976,\n 156.212,\n 156.212,\n 167.275,\n 172.432,\n 172.432,\n 183.025,\n 161.853,\n 543.580,\n 543.580,\n 487.296,\n 539.563,\n 489.973,\n 541.225,\n 540.768,\n 540.768,\n 483.439,\n 483.439,\n 540.768,\n 540.768,\n 541.225,\n 489.973,\n 539.563,\n 487.296,\n ],\n decimal=2,\n )\n np.testing.assert_almost_equal(\n magres_crystal[\"chemical_shift_anisos\"],\n [\n -17.8527,\n 11.7056,\n -10.1521,\n -10.1521,\n 17.2892,\n -22.7995,\n -22.7995,\n 8.9808,\n 10.5535,\n 10.5535,\n 7.8228,\n 13.6391,\n 13.6391,\n 7.8228,\n 10.5535,\n 10.5535,\n 8.9808,\n -22.7995,\n -22.7995,\n 17.2892,\n -10.1521,\n -10.1521,\n 11.7056,\n -17.8527,\n -22.4242,\n -22.4242,\n -41.4922,\n -22.2261,\n 46.6031,\n -18.5085,\n -19.0290,\n -19.0290,\n -14.2300,\n -14.2300,\n -19.0290,\n -19.0290,\n -18.5085,\n 46.6031,\n -22.2261,\n -41.4922,\n ],\n decimal=2,\n )\n np.testing.assert_almost_equal(\n magres_crystal[\"chemical_shift_asymmetries\"],\n [\n 0.5984,\n 0.3471,\n 0.1803,\n 0.1803,\n 0.2218,\n 0.7010,\n 0.7010,\n 0.2946,\n 0.4797,\n 0.4797,\n 0.7501,\n 0.0757,\n 0.0757,\n 0.7501,\n 0.4797,\n 0.4797,\n 0.2946,\n 0.7010,\n 0.7010,\n 0.2218,\n 0.1803,\n 0.1803,\n 0.3471,\n 0.5984,\n 0.4436,\n 0.4436,\n 0.5404,\n 0.8069,\n 0.1034,\n 0.3005,\n 0.4336,\n 0.4336,\n 0.1165,\n 0.1165,\n 0.4336,\n 0.4336,\n 0.3005,\n 0.1034,\n 0.8069,\n 0.5404,\n ],\n decimal=2,\n )\n\n np.testing.assert_almost_equal(\n magres_crystal[\"quadrupolar_asymmetries\"],\n [\n 0.5745,\n 0.0775,\n 0.5651,\n 0.5651,\n 0.2482,\n 0.4066,\n 0.4066,\n 0.1503,\n 0.4494,\n 0.4494,\n 0.3889,\n 0.0946,\n 0.0946,\n 0.3889,\n 0.4494,\n 0.4494,\n 0.1503,\n 0.4066,\n 0.4066,\n 0.2482,\n 0.5651,\n 0.5651,\n 0.0775,\n 0.5745,\n 0.7239,\n 0.7239,\n 0.1168,\n 0.9321,\n 0.0885,\n 0.6130,\n 0.8278,\n 0.8278,\n 0.0189,\n 0.0189,\n 0.8278,\n 0.8278,\n 0.6130,\n 0.0885,\n 0.9321,\n 0.1168,\n ],\n decimal=2,\n )\n\n np.testing.assert_almost_equal(\n magres_crystal[\"quadrupolar_couplings\"],\n [\n 4.3343,\n -2.1919,\n 4.1313,\n 4.1313,\n -2.1926,\n -2.3873,\n -2.3873,\n 
-1.7451,\n 3.8329,\n 3.8329,\n 3.7332,\n -1.5063,\n -1.5063,\n 3.7332,\n 3.8329,\n 3.8329,\n -1.7451,\n -2.3873,\n -2.3873,\n -2.1926,\n 4.1313,\n 4.1313,\n -2.1919,\n 4.3343,\n 5.3374,\n 5.3374,\n 8.3891,\n -5.5409,\n -7.8892,\n 5.1095,\n 4.8178,\n 4.8178,\n 2.3583,\n 2.3583,\n 4.8178,\n 4.8178,\n 5.1095,\n -7.8892,\n -5.5409,\n 8.3891,\n ],\n decimal=2,\n )\n self.assertEqual(magres_crystal[\"calculator\"], \"CASTEP\")\n self.assertEqual(magres_crystal[\"calculator_version\"], \"19.1\")\n\n self.assertEqual(magres_crystal[\"magres_units\"][\"ms\"], \"ppm\")\n self.assertEqual(magres_crystal[\"magres_units\"][\"efg\"], \"au\")\n\n def test_pwscfout(self):\n pwout_fname = REAL_PATH + \"data/NaP.out\"\n pwout_dict, s = pwout2dict(pwout_fname)\n self.assertTrue(s)\n self.assertEqual(len(pwout_dict[\"atom_types\"]), 14)\n self.assertEqual(pwout_dict[\"num_atoms\"], 14)\n self.assertTrue(\n pwout_dict[\"lattice_cart\"][0] == [5.887513122, 0.011925355, 0.011971927]\n )\n self.assertTrue(\n pwout_dict[\"lattice_cart\"][1] == [0.605472370, 5.817169640, -0.011329548]\n )\n self.assertTrue(\n pwout_dict[\"lattice_cart\"][2] == [-4.543028478, 0.450282751, 10.044268095]\n )\n self.assertTrue(pwout_dict[\"source\"][0].endswith(\"NaP.out\"))\n\n self.assertEqual(pwout_dict[\"pressure\"], 0)\n from matador.utils.chem_utils import RY_TO_EV\n\n np.testing.assert_equal(pwout_dict[\"enthalpy\"], -RY_TO_EV * 97.6314378617)\n np.testing.assert_array_almost_equal(\n pwout_dict[\"positions_frac\"][5], [0.779038368, 0.580790316, 0.631222097]\n )\n\n def test_usp(self):\n self.assertEqual(\n usp2dict(REAL_PATH + \"data/K_OTF.usp\")[\"K\"],\n \"2|1.5|9|10|11|30U:40:31(qc=6)\",\n msg=\"Failed to scrape K_OTF.usp file\",\n )\n self.assertEqual(\n usp2dict(REAL_PATH + \"data/P_OTF.usp\")[\"P\"],\n \"3|1.8|4|4|5|30:31:32\",\n msg=\"Failed to scrape P_OTF.usp file\",\n )\n self.assertEqual(\n usp2dict(REAL_PATH + \"data/Sn_OTF.usp\")[\"Sn\"],\n \"2|2|2|1.6|9.6|10.8|11.7|50U=-0.395U=+0.25:51U=-0.14U=+0.25\",\n msg=\"Failed to scrape Sn_OTF.usp file\",\n )\n\n def test_seed_metadata_scrape(self):\n doc = {}\n seed = \"blah/blah/blah4/AgBiI4-spinel-Config5-DOI-10.17638__datacat.liverpool.ac.uk__240\"\n get_seed_metadata(doc, seed)\n self.assertEqual(doc[\"doi\"], \"10.17638/datacat.liverpool.ac.uk/240\")\n doc = {}\n seed = \"blah/blah/blah4/AgBiI4-spinel-Config5-CollCode123456-from_polish_swaps_garbage\"\n get_seed_metadata(doc, seed)\n self.assertEqual(doc[\"icsd\"], 123456)\n doc = {}\n seed = \"blah/blah/blah4/AgBiI4-spinel-Config5-CollCode-123456-from_polish_swaps_garbage\"\n get_seed_metadata(doc, seed)\n self.assertEqual(doc[\"icsd\"], 123456)\n doc = {}\n seed = \"blah/blah/blah4/AgBiI4-spinel-Config5-ICSD-123456-from_polish_swaps_garbage\"\n get_seed_metadata(doc, seed)\n self.assertEqual(doc[\"icsd\"], 123456)\n doc = {}\n seed = \"blah/blah/blah4/AgBiI4-spinel-Config5-MP-123456-blah-SnPQ\"\n get_seed_metadata(doc, seed)\n self.assertEqual(doc[\"mp_id\"], 123456)\n\n def test_thermo_castep(self):\n castep_fname = REAL_PATH + \"data/CuP-thermo-test.castep\"\n test_dict, s = castep2dict(castep_fname, db=False, verbosity=VERBOSITY)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(\n test_dict[\"task\"].lower(),\n \"thermodynamicscalculation\",\n msg=\"This is not a Thermodynamics calculation...\",\n )\n self.assertEqual(\n test_dict[\"thermo_temp_final\"], 1000.0, msg=\"Wrong final temp!\"\n )\n self.assertEqual(test_dict[\"thermo_temp_init\"], 50.0, msg=\"Wrong 
initial temp!\")\n self.assertEqual(\n test_dict[\"thermo_temp_spacing\"], 100.0, msg=\"Wrong temp spacing!\"\n )\n self.assertEqual(\n test_dict[\"thermo_num_temp_vals\"], 11, msg=\"Wrong number of temps!\"\n )\n self.assertEqual(\n test_dict[\"thermo_zero_point_energy\"],\n 0.093412,\n msg=\"Wrong zero point energy!\",\n )\n\n thermo_db_compare = {\n \"thermo_temps\": [\n 50.0,\n 145.0,\n 240.0,\n 335.0,\n 430.0,\n 525.0,\n 620.0,\n 715.0,\n 810.0,\n 905.0,\n 1000.0,\n ],\n \"thermo_enthalpy\": [\n 0.098557,\n 0.142535,\n 0.204959,\n 0.273022,\n 0.343308,\n 0.414672,\n 0.486634,\n 0.558962,\n 0.63153,\n 0.704262,\n 0.777113,\n ],\n \"thermo_free_energy\": [\n 0.089968,\n 0.050865,\n -0.025747,\n -0.128941,\n -0.252035,\n -0.390909,\n -0.542824,\n -0.705838,\n -0.878507,\n -1.059717,\n -1.248581,\n ],\n \"thermo_entropy\": [\n 16.573,\n 60.998,\n 92.749,\n 115.772,\n 133.586,\n 148.051,\n 160.206,\n 170.678,\n 179.872,\n 188.064,\n 195.45,\n ],\n \"thermo_heat_cap\": [\n 24.686,\n 57.799,\n 67.215,\n 70.549,\n 72.047,\n 72.836,\n 73.301,\n 73.596,\n 73.795,\n 73.936,\n 74.039,\n ],\n }\n\n for num, i in enumerate(test_dict[\"thermo_temps\"]):\n self.assertEqual(\n i,\n thermo_db_compare[\"thermo_temps\"][num],\n msg=\"Wrong temperature %f\" % test_dict[\"thermo_temps\"][num],\n )\n self.assertEqual(\n test_dict[\"thermo_enthalpy\"][i],\n thermo_db_compare[\"thermo_enthalpy\"][num],\n msg=\"Wrong enthalpy %f\" % test_dict[\"thermo_enthalpy\"][i],\n )\n self.assertEqual(\n test_dict[\"thermo_free_energy\"][i],\n thermo_db_compare[\"thermo_free_energy\"][num],\n msg=\"Wrong free energy %f\" % test_dict[\"thermo_free_energy\"][i],\n )\n self.assertEqual(\n test_dict[\"thermo_entropy\"][i],\n thermo_db_compare[\"thermo_entropy\"][num],\n msg=\"Wrong entropy %f\" % test_dict[\"thermo_entropy\"][i],\n )\n self.assertEqual(\n test_dict[\"thermo_heat_cap\"][i],\n thermo_db_compare[\"thermo_heat_cap\"][num],\n msg=\"Wrong heat capacity %f\" % test_dict[\"thermo_heat_cap\"][i],\n )\n\n self.assertEqual(len(test_dict[\"phonon_fine_kpoint_list\"]), 310)\n self.assertEqual(np.shape(test_dict[\"eigs_q\"]), (1, 9, 310))\n\n def test_fortran_e100_bug(self):\n \"\"\" Test whether the scraper handles improperly formatted floats\n by Fortran when e.g. exponent < -99.\n\n \"\"\"\n optados_fname = REAL_PATH + \"data/fortran_e100_bug/fortran_e100_bug.pdis.dat\"\n pdis, s = optados2dict(optados_fname, verbosity=VERBOSITY)\n self.assertTrue(s)\n\n cell_fname = REAL_PATH + \"data/fortran_e100_bug/fortran_e100_bug.cell\"\n pdis, s = cell2dict(cell_fname, db=False, lattice=True, verbosity=VERBOSITY)\n self.assertTrue(s)\n\n\nclass CifTests(MatadorUnitTest):\n \"\"\" These tests check the cif scraper for correctness. \"\"\"\n\n def test_cif_primitive(self):\n cif_fname = REAL_PATH + \"data/cif_files/primitive.cif\"\n self.assertTrue(\n os.path.isfile(cif_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n cif_fname\n ),\n )\n test_dict, s = cif2dict(cif_fname, verbosity=VERBOSITY)\n self.assertTrue(s, \"Failed entirely, oh dear! 
{}\".format(test_dict))\n self.assertEqual(test_dict[\"num_atoms\"], 1)\n self.assertListEqual(test_dict[\"atom_types\"], [\"Si\"])\n\n def test_cif_partial_occ(self):\n cif_fname = REAL_PATH + \"data/cif_files/AgBiI.cif\"\n\n failed_open = False\n try:\n f = open(cif_fname, \"r\")\n except Exception:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n cif_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = cif2dict(cif_fname, verbosity=VERBOSITY)\n self.assertTrue(s, \"Failed entirely, oh dear! {}\".format(test_dict))\n self.assertAlmostEqual(\n test_dict[\"num_atoms\"],\n 46.623999999999995,\n msg=\"Failed to read num_atoms!\",\n places=5,\n )\n Bi_ratio = [\n elem[1] for elem in test_dict[\"stoichiometry\"] if elem[0] == \"Bi\"\n ][0]\n I_ratio = [\n elem[1] for elem in test_dict[\"stoichiometry\"] if elem[0] == \"I\"\n ][0]\n self.assertEqual(I_ratio / Bi_ratio, 4)\n self.assertAlmostEqual(\n test_dict[\"cell_volume\"],\n 1826.0028753,\n msg=\"Wrong cell volume!\",\n places=3,\n )\n self.assertEqual(\n test_dict[\"space_group\"], \"Fd-3m\", msg=\"Wrong space group!\"\n )\n self.assertEqual(len(test_dict[\"atom_types\"]), 64)\n self.assertEqual(len(test_dict[\"positions_frac\"]), 64)\n self.assertEqual(len(test_dict[\"site_occupancy\"]), 64)\n\n def test_malicious_cif(self):\n cif_fname = REAL_PATH + \"data/cif_files/malicious.cif\"\n failed_open = False\n try:\n f = open(cif_fname, \"r\")\n except FileNotFoundError:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n cif_fname\n ),\n )\n\n with self.assertRaises(RuntimeError):\n if not failed_open:\n f.close()\n test_dict, s = cif2dict(cif_fname, verbosity=VERBOSITY)\n raise test_dict\n\n def test_high_symmetry_cif(self):\n cif_fname = REAL_PATH + \"data/cif_files/SiO_n001_CollCode1109.cif\"\n failed_open = False\n try:\n f = open(cif_fname, \"r\")\n except FileNotFoundError:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n cif_fname\n ),\n )\n if not failed_open:\n f.close()\n test_dict, s = cif2dict(cif_fname, verbosity=VERBOSITY)\n self.assertTrue(s, \"Failed entirely, oh dear! {}\".format(test_dict))\n self.assertAlmostEqual(\n test_dict[\"cell_volume\"], 2110.2, msg=\"Wrong cell volume!\", places=1\n )\n self.assertAlmostEqual(\n test_dict[\"lattice_abc\"][0], [18.4940, 4.991, 23.758], places=3\n )\n self.assertAlmostEqual(\n test_dict[\"lattice_abc\"][1], [90, 105.79, 90], places=3\n )\n self.assertEqual(len(test_dict[\"atom_types\"]), 144)\n self.assertEqual(test_dict[\"num_atoms\"], 144)\n self.assertEqual(len(test_dict[\"positions_frac\"]), 144)\n self.assertEqual(len(test_dict[\"site_occupancy\"]), 144)\n self.assertEqual(\n sum(test_dict[\"site_multiplicity\"]), test_dict[\"num_atoms\"]\n )\n\n def test_problematic_cif(self):\n cif_fname = REAL_PATH + \"data/cif_files/SiO_n002_CollCode62404.cif\"\n failed_open = False\n try:\n f = open(cif_fname, \"r\")\n except FileNotFoundError:\n failed_open = True\n self.assertFalse(\n failed_open,\n msg=\"Failed to open test case {} - please check installation.\".format(\n cif_fname\n ),\n )\n f.close()\n test_dict, s = cif2dict(cif_fname, verbosity=VERBOSITY)\n self.assertTrue(s, \"Failed entirely, oh dear! 
{}\".format(test_dict))\n self.assertEqual(sum(test_dict[\"site_multiplicity\"]), test_dict[\"num_atoms\"])\n self.assertEqual(sum(test_dict[\"site_occupancy\"]), test_dict[\"num_atoms\"])\n self.assertEqual(len(test_dict[\"positions_frac\"]), test_dict[\"num_atoms\"])\n\n def test_big_cif(self):\n cif_fname = REAL_PATH + \"data/cif_files/1000001.cif\"\n self.assertTrue(\n os.path.isfile(cif_fname),\n msg=\"Failed to open test case {} - please check installation.\".format(\n cif_fname\n ),\n )\n\n cif, s = cif2dict(cif_fname)\n self.assertTrue(s)\n self.assertEqual(\n cif[\"stoichiometry\"], [[\"C\", 107], [\"H\", 142], [\"N\", 14], [\"O\", 26]]\n )\n self.assertEqual(cif[\"space_group\"], \"P2_12_12_1\")\n self.assertAlmostEqual(cif[\"cell_volume\"], 11309.1, places=1)\n self.assertEqual(cif[\"num_atoms\"], 1156)\n self.assertEqual(len(cif[\"positions_frac\"]), 1156)\n\n def test_tricky_cif_loops(self):\n from matador.scrapers.cif_scraper import _cif_parse_loop\n\n data_block = \"\"\"'C ' 0.0170 0.0090 2.3100 20.8439 1.0200 10.2075 1.5886 0.5687 0.8650 51.6512\n0.2156 International_Tables_Vol_IV_Table_2.2B\n'H ' 0.0000 0.0000 0.4930 10.5109 0.3229 26.1257 0.1402 3.1424 0.0408 57.7997\n0.0030 International_Tables_Vol_IV_Table_2.2B\n'N ' 0.0290 0.0180 12.2126 0.0057 3.1322 9.8933 2.0125 28.9975 1.1663 0.5826\n-11.5290 International_Tables_Vol_IV_Table_2.2B\n'O ' 0.0470 0.0320 3.0485 13.2771 2.2868 5.7011 1.5463 0.3239 0.8670 32.9089\n0.2508 International_Tables_Vol_IV_Table_2.2B\n\"\"\"\n\n keys = [\n \"_atom_type_symbol\",\n \"_atom_type_scat_dispersion_real\",\n \"_atom_type_scat_dispersion_imag\",\n \"_atom_type_scat_Cromer_Mann_a1\",\n \"_atom_type_scat_Cromer_Mann_b1\",\n \"_atom_type_scat_Cromer_Mann_a2\",\n \"_atom_type_scat_Cromer_Mann_b2\",\n \"_atom_type_scat_Cromer_Mann_a3\",\n \"_atom_type_scat_Cromer_Mann_b3\",\n \"_atom_type_scat_Cromer_Mann_a4\",\n \"_atom_type_scat_Cromer_Mann_b4\",\n \"_atom_type_scat_Cromer_Mann_c\",\n \"_atom_type_scat_source\",\n ]\n\n loop_dict = _cif_parse_loop(keys, data_block)\n self.assertListEqual(loop_dict[\"_atom_type_symbol\"], [\"C\", \"H\", \"N\", \"O\"])\n self.assertListEqual(\n loop_dict[\"_atom_type_scat_source\"],\n 4 * [\"International_Tables_Vol_IV_Table_2.2B\"],\n )\n\n data_block = \"\"\"\n 'Bi' 'Bi' -4.1077 10.2566\n 'International Tables Vol C Tables 4.2.6.8 and 6.1.1.4'\n 'C' 'C' 0.0033 0.0016 'International Tables Vol C Tables 4.2.6.8 and 6.1.1.4'\n 'Co' 'Co' 0.3494 0.9721 'International Tables Vol C Tables 4.2.6.8 and 6.1.1.4'\n 'N' 'N' 0.0061 0.0033 'International Tables Vol C Tables 4.2.6.8 and 6.1.1.4'\n 'O' 'O' 0.0106 0.0060 'International Tables Vol C Tables 4.2.6.8 and 6.1.1.4'\n 'S' 'S' 0.1246 0.1234 'International Tables Vol C Tables 4.2.6.8 and 6.1.1.4'\n \"\"\"\n keys = [\n \"_atom_type_symbol\",\n \"_atom_type_description\",\n \"_atom_type_scat_dispersion_real\",\n \"_atom_type_scat_dispersion_imag\",\n \"_atom_type_scat_source\",\n ]\n\n loop_dict = _cif_parse_loop(keys, data_block)\n\n self.assertListEqual(\n loop_dict[\"_atom_type_symbol\"], [\"Bi\", \"C\", \"Co\", \"N\", \"O\", \"S\"]\n )\n\n self.assertListEqual(\n loop_dict[\"_atom_type_description\"], [\"Bi\", \"C\", \"Co\", \"N\", \"O\", \"S\"]\n )\n\n self.assertListEqual(\n loop_dict[\"_atom_type_scat_source\"],\n 6 * [\"International Tables Vol C Tables 4.2.6.8 and 6.1.1.4\"],\n )\n\n def test_another_big_cif(self):\n cif_fname = REAL_PATH + \"data/cif_files/2.cif\"\n self.assertTrue(\n os.path.isfile(cif_fname),\n msg=\"Failed to open test case {} - please 
check installation.\".format(\n cif_fname\n ),\n )\n\n cif, s = cif2dict(cif_fname)\n\n self.assertTrue(s)\n self.assertEqual(cif[\"space_group\"], \"P-1\")\n self.assertAlmostEqual(cif[\"cell_volume\"], 3464.52, places=1)\n self.assertEqual(cif[\"num_atoms\"], 161)\n self.assertEqual(len(cif[\"positions_frac\"]), 161)\n\n\nclass ExportTest(MatadorUnitTest):\n \"\"\" Test file export functions. \"\"\"\n\n def test_doc2res(self):\n res_fname = REAL_PATH + \"data/LiPZn-r57des.res\"\n test_fname = \"doc2res.res\"\n doc, s = res2dict(res_fname)\n doc2res(doc, test_fname, hash_dupe=False, overwrite=True)\n doc_exported, s = res2dict(test_fname)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\")\n self.compare_res_with_res(doc, doc_exported)\n\n def test_doc2param(self):\n param_fname = REAL_PATH + \"data/param_test.param\"\n test_fname = \"dummy.param\"\n doc, s = param2dict(param_fname, db=False, verbosity=VERBOSITY)\n self.assertTrue(s, msg=\"Failed entirely: {}\".format(doc))\n doc2param(doc, test_fname, hash_dupe=False, overwrite=True)\n doc_exported, s = param2dict(test_fname, db=False)\n self.assertTrue(s, msg=\"Failed entirely: {}\".format(doc_exported))\n self.assertEqual(len(doc_exported), len(doc))\n self.assertEqual(doc[\"devel_code\"], doc_exported[\"devel_code\"])\n\n param_fname = REAL_PATH + \"data/nmr.param\"\n test_fname = \"dummy2.param\"\n doc, s = param2dict(param_fname, db=False)\n doc2param(doc, test_fname, hash_dupe=False, overwrite=True)\n doc_exported, s = param2dict(test_fname, db=False)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\")\n self.assertEqual(len(doc_exported), len(doc))\n\n def test_doc2cell(self):\n cell_fname = REAL_PATH + \"data/K5P4-phonon.cell\"\n test_fname = \"dummy1.cell\"\n\n doc, s = cell2dict(\n cell_fname, db=False, lattice=True, verbosity=VERBOSITY, positions=False\n )\n doc2cell(doc, test_fname)\n test_dict, s = cell2dict(test_fname, db=False, lattice=True, positions=False)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(test_dict))\n self.assertEqual(\n test_dict[\"lattice_cart\"][0][0],\n 11.4518745146637,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"lattice_cart\"][1][1],\n 5.09448137301246,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"lattice_cart\"][2][2],\n 9.18378851243459,\n msg=\"Failed to read lattice vectors.\",\n )\n self.assertEqual(\n test_dict[\"lattice_cart\"][1][0], 0.0, msg=\"Failed to read lattice vectors.\"\n )\n self.assertEqual(\n test_dict[\"symmetry_tol\"], 0.0001, msg=\"Failed to read symmetry tolerance.\"\n )\n self.assertEqual(\n test_dict[\"kpoints_mp_spacing\"],\n 0.03,\n msg=\"Failed to read kpoint grid {}\".format(test_dict[\"kpoints_mp_spacing\"]),\n )\n self.assertEqual(\n test_dict[\"phonon_kpoint_mp_grid\"],\n [2, 2, 2],\n msg=\"Failed to read kpoint grid {}\".format(\n test_dict[\"phonon_kpoint_mp_grid\"]\n ),\n )\n self.assertEqual(\n test_dict[\"phonon_kpoint_mp_offset\"],\n [0.25, 0.25, 0.25],\n msg=\"Failed to read kpoint grid {}\".format(\n test_dict[\"phonon_kpoint_mp_offset\"]\n ),\n )\n self.assertEqual(round(test_dict[\"phonon_fine_kpoint_mp_spacing\"], 2), 0.02)\n self.assertEqual(\n test_dict[\"species_pot\"][\"K\"],\n \"2|1.5|9|10|11|30U:40:31(qc=6)\",\n msg=\"Failed to read pspots.\",\n )\n self.assertEqual(\n test_dict[\"species_pot\"][\"P\"],\n \"3|1.8|4|4|5|30:31:32\",\n msg=\"Failed to read pspots.\",\n )\n self.assertTrue(test_dict[\"snap_to_symmetry\"])\n 
self.assertTrue(test_dict[\"symmetry_generate\"])\n self.assertEqual(test_dict[\"phonon_supercell_matrix\"][0], [3, 0, 1])\n self.assertEqual(test_dict[\"phonon_supercell_matrix\"][1], [0, 3, 0])\n self.assertEqual(test_dict[\"phonon_supercell_matrix\"][2], [0, 0, 9])\n self.assertEqual(test_dict[\"cell_constraints\"], [[1, 2, 3], [4, 4, 4]])\n # test that overwrite overwrites\n doc[\"phonon_supercell_matrix\"][2] = [0, 0, 140]\n doc2cell(doc, test_fname, overwrite=True)\n test_dict, s = cell2dict(test_fname, db=False, lattice=True, positions=False)\n self.assertEqual(test_dict[\"phonon_supercell_matrix\"][2], [0, 0, 140])\n\n # test that hash dupe doesn't\n doc[\"phonon_supercell_matrix\"][2] = [0, 0, 333]\n doc2cell(doc, test_fname, overwrite=False, hash_dupe=True)\n test_dict, s = cell2dict(test_fname, db=False, lattice=True, positions=False)\n self.assertEqual(test_dict[\"phonon_supercell_matrix\"][2], [0, 0, 140])\n\n dummy_files = glob.glob(\"dummy*.cell\")\n self.assertEqual(len(dummy_files), 2)\n\n def test_doc2cell_partial_occ_fail(self):\n cell_name = REAL_PATH + \"data/cell_files/kpoint_path.cell\"\n cell, s = cell2dict(cell_name, db=False)\n\n cell[\"site_occupancy\"] = []\n for site in cell[\"positions_frac\"]:\n cell[\"site_occupancy\"].append(0.1)\n\n self.assertTrue(s)\n with self.assertRaises(RuntimeError):\n doc2cell(cell, \"dummy\")\n\n def test_doc2cell_kpoint_path(self):\n cell_name = REAL_PATH + \"data/cell_files/kpoint_path.cell\"\n dummy_name = \"dummy2.cell\"\n cell, s = cell2dict(cell_name, db=False)\n\n doc2cell(cell, dummy_name)\n cell, s = cell2dict(dummy_name, db=False)\n\n self.assertTrue(s)\n self.assertEqual(\n cell[\"spectral_kpoints_path\"],\n [[0.0, 0.0, 0.0], [0.0, 0.0, 0.5], [0.0, 0.5, 0.0], [0.5, 0.0, 0.0]],\n )\n self.assertEqual(\n cell[\"spectral_kpoints_path_labels\"], [\"$\\\\Gamma$\", \"Z\", \"$Y$\", \"X\"]\n )\n self.assertEqual(cell[\"spectral_kpoints_path_spacing\"], 0.02)\n\n def test_doc2cell_and_param_with_spin(self):\n\n cell_fname = REAL_PATH + \"data/K5P4-phonon.cell\"\n test_fname = \"dummy3\"\n test_param = {\n \"xc_functional\": \"PBE\",\n \"task\": \"geometryoptimisation\",\n \"spin_polarized\": False,\n }\n\n doc, s = cell2dict(\n cell_fname, db=False, lattice=True, verbosity=VERBOSITY, positions=True\n )\n doc2cell(doc, test_fname + \".cell\", spin=10)\n\n doc2param(test_param, test_fname + \".param\", spin=10)\n\n param_doc, s = param2dict(test_fname + \".param\")\n cell_doc, s = cell2dict(\n test_fname + \".cell\", db=False, lattice=True, positions=True\n )\n\n self.assertTrue(param_doc[\"spin_polarized\"])\n self.assertEqual(param_doc[\"spin\"], 10)\n\n self.assertEqual(cell_doc[\"atomic_init_spins\"][0], 10)\n\n def test_doc2res_from_json(self):\n json_fname = REAL_PATH + \"data/doc2res.json\"\n test_fname = \"doc2res.res\"\n self.compare_json_with_res(json_fname, test_fname)\n\n def test_doc2res_from_json_encap(self):\n json_fname = REAL_PATH + \"data/doc2res_encap.json\"\n test_fname = \"doc2res_encap.res\"\n self.compare_json_with_res(json_fname, test_fname)\n\n def test_query2files(self):\n \"\"\" Test that MP/ICSD/OQMD structures get written correctly. 
\"\"\"\n json_files = glob.glob(REAL_PATH + \"data/json_query_files/*.json\")\n cursor = []\n for f in json_files:\n with open(f, \"r\") as _f:\n cursor.append(json.load(_f))\n query2files(cursor, res=True, cell=True)\n self.assertTrue(os.path.isdir(\"query\"))\n self.assertEqual(len(glob.glob(\"query/*.res\")), 3)\n self.assertEqual(len(glob.glob(\"query/*.cell\")), 3)\n fnames = [\n \"SrCu-MP_1025402-CollCode629305\",\n \"H-MP_632250\",\n \"BaTeS3-OQMD_1606-CollCode8\",\n ]\n exts = [\"cell\", \"res\"]\n for name, ext in itertools.product(fnames, exts):\n self.assertTrue(\n os.path.isfile(\"query/{}.{}\".format(name, ext)),\n msg=\"Missing {}.{}\".format(name, ext),\n )\n\n def test_large_writes(self):\n \"\"\" Fake some large queries and make sure they are not written. \"\"\"\n fake_cursor = 100 * [{\"dummy\": \"data\"}]\n with self.assertRaises(RuntimeError):\n query2files(fake_cursor, res=True, max_files=99)\n with self.assertRaises(RuntimeError):\n query2files(fake_cursor, res=True, cell=True, max_files=199)\n with self.assertRaises(RuntimeError):\n query2files(fake_cursor, res=True, cell=True, pdb=True, max_files=299)\n\n def compare_json_with_res(self, json_fname, test_fname):\n with open(json_fname, \"r\") as f:\n doc = json.load(f)\n doc2res(doc, test_fname, hash_dupe=False, overwrite=True)\n doc_exported, s = res2dict(test_fname)\n self.assertTrue(s, msg=\"Failed entirely, oh dear!\\n{}\".format(doc_exported))\n self.compare_res_with_res(doc, doc_exported)\n\n def compare_res_with_res(self, doc, doc_exported):\n for key in doc_exported:\n if key not in [\n \"source\",\n \"atom_types\",\n \"positions_frac\",\n \"stoichiometry\",\n \"user\",\n \"lattice_abc\",\n \"lattice_cart\",\n \"site_occupancy\",\n ]:\n self.assertEqual(\n doc_exported[key],\n doc[key],\n msg=\"Input and output of {} do not match after scraping.\".format(\n key\n ),\n )\n elif key == \"positions_frac\":\n for ind, atom_pos in enumerate(doc_exported[\"positions_frac\"]):\n self.assertIn(\n atom_pos,\n doc[\"positions_frac\"],\n msg=\"Atom with this position is missing.\",\n )\n self.assertEqual(\n doc_exported[\"atom_types\"][ind],\n doc[\"atom_types\"][doc[\"positions_frac\"].index(atom_pos)],\n msg=\"Atom has wrong type!\",\n )\n elif key == \"stoichiometry\":\n self.assertEqual(\n sorted(doc[\"stoichiometry\"]),\n sorted(doc_exported[\"stoichiometry\"]),\n msg=\"Stoichs do not match!\",\n )\n elif key == \"atom_types\":\n self.assertEqual(\n sorted(doc[\"atom_types\"]),\n sorted(doc_exported[\"atom_types\"]),\n msg=\"Atom types do not match!\",\n )\n elif key == \"lattice_abc\":\n np.testing.assert_almost_equal(\n doc[\"lattice_abc\"], doc_exported[\"lattice_abc\"]\n )\n elif key == \"lattice_cart\":\n np.testing.assert_almost_equal(\n doc[\"lattice_cart\"], doc_exported[\"lattice_cart\"]\n )\n"
] | [
[
"numpy.abs",
"numpy.unique",
"numpy.asarray",
"numpy.isnan",
"numpy.linalg.inv",
"numpy.linalg.det",
"numpy.argsort",
"numpy.where",
"numpy.vstack"
],
[
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.zeros_like",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
],
[
"matplotlib.pyplot.rcParams.get",
"matplotlib.pyplot.subplots",
"numpy.linspace",
"numpy.unique"
],
[
"numpy.testing.assert_equal",
"numpy.testing.assert_almost_equal",
"numpy.shape",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
atlas-calo-ml/GraphNets4Pions_LLNL | [
"fb25259124711526cc4110461f09db1d03a669f9"
] | [
"train_multiOut_weightedRegress.py"
] | [
"import numpy as np\nimport os\nimport sys\nimport glob\nimport uproot as ur\nimport matplotlib.pyplot as plt\nimport time\nimport seaborn as sns\nimport tensorflow as tf\nfrom graph_nets import utils_np\nfrom graph_nets import utils_tf\nfrom graph_nets.graphs import GraphsTuple\nimport sonnet as snt\nimport argparse\nimport yaml\nimport logging\nimport tensorflow as tf\n\nfrom modules.mpdatagen import MPGraphDataGenerator\nimport modules.multiOutBlock_wWeightedRegress as models\nsns.set_context('poster')\n\nif __name__==\"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--config', default='configs/default.yaml')\n args = parser.parse_args()\n\n config = yaml.load(open(args.config))\n\n data_config = config['data']\n model_config = config['model']\n train_config = config['training']\n\n data_dir = data_config['data_dir']\n num_train_files = data_config['num_train_files']\n num_val_files = data_config['num_val_files']\n batch_size = data_config['batch_size']\n shuffle = data_config['shuffle']\n num_procs = data_config['num_procs']\n preprocess = data_config['preprocess']\n output_dir = data_config['output_dir']\n already_preprocessed = data_config['already_preprocessed']\n\n concat_input = model_config['concat_input']\n\n epochs = train_config['epochs']\n learning_rate = train_config['learning_rate']\n alpha = train_config['alpha']\n os.environ['CUDA_VISIBLE_DEVICES'] = str(train_config['gpu'])\n log_freq = train_config['log_freq']\n save_dir = train_config['save_dir'] + '/Block_'+time.strftime(\"%Y%m%d_%H%M\")+'_'+args.config.replace('.yaml','').split('/')[-1]\n os.makedirs(save_dir, exist_ok=True)\n yaml.dump(config, open(save_dir + '/config.yaml', 'w'))\n\n logging.basicConfig(level=logging.INFO, \n format='%(message)s', \n filename=save_dir + '/output.log')\n logging.info('Using config file {}'.format(args.config))\n # logging.info('Running training for {} with concant_input: {}\\n'.format(particle_type, concat_input))\n\n pi0_files = np.sort(glob.glob(data_dir+'*graphs.v01*/*pi0*/*root'))\n pion_files = np.sort(glob.glob(data_dir+'*graphs.v01*/*pion*/*root'))\n train_start = 10\n train_end = train_start + num_train_files\n val_end = train_end + num_val_files\n pi0_train_files = pi0_files[train_start:train_end]\n pi0_val_files = pi0_files[train_end:val_end]\n pion_train_files = pion_files[train_start:train_end]\n pion_val_files = pion_files[train_end:val_end]\n\n train_output_dir = None\n val_output_dir = None\n \n # Get Data\n if preprocess:\n train_output_dir = output_dir + '/train/'\n val_output_dir = output_dir + '/val/'\n\n if already_preprocessed:\n train_files = np.sort(glob.glob(train_output_dir+'*.p'))[:num_train_files]\n val_files = np.sort(glob.glob(val_output_dir+'*.p'))[:num_val_files]\n\n pi0_train_files = train_files\n pi0_val_files = val_files\n pion_train_files = None\n pion_val_files = None\n\n\n train_output_dir = None\n val_output_dir = None\n\n data_gen_train = MPGraphDataGenerator(pi0_file_list=pi0_train_files,\n pion_file_list=pion_train_files,\n cellGeo_file=data_dir+'graph_examples/cell_geo.root',\n batch_size=batch_size,\n shuffle=shuffle,\n num_procs=num_procs,\n preprocess=preprocess,\n output_dir=train_output_dir)\n\n data_gen_val = MPGraphDataGenerator(pi0_file_list=pi0_val_files,\n pion_file_list=pion_val_files,\n cellGeo_file=data_dir+'graph_examples/cell_geo.root',\n batch_size=batch_size,\n shuffle=shuffle,\n num_procs=num_procs,\n preprocess=preprocess,\n output_dir=val_output_dir)\n\n if preprocess and not 
already_preprocessed:\n exit()\n\n # Optimizer.\n #optimizer = snt.optimizers.Adam(learning_rate)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n\n model = models.MultiOutBlockWeightedRegressModel(global_output_size=1, num_outputs=2, model_config=model_config)\n\n training_loss_epoch = []\n training_loss_regress_epoch = []\n training_loss_class_epoch = []\n val_loss_epoch = []\n val_loss_regress_epoch = []\n val_loss_class_epoch = []\n \n checkpoint = tf.train.Checkpoint(module=model)\n checkpoint_prefix = os.path.join(save_dir, 'latest_model')\n latest = tf.train.latest_checkpoint(save_dir)\n if latest is not None:\n checkpoint.restore(latest)\n else:\n checkpoint.save(checkpoint_prefix)\n\n def convert_to_tuple(graphs):\n nodes = []\n edges = []\n globals = []\n senders = []\n receivers = []\n n_node = []\n n_edge = []\n offset = 0\n\n for graph in graphs:\n nodes.append(graph['nodes'])\n edges.append(graph['edges'])\n globals.append([graph['globals']])\n senders.append(graph['senders'] + offset)\n receivers.append(graph['receivers'] + offset)\n n_node.append(graph['nodes'].shape[:1])\n n_edge.append(graph['edges'].shape[:1])\n\n offset += len(graph['nodes'])\n\n nodes = tf.convert_to_tensor(np.concatenate(nodes))\n edges = tf.convert_to_tensor(np.concatenate(edges))\n globals = tf.convert_to_tensor(np.concatenate(globals))\n senders = tf.convert_to_tensor(np.concatenate(senders))\n receivers = tf.convert_to_tensor(np.concatenate(receivers))\n n_node = tf.convert_to_tensor(np.concatenate(n_node))\n n_edge = tf.convert_to_tensor(np.concatenate(n_edge))\n\n graph = GraphsTuple(\n nodes=nodes,\n edges=edges,\n globals=globals,\n senders=senders,\n receivers=receivers,\n n_node=n_node,\n n_edge=n_edge\n )\n\n return graph\n \n def get_batch(data_iter):\n for graphs, targets in data_iter:\n graphs = convert_to_tuple(graphs)\n targets = tf.convert_to_tensor(targets)\n \n yield graphs, targets\n\n samp_graph, samp_target = next(get_batch(data_gen_train.generator()))\n data_gen_train.kill_procs()\n graph_spec = utils_tf.specs_from_graphs_tuple(samp_graph, True, True, True)\n \n mae_loss = tf.keras.losses.MeanAbsoluteError()\n bce_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\n def loss_fn(targets, regress_preds, class_preds):\n regress_loss = mae_loss(targets[:,:1], regress_preds)\n class_loss = bce_loss(targets[:,1:], class_preds)\n combined_loss = alpha*regress_loss + (1 - alpha)*class_loss \n return regress_loss, class_loss, combined_loss\n\n @tf.function(input_signature=[graph_spec, tf.TensorSpec(shape=[None,2], dtype=tf.float32)])\n def train_step(graphs, targets):\n with tf.GradientTape() as tape:\n regress_output, class_output = model(graphs)\n regress_preds = regress_output.globals\n class_preds = class_output.globals\n regress_loss, class_loss, loss = loss_fn(targets, regress_preds, class_preds)\n \n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n return regress_loss, class_loss, loss\n\n @tf.function(input_signature=[graph_spec, tf.TensorSpec(shape=[None,2], dtype=tf.float32)])\n def val_step(graphs, targets):\n regress_output, class_output = model(graphs)\n regress_preds = regress_output.globals\n class_preds = class_output.globals\n regress_loss, class_loss, loss = loss_fn(targets, regress_preds, class_preds)\n\n return regress_loss, class_loss, loss, regress_preds, class_preds\n\n curr_loss = 1e5\n for e in range(epochs):\n\n logging.info('\\nStarting epoch: {}'.format(e))\n 
print('\\nStarting epoch: {}'.format(e))\n epoch_start = time.time()\n\n training_loss = []\n training_loss_regress = []\n training_loss_class = []\n val_loss = []\n val_loss_regress = []\n val_loss_class = []\n\n # Train\n logging.info('Training...')\n i = 1\n for graph_data_tr, targets_tr in get_batch(data_gen_train.generator()):#train_iter):\n start = time.time()\n #if i==1:\n losses_tr_rg, losses_tr_cl, losses_tr = train_step(graph_data_tr, targets_tr)\n end = time.time()\n\n training_loss.append(losses_tr.numpy())\n training_loss_regress.append(losses_tr_rg.numpy())\n training_loss_class.append(losses_tr_cl.numpy())\n\n if not (i-1)%log_freq:\n logging.info('Iter: {:04d}, Tr_loss_mean: {:.4f}, Tr_loss_rg_mean: {:.4f}, Tr_loss_cl_mean: {:.4f}, Took {:.3f}secs'. \\\n format(i, \n np.mean(training_loss), \n np.mean(training_loss_regress), \n np.mean(training_loss_class), \n end-start))\n # logging.info('Took {:.3f} secs'.format(end-start))\n \n i += 1 \n\n training_loss_epoch.append(training_loss)\n training_loss_regress_epoch.append(training_loss_regress)\n training_loss_class_epoch.append(training_loss_class)\n training_end = time.time()\n\n # validate\n logging.info('\\nValidation...')\n i = 1\n all_targets = []\n all_outputs = []\n for graph_data_val, targets_val in get_batch(data_gen_val.generator()):#val_iter):\n start = time.time()\n losses_val_rg, losses_val_cl, losses_val, regress_vals, class_vals = val_step(graph_data_val, targets_val)\n end = time.time()\n\n targets_val = targets_val.numpy()\n regress_vals = regress_vals.numpy()\n class_vals = class_vals.numpy()\n\n targets_val[:,0] = 10**targets_val[:,0]\n regress_vals = 10**regress_vals\n # targets_val[:,1] = 1 / (1 + np.exp(targets_val[:,1]))\n class_vals = tf.math.sigmoid(class_vals) # 1 / (1 + np.exp(class_vals))\n\n output_vals = np.hstack([regress_vals, class_vals])\n\n val_loss.append(losses_val.numpy())\n val_loss_regress.append(losses_val_rg.numpy())\n val_loss_class.append(losses_val_cl.numpy())\n\n all_targets.append(targets_val)\n all_outputs.append(output_vals)\n\n if not (i-1)%log_freq:\n logging.info('Iter: {:04d}, Val_loss_mean: {:.4f}, Val_loss_rg_mean: {:.4f}, Val_loss_cl_mean: {:.4f}, Took {:.3f}secs'. \\\n format(i, \n np.mean(val_loss), \n np.mean(val_loss_regress), \n np.mean(val_loss_class), \n end-start))\n # logging.info('Took {:.3f} secs'.format(end-start))\n \n i += 1\n\n epoch_end = time.time()\n\n\n all_targets = np.concatenate(all_targets)\n all_outputs = np.concatenate(all_outputs)\n \n val_loss_epoch.append(val_loss)\n val_loss_regress_epoch.append(val_loss_regress)\n val_loss_class_epoch.append(val_loss_class)\n \n np.savez(save_dir+'/losses', \n training=training_loss_epoch, validation=val_loss_epoch,\n training_regress=training_loss_regress_epoch, validation_regress=val_loss_regress_epoch,\n training_class=training_loss_class_epoch, validation_class=val_loss_class_epoch,\n )\n # checkpoint.save(checkpoint_prefix)\n \n val_mins = int((epoch_end - training_end)/60)\n val_secs = int((epoch_end - training_end)%60)\n training_mins = int((training_end - epoch_start)/60)\n training_secs = int((training_end - epoch_start)%60)\n\n logging.info('\\nEpoch {} ended\\nTraining: {:2d}:{:02d}\\nValidation: {:2d}:{:02d}'. \\\n format(e, training_mins, training_secs, val_mins, val_secs))\n print('\\nEpoch {} ended\\nTraining: {:2d}:{:02d}\\nValidation: {:2d}:{:02d}'. 
\\\n format(e, training_mins, training_secs, val_mins, val_secs))\n\n if np.mean(val_loss)<curr_loss:\n logging.info('\\nLoss decreased from {:.4f} to {:.4f}'.format(curr_loss, np.mean(val_loss)))\n logging.info('Checkpointing and saving predictions to:\\n{}'.format(save_dir))\n print('\\nLoss decreased from {:.4f} to {:.4f}'.format(curr_loss, np.mean(val_loss)))\n print('Checkpointing and saving predictions to:\\n{}'.format(save_dir))\n curr_loss = np.mean(val_loss)\n np.savez(save_dir+'/predictions', \n targets=all_targets, \n outputs=all_outputs)\n checkpoint.save(checkpoint_prefix)\n else: \n logging.info('\\nLoss didnt decrease from {:.4f}'.format(curr_loss))\n print('\\nLoss didnt decrease from {:.4f}'.format(curr_loss))\n\n if not (e+1)%20:\n optimizer.learning_rate = optimizer.learning_rate/10\n logging.info('\\nLearning rate decreased to: {:.3e}'.format(optimizer.learning_rate.value()))\n print('\\nLearning rate decreased to: {:.3e}'.format(optimizer.learning_rate.value()))\n\n"
] | [
[
"tensorflow.convert_to_tensor",
"numpy.hstack",
"numpy.savez",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.Checkpoint",
"tensorflow.keras.losses.BinaryCrossentropy",
"numpy.concatenate",
"tensorflow.math.sigmoid",
"tensorflow.GradientTape",
"tensorflow.keras.optimizers.Adam",
"numpy.mean",
"tensorflow.TensorSpec",
"tensorflow.keras.losses.MeanAbsoluteError"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
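The loss_fn in the record above blends an MAE regression loss with a logits-based binary cross-entropy, weighted by alpha. A minimal sketch of that weighting on toy tensors, assuming TensorFlow 2.x is installed; the alpha value and all numbers are invented purely for illustration:

import tensorflow as tf

# Toy batch: column 0 is the regression target, column 1 the binary label.
targets = tf.constant([[0.8, 1.0], [1.2, 0.0]], dtype=tf.float32)
regress_preds = tf.constant([[0.7], [1.0]], dtype=tf.float32)   # regression head output
class_logits = tf.constant([[2.0], [-1.5]], dtype=tf.float32)   # classification head logits

mae_loss = tf.keras.losses.MeanAbsoluteError()
bce_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)

alpha = 0.5  # hypothetical weight; the script reads it from its configuration
regress_loss = mae_loss(targets[:, :1], regress_preds)
class_loss = bce_loss(targets[:, 1:], class_logits)
combined_loss = alpha * regress_loss + (1 - alpha) * class_loss
print(float(regress_loss), float(class_loss), float(combined_loss))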
gabrieldesousah/awsmetric2csv | [
"adcc8f42a8c1b5eba8a1f7413c24165d2e7d65ff"
] | [
"utils.py"
] | [
"import boto3\nimport datetime\nimport numpy as np\n\ncsv_headers = {\n 'ec2': [\n 'name',\n 'instance',\n 'type',\n 'hypervisor',\n 'virtualization_type',\n 'architecture',\n 'ebs_optimized',\n 'image_id',\n 'key_name',\n 'metric',\n 'low',\n 'high',\n 'ave',\n 'median',\n 'launch_time',\n 'subnet_id',\n 'vpc_id'\n ], 'rds': [\n 'instance',\n 'type',\n 'engine',\n 'engine_version',\n 'license_model',\n 'multi_az',\n 'publicly_accessible',\n 'allocated_storage',\n 'storage_type',\n 'storage_encrypted',\n 'metric',\n 'low',\n 'high',\n 'ave',\n 'median',\n 'launch_time'\n ]}\n\n# create boto clients\ncw = boto3.client('cloudwatch')\nec2 = boto3.resource('ec2')\nrds = boto3.client('rds')\n\n\ndef get_all_instances(resource):\n if resource == 'ec2':\n return ec2.instances.filter(\n Filters=[\n {'Name': 'instance-state-name', 'Values': ['running']}])\n elif resource == 'rds':\n result = rds.describe_db_instances()\n return result['DBInstances']\n else:\n return None\n\n\ndef get_metric(resource, id, period, days, metric='CPUUtilization'):\n # get current time\n now = datetime.datetime.now()\n\n # identify dimension name\n if resource == 'ec2':\n dimension_name = 'InstanceId'\n elif resource == 'rds':\n dimension_name = 'DBInstanceIdentifier'\n else:\n return None\n\n # get metric statistics\n return cw.get_metric_statistics(\n Namespace='AWS/%s' % resource.upper(),\n MetricName=metric,\n Dimensions=[{\n 'Name': dimension_name,\n 'Value': id\n }],\n StartTime=now - datetime.timedelta(days=days),\n EndTime=now,\n Period=period,\n Statistics=['Maximum'],\n Unit='Percent'\n )\n\n\ndef process_metric(result):\n # get all datapoints and add to list\n item_list = []\n for datapoint in result['Datapoints']:\n item_list.append(float(datapoint['Maximum']))\n\n # on empty datapoints, append zero to avoid zero-size array error\n if len(item_list) == 0:\n item_list.append(0)\n\n # return a numpy array\n return np.array(item_list)\n\n\ndef write_to_csv(resource, csvwriter, instance, item_list_arr):\n if resource == 'ec2':\n # get instance name\n if instance.tags:\n name_dict = next(\n (i for i in instance.tags if i['Key'] == 'Name'),\n None)\n else:\n name_dict = None\n\n # write data rows\n csvwriter.writerow([\n '' if name_dict is None else name_dict.get('Value'),\n instance.id,\n instance.instance_type,\n instance.hypervisor,\n instance.virtualization_type,\n instance.architecture,\n instance.ebs_optimized,\n instance.image_id,\n instance.key_name,\n 'CPUUtilization',\n np.min(item_list_arr),\n np.max(item_list_arr),\n np.round(np.average(item_list_arr), 2),\n np.median(item_list_arr),\n instance.launch_time,\n instance.subnet_id,\n instance.vpc_id\n ])\n elif resource == 'rds':\n # write data rows\n csvwriter.writerow([\n instance['DBInstanceIdentifier'],\n instance['DBInstanceClass'],\n instance['Engine'],\n instance['EngineVersion'],\n instance['LicenseModel'],\n instance['MultiAZ'],\n instance['PubliclyAccessible'],\n instance['AllocatedStorage'],\n instance['StorageType'],\n instance['StorageEncrypted'],\n 'CPUUtilization',\n np.min(item_list_arr),\n np.max(item_list_arr),\n np.round(np.average(item_list_arr), 2),\n np.median(item_list_arr),\n instance['InstanceCreateTime']\n ])\n"
] | [
[
"numpy.min",
"numpy.median",
"numpy.max",
"numpy.average",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
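The process_metric/write_to_csv pair in the record above reduces CloudWatch datapoint maxima to low/high/ave/median columns with NumPy. A minimal sketch of that reduction on made-up datapoints (no AWS call; the values are illustrative only):

import numpy as np

# Made-up CloudWatch-style datapoints (each carries a 'Maximum' statistic).
datapoints = [{'Maximum': 12.5}, {'Maximum': 48.0}, {'Maximum': 7.3}]

# As in process_metric: collect the maxima, falling back to a single zero
# when no datapoints were returned.
values = [float(d['Maximum']) for d in datapoints]
if not values:
    values = [0.0]
arr = np.array(values)

# As in write_to_csv: the low, high, ave and median columns.
print(np.min(arr), np.max(arr), np.round(np.average(arr), 2), np.median(arr))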
yzygitzh/tutel | [
"ca6f018bf7afae2e37a74f17deddd0f5f91ec2b2"
] | [
"tutel/examples/helloworld.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\n# Recommend to initialize NUMA status at the most program begining (before any other imports)\nfrom tutel import system_init\nsystem_init.init_affinity_at_program_beginning()\n\nimport os\nimport time\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.distributed as dist\nfrom torch import nn\nimport argparse\n\nfrom tutel import moe as tutel_moe\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--local_rank', type=int, default=-1)\nparser.add_argument('--batch_size', type=int, default=16)\nparser.add_argument('--num_tokens', type=int, default=1024)\nparser.add_argument('--model_dim', type=int, default=2048)\nparser.add_argument('--hidden_size', type=int, default=2048)\nparser.add_argument('--num_local_experts', type=int, default=2)\nparser.add_argument('--dtype', type=str, default='float32')\nparser.add_argument('--fp32_gate', default=False, action='store_true')\nparser.add_argument('--top', type=int, default=2)\nparser.add_argument('--l_aux_wt', type=float, default=0.0)\nparser.add_argument('--a2a_ffn_overlap_degree', type=int, default=1)\nparser.add_argument('--num_steps', type=int, default=100)\nparser.add_argument('--save_load_checkpoint', default=False, action='store_true')\nargs = parser.parse_args()\n\nparallel_env = system_init.init_data_model_parallel()\ndist_rank, dist_world_size, dist_print = parallel_env.global_rank, parallel_env.global_size, parallel_env.dist_print\nargs.local_rank = parallel_env.local_device.index\n\nbatch_size = args.batch_size\nnum_tokens = args.num_tokens\nmodel_dim = args.model_dim\nhidden_size = args.hidden_size\nnum_local_experts = args.num_local_experts\ntop_value = args.top\na2a_ffn_overlap_degree = args.a2a_ffn_overlap_degree\ndevice = parallel_env.local_device\n\nif args.dtype == 'float32':\n torch.set_default_dtype(torch.float32)\nelif args.dtype == 'float64':\n torch.set_default_dtype(torch.float64)\nelif args.dtype == 'float16':\n torch.set_default_dtype(torch.float16)\nelif args.dtype == 'bfloat16':\n torch.set_default_dtype(torch.bfloat16)\nelse:\n raise Exception('Unrecognized data type specified: %s' % args.dtype)\n\n\nclass ExampleModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n self._moe_layer = tutel_moe.moe_layer(\n gate_type = {'type': 'top', 'k': top_value, 'fp32_gate': args.fp32_gate},\n experts = {'type': 'ffn', 'count_per_node': num_local_experts, 'hidden_size_per_expert': hidden_size, 'activation_fn': lambda x: F.relu(x)},\n model_dim = model_dim,\n scan_expert_func = lambda name, param: setattr(param, 'skip_allreduce', True),\n seeds = (1, dist_rank + 1, 1),\n a2a_ffn_overlap_degree = a2a_ffn_overlap_degree,\n ).to(device)\n\n # Summary of different parameter types: gate, local_experts\n local_count = sum([torch.numel(param) for name, param in self._moe_layer.get_parameter_iterator(param_type='local_experts')])\n shared_count = sum([torch.numel(param) for name, param in self._moe_layer.get_parameter_iterator(param_type='gate')])\n dist_print('[Statistics] param count for MoE local_experts = %s, param count for MoE gate = %s.\\n' % (local_count, shared_count))\n\n def forward(self, input):\n result = self._moe_layer(input)\n result = F.log_softmax(torch.sum(result, dim=2), dim=1)\n return result\n\nmodel = ExampleModel()\ndist_print(model)\n\nif args.save_load_checkpoint:\n checkpoint_path = './distributed-hellworld-%d-in-%d.ckpt' % 
(parallel_env.global_rank, parallel_env.global_size)\n if os.path.exists(checkpoint_path):\n model.load_state_dict(torch.load(checkpoint_path))\n else:\n print('Checkpoint not loaded: file `%s` is not found' % checkpoint_path)\n\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-5)\n\ntorch.manual_seed(0)\nx = torch.tensor(torch.randn([batch_size, num_tokens, model_dim], dtype=torch.float32, device='cpu').detach().numpy(), dtype=torch.get_default_dtype(), requires_grad=True, device=device)\ny = torch.LongTensor(batch_size).random_(1).to(device)\n\ntuples = (dist_world_size, args.dtype, model_dim, hidden_size, batch_size * num_tokens, num_local_experts, top_value, a2a_ffn_overlap_degree, device)\ndist_print('[Benchmark] world_size = %s, dtype = %s, model_dim = %s, hidden_size = %s, samples = %s, num_local_experts = %s, topK = %s, a2a_ffn_overlap_degree = %s, device = `%s`' % tuples)\n\naverage_time, num_steps = 0, args.num_steps\n\nparams_for_all_reduce = [p for p in model.parameters() if not hasattr(p, 'skip_allreduce') and getattr(p, 'requires_grad', False) and p.grad is not None]\n\nfor i in range(num_steps):\n\n torch.cuda.synchronize()\n t_start = time.time()\n optimizer.zero_grad()\n\n output = model(x)\n loss = F.nll_loss(output, y)\n if args.l_aux_wt:\n loss += args.l_aux_wt * model._moe_layer.l_aux\n loss.backward()\n if dist_world_size > 1:\n for p in params_for_all_reduce:\n p.grad /= dist_world_size\n dist.all_reduce(p.grad)\n optimizer.step()\n\n torch.cuda.synchronize()\n t_stop = time.time()\n dist_print('STEP-%s: DONE, loss = %s, step_time = %s sec.' % (i, float(loss.data), t_stop - t_start))\n\n if i + 10 >= num_steps:\n average_time += t_stop - t_start\n\naverage_time /= 10\ndist_print('\\n[Summary] Average synchronized step_time = %s sec.' % average_time)\n\nif args.save_load_checkpoint:\n torch.save(model.state_dict(), checkpoint_path)\n"
] | [
[
"torch.cuda.synchronize",
"torch.LongTensor",
"torch.nn.functional.nll_loss",
"torch.load",
"torch.set_default_dtype",
"torch.manual_seed",
"torch.randn",
"torch.sum",
"torch.numel",
"torch.nn.functional.relu",
"torch.get_default_dtype",
"torch.distributed.all_reduce"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
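The benchmark in the record above excludes parameters tagged with a skip_allreduce attribute from its manual gradient all-reduce. A minimal CPU-only sketch of that attribute-based filtering, assuming PyTorch is available, with a plain nn.Linear as a stand-in model and the per-step gradient checks left out:

import torch
from torch import nn

model = nn.Linear(4, 2)  # stand-in for the MoE example model

# Tag one parameter the way the MoE layer's scan_expert_func tags expert
# parameters (PyTorch tensors accept arbitrary Python attributes).
setattr(model.bias, 'skip_allreduce', True)

# Parameters that would still be all-reduced across data-parallel workers.
params_for_all_reduce = [
    p for p in model.parameters()
    if not hasattr(p, 'skip_allreduce') and getattr(p, 'requires_grad', False)
]
print([tuple(p.shape) for p in params_for_all_reduce])  # only the weight remains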
ZYZMarshall/Twitter-Emotion-Radar | [
"9d6ebf2464dfa1864268a9cdf69991e6cef542a3"
] | [
"Tweet_Streamer_Using_Tweepy/tweepy_streamer.py"
] | [
"import sys\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport twitter_credentials\n\n\"\"\"Tweepy module is used to stream live tweets directly from Twitter in real-time. \nThe tweets are visualized and then the TextBlob module is used to do sentiment analysis on the tweets.\"\"\"\nfrom tweepy import API \nfrom tweepy import Cursor\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport numpy as np\nimport pandas as pd\n\n\n# # # # TWITTER CLIENT # # # #\nclass TwitterClient():\n def __init__(self, twitter_user=None):\n self.auth = TwitterAuthenticator().authenticate_twitter_app()\n self.twitter_client = API(self.auth)\n\n self.twitter_user = twitter_user\n\n def get_twitter_client_api(self):\n return self.twitter_client\n\n def get_user_timeline_tweets(self, num_tweets):\n tweets = []\n for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):\n tweets.append(tweet)\n return tweets\n\n def get_friend_list(self, num_friends):\n friend_list = []\n for friend in Cursor(self.twitter_client.friends, id=self.twitter_user).items(num_friends):\n friend_list.append(friend)\n return friend_list\n\n def get_home_timeline_tweets(self, num_tweets):\n home_timeline_tweets = []\n for tweet in Cursor(self.twitter_client.home_timeline, id=self.twitter_user).items(num_tweets):\n home_timeline_tweets.append(tweet)\n return home_timeline_tweets\n\n\n# # # # TWITTER AUTHENTICATER # # # #\nclass TwitterAuthenticator():\n\n def authenticate_twitter_app(self):\n auth = OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\n return auth\n\n# # # # TWITTER STREAMER # # # #\nclass TwitterStreamer():\n \"\"\"\n Class for streaming and processing live tweets.\n \"\"\"\n def __init__(self):\n self.twitter_autenticator = TwitterAuthenticator() \n\n def stream_tweets(self, fetched_tweets_filename, hash_tag_list):\n # This handles Twitter authetification and the connection to Twitter Streaming API\n listener = TwitterListener(fetched_tweets_filename)\n auth = self.twitter_autenticator.authenticate_twitter_app() \n stream = Stream(auth, listener)\n\n # This line filter Twitter Streams to capture data by the keywords: \n stream.filter(track=hash_tag_list)\n\n\n# # # # TWITTER STREAM LISTENER # # # #\nclass TwitterListener(StreamListener):\n \"\"\"\n This is a basic listener that just prints received tweets to stdout.\n \"\"\"\n def __init__(self, fetched_tweets_filename):\n self.fetched_tweets_filename = fetched_tweets_filename\n\n def on_data(self, data):\n try:\n print(data)\n with open(self.fetched_tweets_filename, 'a') as tf:\n tf.write(data)\n return True\n except BaseException as e:\n print(\"Error on_data %s\" % str(e))\n return True\n \n def on_error(self, status):\n if status == 420:\n # Returning False on_data method in case rate limit occurs.\n return False\n print(status)\n\n\nclass TweetAnalyzer():\n \"\"\"\n Functionality for analyzing and categorizing content from tweets.\n \"\"\"\n def tweets_to_data_frame(self, tweets):\n df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])\n\n df['id'] = np.array([tweet.id for tweet in tweets])\n df['len'] = np.array([len(tweet.text) for tweet in tweets])\n df['date'] = np.array([tweet.created_at for tweet in tweets])\n df['source'] = np.array([tweet.source for tweet 
in tweets])\n df['likes'] = np.array([tweet.favorite_count for tweet in tweets])\n df['retweets'] = np.array([tweet.retweet_count for tweet in tweets])\n\n return df\n\n \nif __name__ == '__main__':\n\n twitter_client = TwitterClient()\n tweet_analyzer = TweetAnalyzer()\n\n api = twitter_client.get_twitter_client_api()\n\n tweets = api.user_timeline(screen_name=\"realDonaldTrump\", count=20)\n\n #print(dir(tweets[0]))\n #print(tweets[0].retweet_count)\n\n df = tweet_analyzer.tweets_to_data_frame(tweets)\n \n print(df.head(10))"
] | [
[
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
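tweets_to_data_frame in the record above builds a pandas DataFrame column by column from tweet attributes. A minimal sketch using stand-in objects so it runs without Twitter credentials; the tweet fields and values are invented for illustration:

import numpy as np
import pandas as pd
from types import SimpleNamespace

# Stand-in tweet objects carrying the attributes the method reads.
tweets = [
    SimpleNamespace(text='hello world', id=1, created_at='2020-01-01',
                    source='web', favorite_count=3, retweet_count=1),
    SimpleNamespace(text='second tweet', id=2, created_at='2020-01-02',
                    source='web', favorite_count=0, retweet_count=0),
]

df = pd.DataFrame(data=[t.text for t in tweets], columns=['Tweets'])
df['id'] = np.array([t.id for t in tweets])
df['len'] = np.array([len(t.text) for t in tweets])
df['date'] = np.array([t.created_at for t in tweets])
df['source'] = np.array([t.source for t in tweets])
df['likes'] = np.array([t.favorite_count for t in tweets])
df['retweets'] = np.array([t.retweet_count for t in tweets])
print(df.head())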
hilarioushappystar/SpiderSolitaireProject | [
"4fa04d41fbdb4d916919004966594f39ec619665"
] | [
"ai_module.py"
] | [
"###########################################\n# This is the ai_module class\n# This class handles the \"clever stuff\" such as working out best move(s) to play\n###########################################\n\n# achieves 10% win rate in 50 games (more thorough testing may be warranted)\n\nimport numpy as np\nfrom numpy import random \nfrom gamestate import Gamestate\nfrom card import Card\nimport copy\n\ndef evaluate_position(gs, cfgh): \n myeval = 100 * gs.countsuitsremoved() + gs.countsuitedbuilds() + 10*(44 - gs.counthiddencards())\n \n # do columns without face-down cards\n for foo in range(10):\n if( len( gs.columns[foo] ) == 0):\n myeval += cfgh.emptycolumnsweight\n elif( gs.columns[foo][0].isvisible):\n myeval += cfgh.emptycolumnsweight\n \n # do pollution \n for foo in range(10):\n poll = gs.compute_pollution(foo)\n if( poll == 1):\n myeval += 2 * cfgh.pollutionweight\n if( poll == 2):\n myeval += 1 * cfgh.pollutionweight\n \n # do max run length (preparing to complete suits is very important at the 4-suit level!)\n for suit in ['c','d','h','s']:\n # no scientific basis for choosing these numbers!\n tempdict = {6:1.06, 7:2.07, 8:5.08, 9:10.09, 10:20.10, 11:30.11, 12:40.12}\n runlength = gs.compute_maxrunlength(suit)\n if( runlength in tempdict):\n myeval += tempdict[runlength] * cfgh.maxrunlengthweight \n \n return myeval\n\n\n# choose the best moveblock to play \ndef choose_moveblock(gs, cfgh):\n random.seed(123456)\n threshold_param = 2\n game_result = 'STILLGOING'\n prev_eval = evaluate_position(gs,cfgh)\n \n numtries = cfgh.moveblocktries\n bestsofar_moveblock = []\n bestsofar_eval = evaluate_position(gs,cfgh)\n for mytry in range(numtries):\n \n moveblock = np.random.randint(1000,size=cfgh.moveblocklength)\n \n \n # randomly truncate \n randsize = 1 + random.randint(cfgh.moveblocklength-1)\n moveblock = moveblock[0:randsize]\n \n # now attempt both static and look-ahead evaluation \n gs2 = copy.deepcopy(gs)\n gs2.executemoveblock(moveblock,threshold_param,False)\n gs3 = copy.deepcopy(gs)\n gs3.executemoveblock(moveblock,threshold_param,True)\n avg_eval = 0.5 * (evaluate_position(gs2,cfgh) + evaluate_position(gs3,cfgh))\n \n if( avg_eval > bestsofar_eval):\n bestsofar_eval = avg_eval \n bestsofar_moveblock = moveblock\n if( avg_eval == bestsofar_eval and len(moveblock) < len(bestsofar_moveblock)):\n bestsofar_eval = avg_eval \n bestsofar_moveblock = moveblock \n movesequence = gs.executemoveblock(bestsofar_moveblock,threshold_param,True)\n \n if( evaluate_position(gs,cfgh) <= prev_eval):\n if( len( gs.stock[0]) > 0):\n gs.dealrow()\n else:\n if gs.iswon():\n game_result = 'RESULT = WIN'\n else:\n game_result = 'RESULT = LOSE'\n return (movesequence, game_result)"
] | [
[
"numpy.random.seed",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
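choose_moveblock in the record above is a random-restart search: sample random candidate move blocks, truncate them to a random length, score each with an evaluation function, and keep the best. A minimal sketch of that pattern with a toy scoring function invented purely for illustration (it does not reproduce evaluate_position or the game state):

import numpy as np

rng = np.random.default_rng(123456)

def toy_eval(block):
    # Stand-in for evaluate_position(): prefers a few large moves.
    return block.sum() - 5 * len(block)

best_block = np.array([], dtype=int)
best_eval = float('-inf')
for _ in range(50):                              # candidate tries
    block = rng.integers(0, 1000, size=20)       # random candidate moveblock
    block = block[: 1 + rng.integers(19)]        # random truncation, as in the AI
    score = toy_eval(block)
    if score > best_eval:
        best_block, best_eval = block, score
print(len(best_block), best_eval)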
jwoos/python_digitrecognizer | [
"4a06cc7b7ee32aa6c66a391cd6595a2b5fbad38b"
] | [
"tests/unit/test_pool.py"
] | [
"from unittest import TestCase, mock\n\nimport layers\n\nimport numpy as np\nimport pytest\n\n\nclass TestPoolForward(TestCase):\n def test_max(self):\n pool = layers.pool.Pool(\n size=2,\n stride=2,\n operation=np.max,\n )\n\n data = np.zeros((4, 4, 3))\n data[:,:,0] = np.array([\n [1, 1, 2, 4],\n [5, 6, 7, 8],\n [3, 2, 1, 0],\n [1, 2, 3, 4],\n ])\n data[:,:,1] = np.array([\n [1, 1, 2, 4],\n [5, 6, 7, 8],\n [3, 2, 1, 0],\n [1, 2, 3, 4],\n ])\n data[:,:,2] = np.array([\n [1, 1, 2, 4],\n [5, 6, 7, 8],\n [3, 2, 1, 0],\n [1, 2, 3, 4],\n ])\n\n result = pool.forward(data)\n expected = np.zeros((2, 2, 3))\n expected[:,:,0] = np.array([\n [6, 8],\n [3, 4],\n ])\n expected[:,:,1] = np.array([\n [6, 8],\n [3, 4],\n ])\n expected[:,:,2] = np.array([\n [6, 8],\n [3, 4],\n ])\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
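The test in the record above builds a 4x4x3 input and the 2x2x3 output it expects from max pooling with size 2 and stride 2 (it constructs expected but stops short of asserting the comparison). A plain-NumPy sketch of that pooling, independent of the project's layers.pool.Pool:

import numpy as np

plane = np.array([
    [1, 1, 2, 4],
    [5, 6, 7, 8],
    [3, 2, 1, 0],
    [1, 2, 3, 4],
])
data = np.stack([plane] * 3, axis=-1)  # shape (4, 4, 3), same plane in every channel

size = stride = 2
h, w, c = data.shape
pooled = np.zeros((h // stride, w // stride, c))
for i in range(0, h, stride):
    for j in range(0, w, stride):
        # Reduce each size x size window to its per-channel maximum.
        pooled[i // stride, j // stride] = data[i:i + size, j:j + size].max(axis=(0, 1))

print(pooled[:, :, 0])  # [[6. 8.], [3. 4.]], matching the test's expected array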
Originofamonia/differential-privacy-library | [
"a889ba0f8d19c77e2b0369451ebc392969fac685",
"11759216bd418f764ff5c4a2349975b14f6e7ffb",
"a889ba0f8d19c77e2b0369451ebc392969fac685"
] | [
"diffprivlib/models/naive_bayes.py",
"tests/models/test_LogisticRegression.py",
"tests/mechanisms/test_Laplace.py"
] | [
"# MIT License\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nGaussian Naive Bayes classifier satisfying differential privacy\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport sklearn.naive_bayes as sk_nb\nfrom sklearn.utils import check_X_y\nfrom sklearn.utils.multiclass import _check_partial_fit_first_call\n\nfrom diffprivlib.accountant import BudgetAccountant\nfrom diffprivlib.mechanisms import LaplaceBoundedDomain, GeometricTruncated, LaplaceTruncated\nfrom diffprivlib.utils import PrivacyLeakWarning, warn_unused_args\nfrom diffprivlib.validation import check_bounds, clip_to_bounds\n\n\nclass GaussianNB(sk_nb.GaussianNB):\n r\"\"\"Gaussian Naive Bayes (GaussianNB) with differential privacy\n\n Inherits the :class:`sklearn.naive_bayes.GaussianNB` class from Scikit Learn and adds noise to satisfy differential\n privacy to the learned means and variances. Adapted from the work presented in [VSB13]_.\n\n Parameters\n ----------\n epsilon : float, default: 1.0\n Privacy parameter :math:`\\epsilon` for the model.\n\n bounds: tuple, optional\n Bounds of the data, provided as a tuple of the form (min, max). `min` and `max` can either be scalars, covering\n the min/max of the entire data, or vectors with one entry per feature. If not provided, the bounds are computed\n on the data when ``.fit()`` is first called, resulting in a :class:`.PrivacyLeakWarning`.\n\n priors : array-like, shape (n_classes,)\n Prior probabilities of the classes. If specified the priors are not adjusted according to the data.\n\n var_smoothing : float, default: 1e-9\n Portion of the largest variance of all features that is added to variances for calculation stability.\n\n accountant : BudgetAccountant, optional\n Accountant to keep track of privacy budget.\n\n Attributes\n ----------\n class_prior_ : array, shape (n_classes,)\n probability of each class.\n\n class_count_ : array, shape (n_classes,)\n number of training samples observed in each class.\n\n theta_ : array, shape (n_classes, n_features)\n mean of each feature per class\n\n sigma_ : array, shape (n_classes, n_features)\n variance of each feature per class\n\n epsilon_ : float\n absolute additive value to variances (unrelated to ``epsilon`` parameter for differential privacy)\n\n References\n ----------\n .. [VSB13] Vaidya, Jaideep, Basit Shafiq, Anirban Basu, and Yuan Hong. 
\"Differentially private naive bayes\n classification.\" In 2013 IEEE/WIC/ACM International Joint Conferences on Web Intelligence (WI) and Intelligent\n Agent Technologies (IAT), vol. 1, pp. 571-576. IEEE, 2013.\n\n \"\"\"\n\n def __init__(self, epsilon=1.0, bounds=None, priors=None, var_smoothing=1e-9, accountant=None):\n super().__init__(priors=priors, var_smoothing=var_smoothing)\n\n self.epsilon = epsilon\n self.bounds = bounds\n self.accountant = BudgetAccountant.load_default(accountant)\n\n def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None):\n self.accountant.check(self.epsilon, 0)\n\n if sample_weight is not None:\n warn_unused_args(\"sample_weight\")\n\n X, y = check_X_y(X, y)\n\n if self.bounds is None:\n warnings.warn(\"Bounds have not been specified and will be calculated on the data provided. This will \"\n \"result in additional privacy leakage. To ensure differential privacy and no additional \"\n \"privacy leakage, specify bounds for each dimension.\", PrivacyLeakWarning)\n self.bounds = (np.min(X, axis=0), np.max(X, axis=0))\n\n self.bounds = check_bounds(self.bounds, shape=X.shape[1])\n X = clip_to_bounds(X, self.bounds)\n\n self.epsilon_ = self.var_smoothing\n\n if _refit:\n self.classes_ = None\n\n if _check_partial_fit_first_call(self, classes):\n n_features = X.shape[1]\n n_classes = len(self.classes_)\n self.theta_ = np.zeros((n_classes, n_features))\n self.sigma_ = np.zeros((n_classes, n_features))\n\n self.class_count_ = np.zeros(n_classes, dtype=np.float64)\n\n if self.priors is not None:\n priors = np.asarray(self.priors)\n\n if len(priors) != n_classes:\n raise ValueError(\"Number of priors must match number of classes.\")\n if not np.isclose(priors.sum(), 1.0):\n raise ValueError(\"The sum of the priors should be 1.\")\n if (priors < 0).any():\n raise ValueError(\"Priors must be non-negative.\")\n self.class_prior_ = priors\n else:\n # Initialize the priors to zeros for each class\n self.class_prior_ = np.zeros(len(self.classes_), dtype=np.float64)\n else:\n if X.shape[1] != self.theta_.shape[1]:\n raise ValueError(\"Number of features %d does not match previous data %d.\" %\n (X.shape[1], self.theta_.shape[1]))\n # Put epsilon back in each time\n self.sigma_[:, :] -= self.epsilon_\n\n classes = self.classes_\n\n unique_y = np.unique(y)\n unique_y_in_classes = np.in1d(unique_y, classes)\n\n if not np.all(unique_y_in_classes):\n raise ValueError(\"The target label(s) %s in y do not exist in the initial classes %s\" %\n (unique_y[~unique_y_in_classes], classes))\n\n noisy_class_counts = self._noisy_class_counts(y)\n\n for _i, y_i in enumerate(unique_y):\n i = classes.searchsorted(y_i)\n X_i = X[y == y_i, :]\n\n n_i = noisy_class_counts[_i]\n\n new_theta, new_sigma = self._update_mean_variance(self.class_count_[i], self.theta_[i, :],\n self.sigma_[i, :], X_i, n_noisy=n_i)\n\n self.theta_[i, :] = new_theta\n self.sigma_[i, :] = new_sigma\n self.class_count_[i] += n_i\n\n self.sigma_[:, :] += self.epsilon_\n\n # Update if only no priors is provided\n if self.priors is None:\n # Empirical prior, with sample_weight taken into account\n self.class_prior_ = self.class_count_ / self.class_count_.sum()\n\n self.accountant.spend(self.epsilon, 0)\n\n return self\n\n def _update_mean_variance(self, n_past, mu, var, X, sample_weight=None, n_noisy=None):\n \"\"\"Compute online update of Gaussian mean and variance.\n\n Given starting sample count, mean, and variance, a new set of points X return the updated mean and variance.\n (NB - each dimension 
(column) in X is treated as independent -- you get variance, not covariance).\n\n Can take scalar mean and variance, or vector mean and variance to simultaneously update a number of\n independent Gaussians.\n\n See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:\n\n http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf\n\n Parameters\n ----------\n n_past : int\n Number of samples represented in old mean and variance. If sample weights were given, this should contain\n the sum of sample weights represented in old mean and variance.\n\n mu : array-like, shape (number of Gaussians,)\n Means for Gaussians in original set.\n\n var : array-like, shape (number of Gaussians,)\n Variances for Gaussians in original set.\n\n sample_weight : ignored\n Ignored in diffprivlib.\n\n n_noisy : int, optional\n Noisy count of the given class, satisfying differential privacy.\n\n Returns\n -------\n total_mu : array-like, shape (number of Gaussians,)\n Updated mean for each Gaussian over the combined set.\n\n total_var : array-like, shape (number of Gaussians,)\n Updated variance for each Gaussian over the combined set.\n \"\"\"\n if n_noisy is None:\n warnings.warn(\"Noisy class count has not been specified and will be read from the data. To use this \"\n \"method correctly, make sure it is run by the parent GaussianNB class.\", PrivacyLeakWarning)\n n_noisy = X.shape[0]\n\n if not n_noisy:\n return mu, var\n\n if sample_weight is not None:\n warn_unused_args(\"sample_weight\")\n\n # Split epsilon between each feature, using 1/3 of total budget for each of mean and variance\n n_features = X.shape[1]\n local_epsilon = self.epsilon / 3 / n_features\n\n new_mu = np.zeros((n_features,))\n new_var = np.zeros((n_features,))\n\n for feature in range(n_features):\n _X = X[:, feature]\n lower, upper = self.bounds[0][feature], self.bounds[1][feature]\n local_diameter = upper - lower\n\n mech_mu = LaplaceTruncated(epsilon=local_epsilon, delta=0, sensitivity=local_diameter,\n lower=lower * n_noisy, upper=upper * n_noisy)\n _mu = mech_mu.randomise(_X.sum()) / n_noisy\n\n local_sq_sens = max(_mu - lower, upper - _mu) ** 2\n mech_var = LaplaceBoundedDomain(epsilon=local_epsilon, delta=0, sensitivity=local_sq_sens, lower=0,\n upper=local_sq_sens * n_noisy)\n _var = mech_var.randomise(((_X - _mu) ** 2).sum()) / n_noisy\n\n new_mu[feature] = _mu\n new_var[feature] = _var\n\n if n_past == 0:\n return new_mu, new_var\n\n n_total = float(n_past + n_noisy)\n\n # Combine mean of old and new data, taking into consideration\n # (weighted) number of observations\n total_mu = (n_noisy * new_mu + n_past * mu) / n_total\n\n # Combine variance of old and new data, taking into consideration\n # (weighted) number of observations. 
This is achieved by combining\n # the sum-of-squared-differences (ssd)\n old_ssd = n_past * var\n new_ssd = n_noisy * new_var\n total_ssd = old_ssd + new_ssd + (n_past / float(n_noisy * n_total)) * (n_noisy * mu - n_noisy * new_mu) ** 2\n total_var = total_ssd / n_total\n\n return total_mu, total_var\n\n def _noisy_class_counts(self, y):\n unique_y = np.unique(y)\n n_total = y.shape[0]\n\n # Use 1/3 of total epsilon budget for getting noisy class counts\n mech = GeometricTruncated(epsilon=self.epsilon / 3, sensitivity=1, lower=1, upper=n_total)\n noisy_counts = np.array([mech.randomise((y == y_i).sum()) for y_i in unique_y])\n\n argsort = np.argsort(noisy_counts)\n i = 0 if noisy_counts.sum() > n_total else len(unique_y) - 1\n\n while np.sum(noisy_counts) != n_total:\n _i = argsort[i]\n sgn = np.sign(n_total - noisy_counts.sum())\n noisy_counts[_i] = np.clip(noisy_counts[_i] + sgn, 1, n_total)\n\n i = (i - sgn) % len(unique_y)\n\n return noisy_counts\n",
"import numpy as np\nfrom unittest import TestCase\n\nfrom diffprivlib.models.logistic_regression import LogisticRegression\nfrom diffprivlib.utils import global_seed, PrivacyLeakWarning, DiffprivlibCompatibilityWarning, BudgetError\n\n\nclass TestLogisticRegression(TestCase):\n def setup_method(self, method):\n global_seed(3141592653)\n\n def test_not_none(self):\n self.assertIsNotNone(LogisticRegression)\n\n def test_bad_params(self):\n X = [[1]]\n y = [0]\n\n with self.assertRaises(ValueError):\n LogisticRegression(data_norm=1, C=-1).fit(X, y)\n\n with self.assertRaises(ValueError):\n LogisticRegression(data_norm=1, C=1.2).fit(X, y)\n\n with self.assertRaises(ValueError):\n LogisticRegression(data_norm=1, max_iter=-1).fit(X, y)\n\n with self.assertRaises(ValueError):\n LogisticRegression(data_norm=1, max_iter=\"100\").fit(X, y)\n\n with self.assertRaises(ValueError):\n LogisticRegression(data_norm=1, tol=-1).fit(X, y)\n\n with self.assertRaises(ValueError):\n LogisticRegression(data_norm=1, tol=\"1\").fit(X, y)\n\n def test_one_class(self):\n X = [[1]]\n y = [0]\n\n clf = LogisticRegression(data_norm=1)\n\n with self.assertRaises(ValueError):\n clf.fit(X, y)\n\n def test_no_params(self):\n clf = LogisticRegression()\n\n X = np.array(\n [0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75,\n 5.00, 5.50])\n y = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])\n X = X[:, np.newaxis]\n\n with self.assertWarns(PrivacyLeakWarning):\n clf.fit(X, y)\n\n def test_smaller_norm_than_data(self):\n X = np.array(\n [0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75,\n 5.00, 5.50])\n y = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])\n X = X[:, np.newaxis]\n\n clf = LogisticRegression(data_norm=1.0)\n\n self.assertIsNotNone(clf.fit(X, y))\n\n def test_sample_weight_warning(self):\n X = np.array(\n [0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75,\n 5.00, 5.50])\n y = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])\n X = X[:, np.newaxis]\n\n clf = LogisticRegression(data_norm=5.5)\n\n with self.assertWarns(DiffprivlibCompatibilityWarning):\n clf.fit(X, y, sample_weight=np.ones_like(y))\n\n def test_trinomial(self):\n X = np.array(\n [0.50, 0.75, 1.00])\n y = np.array([0, 1, 2])\n X = X[:, np.newaxis]\n\n clf = LogisticRegression(data_norm=1.0)\n\n self.assertIsNotNone(clf.fit(X, y))\n\n def test_quadnomial(self):\n X = np.array(\n [0.25, 0.50, 0.75, 1.00])\n y = np.array([0, 1, 2, 3])\n X = X[:, np.newaxis]\n\n clf = LogisticRegression(data_norm=1.0)\n\n self.assertIsNotNone(clf.fit(X, y))\n\n def test_multi_dim_y(self):\n X = np.array(\n [0.25, 0.50, 0.75, 1.00])\n y = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])\n X = X[:, np.newaxis]\n\n clf = LogisticRegression(data_norm=1.0)\n\n self.assertRaises(ValueError, clf.fit, X, y)\n\n def test_solver_warning(self):\n with self.assertWarns(DiffprivlibCompatibilityWarning):\n LogisticRegression(solver=\"newton-cg\")\n\n def test_multi_class_warning(self):\n with self.assertWarns(DiffprivlibCompatibilityWarning):\n LogisticRegression(multi_class=\"multinomial\")\n\n def test_different_results(self):\n from sklearn import datasets\n from sklearn import linear_model\n from sklearn.model_selection import train_test_split\n\n dataset = datasets.load_iris()\n X_train, X_test, y_train, y_test = train_test_split(dataset.data, 
dataset.target, test_size=0.2)\n\n clf = LogisticRegression(data_norm=12)\n clf.fit(X_train, y_train)\n\n predict1 = clf.predict(X_test)\n\n clf = LogisticRegression(data_norm=12)\n clf.fit(X_train, y_train)\n\n predict2 = clf.predict(X_test)\n\n clf = linear_model.LogisticRegression(solver=\"lbfgs\", multi_class=\"ovr\")\n clf.fit(X_train, y_train)\n\n predict3 = clf.predict(X_test)\n\n self.assertTrue(np.any(predict1 != predict2) or np.any(predict1 != predict3))\n\n def test_same_results(self):\n from sklearn import datasets\n from sklearn.model_selection import train_test_split\n from sklearn import linear_model\n\n dataset = datasets.load_iris()\n X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.2)\n\n clf = LogisticRegression(data_norm=12, epsilon=float(\"inf\"))\n clf.fit(X_train, y_train)\n\n predict1 = clf.predict(X_test)\n\n clf = linear_model.LogisticRegression(solver=\"lbfgs\", multi_class=\"ovr\")\n clf.fit(X_train, y_train)\n\n predict2 = clf.predict(X_test)\n\n self.assertTrue(np.all(predict1 == predict2))\n\n def test_simple(self):\n X = np.array(\n [0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75,\n 5.00, 5.50] * 5)\n y = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1] * 5)\n X = X[:, np.newaxis]\n X -= 3.0\n X /= 2.5\n\n clf = LogisticRegression(epsilon=2, data_norm=1.0)\n clf.fit(X, y)\n\n self.assertIsNotNone(clf)\n self.assertFalse(clf.predict(np.array([(0.5 - 3) / 2.5]).reshape(-1, 1)))\n self.assertTrue(clf.predict(np.array([(5.5 - 3) / 2.5]).reshape(-1, 1)))\n\n def test_accountant(self):\n from diffprivlib.accountant import BudgetAccountant\n acc = BudgetAccountant()\n\n X = np.array(\n [0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75,\n 5.00, 5.50])\n y = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])\n X = X[:, np.newaxis]\n X -= 3.0\n X /= 2.5\n\n clf = LogisticRegression(epsilon=2, data_norm=1.0, accountant=acc)\n clf.fit(X, y)\n self.assertEqual((2, 0), acc.total())\n\n with BudgetAccountant(3, 0) as acc2:\n clf = LogisticRegression(epsilon=2, data_norm=1.0)\n clf.fit(X, y)\n self.assertEqual((2, 0), acc2.total())\n\n with self.assertRaises(BudgetError):\n clf.fit(X, y)\n",
"from unittest import TestCase\nimport numpy as np\n\nfrom diffprivlib.mechanisms import Laplace\nfrom diffprivlib.utils import global_seed\n\n\nclass TestLaplace(TestCase):\n def setup_method(self, method):\n if method.__name__ .endswith(\"prob\"):\n global_seed(314159)\n\n self.mech = Laplace\n\n def teardown_method(self, method):\n del self.mech\n\n def test_class(self):\n from diffprivlib.mechanisms import DPMechanism\n self.assertTrue(issubclass(Laplace, DPMechanism))\n\n def test_neg_sensitivity(self):\n with self.assertRaises(ValueError):\n self.mech(epsilon=1, delta=0, sensitivity=-1)\n\n def test_str_sensitivity(self):\n with self.assertRaises(TypeError):\n self.mech(epsilon=1, delta=0, sensitivity=\"1\")\n\n def test_zero_sensitivity(self):\n mech = self.mech(epsilon=1, delta=0, sensitivity=0)\n\n for i in range(1000):\n self.assertAlmostEqual(mech.randomise(1), 1)\n\n def test_default_delta(self):\n mech = self.mech(epsilon=1, sensitivity=1)\n self.assertEqual(0.0, mech.delta)\n\n def test_neg_epsilon(self):\n with self.assertRaises(ValueError):\n self.mech(epsilon=-1, delta=0, sensitivity=1)\n\n def test_inf_epsilon(self):\n mech = self.mech(epsilon=float(\"inf\"), delta=0, sensitivity=1)\n\n for i in range(1000):\n self.assertAlmostEqual(mech.randomise(1), 1)\n\n def test_complex_epsilon(self):\n with self.assertRaises(TypeError):\n self.mech(epsilon=1 + 2j, delta=0, sensitivity=1)\n\n def test_string_epsilon(self):\n with self.assertRaises(TypeError):\n self.mech(epsilon=\"Two\", delta=0, sensitivity=1)\n\n def test_repr(self):\n repr_ = repr(self.mech(epsilon=1, delta=0, sensitivity=1))\n self.assertIn(\".Laplace(\", repr_)\n\n def test_zero_epsilon_with_delta(self):\n mech = self.mech(epsilon=0, delta=0.5, sensitivity=1)\n self.assertIsNotNone(mech.randomise(1))\n\n def test_epsilon_delta(self):\n mech = self.mech(epsilon=1, delta=0.01, sensitivity=1)\n self.assertIsNotNone(mech.randomise(1))\n\n def test_non_numeric(self):\n mech = self.mech(epsilon=1, delta=0, sensitivity=1)\n with self.assertRaises(TypeError):\n mech.randomise(\"Hello\")\n\n def test_zero_median_prob(self):\n mech = self.mech(epsilon=1, delta=0, sensitivity=1)\n vals = []\n\n for i in range(10000):\n vals.append(mech.randomise(0))\n\n median = float(np.median(vals))\n self.assertAlmostEqual(np.abs(median), 0.0, delta=0.1)\n\n def test_neighbours_prob(self):\n epsilon = 1\n runs = 10000\n mech = self.mech(epsilon=epsilon, delta=0, sensitivity=1)\n count = [0, 0]\n\n for i in range(runs):\n val0 = mech.randomise(0)\n if val0 <= 0:\n count[0] += 1\n\n val1 = mech.randomise(1)\n if val1 <= 0:\n count[1] += 1\n\n self.assertGreater(count[0], count[1])\n self.assertLessEqual(count[0] / runs, np.exp(epsilon) * count[1] / runs + 0.1)\n\n def test_bias(self):\n self.assertEqual(0.0, self.mech(epsilon=1, delta=0, sensitivity=1).bias(0))\n\n def test_variance(self):\n mech = self.mech(epsilon=1, delta=0, sensitivity=1)\n self.assertEqual(2.0, mech.variance(0))\n"
] | [
[
"sklearn.utils.check_X_y",
"numpy.unique",
"sklearn.utils.multiclass._check_partial_fit_first_call",
"numpy.clip",
"numpy.in1d",
"numpy.min",
"numpy.asarray",
"numpy.all",
"numpy.max",
"numpy.argsort",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.ones_like",
"sklearn.linear_model.LogisticRegression",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"numpy.all",
"numpy.any",
"numpy.array"
],
[
"numpy.median",
"numpy.exp",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
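The Laplace tests in the record above construct mechanisms as Laplace(epsilon=..., delta=..., sensitivity=...) and call randomise on numeric values. A minimal usage sketch with that same signature, assuming diffprivlib is installed; the count being randomised is arbitrary:

from diffprivlib.mechanisms import Laplace

# Same constructor signature the tests exercise.
mech = Laplace(epsilon=1.0, delta=0, sensitivity=1)
print(mech.randomise(42))   # noisy version of an arbitrary count

# With infinite epsilon the tests expect the input back essentially unchanged.
exact = Laplace(epsilon=float('inf'), delta=0, sensitivity=1)
print(exact.randomise(42))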
hx89/FBGEMM | [
"03a04eb7ecea8ee0afea42eaae7b2c2119a38886"
] | [
"fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport itertools\nimport logging\nimport math\nimport random\nimport statistics\nimport time\nfrom typing import Callable, List, Optional, Tuple\n\nimport click\nimport numpy as np\nimport torch\n\nhaveAIBench = False\ntry:\n from aibench_observer.utils.observer import emitMetric\n\n haveAIBench = True\nexcept Exception:\n haveAIBench = False\n\nfrom fbgemm_gpu.split_table_batched_embeddings_ops import (\n BoundsCheckMode,\n CacheAlgorithm,\n ComputeDevice,\n DenseTableBatchedEmbeddingBagsCodegen,\n EmbeddingLocation,\n OptimType,\n SparseType,\n SplitTableBatchedEmbeddingBagsCodegen,\n IntNBitTableBatchedEmbeddingBagsCodegen,\n PoolingMode,\n)\nfrom numpy.random import default_rng\nfrom torch import Tensor\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef round_up(a: int, b: int) -> int:\n return int((a + b - 1) // b) * b\n\n\ndef get_device() -> torch.device:\n return (\n torch.cuda.current_device()\n if torch.cuda.is_available()\n else torch.device(\"cpu\")\n )\n\n\n# Merged indices with shape (T, B, L) -> (flattened indices with shape\n# (T * B * L), offsets with shape (T * B + 1))\ndef get_table_batched_offsets_from_dense(\n merged_indices: Tensor,\n) -> Tuple[Tensor, Tensor]:\n (T, B, L) = merged_indices.size()\n lengths = np.ones((T, B)) * L\n flat_lengths = lengths.flatten()\n return (\n merged_indices.long().contiguous().view(-1).to(get_device()),\n torch.tensor(([0] + np.cumsum(flat_lengths).tolist())).long().to(get_device()),\n )\n\n\ndef get_offsets_from_dense(indices: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n (B, L) = indices.size()\n return (\n indices.contiguous().view(-1),\n torch.tensor(\n np.cumsum(np.asarray([0] + [L for _ in range(B)])[:-1]).astype(np.int64)\n ),\n )\n\n\ndef b_indices(\n b: Callable[..., torch.Tensor],\n x: torch.Tensor,\n per_sample_weights: Optional[torch.Tensor] = None,\n use_cpu: bool = False,\n do_pooling: bool = True,\n) -> torch.Tensor:\n (indices, offsets) = get_offsets_from_dense(x)\n if do_pooling:\n return b(\n indices.cuda(),\n offsets.cuda(),\n per_sample_weights=per_sample_weights,\n )\n else:\n return b(indices.cuda())\n\n\ndef generate_requests(\n iters: int,\n B: int,\n T: int,\n L: int,\n E: int,\n # inter-batch indices reuse rate\n reuse: float = 0.0,\n # alpha <= 1.0: use uniform distribution\n # alpha > 1.0: use zipf distribution\n alpha: float = 1.0,\n weights_precision: SparseType = SparseType.FP32,\n weighted: bool = False,\n requests_data_file: Optional[str] = None,\n # Comma-separated list of table numbers\n tables: Optional[str] = None,\n) -> List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]]:\n if requests_data_file is not None:\n indices_tensor, offsets_tensor, lengths_tensor = torch.load(requests_data_file)\n\n average_L = 0\n if tables is not None:\n emb_tables = tuple(int(x) for x in tables.split(\",\"))\n indices = torch.zeros(0, dtype=indices_tensor.dtype)\n offsets = torch.zeros(1, dtype=offsets_tensor.dtype)\n total_L = 0\n for t in emb_tables:\n t_offsets = offsets_tensor[B * t : B * (t + 1) + 1]\n total_L += t_offsets[-1] - t_offsets[0]\n indices = torch.cat(\n (indices, indices_tensor[t_offsets[0] : t_offsets[-1]])\n )\n offsets = torch.cat(\n (\n offsets,\n t_offsets[1:] - t_offsets[0] + offsets[-1],\n )\n )\n indices_tensor = indices\n offsets_tensor 
= offsets\n average_L = int(total_L / B)\n\n assert np.prod(offsets_tensor.size()) - 1 == np.prod((T, B)), (\n f\"Requested tables: {emb_tables} \"\n f\"does not conform to inputs (T, B) = ({T}, {B}).\"\n )\n logging.warning(\n f\"Using (indices = {indices_tensor.size()}, offsets = {offsets_tensor.size()}) based \"\n f\"on tables: {emb_tables}\"\n )\n else:\n average_L = int((offsets_tensor[-1] - offsets_tensor[0]) / B)\n assert (np.prod(offsets_tensor.size()) - 1) == np.prod((T, B)), (\n f\"Data file (indices = {indices_tensor.size()}, \"\n f\"offsets = {offsets_tensor.size()}, lengths = {lengths_tensor.size()}) \"\n f\"does not conform to inputs (T, B) = ({T}, {B}).\"\n )\n\n assert (\n L == average_L\n ), f\"Requested L does not align with provided data file ({L} vs. {average_L})\"\n assert E > max(indices_tensor), (\n f\"Number of embeddings is not enough to support maximum index \"\n f\"provided by data file {E} vs. {max(indices_tensor)}\"\n )\n\n weights_tensor = (\n None\n if not weighted\n else torch.randn(indices_tensor.size(), device=get_device())\n )\n rs = []\n for _ in range(iters):\n rs.append(\n (\n indices_tensor.to(get_device()),\n offsets_tensor.to(get_device()),\n weights_tensor,\n )\n )\n return rs\n\n if alpha <= 1.0:\n all_indices = torch.randint(\n low=0,\n high=E,\n size=(iters, T, B, L),\n device=get_device(),\n dtype=torch.int32,\n )\n # each bag is usually sorted\n (all_indices, _) = torch.sort(all_indices)\n all_indices = all_indices.reshape(iters, T, B * L)\n else:\n assert E >= L, \"num-embeddings must be greater than equal to bag-size\"\n # oversample and then remove duplicates to obtain sampling without\n # replacement\n all_indices = (np.random.zipf(a=alpha, size=(iters, T, B, 3 * L)) - 1) % E\n for index_tuple in itertools.product(range(iters), range(T), range(B)):\n # sample without replacement from\n # https://stats.stackexchange.com/questions/20590/how-do-i-sample-without-replacement-using-a-sampling-with-replacement-function\n r = set()\n for x in all_indices[index_tuple]:\n if x not in r:\n r.add(x)\n if len(r) == L:\n break\n assert (len(r)) == L, \"too skewed distribution (alpha too big)\"\n all_indices[index_tuple][:L] = list(r)\n # shuffle indices so we don't have unintended spatial locality\n all_indices = torch.as_tensor(all_indices[:, :, :, :L])\n rng = default_rng()\n permutation = torch.as_tensor(\n rng.choice(E, size=all_indices.max().item() + 1, replace=False)\n )\n all_indices = permutation.gather(0, all_indices.flatten())\n all_indices = all_indices.to(get_device()).int().reshape(iters, T, B * L)\n for it in range(iters - 1):\n for t in range(T):\n reused_indices = torch.randperm(B * L, device=get_device())[\n : int(B * L * reuse)\n ]\n all_indices[it + 1, t, reused_indices] = all_indices[it, t, reused_indices]\n\n rs = []\n for it in range(iters):\n weights_tensor = (\n None if not weighted else torch.randn(T * B * L, device=get_device())\n )\n rs.append(\n get_table_batched_offsets_from_dense(all_indices[it].view(T, B, L))\n + (weights_tensor,)\n )\n return rs\n\n\ndef benchmark_requests(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]],\n func: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],\n flush_gpu_cache_size_mb: int = 0,\n check_median: bool = False,\n) -> float:\n times = []\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n for (indices, offsets, weights) in requests:\n start_time = 
time.time()\n if torch.cuda.is_available():\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n )\n torch.cuda.synchronize()\n start_event.record()\n func(indices, offsets, weights)\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n it_time = start_event.elapsed_time(end_event) * 1.0e-3\n times.append(it_time)\n else:\n it_time = time.time() - start_time\n times.append(it_time)\n avg_time = sum(times) / len(requests)\n median_time = statistics.median(times)\n return median_time if check_median else avg_time\n\n\ndef benchmark_requests_refer(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]],\n T: int,\n B: int,\n L: int,\n E: int,\n D: int,\n pooling_mode: str,\n weighted: bool,\n flush_gpu_cache_size_mb: int = 0,\n check_median: bool = False,\n) -> float:\n do_pooling = pooling_mode in [\"sum\", \"mean\"]\n if do_pooling:\n nn_embedding_list = [\n torch.nn.EmbeddingBag(E, D, mode=pooling_mode, sparse=True).cuda()\n ] * T\n else:\n nn_embedding_list = [torch.nn.Embedding(E, D, sparse=True).cuda()] * T\n\n times = []\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n for (indices, _, weights) in requests:\n indices_list = indices.view(T, B, L).split(1)\n\n if weighted:\n assert weights is not None\n weights_list = weights.view(T, B, L).split(1)\n\n start_time = time.time()\n if torch.cuda.is_available():\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n )\n torch.cuda.synchronize()\n start_event.record()\n\n nn_embedding_output = (\n [\n b_indices(nn_embedding, x, use_cpu=False, do_pooling=do_pooling)\n for (nn_embedding, x) in zip(nn_embedding_list, indices_list)\n ]\n if not weighted\n else [\n b_indices(\n nn_embedding,\n x,\n per_sample_weights=xw.view(-1),\n use_cpu=False,\n do_pooling=do_pooling,\n )\n for (nn_embedding, x, xw) in zip(\n nn_embedding_list,\n indices_list,\n # pyre-fixme[61]: `weights_list` is undefined, or not always\n # defined.\n weights_list,\n )\n ]\n )\n if do_pooling:\n final_output = torch.cat(\n [f.view(B, -1) for f in nn_embedding_output], dim=1\n )\n else:\n final_output = torch.cat(nn_embedding_output, dim=0).view(-1, D)\n\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n it_time = start_event.elapsed_time(end_event) * 1.0e-3\n times.append(it_time)\n else:\n it_time = time.time() - start_time\n times.append(it_time)\n avg_time = sum(times) / len(requests)\n median_time = statistics.median(times)\n return median_time if check_median else avg_time\n\n\ndef benchmark_pipelined_requests(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]],\n func1: Callable[[Tensor, Tensor, Optional[Tensor]], None],\n func2: Callable[[Tensor, Tensor, Optional[Tensor]], None],\n flush_gpu_cache_size_mb: int = 0,\n) -> Tuple[float, float]:\n torch.cuda.synchronize()\n start_events = [\n (torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))\n for _ in requests\n ]\n end_events = [\n (torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))\n for _ in requests\n ]\n for ((indices, offsets, indices_weights), start_event, end_event) in zip(\n requests, start_events, end_events\n ):\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n 
)\n torch.cuda.synchronize()\n start_event[0].record()\n func1(indices, offsets, indices_weights)\n end_event[0].record()\n start_event[1].record()\n func2(indices, offsets, indices_weights)\n end_event[1].record()\n torch.cuda.synchronize()\n return (\n sum(\n start_event[0].elapsed_time(end_event[0]) * 1.0e-3\n for start_event, end_event in zip(start_events, end_events)\n )\n / len(requests),\n sum(\n start_event[1].elapsed_time(end_event[1]) * 1.0e-3\n for start_event, end_event in zip(start_events, end_events)\n )\n / len(requests),\n )\n\n\[email protected]()\ndef cli() -> None:\n pass\n\n\[email protected]()\n# recommended value: alpha=1.15 for training and alpha=1.09 for inference\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.FP32)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--iters\", default=100)\[email protected](\"--managed\", default=\"device\")\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.0)\[email protected](\"--row-wise/--no-row-wise\", default=True)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--weighted-num-requires-grad\", type=int, default=None)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--dense\", is_flag=True, default=False)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP32)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef device( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n embedding_dim: int,\n weights_precision: SparseType,\n stoc: bool,\n iters: int,\n managed: str,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n row_wise: bool,\n weighted: bool,\n weighted_num_requires_grad: Optional[int],\n flush_gpu_cache_size_mb: int,\n dense: bool,\n output_dtype: SparseType,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n if weighted_num_requires_grad:\n assert weighted_num_requires_grad <= T\n weighted_requires_grad_tables = np.random.choice(\n T, replace=False, size=(weighted_num_requires_grad,)\n ).tolist()\n feature_requires_grad = (\n torch.tensor(\n [1 if t in weighted_requires_grad_tables else 0 for t in range(T)]\n )\n .to(get_device())\n .int()\n )\n else:\n feature_requires_grad = None\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n optimizer = OptimType.EXACT_ROWWISE_ADAGRAD if row_wise else OptimType.EXACT_ADAGRAD\n\n if managed == \"device\":\n managed_option = (\n EmbeddingLocation.DEVICE\n if torch.cuda.is_available()\n else EmbeddingLocation.HOST\n )\n else:\n managed_option = EmbeddingLocation.MANAGED\n\n if dense:\n emb = DenseTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n )\n for d in Ds\n ],\n use_cpu=not torch.cuda.is_available(),\n )\n else:\n emb = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n managed_option,\n 
ComputeDevice.CUDA\n if torch.cuda.is_available()\n else ComputeDevice.CPU,\n )\n for d in Ds\n ],\n optimizer=optimizer,\n learning_rate=0.1,\n eps=0.1,\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n output_dtype=output_dtype,\n )\n emb = emb.to(get_device())\n\n if weights_precision == SparseType.INT8:\n emb.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n nparams = sum(w.numel() for w in emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n logging.info(\n f\"Embedding parameters: {nparams / 1.0e9: .2f} GParam, \"\n f\"{nparams * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n logging.info(\n f\"Accessed weights per batch: {B * sum(Ds) * L * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n\n # forward\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb.forward(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n feature_requires_grad=feature_requires_grad,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {param_size_multiplier * B * sum(Ds) * L / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n if output_dtype == SparseType.INT8:\n # backward bench not representative\n return\n\n grad_output = torch.randn(B, sum(Ds)).to(get_device())\n # backward\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n feature_requires_grad=feature_requires_grad,\n ).backward(grad_output),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"ForwardBackward, B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"BW: {3 * param_size_multiplier * B * sum(Ds) * L / time_per_iter / 1.0e9: .2f} GB/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.FP32)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--iters\", default=100)\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.1)\[email protected](\"--uvm-tables\", default=1)\[email protected](\"--uvm-bag-size\", default=1)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP32)\[email protected](\"--use-cache\", is_flag=True, default=False)\[email protected](\"--cache-algorithm\", default=\"lru\")\[email protected](\"--cache-load-factor\", default=0.2)\[email protected](\"--enforce-hbm\", is_flag=True, default=False)\ndef uvm(\n alpha: bool,\n bag_size: int,\n batch_size: int,\n embedding_dim: int,\n 
weights_precision: SparseType,\n stoc: bool,\n iters: int,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n uvm_tables: int,\n uvm_bag_size: int,\n weighted: bool,\n flush_gpu_cache_size_mb: int,\n requests_data_file: Optional[str],\n tables: Optional[str],\n output_dtype: SparseType,\n use_cache: bool,\n cache_algorithm: str,\n cache_load_factor: float,\n enforce_hbm: bool,\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n T_uvm = uvm_tables\n assert T_uvm <= T\n assert (\n T_uvm > 0\n ), f\"T_uvm specified {T_uvm} <= 0. If not testing UVM, please use device benchmark.\"\n T_gpu = T - T_uvm\n L_uvm = uvm_bag_size\n\n cache_alg = CacheAlgorithm.LRU if cache_algorithm == \"lru\" else CacheAlgorithm.LFU\n managed_type = (\n EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED\n )\n\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n emb_uvm = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n managed_type,\n ComputeDevice.CUDA,\n )\n for d in Ds[:T_uvm]\n ],\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n output_dtype=output_dtype,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n enforce_hbm=enforce_hbm,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb_uvm.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n if T_gpu > 0:\n emb_gpu = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n EmbeddingLocation.DEVICE,\n ComputeDevice.CUDA,\n )\n for d in Ds[T_uvm:]\n ],\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb_gpu.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n emb_mixed = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n managed_option,\n ComputeDevice.CUDA,\n )\n for (d, managed_option) in zip(\n Ds,\n [managed_type] * T_uvm + [EmbeddingLocation.DEVICE] * T_gpu,\n )\n ],\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n output_dtype=output_dtype,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n enforce_hbm=enforce_hbm,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb_mixed.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n requests_uvm = generate_requests(\n iters,\n B,\n T_uvm,\n L_uvm,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n\n requests_gpu = None\n if T_gpu > 0:\n requests_gpu = generate_requests(\n iters,\n B,\n T_gpu,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=False,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n read_write_bytes_uvm = (\n output_size_multiplier * B * sum(Ds[:T_uvm])\n + param_size_multiplier * B * sum(Ds[:T_uvm]) * L_uvm\n )\n\n time_per_iter = benchmark_requests(\n requests_uvm,\n lambda indices, offsets, per_sample_weights: emb_uvm.forward(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"UVM Forward, B: {B}, \"\n f\"E: {E}, T: {T_uvm}, D: {D}, L: {L_uvm}, W: {weighted}, \"\n f\"BW: {read_write_bytes_uvm / 
time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n if T_gpu > 0:\n requests = []\n assert requests_gpu is not None\n for rs_uvm, rs_gpu in zip(requests_uvm, requests_gpu):\n indices = torch.cat([rs_uvm[0], rs_gpu[0]])\n lengths = [L_uvm] * (T_uvm * B) + [L] * (T_gpu * B)\n offsets = torch.tensor(([0] + np.cumsum(lengths).tolist())).int().cuda()\n per_sample_weights = None\n if weighted:\n assert (this_rs_uvm_weights := rs_uvm[2]) is not None\n assert (this_rs_gpu_weights := rs_gpu[2]) is not None\n per_sample_weights = torch.cat(\n [this_rs_uvm_weights, this_rs_gpu_weights]\n )\n requests.append((indices, offsets, per_sample_weights))\n\n # forward\n time_per_iter = benchmark_requests(\n requests_gpu,\n lambda indices, offsets, per_sample_weights: emb_gpu.forward(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n read_write_bytes_hbm = (\n output_size_multiplier * B * sum(Ds[T_uvm:])\n + param_size_multiplier * B * sum(Ds[T_uvm:]) * L\n )\n logging.info(\n f\"GPU Forward, B: {B}, \"\n f\"E: {E}, T: {T_gpu}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes_hbm / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb_mixed.forward(\n indices.long(),\n offsets.long(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n read_write_bytes_total = read_write_bytes_uvm + read_write_bytes_hbm\n logging.info(\n f\"Mixed Forward, B: {B}, \"\n f\"E: {E}, T_GPU: {T_gpu}, T_UVM: {T_uvm}, D: {D}, L_GPU: {L}, L_UVM: {L_uvm}, W: {weighted}, \"\n f\"BW: {read_write_bytes_total / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--cache-algorithm\", default=\"lru\")\[email protected](\"--cache-load-factor\", default=0.2)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.FP32)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--long-index\", is_flag=True, default=False)\[email protected](\"--iters\", default=100)\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.1)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef cache( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n cache_algorithm: str,\n cache_load_factor: float,\n embedding_dim: int,\n weights_precision: SparseType,\n stoc: bool,\n iters: int,\n long_index: bool,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n weighted: bool,\n flush_gpu_cache_size_mb: int,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n optimizer = OptimType.EXACT_ROWWISE_ADAGRAD\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n cache_alg = 
CacheAlgorithm.LRU if cache_algorithm == \"lru\" else CacheAlgorithm.LFU\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n\n emb_nc = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n EmbeddingLocation.MANAGED,\n ComputeDevice.CUDA,\n )\n for d in Ds\n ],\n optimizer=optimizer,\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb_nc.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n emb = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n E,\n d,\n EmbeddingLocation.MANAGED_CACHING,\n ComputeDevice.CUDA,\n )\n for d in Ds\n ],\n optimizer=optimizer,\n weights_precision=weights_precision,\n stochastic_rounding=stoc,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n ).cuda()\n\n if weights_precision == SparseType.INT8:\n emb.init_embedding_weights_uniform(-0.0003, 0.0003)\n\n nparams = sum(w.numel() for w in emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n logging.info(\n f\"Embedding tables: {E * T} rows, {nparams / 1.0e9: .2f} GParam, \"\n f\"{nparams * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n logging.info(\n f\"Accessed weights per batch: {B * T * L} rows, \"\n f\"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n requests = generate_requests(\n 2 * iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n warmup_requests, requests = requests[:iters], requests[iters:]\n grad_output = torch.randn(B, sum(Ds)).cuda()\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb_nc(\n indices.long(), offsets.long(), per_sample_weights\n ).backward(grad_output),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"ForwardBackward (UVM), B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"BW: {3 * param_size_multiplier * B * sum(Ds) * L / time_per_iter / 1.0e9: .2f} GB/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n # warm up\n for indices, offsets, _ in warmup_requests:\n emb.forward(indices.long(), offsets.long())\n # get cache miss rate (forward and backward) and exchanged cache lines (prefetch)\n cache_misses = []\n exchanged_cache_lines = []\n NOT_FOUND = -1\n for indices, offsets, _ in requests:\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.clone)[[Named(self,\n # Variable[torch._TTensor (bound to Tensor)])], Variable[torch._TTensor (bound\n # to Tensor)]], Tensor], Tensor, torch.nn.Module]` is not a function.\n old_lxu_cache_state = emb.lxu_cache_state.clone()\n emb.prefetch(indices.long(), offsets.long())\n exchanged_cache_lines.append(\n # pyre-fixme[16]: `bool` has no attribute `sum`.\n (emb.lxu_cache_state != old_lxu_cache_state)\n .sum()\n .item()\n )\n cache_misses.append((emb.lxu_cache_locations_list[0] == NOT_FOUND).sum().item())\n emb.forward(indices.long(), offsets.long())\n logging.info(\n f\"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, \"\n f\"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}\"\n )\n logging.info(\n f\"Cache miss -- mean: {sum(cache_misses)/len(requests)}, \"\n f\"max: {max(cache_misses)}, min: {min(cache_misses)}\"\n )\n\n # benchmark prefetch\n emb.reset_cache_states()\n for indices, offsets, _ in warmup_requests:\n emb.forward(indices, offsets)\n prefetch_time, 
forward_backward_time = benchmark_pipelined_requests(\n requests,\n lambda indices, offsets, indices_weights: emb.prefetch(indices, offsets),\n lambda indices, offsets, indices_weights: emb.forward(\n indices, offsets, indices_weights\n ).backward(grad_output),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n e2e_time = prefetch_time + forward_backward_time\n\n logging.info(\n f\"ForwardBackward (LXU), reuse: {reuse}, alpha: {alpha}, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"BW: {3 * param_size_multiplier * B * sum(Ds) * L / e2e_time / 1.0e9: .2f} GB/s, \"\n f\"Tprefetch: {prefetch_time * 1.0e6:.0f}us, \"\n f\"{2 * sum(exchanged_cache_lines) * param_size_multiplier * D / prefetch_time / len(requests) / 1.0e9: .2f} GB/s, \"\n f\"Tfwdbwd: {forward_backward_time * 1.0e6:.0f}us, \"\n f\"{3 * param_size_multiplier * B * sum(Ds) * L / forward_backward_time / 1.0e9: .2f} GB/s, \"\n f\"Te2e: {e2e_time * 1.0e6:.0f}us, \"\n )\n\n\ndef benchmark_cpu_requests(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],\n func: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],\n) -> float:\n import time\n\n start_time = time.perf_counter()\n for (indices, offsets, weights) in requests:\n func(indices, offsets, weights)\n end_time = time.perf_counter()\n return (end_time - start_time) / len(requests)\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.INT4)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--iters\", default=100)\[email protected](\"--managed\", default=\"device\")\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.0)\[email protected](\"--row-wise/--no-row-wise\", default=True)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--index-remapping\", is_flag=True, default=False)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP16)\ndef nbit_cpu( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n embedding_dim: int,\n weights_precision: SparseType,\n stoc: bool,\n iters: int,\n managed: str,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n row_wise: bool,\n weighted: bool,\n index_remapping: bool,\n requests_data_file: Optional[str],\n tables: Optional[str],\n output_dtype: SparseType,\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n if mixed:\n Ds = [\n # int4 table batched emb op can only handle mixed D where D is multiple of 8\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 8)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n\n emb = IntNBitTableBatchedEmbeddingBagsCodegen(\n [(\"\", E, d, weights_precision, EmbeddingLocation.HOST) for d in Ds],\n device=\"cpu\",\n index_remapping=[torch.arange(E) for _ in Ds] if index_remapping else None,\n output_dtype=output_dtype,\n ).cpu()\n emb.fill_random_weights()\n\n nparams_byte = sum(w.numel() for (w, _) in 
emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n read_write_bytes = (\n output_size_multiplier * B * T * D + param_size_multiplier * B * T * L * D\n )\n logging.info(\n f\"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, \"\n f\"{nparams_byte / 1.0e9: .2f} GB\" # IntN TBE use byte for storage\n )\n logging.info(\n f\"Accessed weights per batch: {B * T * L} rows, \"\n f\"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n requests = [\n (a.cpu().int(), b.cpu().int(), c.cpu() if c else None) for (a, b, c) in requests\n ]\n\n time_per_iter = benchmark_cpu_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb.forward(\n indices,\n offsets,\n per_sample_weights,\n ),\n )\n\n logging.info(\n f\"{weights_precision} Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.INT4)\[email protected](\"--stoc\", is_flag=True, default=False)\[email protected](\"--managed\", default=\"device\")\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.0)\[email protected](\"--row-wise/--no-row-wise\", default=True)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--pooling\", type=str, default=\"sum\")\[email protected](\"--weighted-num-requires-grad\", type=int, default=None)\[email protected](\"--bounds-check-mode\", type=int, default=BoundsCheckMode.WARNING.value)\[email protected](\"--pruning-ratio\", type=float, default=None)\[email protected](\"--load-factor\", default=0.75)\[email protected](\"--use-array-for-index-remapping\", is_flag=True, default=True)\[email protected](\"--check-median\", is_flag=True, default=True)\[email protected](\"--iters\", default=100)\[email protected](\"--runs-of-iters\", default=5)\[email protected](\"--warmup-runs\", default=2)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP16)\[email protected](\"--report-aibench\", is_flag=True)\[email protected](\"--run-reference\", is_flag=True, default=False)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef nbit_device( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n embedding_dim: int,\n weights_precision: SparseType,\n stoc: bool,\n managed: str,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n row_wise: bool,\n weighted: bool,\n pooling: str,\n weighted_num_requires_grad: Optional[int],\n bounds_check_mode: int,\n pruning_ratio: Optional[float],\n load_factor: float,\n use_array_for_index_remapping: bool,\n check_median: bool,\n iters: int,\n runs_of_iters: int,\n 
warmup_runs: int,\n output_dtype: SparseType,\n report_aibench: bool,\n run_reference: bool,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n original_E = E\n T = num_tables\n index_remapping = None\n if mixed:\n # int4 table batched emb op can only handle mixed D where D is multiple of 8\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 8)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n\n mem_for_pruning = 0\n if pruning_ratio:\n assert pruning_ratio < 1 and pruning_ratio >= 0\n E = math.ceil(E * (1.0 - pruning_ratio))\n index_remapping = []\n for _ in range(T):\n mapping = torch.tensor([-1] * original_E, dtype=torch.int32)\n selected_indices = random.sample(range(original_E), E)\n for i, idx in enumerate(selected_indices):\n mapping[idx] = i\n index_remapping.append(mapping)\n if use_array_for_index_remapping:\n mem_for_pruning += mapping.numel() * 4\n else:\n mem_for_pruning += E / load_factor * 2 * 4\n\n if managed == \"device\":\n managed_option = EmbeddingLocation.DEVICE\n else:\n managed_option = EmbeddingLocation.MANAGED\n\n if pooling is None or pooling == \"sum\":\n pooling = \"sum\"\n pooling_mode = PoolingMode.SUM\n do_pooling = True\n elif pooling == \"mean\":\n pooling_mode = PoolingMode.MEAN\n do_pooling = True\n else: # \"none\"\n pooling_mode = PoolingMode.NONE\n do_pooling = False\n\n emb = IntNBitTableBatchedEmbeddingBagsCodegen(\n [(\"\", E, d, weights_precision, managed_option) for d in Ds],\n bounds_check_mode=BoundsCheckMode(bounds_check_mode),\n index_remapping=index_remapping,\n load_factor=load_factor,\n use_array_for_index_remapping=use_array_for_index_remapping,\n output_dtype=output_dtype,\n pooling_mode=pooling_mode,\n ).cuda()\n emb.fill_random_weights()\n\n nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n if do_pooling:\n read_write_bytes = (\n output_size_multiplier * B * T * D + param_size_multiplier * B * T * L * D\n )\n else:\n read_write_bytes = (\n output_size_multiplier * B * T * L * D\n + param_size_multiplier * B * T * L * D\n )\n logging.info(\n f\"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, \"\n f\"{nparams_byte / 1.0e9: .2f} GB\" # IntN TBE use byte for storage\n )\n logging.info(\n f\"Accessed weights per batch: {B * T * L} rows, \"\n f\"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n times = []\n for i in range(runs_of_iters):\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]\n\n # forward\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb.forward(\n indices.int(),\n offsets.int(),\n per_sample_weights,\n ),\n check_median=check_median,\n )\n\n # free up GPU memory\n del requests\n\n logging.info(\n f\"Iteration {i}: \"\n f\"{weights_precision} Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us, \"\n 
f\"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB\"\n )\n\n if i >= warmup_runs:\n times.append(time_per_iter)\n\n time_per_iter = statistics.mean(times)\n bandwidth = read_write_bytes / time_per_iter / 1.0e9\n\n logging.info(\n f\"Average of all iterations: \"\n f\"{weights_precision} Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {bandwidth: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us, \"\n f\"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB\"\n )\n\n if report_aibench and haveAIBench:\n print(\n emitMetric(\n type=\"NET\",\n metric=f\"bandwidth_{weights_precision}\",\n unit=\"scalar\",\n value=str(bandwidth),\n )\n )\n print(\n emitMetric(\n type=\"NET\",\n metric=f\"time_per_iter_{weights_precision}\",\n unit=\"scalar\",\n value=str(time_per_iter * 1.0e6),\n )\n )\n\n if run_reference:\n times = []\n for i in range(runs_of_iters):\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]\n\n # forward\n time_per_iter_refer = benchmark_requests_refer(\n requests,\n T,\n B,\n L,\n E,\n D,\n pooling,\n weighted,\n check_median=check_median,\n )\n\n # free up GPU memory\n del requests\n\n logging.info(\n f\"Reference (nn.Embedding(Bag)) Iteration {i}: \"\n f\"Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes / time_per_iter_refer / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter_refer * 1.0e6:.0f}us \"\n )\n\n if i >= warmup_runs:\n times.append(time_per_iter_refer)\n\n time_per_iter_refer = statistics.mean(times)\n bandwidth = read_write_bytes / time_per_iter_refer / 1.0e9\n\n logging.info(\n f\"Average of all iterations: \"\n f\"Forward, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, \"\n f\"Effective BW: {bandwidth: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter_refer * 1.0e6:.0f}us \"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--embedding-dim\", default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.INT4)\[email protected](\"--iters\", default=100)\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.1)\[email protected](\"--uvm-num-embeddings\", default=int(1e5))\[email protected](\"--uvm-tables\", default=1)\[email protected](\"--uvm-bag-size\", default=1)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP16)\[email protected](\"--use-cache\", is_flag=True, default=False)\[email protected](\"--cache-algorithm\", default=\"lru\")\[email protected](\"--cache-load-factor\", default=0.2)\[email protected](\"--enforce-hbm\", is_flag=True, default=False)\ndef nbit_uvm(\n alpha: bool,\n bag_size: int,\n batch_size: int,\n embedding_dim: int,\n weights_precision: SparseType,\n iters: int,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n uvm_num_embeddings: int,\n uvm_tables: int,\n uvm_bag_size: 
int,\n weighted: bool,\n flush_gpu_cache_size_mb: int,\n output_dtype: SparseType,\n use_cache: bool,\n cache_algorithm: str,\n cache_load_factor: float,\n enforce_hbm: bool,\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n E_uvm = uvm_num_embeddings\n T = num_tables\n T_uvm = uvm_tables\n assert T_uvm <= T\n assert (\n T_uvm > 0\n ), f\"T_uvm specified {T_uvm} <= 0. If not testing UVM, please use device benchmark.\"\n T_gpu = T - T_uvm\n L_uvm = uvm_bag_size\n cache_alg = CacheAlgorithm.LRU if cache_algorithm == \"lru\" else CacheAlgorithm.LFU\n managed_type = (\n EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED\n )\n\n logging.info(f\"T: {T}, T_uvm: {T_uvm}, T_gpu: {T_gpu}\")\n\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n emb_uvm = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n E_uvm,\n d,\n weights_precision,\n managed_type,\n )\n for d in Ds[:T_uvm]\n ],\n output_dtype=output_dtype,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n enforce_hbm=enforce_hbm,\n ).cuda()\n emb_uvm.fill_random_weights()\n\n if T_gpu > 0:\n emb_gpu = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n E,\n d,\n weights_precision,\n EmbeddingLocation.DEVICE,\n )\n for d in Ds[T_uvm:]\n ],\n output_dtype=output_dtype,\n ).cuda()\n emb_gpu.fill_random_weights()\n\n emb_mixed = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n e,\n d,\n weights_precision,\n managed_option,\n )\n for (e, d, managed_option) in zip(\n [E_uvm] * T_uvm + [E] * T_gpu,\n Ds,\n [managed_type] * T_uvm + [EmbeddingLocation.DEVICE] * T_gpu,\n )\n ],\n output_dtype=output_dtype,\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n enforce_hbm=enforce_hbm,\n ).cuda()\n emb_mixed.fill_random_weights()\n\n requests_uvm = generate_requests(\n iters,\n B,\n T_uvm,\n L_uvm,\n E_uvm,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=weighted,\n )\n requests_uvm = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests_uvm]\n\n requests_gpu = None\n if T_gpu > 0:\n requests_gpu = generate_requests(\n iters,\n B,\n T_gpu,\n L,\n E,\n reuse=reuse,\n alpha=alpha,\n weights_precision=weights_precision,\n weighted=False,\n )\n requests_gpu = [\n (a.int(), b.int(), c if c else None) for (a, b, c) in requests_gpu\n ]\n\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n read_write_bytes_uvm = (\n output_size_multiplier * B * sum(Ds[:T_uvm])\n + param_size_multiplier * B * sum(Ds[:T_uvm]) * L_uvm\n )\n\n if T_gpu > 0:\n nparams_byte = sum(w.numel() for (w, _) in emb_mixed.split_embedding_weights())\n logging.info(\n f\"{weights_precision} Embedding tables: {E * T + E_uvm * T_uvm} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, \"\n f\"{nparams_byte / 1.0e9: .2f} GB\" # IntN TBE use byte for storage\n )\n logging.info(\n f\"Accessed weights per batch: {B * (T * L + T_uvm * L_uvm)} rows, \"\n f\"{B * (T * L * sum(Ds[T_uvm:]) + T_uvm * L_uvm * sum(Ds[:T_uvm])) * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n time_per_iter = benchmark_requests(\n requests_uvm,\n lambda indices, offsets, per_sample_weights: emb_uvm.forward(\n indices.int(),\n offsets.int(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n 
logging.info(\n f\"UVM NBit Forward, {weights_precision}, B: {B}, \"\n f\"E_uvm: {E_uvm}, T: {T_uvm}, D: {D}, L: {L_uvm}, W: {weighted}, \"\n f\"BW: {read_write_bytes_uvm / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n if T_gpu > 0:\n requests = []\n assert requests_gpu is not None\n for rs_uvm, rs_gpu in zip(requests_uvm, requests_gpu):\n indices = torch.cat([rs_uvm[0], rs_gpu[0]])\n lengths = [L_uvm] * (T_uvm * B) + [L] * (T_gpu * B)\n offsets = torch.tensor(([0] + np.cumsum(lengths).tolist())).int().cuda()\n per_sample_weights = None\n if weighted:\n assert (this_rs_uvm_weights := rs_uvm[2]) is not None\n assert (this_rs_gpu_weights := rs_gpu[2]) is not None\n per_sample_weights = torch.cat(\n [this_rs_uvm_weights, this_rs_gpu_weights]\n )\n requests.append((indices, offsets, per_sample_weights))\n\n # forward\n time_per_iter = benchmark_requests(\n requests_gpu,\n lambda indices, offsets, per_sample_weights: emb_gpu.forward(\n indices.int(),\n offsets.int(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n\n read_write_bytes_hbm = (\n output_size_multiplier * B * sum(Ds[T_uvm:])\n + param_size_multiplier * B * sum(Ds[T_uvm:]) * L\n )\n logging.info(\n f\"GPU NBit Forward, {weights_precision}, B: {B}, \"\n f\"E: {E}, T: {T_gpu}, D: {D}, L: {L}, W: {weighted}, \"\n f\"BW: {read_write_bytes_hbm / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb_mixed.forward(\n indices.int(),\n offsets.int(),\n per_sample_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n read_write_bytes_total = read_write_bytes_uvm + read_write_bytes_hbm\n logging.info(\n f\"Mixed NBit Forward, {weights_precision}, B: {B}, \"\n f\"E_GPU: {E}, E_UVM: {E_uvm}, T_GPU: {T_gpu}, T_UVM: {T_uvm}, D: {D}, L_GPU: {L}, L_UVM: {L_uvm}, W: {weighted}, \"\n f\"BW: {read_write_bytes_total / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"Time: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n # benchmark prefetch\n emb_mixed.reset_cache_states()\n for indices, offsets, _ in requests:\n emb_mixed.forward(indices, offsets)\n prefetch_time, forward_time = benchmark_pipelined_requests(\n requests,\n lambda indices, offsets, indices_weights: emb_mixed.prefetch(\n indices,\n offsets,\n ),\n # pyre-fixme[6]: Expected `(Tensor, Tensor, Optional[Tensor]) -> None` for\n # 3rd param but got `(indices: Any, offsets: Any, indices_weights: Any) ->\n # Tensor`.\n lambda indices, offsets, indices_weights: emb_mixed.forward(\n indices,\n offsets,\n indices_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n e2e_time = prefetch_time + forward_time\n\n logging.info(\n f\"Forward(LXU) {weights_precision}, reuse: {reuse}, alpha: {alpha}, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"Te2e: {e2e_time * 1.0e6:.0f}us, \"\n f\"e2e BW: {read_write_bytes_total / e2e_time / 1.0e9: .2f} GB/s, \"\n f\"Tprefetch: {prefetch_time * 1.0e6:.0f}us, \"\n f\"TfwdTime: {forward_time * 1.0e6:.0f}us, \"\n f\"{read_write_bytes_total / forward_time / 1.0e9: .2f} GB/s\"\n )\n\n\[email protected]()\[email protected](\"--alpha\", default=1.0)\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--cache-algorithm\", default=\"lru\")\[email protected](\"--cache-load-factor\", default=0.2)\[email protected](\"--embedding-dim\", 
default=128)\[email protected](\"--weights-precision\", type=SparseType, default=SparseType.INT4)\[email protected](\"--iters\", default=100)\[email protected](\"--mixed\", is_flag=True, default=False)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--reuse\", default=0.1)\[email protected](\"--weighted\", is_flag=True, default=False)\[email protected](\"--flush-gpu-cache-size-mb\", default=0)\[email protected](\"--output-dtype\", type=SparseType, default=SparseType.FP16)\[email protected](\"--enforce-hbm\", is_flag=True, default=False)\ndef nbit_cache( # noqa C901\n alpha: float,\n bag_size: int,\n batch_size: int,\n cache_algorithm: str,\n cache_load_factor: float,\n embedding_dim: int,\n weights_precision: SparseType,\n iters: int,\n mixed: bool,\n num_embeddings: int,\n num_tables: int,\n reuse: float,\n weighted: bool,\n flush_gpu_cache_size_mb: int,\n output_dtype: SparseType,\n enforce_hbm: bool,\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n D = embedding_dim\n L = bag_size\n E = num_embeddings\n T = num_tables\n cache_alg = CacheAlgorithm.LRU if cache_algorithm == \"lru\" else CacheAlgorithm.LFU\n if mixed:\n Ds = [\n round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)\n for _ in range(T)\n ]\n D = np.average(Ds)\n else:\n Ds = [D] * T\n\n emb_nc = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n E,\n d,\n weights_precision,\n EmbeddingLocation.MANAGED,\n )\n for d in Ds\n ],\n output_dtype=output_dtype,\n enforce_hbm=enforce_hbm,\n ).cuda()\n emb_nc.fill_random_weights()\n\n emb = IntNBitTableBatchedEmbeddingBagsCodegen(\n [\n (\n \"\",\n E,\n d,\n weights_precision,\n EmbeddingLocation.MANAGED_CACHING,\n )\n for d in Ds\n ],\n cache_load_factor=cache_load_factor,\n cache_algorithm=cache_alg,\n output_dtype=output_dtype,\n enforce_hbm=enforce_hbm,\n ).cuda()\n emb.fill_random_weights()\n\n nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())\n param_size_multiplier = weights_precision.bit_rate() / 8.0\n output_size_multiplier = output_dtype.bit_rate() / 8.0\n read_write_bytes = (\n output_size_multiplier * B * sum(Ds) + param_size_multiplier * B * sum(Ds) * L\n )\n logging.info(\n f\"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, \"\n f\"{nparams_byte / 1.0e9: .2f} GB\" # IntN TBE use byte for storage\n )\n logging.info(\n f\"Accessed weights per batch: {B * T * L} rows, \"\n f\"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB\"\n )\n\n requests = generate_requests(\n 2 * iters, B, T, L, E, reuse=reuse, alpha=alpha, weighted=weighted\n )\n requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]\n warmup_requests, requests = requests[:iters], requests[iters:]\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, per_sample_weights: emb_nc(\n indices.int(), offsets.int(), per_sample_weights\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n logging.info(\n f\"Forward (UVM) {weights_precision}, B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n # exchanged_cache_lines = [100]\n # warm up\n for indices, offsets, _ in warmup_requests:\n emb.forward(indices.int(), offsets.int())\n # get cache miss rate (forward only) and exchanged cache lines (prefetch)\n cache_misses = []\n 
exchanged_cache_lines = []\n NOT_FOUND = -1\n for indices, offsets, _ in requests:\n # pyre-fixme[29]:\n # `Union[BoundMethod[typing.Callable(Tensor.clone)[[Named(self,\n # Variable[torch._TTensor (bound to Tensor)])], Variable[torch._TTensor (bound\n # to Tensor)]], Tensor], Tensor, torch.nn.Module]` is not a function.\n old_lxu_cache_state = emb.lxu_cache_state.clone()\n emb.prefetch(indices, offsets)\n exchanged_cache_lines.append(\n # pyre-fixme[16]: `bool` has no attribute `sum`.\n (emb.lxu_cache_state != old_lxu_cache_state)\n .sum()\n .item()\n )\n cache_misses.append(\n (emb.lxu_cache_locations_list.top() == NOT_FOUND).sum().item()\n )\n emb.forward(indices, offsets)\n logging.info(\n f\"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, \"\n f\"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}\"\n )\n logging.info(\n f\"Cache miss -- mean: {sum(cache_misses)/len(requests)}, \"\n f\"max: {max(cache_misses)}, min: {min(cache_misses)}\"\n )\n\n # benchmark prefetch\n emb.reset_cache_states()\n for indices, offsets, _ in warmup_requests:\n emb.forward(indices, offsets)\n prefetch_time, forward_time = benchmark_pipelined_requests(\n requests,\n lambda indices, offsets, indices_weights: emb.prefetch(\n indices,\n offsets,\n ),\n # pyre-fixme[6]: Expected `(Tensor, Tensor, Optional[Tensor]) -> None` for\n # 3rd param but got `(indices: Any, offsets: Any, indices_weights: Any) ->\n # Tensor`.\n lambda indices, offsets, indices_weights: emb.forward(\n indices,\n offsets,\n indices_weights,\n ),\n flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,\n )\n e2e_time = prefetch_time + forward_time\n\n logging.info(\n f\"Forward(LXU) {weights_precision}, reuse: {reuse}, alpha: {alpha}, B: {B}, \"\n f\"E: {E}, T: {T}, D: {D}, L: {L}, \"\n f\"Te2e: {e2e_time * 1.0e6:.0f}us, \"\n f\"e2e BW: {read_write_bytes / e2e_time / 1.0e9: .2f} GB/s, \"\n f\"Tprefetch: {prefetch_time * 1.0e6:.0f}us, \"\n f\"{2 * sum(exchanged_cache_lines) * param_size_multiplier * D / prefetch_time / len(requests) / 1.0e9: .2f} GB/s, \"\n f\"TfwdTime: {forward_time * 1.0e6:.0f}us, \"\n f\"{read_write_bytes / forward_time / 1.0e9: .2f} GB/s\"\n )\n\n\[email protected]()\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=2048)\[email protected](\"--iters\", default=10)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=100)\[email protected](\"--load-factor\", default=0.75)\[email protected](\"--hit-rate\", default=0.9)\[email protected](\"--use-cpu\", is_flag=True, default=False)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef hashtable( # noqa C901\n bag_size: int,\n batch_size: int,\n iters: int,\n num_embeddings: int,\n num_tables: int,\n load_factor: float,\n hit_rate: float,\n use_cpu: bool,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n B = batch_size\n T = num_tables\n L = bag_size\n E = num_embeddings\n np.random.seed(42)\n torch.manual_seed(42)\n if hit_rate == 1.0:\n chosen_indices = torch.cat([torch.arange(E) for _ in range(T)], dim=0).int()\n else:\n chosen_indices = (\n torch.randint(low=0, high=int(E * 1.0 / hit_rate), size=(E * T,))\n .view(-1)\n .int()\n )\n dense_indices = torch.cat([torch.arange(E) for _ in range(T)], dim=0).int()\n offsets = torch.tensor([E * t for t in range(T + 1)]).int()\n assert offsets[-1] == chosen_indices.numel()\n assert offsets.numel() == T + 
1\n assert (offsets.numel() - 1) // T == 1\n\n capacities = [round_up(int(E / load_factor), 32) for _ in range(T)]\n\n hash_table = torch.zeros(\n (sum(capacities), 2),\n dtype=torch.int32,\n )\n hash_table_offsets = torch.tensor([0] + np.cumsum(capacities).tolist()).long()\n\n assert hash_table.numel() * 4 < 2 ** 32\n # initialize\n hash_table[:, :] = -1\n torch.ops.fbgemm.pruned_hashmap_insert(\n chosen_indices, dense_indices, offsets, hash_table, hash_table_offsets\n )\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n\n if not use_cpu:\n hash_table = hash_table.cuda()\n hash_table_offsets = hash_table_offsets.cuda()\n requests = [(a.cuda().int(), b.cuda().int(), c) for (a, b, c) in requests]\n else:\n requests = [(a.int().cpu(), b.int().cpu(), c) for (a, b, c) in requests]\n\n empirical_hit_rate = np.mean(\n [\n torch.ops.fbgemm.pruned_hashmap_lookup(\n indices, offsets, hash_table, hash_table_offsets\n )\n .ne(-1)\n .sum()\n .item()\n / indices.numel()\n for indices, offsets, _ in requests\n ]\n )\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, _: torch.ops.fbgemm.pruned_hashmap_lookup(\n indices, offsets, hash_table, hash_table_offsets\n ),\n )\n\n logging.info(\n f\"LinearTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us, load factor: {E * T / hash_table.shape[0] * 100:.1f}%, hit rate: {empirical_hit_rate * 100:.2f}%, Table size: {hash_table.numel() * 4 / 1.0e9:.0f} GB\"\n )\n\n if use_cpu:\n ht = torch.classes.fb.PrunedMapCPU()\n ht.insert(chosen_indices, dense_indices, offsets, T)\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, _: ht.lookup(indices, offsets),\n )\n\n logging.info(\n f\"HashTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us, load factor: {E * T / hash_table.shape[0] * 100:.1f}%, hit rate: {empirical_hit_rate * 100:.2f}%, Table size: {hash_table.numel() * 4 / 1.0e9:.0f} GB\"\n )\n\n\[email protected]()\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=2048)\[email protected](\"--iters\", default=100)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=100)\[email protected](\"--pruning-ratio\", default=0.9)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef pruned_array( # noqa C901\n bag_size: int,\n batch_size: int,\n iters: int,\n num_embeddings: int,\n num_tables: int,\n pruning_ratio: float,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n B = batch_size\n T = num_tables\n L = bag_size\n E = num_embeddings\n np.random.seed(42)\n torch.manual_seed(42)\n assert pruning_ratio > 0 and pruning_ratio <= 1\n original_E = int(E / (1.0 - pruning_ratio))\n index_remappings = torch.tensor(\n [-1] * original_E * T, dtype=torch.int32, device=\"cuda\"\n )\n index_remappings_offsets = torch.empty(T + 1, dtype=torch.int32, device=\"cuda\")\n index_remappings_offsets[0] = 0\n dense_indicies = torch.tensor(range(E), dtype=torch.int32, device=\"cuda\")\n for t in range(T):\n selected_indices = torch.add(\n torch.randperm(original_E, device=\"cuda\"), t * original_E\n )[:E]\n index_remappings[selected_indices] = dense_indicies\n index_remappings_offsets[t + 1] = index_remappings_offsets[t] + 
original_E\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n requests = [(a.cuda().int(), b.cuda().int(), c) for (a, b, c) in requests]\n\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, _: torch.ops.fbgemm.pruned_array_lookup(\n indices,\n offsets,\n index_remappings,\n index_remappings_offsets,\n ),\n )\n\n logging.info(\n f\"LinearTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, \"\n f\"T: {time_per_iter * 1.0e6:.0f}us, Pruning Ratio: {pruning_ratio * 100:.2f}%, Table size: {original_E * T * 4 / 1.0e9:.0f} GB\"\n )\n\n\[email protected]()\[email protected](\"--bag-size\", default=20)\[email protected](\"--batch-size\", default=512)\[email protected](\"--iters\", default=100)\[email protected](\"--num-embeddings\", default=int(1e5))\[email protected](\"--num-tables\", default=32)\[email protected](\"--bounds-check-mode\", type=int, default=BoundsCheckMode.WARNING.value)\[email protected](\"--requests_data_file\", type=str, default=None)\[email protected](\"--tables\", type=str, default=None)\ndef bounds_check_indices( # noqa C901\n bag_size: int,\n batch_size: int,\n iters: int,\n num_embeddings: int,\n num_tables: int,\n bounds_check_mode: int,\n requests_data_file: Optional[str],\n tables: Optional[str],\n) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n B = batch_size\n L = bag_size\n E = num_embeddings\n T = num_tables\n\n requests = generate_requests(\n iters,\n B,\n T,\n L,\n E,\n requests_data_file=requests_data_file,\n tables=tables,\n )\n # requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]\n\n warning = torch.tensor([0]).long().to(get_device())\n rows_per_table = torch.tensor([E for _ in range(T)]).long().to(get_device())\n # forward\n time_per_iter = benchmark_requests(\n requests,\n lambda indices, offsets, _: torch.ops.fbgemm.bounds_check_indices(\n rows_per_table,\n indices,\n offsets,\n BoundsCheckMode(bounds_check_mode),\n warning,\n ),\n )\n\n logging.info(\n f\"Bounds Check Indices: B: {B}, \"\n f\"E: {E}, T: {T}, L: {L}, \"\n f\"BW: {(8 * B * T * L + 8 * (B * T + 1)) / time_per_iter / 1.0e9: .2f} GB/s, \" # noqa: B950\n f\"T: {time_per_iter * 1.0e6:.0f}us\"\n )\n\n\nif __name__ == \"__main__\":\n cli()\n"
] | [
[
"torch.load",
"torch.zeros",
"torch.cat",
"torch.randperm",
"numpy.cumsum",
"torch.nn.Embedding",
"torch.cuda.is_available",
"torch.device",
"numpy.random.default_rng",
"torch.nn.EmbeddingBag",
"torch.cuda.synchronize",
"torch.ops.fbgemm.pruned_array_lookup",
"numpy.random.zipf",
"torch.tensor",
"torch.sort",
"torch.rand",
"torch.arange",
"torch.classes.fb.PrunedMapCPU",
"torch.ops.fbgemm.pruned_hashmap_lookup",
"torch.empty",
"torch.cuda.current_device",
"numpy.random.choice",
"torch.cuda.Event",
"torch.as_tensor",
"numpy.random.seed",
"torch.ops.fbgemm.pruned_hashmap_insert",
"torch.manual_seed",
"numpy.ones",
"numpy.prod",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
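The benchmark helpers stored in the row above (`benchmark_requests`, `benchmark_requests_refer`, `benchmark_pipelined_requests`) all time GPU work with `torch.cuda.Event` pairs plus explicit synchronization, falling back to wall-clock timing when no GPU is present. A minimal standalone sketch of that timing pattern is shown below; the helper name `time_gpu_call` and the matmul workload are illustrative assumptions, not part of the dataset row.

```python
import time
from typing import Callable, List

import torch


def time_gpu_call(func: Callable[[], object], iters: int = 10) -> float:
    """Return the median per-call latency of ``func`` in seconds.

    Mirrors the CUDA-event timing pattern used by the benchmark helpers
    above: record start/end events around the call, synchronize, then read
    the elapsed time. Falls back to time.time() when CUDA is unavailable.
    """
    times: List[float] = []
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        torch.cuda.synchronize()
        start_event = torch.cuda.Event(enable_timing=True)
        end_event = torch.cuda.Event(enable_timing=True)
    for _ in range(iters):
        if use_cuda:
            start_event.record()
            func()
            end_event.record()
            torch.cuda.synchronize()
            # elapsed_time() reports milliseconds; convert to seconds.
            times.append(start_event.elapsed_time(end_event) * 1.0e-3)
        else:
            t0 = time.time()
            func()
            times.append(time.time() - t0)
    times.sort()
    return times[len(times) // 2]


if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.randn(1024, 1024, device=device)
    print(f"matmul: {time_gpu_call(lambda: x @ x) * 1.0e6:.0f}us")
```

As in the benchmark code above, `elapsed_time` returns milliseconds, hence the `* 1.0e-3` conversion before averaging or taking the median (`check_median` in the original helpers selects between the two).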
Moon-sung-woo/Tacotron2_korean | [
"cb503e212b6bcae7c7b732b50887b56d11cfd543"
] | [
"train.py"
] | [
"# *****************************************************************************\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# *****************************************************************************\n\nimport os\nimport time\nimport argparse\nimport numpy as np\nfrom contextlib import contextmanager\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\n\nimport torch.distributed as dist\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom apex.parallel import DistributedDataParallel as DDP\n\nimport models\nimport loss_functions\nimport data_functions\n\nimport dllogger as DLLogger\nfrom dllogger import StdOutBackend, JSONStreamBackend, Verbosity\n\nfrom scipy.io.wavfile import write as write_wav\n\nfrom apex import amp\namp.lists.functional_overrides.FP32_FUNCS.remove('softmax')\namp.lists.functional_overrides.FP16_FUNCS.append('softmax')\n\n\ndef parse_args(parser):\n \"\"\"\n Parse commandline arguments.\n \"\"\"\n\n parser.add_argument('-o', '--output', type=str, required=True,\n help='Directory to save checkpoints')\n parser.add_argument('-d', '--dataset-path', type=str,\n default='./', help='Path to dataset')\n parser.add_argument('-m', '--model-name', type=str, default='', required=True,\n help='Model to train')\n parser.add_argument('--log-file', type=str, default='nvlog.json',\n help='Filename for logging')\n parser.add_argument('--anneal-steps', nargs='*',\n help='Epochs after which decrease learning rate')\n parser.add_argument('--anneal-factor', type=float, choices=[0.1, 0.3], default=0.1,\n help='Factor for annealing learning rate')\n\n # training\n training = parser.add_argument_group('training setup')\n training.add_argument('--epochs', type=int, required=True,\n help='Number of total epochs to run')\n training.add_argument('--epochs-per-checkpoint', type=int, default=50,\n help='Number of epochs per checkpoint')\n training.add_argument('--checkpoint-path', type=str, default='',\n help='Checkpoint path 
to resume training')\n training.add_argument('--resume-from-last', action='store_true',\n help='Resumes training from the last checkpoint; uses the directory provided with \\'--output\\' option to search for the checkpoint \\\"checkpoint_<model_name>_last.pt\\\"')\n training.add_argument('--dynamic-loss-scaling', type=bool, default=True,\n help='Enable dynamic loss scaling')\n training.add_argument('--amp', action='store_true',\n help='Enable AMP')\n training.add_argument('--cudnn-enabled', action='store_true',\n help='Enable cudnn')\n training.add_argument('--cudnn-benchmark', action='store_true',\n help='Run cudnn benchmark')\n training.add_argument('--disable-uniform-initialize-bn-weight', action='store_true',\n help='disable uniform initialization of batchnorm layer weight')\n\n optimization = parser.add_argument_group('optimization setup')\n optimization.add_argument(\n '--use-saved-learning-rate', default=False, type=bool)\n optimization.add_argument('-lr', '--learning-rate', type=float, required=True,\n help='Learing rate')\n optimization.add_argument('--weight-decay', default=1e-6, type=float,\n help='Weight decay')\n optimization.add_argument('--grad-clip-thresh', default=1.0, type=float,\n help='Clip threshold for gradients')\n optimization.add_argument('-bs', '--batch-size', type=int, required=True,\n help='Batch size per GPU')\n optimization.add_argument('--grad-clip', default=5.0, type=float,\n help='Enables gradient clipping and sets maximum gradient norm value')\n\n # dataset parameters\n dataset = parser.add_argument_group('dataset parameters')\n dataset.add_argument('--load-mel-from-disk', action='store_true',\n help='Loads mel spectrograms from disk instead of computing them on the fly')\n dataset.add_argument('--training-files',\n default='filelists/kss_train.txt',\n type=str, help='Path to training filelist')\n dataset.add_argument('--validation-files',\n default='filelists/kss_val.txt',\n type=str, help='Path to validation filelist')\n dataset.add_argument('--text-cleaners', nargs='*',\n default=['english_cleaners'], type=str,\n help='Type of text cleaners for input text')\n\n # audio parameters\n audio = parser.add_argument_group('audio parameters')\n audio.add_argument('--max-wav-value', default=32768.0, type=float,\n help='Maximum audiowave value')\n audio.add_argument('--sampling-rate', default=22050, type=int,\n help='Sampling rate')\n audio.add_argument('--filter-length', default=1024, type=int,\n help='Filter length')\n audio.add_argument('--hop-length', default=256, type=int,\n help='Hop (stride) length')\n audio.add_argument('--win-length', default=1024, type=int,\n help='Window length')\n audio.add_argument('--mel-fmin', default=0.0, type=float,\n help='Minimum mel frequency')\n audio.add_argument('--mel-fmax', default=8000.0, type=float,\n help='Maximum mel frequency')\n\n distributed = parser.add_argument_group('distributed setup')\n # distributed.add_argument('--distributed-run', default=True, type=bool,\n # help='enable distributed run')\n distributed.add_argument('--rank', default=0, type=int,\n help='Rank of the process, do not set! Done by multiproc module')\n distributed.add_argument('--world-size', default=1, type=int,\n help='Number of processes, do not set! 
Done by multiproc module')\n distributed.add_argument('--dist-url', type=str, default='tcp://localhost:23456',\n help='Url used to set up distributed training')\n distributed.add_argument('--group-name', type=str, default='group_name',\n required=False, help='Distributed group name')\n distributed.add_argument('--dist-backend', default='nccl', type=str, choices={'nccl'},\n help='Distributed run backend')\n\n benchmark = parser.add_argument_group('benchmark')\n benchmark.add_argument('--bench-class', type=str, default='')\n\n return parser\n\n\ndef reduce_tensor(tensor, num_gpus):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.reduce_op.SUM)\n rt /= num_gpus\n return rt\n\n\ndef init_distributed(args, world_size, rank, group_name):\n assert torch.cuda.is_available(), \"Distributed mode requires CUDA.\"\n print(\"Initializing Distributed\")\n\n # Set cuda device so everything is done on the right GPU.\n torch.cuda.set_device(rank % torch.cuda.device_count())\n\n # Initialize distributed communication\n dist.init_process_group(\n backend=args.dist_backend, init_method=args.dist_url,\n world_size=world_size, rank=rank, group_name=group_name)\n\n print(\"Done initializing distributed\")\n\n\ndef save_checkpoint(model, optimizer, epoch, config, amp_run, output_dir, model_name,\n local_rank, world_size):\n\n random_rng_state = torch.random.get_rng_state().cuda()\n cuda_rng_state = torch.cuda.get_rng_state(local_rank).cuda()\n\n random_rng_states_all = [torch.empty_like(random_rng_state) for _ in range(world_size)]\n cuda_rng_states_all = [torch.empty_like(cuda_rng_state) for _ in range(world_size)]\n\n if world_size > 1:\n dist.all_gather(random_rng_states_all, random_rng_state)\n dist.all_gather(cuda_rng_states_all, cuda_rng_state)\n else:\n random_rng_states_all = [random_rng_state]\n cuda_rng_states_all = [cuda_rng_state]\n\n random_rng_states_all = torch.stack(random_rng_states_all).cpu()\n cuda_rng_states_all = torch.stack(cuda_rng_states_all).cpu()\n\n if local_rank == 0:\n checkpoint = {'epoch': epoch,\n 'cuda_rng_state_all': cuda_rng_states_all,\n 'random_rng_states_all': random_rng_states_all,\n 'config': config,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()}\n if amp_run:\n checkpoint['amp'] = amp.state_dict()\n\n checkpoint_filename = \"checkpoint_{}_{}.pt\".format(model_name, epoch)\n checkpoint_path = os.path.join(\n output_dir, checkpoint_filename)\n print(\"Saving model and optimizer state at epoch {} to {}\".format(\n epoch, checkpoint_path))\n torch.save(checkpoint, checkpoint_path)\n\n symlink_src = checkpoint_filename\n symlink_dst = os.path.join(\n output_dir, \"checkpoint_{}_last.pt\".format(model_name))\n if os.path.exists(symlink_dst) and os.path.islink(symlink_dst):\n print(\"|||| Updating symlink\", symlink_dst, \"to point to\", symlink_src)\n os.remove(symlink_dst)\n\n os.symlink(symlink_src, symlink_dst)\n\n\ndef get_last_checkpoint_filename(output_dir, model_name):\n symlink = os.path.join(output_dir, \"checkpoint_{}_last.pt\".format(model_name))\n if os.path.exists(symlink):\n print(\"|||| Loading checkpoint from symlink\", symlink)\n return os.path.join(output_dir, os.readlink(symlink))\n else:\n print(\"|||| No last checkpoint available - starting from epoch 0 \")\n return \"\"\n\n\ndef load_checkpoint(model, optimizer, epoch, config, amp_run, filepath, local_rank):\n\n checkpoint = torch.load(filepath, map_location='cpu')\n\n epoch[0] = checkpoint['epoch']+1\n device_id = local_rank % torch.cuda.device_count()\n 
torch.cuda.set_rng_state(checkpoint['cuda_rng_state_all'][device_id])\n torch.random.set_rng_state(checkpoint['random_rng_states_all'][device_id])\n config = checkpoint['config']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n if amp_run:\n amp.load_state_dict(checkpoint['amp'])\n\n\n# adapted from: https://discuss.pytorch.org/t/opinion-eval-should-be-a-context-manager/18998/3\n# Following snippet is licensed under MIT license\n\n@contextmanager\ndef evaluating(model):\n '''Temporarily switch to evaluation mode.'''\n istrain = model.training\n try:\n model.eval()\n yield model\n finally:\n if istrain:\n model.train()\n\n\ndef validate(model, criterion, valset, epoch, batch_iter, batch_size,\n world_size, collate_fn, distributed_run, rank, batch_to_gpu):\n \"\"\"Handles all the validation scoring and printing\"\"\"\n with evaluating(model), torch.no_grad():\n val_sampler = DistributedSampler(valset) if distributed_run else None\n val_loader = DataLoader(valset, num_workers=1, shuffle=False,\n sampler=val_sampler,\n batch_size=batch_size, pin_memory=False,\n collate_fn=collate_fn)\n\n val_loss = 0.0\n num_iters = 0\n val_items_per_sec = 0.0\n for i, batch in enumerate(val_loader):\n torch.cuda.synchronize()\n iter_start_time = time.perf_counter()\n\n x, y, num_items = batch_to_gpu(batch)\n y_pred = model(x)\n loss = criterion(y_pred, y)\n if distributed_run:\n reduced_val_loss = reduce_tensor(loss.data, world_size).item()\n reduced_num_items = reduce_tensor(num_items.data, 1).item()\n else: #\n reduced_val_loss = loss.item()\n reduced_num_items = num_items.item()\n val_loss += reduced_val_loss\n\n torch.cuda.synchronize()\n iter_stop_time = time.perf_counter()\n iter_time = iter_stop_time - iter_start_time\n\n items_per_sec = reduced_num_items/iter_time\n DLLogger.log(step=(epoch, batch_iter, i), data={'val_items_per_sec': items_per_sec})\n val_items_per_sec += items_per_sec\n num_iters += 1\n\n val_loss = val_loss/(i + 1)\n\n DLLogger.log(step=(epoch,), data={'val_loss': val_loss})\n DLLogger.log(step=(epoch,), data={'val_items_per_sec':\n (val_items_per_sec/num_iters if num_iters > 0 else 0.0)})\n\n return val_loss\n\ndef adjust_learning_rate(iteration, epoch, optimizer, learning_rate,\n anneal_steps, anneal_factor, rank):\n\n p = 0\n if anneal_steps is not None:\n for i, a_step in enumerate(anneal_steps):\n if epoch >= int(a_step):\n p = p+1\n\n if anneal_factor == 0.3:\n lr = learning_rate*((0.1 ** (p//2))*(1.0 if p % 2 == 0 else 0.3))\n else:\n lr = learning_rate*(anneal_factor ** p)\n\n if optimizer.param_groups[0]['lr'] != lr:\n DLLogger.log(step=(epoch, iteration), data={'learning_rate changed': str(optimizer.param_groups[0]['lr'])+\" -> \"+str(lr)})\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Training')\n parser = parse_args(parser)\n args, _ = parser.parse_known_args()\n\n if 'LOCAL_RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n local_rank = int(os.environ['LOCAL_RANK'])\n world_size = int(os.environ['WORLD_SIZE'])\n else:\n local_rank = args.rank\n world_size = args.world_size\n\n distributed_run = world_size > 1\n\n if local_rank == 0:\n DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT,\n args.output+'/'+args.log_file),\n StdOutBackend(Verbosity.VERBOSE)])\n else:\n DLLogger.init(backends=[])\n\n for k,v in vars(args).items():\n DLLogger.log(step=\"PARAMETER\", data={k:v})\n 
DLLogger.log(step=\"PARAMETER\", data={'model_name':'Tacotron2_PyT'})\n\n model_name = args.model_name\n parser = models.parse_model_args(model_name, parser)\n args, _ = parser.parse_known_args()\n\n torch.backends.cudnn.enabled = args.cudnn_enabled\n torch.backends.cudnn.benchmark = args.cudnn_benchmark\n\n if distributed_run:\n init_distributed(args, world_size, local_rank, args.group_name)\n\n torch.cuda.synchronize()\n run_start_time = time.perf_counter()\n\n model_config = models.get_model_config(model_name, args)\n model = models.get_model(model_name, model_config,\n cpu_run=False,\n uniform_initialize_bn_weight=not args.disable_uniform_initialize_bn_weight)\n\n if not args.amp and distributed_run:\n model = DDP(model)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,\n weight_decay=args.weight_decay)\n\n if args.amp:\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n if distributed_run:\n model = DDP(model)\n\n try:\n sigma = args.sigma\n except AttributeError:\n sigma = None\n\n start_epoch = [0]\n\n if args.resume_from_last:\n args.checkpoint_path = get_last_checkpoint_filename(args.output, model_name)\n\n if args.checkpoint_path is not \"\":\n load_checkpoint(model, optimizer, start_epoch, model_config,\n args.amp, args.checkpoint_path, local_rank)\n\n start_epoch = start_epoch[0]\n\n criterion = loss_functions.get_loss_function(model_name, sigma)\n\n try:\n n_frames_per_step = args.n_frames_per_step\n except AttributeError:\n n_frames_per_step = None\n\n collate_fn = data_functions.get_collate_function(\n model_name, n_frames_per_step)\n trainset = data_functions.get_data_loader(\n model_name, args.dataset_path, args.training_files, args)\n if distributed_run:\n train_sampler = DistributedSampler(trainset)\n shuffle = False\n else:\n train_sampler = None\n shuffle = True\n\n train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,\n sampler=train_sampler,\n batch_size=args.batch_size, pin_memory=False,\n drop_last=True, collate_fn=collate_fn)\n\n valset = data_functions.get_data_loader(\n model_name, args.dataset_path, args.validation_files, args)\n\n batch_to_gpu = data_functions.get_batch_to_gpu(model_name)\n\n iteration = 0\n train_epoch_items_per_sec = 0.0\n val_loss = 0.0\n num_iters = 0\n\n model.train()\n\n for epoch in range(start_epoch, args.epochs):\n torch.cuda.synchronize()\n epoch_start_time = time.perf_counter()\n # used to calculate avg items/sec over epoch\n reduced_num_items_epoch = 0\n\n train_epoch_items_per_sec = 0.0\n\n num_iters = 0\n reduced_loss = 0\n\n # if overflow at the last iteration then do not save checkpoint\n overflow = False\n\n if distributed_run:\n train_loader.sampler.set_epoch(epoch)\n\n for i, batch in enumerate(train_loader):\n torch.cuda.synchronize()\n iter_start_time = time.perf_counter()\n DLLogger.log(step=(epoch, i),\n data={'glob_iter/iters_per_epoch': str(iteration)+\"/\"+str(len(train_loader))})\n\n adjust_learning_rate(iteration, epoch, optimizer, args.learning_rate,\n args.anneal_steps, args.anneal_factor, local_rank)\n\n model.zero_grad()\n x, y, num_items = batch_to_gpu(batch)\n\n y_pred = model(x)\n loss = criterion(y_pred, y)\n\n if distributed_run:\n reduced_loss = reduce_tensor(loss.data, world_size).item()\n reduced_num_items = reduce_tensor(num_items.data, 1).item()\n else:\n reduced_loss = loss.item()\n reduced_num_items = num_items.item()\n if np.isnan(reduced_loss):\n raise Exception(\"loss is NaN\")\n\n DLLogger.log(step=(epoch,i), data={'train_loss': 
reduced_loss})\n\n num_iters += 1\n\n # accumulate number of items processed in this epoch\n reduced_num_items_epoch += reduced_num_items\n\n if args.amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm_(\n amp.master_params(optimizer), args.grad_clip_thresh)\n else:\n loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm_(\n model.parameters(), args.grad_clip_thresh)\n\n optimizer.step()\n\n torch.cuda.synchronize()\n iter_stop_time = time.perf_counter()\n iter_time = iter_stop_time - iter_start_time\n items_per_sec = reduced_num_items/iter_time\n train_epoch_items_per_sec += items_per_sec\n\n DLLogger.log(step=(epoch, i), data={'train_items_per_sec': items_per_sec})\n DLLogger.log(step=(epoch, i), data={'train_iter_time': iter_time})\n iteration += 1\n\n torch.cuda.synchronize()\n epoch_stop_time = time.perf_counter()\n epoch_time = epoch_stop_time - epoch_start_time\n\n DLLogger.log(step=(epoch,), data={'train_items_per_sec':\n (train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})\n DLLogger.log(step=(epoch,), data={'train_loss': reduced_loss})\n DLLogger.log(step=(epoch,), data={'train_epoch_time': epoch_time})\n\n val_loss = validate(model, criterion, valset, epoch, iteration,\n args.batch_size, world_size, collate_fn,\n distributed_run, local_rank, batch_to_gpu)\n\n if (epoch % args.epochs_per_checkpoint == 0) and args.bench_class == \"\":\n save_checkpoint(model, optimizer, epoch, model_config,\n args.amp, args.output, args.model_name,\n local_rank, world_size)\n if local_rank == 0:\n DLLogger.flush()\n\n torch.cuda.synchronize()\n run_stop_time = time.perf_counter()\n run_time = run_stop_time - run_start_time\n DLLogger.log(step=tuple(), data={'run_time': run_time})\n DLLogger.log(step=tuple(), data={'val_loss': val_loss})\n DLLogger.log(step=tuple(), data={'train_items_per_sec':\n (train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})\n\n if local_rank == 0:\n DLLogger.flush()\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.cuda.synchronize",
"torch.empty_like",
"torch.distributed.init_process_group",
"torch.random.set_rng_state",
"torch.load",
"torch.utils.data.distributed.DistributedSampler",
"torch.random.get_rng_state",
"numpy.isnan",
"torch.distributed.all_gather",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.stack",
"torch.cuda.get_rng_state",
"torch.cuda.device_count",
"torch.distributed.all_reduce",
"torch.cuda.set_rng_state",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
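The Tacotron 2 training script embedded in the row above contains two small utilities that are easy to misread in the flattened dump: the epoch-based learning-rate annealing in adjust_learning_rate and the cross-rank loss averaging in reduce_tensor. The standalone Python sketches below restate those two rules outside the script; the names annealed_lr and average_across_ranks are illustrative only and do not appear in the original code.

# Minimal sketch of the annealing rule applied by adjust_learning_rate above:
# p counts how many entries of anneal_steps the current epoch has already passed.
def annealed_lr(base_lr, epoch, anneal_steps, anneal_factor):
    p = sum(1 for step in (anneal_steps or []) if epoch >= int(step))
    if anneal_factor == 0.3:
        # Special-cased schedule: alternate factors of 0.3 and 1/3, giving
        # 1.0, 0.3, 0.1, 0.03, ... relative to base_lr as milestones pass.
        return base_lr * (0.1 ** (p // 2)) * (1.0 if p % 2 == 0 else 0.3)
    return base_lr * (anneal_factor ** p)

# Example: with anneal_steps=[500, 1000, 1500] and anneal_factor=0.3,
# annealed_lr(1e-3, e, [500, 1000, 1500], 0.3) evaluates to
# 1e-3, 3e-4, 1e-4 and 3e-5 at epochs 0, 500, 1000 and 1500 respectively.

# Minimal sketch of the distributed loss averaging performed by reduce_tensor
# above, assuming torch.distributed has already been initialised via
# init_process_group and that `value` is a tensor on the local device. It uses
# the current dist.ReduceOp.SUM spelling rather than the deprecated
# dist.reduce_op.SUM alias that appears in the original script.
import torch.distributed as dist

def average_across_ranks(value, world_size):
    rt = value.clone()                          # keep the caller's tensor untouched
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)   # sum the value across all ranks
    return rt / world_size                      # turn the sum into a mean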
palminde/P9Project | [
"5df03d18b74585ce1d9feefce8c183225dd27f68",
"5df03d18b74585ce1d9feefce8c183225dd27f68"
] | [
"Code/Nets.py",
"Code/CoGAN_trainer.py"
] | [
"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\n\nlayers = tf.keras.layers\n\n\n# Clip model weights to a given hypercube\nclass ClipConstraint(tf.keras.constraints.Constraint):\n # set clip value when initialized\n def __init__(self, clip_value):\n self.clip_value = clip_value\n\n # clip model weights to hypercube\n def __call__(self, weights):\n return tf.keras.backend.clip(weights, -self.clip_value, self.clip_value)\n\n # get the config\n def get_config(self):\n return {'clip_value': self.clip_value}\n\n\nprelu_init = tf.keras.initializers.Constant(0.25)\n\n\ndef encoder(args):\n input_dim = args.dataset_dim[1]\n channels = args.dataset_dim[3]\n model = keras.Sequential()\n\n # normal\n model.add(layers.Conv2D(64, (3, 3), padding='same', input_shape=[input_dim, input_dim, channels]))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # classifier\n model.add(layers.Flatten())\n model.add(layers.Dropout(0.4))\n model.add(layers.Dense(args.noise_dim))\n # compile model\n return model\n\n\n\n# 32x32\ndef cifargan_gen(args):\n g_dim = args.g_dim\n z_dim = args.noise_dim\n img_dim = args.dataset_dim[1]\n channels = args.dataset_dim[3]\n img_resize = img_dim//(2*2*2)\n\n model = keras.Sequential()\n # foundation for 4x4 image\n model.add(layers.Dense(g_dim * img_resize * img_resize, input_dim=z_dim, kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(layers.Reshape((img_resize, img_resize, g_dim)))\n # upsample to 8x8\n model.add(layers.Conv2DTranspose(128, (4, 4), strides=(1, 1), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU(alpha=0.2))\n # upsample to 16x16\n model.add(layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU(alpha=0.2))\n # upsample to 32x32\n model.add(layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2DTranspose(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU(alpha=0.2))\n # output layer\n model.add(layers.Conv2D(channels, (6, 6), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n return model\n\n\ndef cifargan_disc(args):\n d_dim = args.d_dim\n input_dim = args.dataset_dim[1]\n channels = args.dataset_dim[3]\n model = keras.Sequential()\n\n # normal\n model.add(layers.Conv2D(64, (3, 3), padding='same', input_shape=[input_dim, input_dim, channels], kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, 
kernel_regularizer=args.wd))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(layers.LeakyReLU(alpha=0.2))\n # classifier\n model.add(layers.Flatten())\n model.add(layers.Dropout(0.4))\n model.add(layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n # compile model\n return model\n\n\n# 64x64\ndef gan64_gen(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*16*16)(noise)\n model = tf.keras.layers.Reshape((16, 16, 1024))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (3,3), strides=(2, 2), padding='same'))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.LeakyReLU(alpha=0.2))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (3,3), strides=(2, 2), padding='same'))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.LeakyReLU(alpha=0.2))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same'))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.LeakyReLU(alpha=0.2))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same'))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.LeakyReLU(alpha=0.2))(model)\n\n # Generator 1\n img1 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same')(model)\n\n return keras.Model(noise, img1)\n\n\ndef gan64_disc(args):\n d_dim = args.d_dim\n input_dim = args.dataset_dim[1]\n channels = args.dataset_dim[3]\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n model = keras.Sequential()\n # normal\n model.add(layers.Conv2D(64, (3, 3), padding='same', input_shape=img_shape))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n\n model.add(layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # downsample\n model.add(layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU(alpha=0.2))\n # classifier\n model.add(layers.Flatten())\n model.add(layers.Dropout(0.4))\n model.add(layers.Dense(1))\n # compile model\n return model\n\n\ndef gan128_gen(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (4,4), 
strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(64, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n # Generator 1\n img1 = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n img1 = (tf.keras.layers.BatchNormalization())(img1)\n img1 = (tf.keras.layers.PReLU(prelu_init))(img1)\n\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n\n return keras.Model(noise, img1)\n\n\ndef gan128_disc(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n img1 = tf.keras.layers.Input(shape=img_shape)\n\n x1 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n x1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.1))\n\n model.add(tf.keras.layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n 
model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd))\n\n output1 = model(x1)\n\n return keras.Model(img1, output1)\n\n\ndef gan256_gen(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(2048*4*4, kernel_regularizer=args.wd)(noise)\n model = tf.keras.layers.Reshape((4, 4, 2048))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(1024, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(64, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n model = (tf.keras.layers.BatchNormalization(momentum=0.8))(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n img1 = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_regularizer=args.wd))(model)\n img1 = (tf.keras.layers.BatchNormalization(momentum=0.8))(img1)\n img1 = (tf.keras.layers.PReLU(prelu_init))(img1)\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_regularizer=args.wd)(img1)\n\n return keras.Model(noise, img1)\n\n\ndef gan256_disc(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd)(img1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n x1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd)(x1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.1))\n\n model.add(tf.keras.layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n 
model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(512, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(1024, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Conv2D(2048, (5, 5), strides=(2, 2), padding='same', kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(4096, kernel_regularizer=args.wd))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Dense(1, kernel_regularizer=args.wd))\n\n output1 = model(x1)\n\n return keras.Model(img1, output1)\n\n\n# Toy\ndef toy_gen(n_dim):\n inputs = keras.Input(shape=(n_dim,), name='digits')\n x = layers.Dense(128, activation='tanh', name='dense1')(inputs)\n x = layers.Dense(128, activation='tanh', name='dense2')(x)\n x = layers.Dense(128, activation='tanh', name='dense3')(x)\n outputs = layers.Dense(2, activation='linear', name='preds')(x)\n model = keras.Model(inputs=inputs, outputs=outputs)\n return model\n\n\ndef toy_disc(args):\n inputs = keras.Input(shape=(args.batch_size, 2), name='digits')\n x = layers.Dense(128, activation='tanh', name='dense1')(inputs)\n x = layers.Dense(128, activation='tanh', name='dense2')(x)\n x = layers.Dense(128, activation='tanh', name='dense3')(x)\n outputs = layers.Dense(1, name='preds')(x)\n model = keras.Model(inputs=inputs, outputs=outputs)\n return model\n\n\n# Mnist negative + edge\ndef cogan_generators_digit(args):\n channels = args.dataset_dim[3]\n\n output1 = []\n output2 = []\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n features_4x4 = (tf.keras.layers.PReLU())(model)\n output1.append(features_4x4)\n output2.append(features_4x4)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n features_8x8 = (tf.keras.layers.PReLU())(model)\n output1.append(features_8x8)\n output2.append(features_8x8)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU())(model)\n 
output1.append(model)\n output2.append(model)\n\n # Generator 1\n img1 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model)\n\n # Generator 2\n img2 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model)\n\n output1.append(img1)\n output2.append(img2)\n return keras.Model(noise, output1), keras.Model(noise, output2)\n\n\ndef cogan_discriminators_digit(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(20, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.MaxPool2D()(x1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(20, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.MaxPool2D()(x2)\n\n # Shared discriminator layers\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(50, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.MaxPool2D())\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(500, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n model.add(tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n output1 = model(x1, training=True)\n output2 = model(x2, training=True)\n\n return keras.Model(img1, output1), keras.Model(img2, output2)\n\n\ndef cogan_generators_digit_noshare(args):\n channels = args.dataset_dim[3]\n\n\n output1 = []\n output2 = []\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = tf.keras.layers.BatchNormalization()(model)\n model = tf.keras.layers.PReLU(prelu_init)(model)\n\n # Generator 1\n if args.use_firstlayer:\n model1 = (tf.keras.layers.Conv2DTranspose(1024, (4,4), strides=(1, 1), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n features1_4x4 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(features1_4x4)\n\n model1 = (tf.keras.layers.Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features1_4x4)\n else:\n model1 = (tf.keras.layers.Conv2DTranspose(512, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n features1_8x8 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(features1_8x8)\n\n model1 = (tf.keras.layers.Conv2DTranspose(256, (3,3), strides=(2, 2), 
padding='same',kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features1_8x8)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n features1_16x16 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(features1_16x16)\n\n model1 = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features1_16x16)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n model1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n img1 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n\n output1.append(img1)\n\n # Generator 2\n if args.use_firstlayer:\n model2 = (tf.keras.layers.Conv2DTranspose(1024, (4,4), strides=(1, 1), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n features2_4x4 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(features2_4x4)\n model2 = (tf.keras.layers.Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,kernel_regularizer=args.wd, bias_initializer=args.bi))(features2_4x4)\n else:\n model2 = (tf.keras.layers.Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n features2_8x8 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(features2_8x8)\n\n model2 = (tf.keras.layers.Conv2DTranspose(256, (3,3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features2_8x8)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n features2_16x16 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(features2_16x16)\n\n model2 = (tf.keras.layers.Conv2DTranspose(128, (3,3), strides=(2, 2), padding='same',kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(features2_16x16)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n model2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n img2 = tf.keras.layers.Conv2DTranspose(channels, (6,6), strides=(1, 1), activation='tanh', padding='same',kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n output2.append(img2)\n\n return keras.Model(noise, output1), keras.Model(noise, output2)\n\n\ndef cogan_discriminators_digit_noshare(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(20, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.MaxPool2D()(x1)\n\n model1 = tf.keras.layers.Conv2D(50, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n model1 = tf.keras.layers.MaxPool2D()(model1)\n model1 = tf.keras.layers.Flatten()(model1)\n model1 = tf.keras.layers.Dense(500, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n model1 = tf.keras.layers.PReLU(prelu_init)(model1)\n model1 = tf.keras.layers.Dropout(0.5)(model1)\n model1 = 
tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(20, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.MaxPool2D()(x2)\n\n model2 = tf.keras.layers.Conv2D(50, (5, 5), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x2)\n model2 = tf.keras.layers.MaxPool2D()(model2)\n model2 = tf.keras.layers.Flatten()(model2)\n model2 = tf.keras.layers.Dense(500, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n model2 = tf.keras.layers.PReLU(prelu_init)(model2)\n model2 = tf.keras.layers.Dropout(0.5)(model2)\n model2 = tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n\n return keras.Model(img1, model1), keras.Model(img2, model2)\n\n\n# Mnist rotate\ndef cogan_generators_rotate(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n # Shared weights between generators\n model = keras.Sequential()\n model.add(tf.keras.layers.Dense(1024, input_dim=args.noise_dim, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dense(1024, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dense(1024, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dense(1024, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.BatchNormalization())\n\n feature_repr = model(noise)\n\n # Generator 1\n g1 = tf.keras.layers.Dense(np.prod(img_shape), activation='sigmoid', kernel_regularizer=args.wd, bias_initializer=args.bi)(feature_repr)\n img1 = tf.keras.layers.Reshape(img_shape)(g1)\n\n # Generator 2\n g2 = tf.keras.layers.Dense(np.prod(img_shape), activation='sigmoid', kernel_regularizer=args.wd, bias_initializer=args.bi)(feature_repr)\n img2 = tf.keras.layers.Reshape(img_shape)(g2)\n\n return keras.Model(noise, img1), keras.Model(noise, img2)\n\n\ndef cogan_discriminators_rotate(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n model1 = tf.keras.layers.Conv2D(20, (5,5), padding='same', kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n model1 = tf.keras.layers.MaxPool2D()(model1)\n model1 = tf.keras.layers.Conv2D(50, (5,5), padding='same', kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n model1 = tf.keras.layers.MaxPool2D()(model1)\n model1 = tf.keras.layers.Dense(500, kernel_regularizer=args.wd, bias_initializer=args.bi)(model1)\n model1 = tf.keras.layers.LeakyReLU()(model1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n model2 = tf.keras.layers.Conv2D(20, (5,5), padding='same', kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n model2 = tf.keras.layers.MaxPool2D()(model2)\n model2 = 
tf.keras.layers.Conv2D(50, (5,5), padding='same', kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n model2 = tf.keras.layers.MaxPool2D()(model2)\n model2 = tf.keras.layers.Dense(500, kernel_regularizer=args.wd, bias_initializer=args.bi)(model2)\n model2 = tf.keras.layers.LeakyReLU()(model2)\n\n # Shared discriminator layers\n model = keras.Sequential()\n model.add(tf.keras.layers.Flatten(input_shape=(8,8,500)))\n model.add(tf.keras.layers.Dense(1, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n validity1 = model(model1)\n validity2 = model(model2)\n\n return keras.Model(img1, validity1), keras.Model(img2, validity2)\n\n# Faces No share\ndef cogan_generators_faces_noshare(args):\n channels = args.dataset_dim[3]\n\n output1 = []\n output2 = []\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024 * 4 * 4, kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model1 = (tf.keras.layers.Conv2DTranspose(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n feature1_1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(feature1_1)\n\n model1 = (tf.keras.layers.Conv2DTranspose(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(feature1_1)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n feature2_1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(feature2_1)\n\n model1 = (tf.keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init,kernel_regularizer=args.wd, bias_initializer=args.bi))(feature2_1)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n feature3_1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(feature3_1)\n\n model1 = (tf.keras.layers.Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(feature3_1)\n model1 = (tf.keras.layers.BatchNormalization())(model1)\n feature4_1 = (tf.keras.layers.PReLU(prelu_init))(model1)\n output1.append(feature4_1)\n\n # Generator 1\n img1 = (tf.keras.layers.Conv2DTranspose(32, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(feature4_1)\n img1 = (tf.keras.layers.BatchNormalization())(img1)\n img1 = (tf.keras.layers.PReLU(prelu_init))(img1)\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3, 3), strides=(1, 1), activation='tanh', padding='same',\n kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi)(img1)\n output1.append(img1)\n\n model2 = (tf.keras.layers.Conv2DTranspose(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n feature1_2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(feature1_2)\n\n model2 = (tf.keras.layers.Conv2DTranspose(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, 
bias_initializer=args.bi))(feature1_2)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n feature2_2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(feature2_2)\n\n model2 = (tf.keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))(feature2_2)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n feature3_2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(feature3_2)\n\n model2 = (\n tf.keras.layers.Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))(feature3_2)\n model2 = (tf.keras.layers.BatchNormalization())(model2)\n feature4_2 = (tf.keras.layers.PReLU(prelu_init))(model2)\n output2.append(feature4_2)\n\n\n # Generator 2\n img2 = (tf.keras.layers.Conv2DTranspose(32, (4, 4), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))(feature4_2)\n img2 = (tf.keras.layers.BatchNormalization())(img2)\n img2 = (tf.keras.layers.PReLU(prelu_init))(img2)\n img2 = tf.keras.layers.Conv2DTranspose(channels, (3, 3), strides=(1, 1), activation='tanh', padding='same',\n kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi)(img2)\n output2.append(img2)\n\n return keras.Model(noise, output1), keras.Model(noise, output2)\n\ndef cogan_discriminators_faces_noshare(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.LayerNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n x1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n x1 = tf.keras.layers.LayerNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.LayerNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n x2 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi)(x2)\n x2 = tf.keras.layers.LayerNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n\n # Shared discriminator layers\n model1 = keras.Sequential()\n model1.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.1))\n\n model1.add(tf.keras.layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.3))\n\n model1.add(tf.keras.layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same', 
kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.3))\n\n model1.add(tf.keras.layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.5))\n\n model1.add(tf.keras.layers.Flatten())\n model1.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi))\n model1.add(tf.keras.layers.LayerNormalization())\n model1.add(tf.keras.layers.PReLU(prelu_init))\n model1.add(tf.keras.layers.Dropout(0.5))\n\n model1.add(\n tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n model2 = keras.Sequential()\n model2.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.1))\n\n model2.add(tf.keras.layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.3))\n\n model2.add(tf.keras.layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.3))\n\n model2.add(tf.keras.layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init,\n kernel_regularizer=args.wd, bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.5))\n\n model2.add(tf.keras.layers.Flatten())\n model2.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, kernel_regularizer=args.wd,\n bias_initializer=args.bi))\n model2.add(tf.keras.layers.LayerNormalization())\n model2.add(tf.keras.layers.PReLU(prelu_init))\n model2.add(tf.keras.layers.Dropout(0.5))\n\n model2.add(\n tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n output1 = model1(x1)\n output2 = model2(x2)\n\n return keras.Model(img1, output1), keras.Model(img2, output2)\n\n\n# Faces\ndef cogan_generators_faces(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(1024*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 1024))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n 
model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(64, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n # Generator 1\n img1 = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n img1 = (tf.keras.layers.BatchNormalization())(img1)\n img1 = (tf.keras.layers.PReLU(prelu_init))(img1)\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n\n # Generator 2\n img2 = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n img2 = (tf.keras.layers.BatchNormalization())(img2)\n img2 = (tf.keras.layers.PReLU(prelu_init))(img2)\n img2 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n\n return keras.Model(noise, img1), keras.Model(noise, img2)\n\n\ndef cogan_discriminators_faces(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n x1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.BatchNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n x2 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x2)\n x2 = tf.keras.layers.BatchNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n\n # Shared discriminator layers\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n 
model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.1))\n\n model.add(tf.keras.layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(512, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(1024, (3, 3), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n output1 = model(x1, training=True)\n output2 = model(x2, training=True)\n\n return keras.Model(img1, output1), keras.Model(img2, output2)\n\n\n# 256x256 CoGANs\ndef cogan_generators_256(args):\n channels = args.dataset_dim[3]\n\n # Shared weights between generators\n noise = tf.keras.layers.Input(shape=(args.noise_dim,))\n\n model = tf.keras.layers.Dense(2048*4*4, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(noise)\n model = tf.keras.layers.Reshape((4, 4, 2048))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(1024, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.LeakyReLU())(model)\n\n model = (tf.keras.layers.Conv2DTranspose(512, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.LeakyReLU())(model)\n\n model = (tf.keras.layers.Conv2DTranspose(256, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(128, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(64, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = 
(tf.keras.layers.PReLU(prelu_init))(model)\n\n model = (tf.keras.layers.Conv2DTranspose(32, (4,4), strides=(2, 2), padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))(model)\n model = (tf.keras.layers.BatchNormalization())(model)\n model = (tf.keras.layers.PReLU(prelu_init))(model)\n\n # Generator 1\n img1 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model)\n\n # Generator 2\n img2 = tf.keras.layers.Conv2DTranspose(channels, (3,3), strides=(1, 1), activation='tanh', padding='same', kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(model)\n\n return keras.Model(noise, img1), keras.Model(noise, img2)\n\n\ndef cogan_discriminators_256(args):\n img_shape = (args.dataset_dim[1], args.dataset_dim[2], args.dataset_dim[3])\n\n # Discriminator 1\n img1 = tf.keras.layers.Input(shape=img_shape)\n x1 = tf.keras.layers.Conv2D(32, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n x1 = tf.keras.layers.Conv2D(64, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x1)\n x1 = tf.keras.layers.BatchNormalization()(x1)\n x1 = tf.keras.layers.PReLU(prelu_init)(x1)\n\n # Discriminator 2\n img2 = tf.keras.layers.Input(shape=img_shape)\n x2 = tf.keras.layers.Conv2D(32, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(img2)\n x2 = tf.keras.layers.BatchNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n x2 = tf.keras.layers.Conv2D(64, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi)(x2)\n x2 = tf.keras.layers.BatchNormalization()(x2)\n x2 = tf.keras.layers.PReLU(prelu_init)(x2)\n\n # Shared discriminator layers\n model = keras.Sequential()\n model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.1))\n\n model.add(tf.keras.layers.Conv2D(256, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(512, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.3))\n\n model.add(tf.keras.layers.Conv2D(1024, (5, 5), padding='same', strides=(2, 2), kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(2048, kernel_initializer=args.w_init, 
kernel_regularizer=args.wd, bias_initializer=args.bi))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.PReLU(prelu_init))\n model.add(tf.keras.layers.Dropout(0.5))\n\n model.add(tf.keras.layers.Dense(1, kernel_initializer=args.w_init, kernel_regularizer=args.wd, bias_initializer=args.bi))\n\n output1 = model(x1)\n output2 = model(x2)\n\n return keras.Model(img1, output1), keras.Model(img2, output2)\n\n\ndef mnist_classifier(args, num_classes):\n img_shape = (32, 32, 3)\n input = tf.keras.layers.Input(shape=img_shape)\n model = tf.keras.layers.Conv2D(32, (3,3))(input)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.Conv2D(64, (3,3))(model)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.MaxPooling2D((2,2))(model)\n model = tf.keras.layers.Dropout(0.25)(model)\n model = tf.keras.layers.Flatten()(model)\n model = tf.keras.layers.Dense(128)(model)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.Dropout(0.5)(model)\n output = tf.keras.layers.Dense(num_classes, activation='softmax')(model)\n\n return tf.keras.Model(input, output)\n\n\ndef celeba_classifier(args, num_classes):\n img_shape = (128,128,3)\n input = tf.keras.layers.Input(shape=img_shape)\n\n model = tf.keras.layers.Conv2D(32, (3,3))(input)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.Conv2D(64, (3,3))(model)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.MaxPooling2D((2,2))(model)\n model = tf.keras.layers.Dropout(0.25)(model)\n model = tf.keras.layers.Flatten()(model)\n model = tf.keras.layers.Dense(128)(model)\n model = tf.keras.layers.LeakyReLU()(model)\n model = tf.keras.layers.Dropout(0.5)(model)\n\n output = tf.keras.layers.Dense(num_classes, activation='sigmoid')(model)\n return tf.keras.Model(input, output)\n\n\n\n\n\n",
"from __future__ import print_function, division\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport time\nimport numpy as np\nimport os\nfrom Code import Utils as u, Nets as n, Losses as l, Penalties as p\n\n\nclass CoGANTrainer(object):\n\n def __init__(self, g1, g2, d1, d2, domain1, domain2):\n self.hist_g1 = []\n self.hist_g2 = []\n self.hist_d1 = []\n self.hist_d2 = []\n self.hist_semantic_loss = []\n self.hist_cycle_loss = []\n self.hist_weight_similarity = []\n self.hist_discpenalty1 = []\n self.hist_discpenalty2 = []\n self.hist_high_diff = []\n self.hist_low1_diff = []\n self.hist_low2_diff = []\n self.hist_style1_loss = []\n self.hist_style2_loss = []\n self.hist_content_loss = []\n self.X1 = domain1\n self.X2 = domain2\n self.full_training_time = 0\n self.discPenal = p.DiscriminatorPenalties()\n self.genPenal = p.GeneratorPenalties()\n self.content_layers = ['block5_conv2']\n self.style_layers = ['block1_conv1',\n 'block2_conv1',\n 'block3_conv1',\n 'block4_conv1',\n 'block5_conv1']\n self.num_content_layers = len(self.content_layers)\n self.num_style_layers = len(self.style_layers)\n self.vgg_feature_model = None\n\n self.d1, self.d2 = d1, d2\n self.g1, self.g2 = g1, g2\n def feature_layers(self, layer_names, args):\n vgg = tf.keras.applications.VGG19(include_top=False)\n vgg.trainable = False\n outputs = [vgg.get_layer(name).output for name in layer_names]\n\n model = tf.keras.Model([vgg.input], outputs)\n return model\n\n def gram_matrix(self,input):\n result = tf.linalg.einsum('bijc,bijd->bcd', input, input)\n num_locations = input.shape[1] * input.shape[2]\n return result/num_locations\n\n def StyleContentModel(self, inputs):\n inputs = (0.5 * inputs + 0.5) * 255\n inputs = tf.keras.applications.vgg19.preprocess_input(inputs)\n feature_outputs = self.vgg_feature_model(inputs)\n styles = feature_outputs[:self.num_style_layers]\n content = feature_outputs[self.num_style_layers:]\n\n style_outputs = [self.gram_matrix(style) for style in styles]\n\n content_dict = {content_name:value for content_name, value in zip(self.content_layers, content)}\n style_dict = {style_name:value for style_name, value in zip(self.style_layers,style_outputs)}\n\n return content_dict, style_dict\n\n\n def StyleContentLoss(self, style_fake, style_target, content_fake1, content_fake2, args):\n style_loss = tf.add_n([tf.reduce_mean((style_fake[name]-style_target[name])**2) for name in style_fake.keys()])\n content_loss = tf.add_n([tf.reduce_mean((content_fake1[name] - content_fake2[name])**2) for name in content_fake1.keys()])\n\n style_loss *= args.style_weight / self.num_style_layers\n content_loss *= args.content_weight / self.num_content_layers\n\n return style_loss, content_loss\n\n\n def train(self, args):\n if args.use_cycle:\n self.encoder = n.encoder(args)\n if args.semantic_loss:\n self.classifier = tf.keras.models.load_model(args.classifier_path)\n if args.cogan_data == 'mnist2fashion':\n self.classifier2 = tf.keras.models.load_model(args.classifier_path + '_fashion')\n if args.feature_loss:\n vgg = tf.keras.applications.VGG19(include_top=False, input_shape=(args.dataset_dim[1],args.dataset_dim[2], args.dataset_dim[3]))\n self.high_level_feature_extractor = tf.keras.Model(inputs=vgg.input, outputs=vgg.get_layer('block4_conv4').output)\n self.low_level_feature_extractor = tf.keras.Model(inputs=vgg.input, outputs=vgg.get_layer('block1_pool').output)\n if args.perceptual_loss:\n self.vgg_feature_model = self.feature_layers(self.style_layers + self.content_layers, args)\n it1 = 
iter(self.X1)\n it2 = iter(self.X2)\n\n # Set loss functions\n d_loss_fn, g_loss_fn = l.set_losses(args)\n\n for epoch in range(args.epochs):\n start = time.time()\n\n # ----------------------\n # Train Discriminators\n # ----------------------\n\n for i in range(args.disc_iters):\n # Select a random batch of images\n if args.cogan_data in ['mnist2edge','shapes2flowers', 'Eyeglasses', 'Smiling', 'Blond_Hair', 'Male']:\n batch1 = next(it1)\n batch2 = next(it2)\n elif args.cogan_data == 'mnist2svhn_prune':\n batch1 = next(it1)[0]\n batch2 = next(it2)\n else:\n batch1 = next(it1)[0]\n batch2 = next(it2)[0]\n\n # Sample noise as generator input\n noise = u.gen_noise(args)\n\n # Generate a batch of new images\n gen_batch1 = self.g1(noise, training=True)\n\n # d1\n with tf.GradientTape() as tape:\n # Disc response\n disc_real1 = self.d1(batch1, training=True)\n disc_fake1 = self.d1(gen_batch1[-1], training=True)\n\n # Calc loss and penalty\n d1_loss = d_loss_fn(disc_fake1, disc_real1)\n gp1 = self.discPenal.calc_penalty(gen_batch1[-1], batch1, self.d1, args) # if loss is not wgan-gp then gp=0\n self.hist_discpenalty1.append(gp1)\n d1_loss = d1_loss + (gp1 * args.penalty_weight_d)\n gradients_of_discriminator = tape.gradient(d1_loss, self.d1.trainable_variables)\n args.disc_optimizer.apply_gradients(zip(gradients_of_discriminator, self.d1.trainable_variables))\n\n # Generate a batch of new images\n gen_batch2 = self.g2(noise, training=True)\n\n # d2\n with tf.GradientTape() as tape:\n # Disc response\n disc_real2 = self.d2(batch2, training=True)\n disc_fake2 = self.d2(gen_batch2[-1], training=True)\n\n # Calc loss and penalty\n d2_loss = d_loss_fn(disc_fake2, disc_real2)\n gp2 = self.discPenal.calc_penalty(gen_batch2[-1], batch2, self.d2, args) # if loss is not wgan-gp then gp=0\n self.hist_discpenalty2.append(gp2)\n d2_loss = d2_loss + (gp2 * args.penalty_weight_d)\n gradients_of_discriminator = tape.gradient(d2_loss, self.d2.trainable_variables)\n args.disc_optimizer.apply_gradients(zip(gradients_of_discriminator, self.d2.trainable_variables))\n\n if args.loss == 'wgan' and args.disc_penalty == 'none':\n self.clip_weights(args.clip)\n\n # ------------------\n # Train Generators\n # ------------------\n\n # Sample noise as generator input\n noise = u.gen_noise(args)\n with tf.GradientTape() as tape1, tf.GradientTape() as tape2, tf.GradientTape() as tape3:\n # Adv loss\n gen1_fake = self.g1(noise, training=True)\n disc1_fake = self.d1(gen1_fake[-1], training=True)\n g1_loss = g_loss_fn(disc1_fake)\n\n gen2_fake = self.g2(noise, training=True)\n disc2_fake = self.d2(gen2_fake[-1], training=True)\n g2_loss = g_loss_fn(disc2_fake)\n \n if args.semantic_loss:\n domain1_pred = self.classifier(gen1_fake[-1])\n domain2_pred = self.classifier(gen2_fake[-1])\n diff = tf.reduce_mean(tf.math.squared_difference(domain1_pred, domain2_pred))\n # log semantic loss\n self.hist_semantic_loss.append(diff)\n g1_loss = g1_loss + diff * args.semantic_weight\n g2_loss = g2_loss + diff * args.semantic_weight\n\n\n\n penalty = self.genPenal.calc_penalty(self.g1, self.g2, args.shared_layers, args, gen1_fake, gen2_fake)\n g1_loss = g1_loss + (penalty * args.penalty_weight_g)\n g2_loss = g2_loss + (penalty * args.penalty_weight_g)\n\n\n if args.feature_loss:\n #fake1_high_features = self.high_level_feature_extractor(gen1_fake[-1])\n #fake2_high_features = self.high_level_feature_extractor(gen2_fake[-1])\n fake1_low_features = self.low_level_feature_extractor(gen1_fake[-1])\n fake2_low_features = 
self.low_level_feature_extractor(gen2_fake[-1])\n real1_low_features = self.low_level_feature_extractor(batch1)\n real2_low_features = self.low_level_feature_extractor(batch2)\n\n #high_diff = tf.reduce_mean(tf.math.squared_difference(fake1_high_features, fake2_high_features))\n low_test = tf.reduce_mean(tf.math.squared_difference(fake1_low_features,fake2_low_features))\n low1_diff = tf.reduce_mean(tf.math.squared_difference(fake1_low_features, real1_low_features))\n low2_diff = tf.reduce_mean(tf.math.squared_difference(fake2_low_features, real2_low_features))\n diffs1 = tf.math.l2_normalize([penalty, low1_diff])\n diffs2 = tf.math.l2_normalize([penalty, low2_diff])\n\n #high_diff = diffs1[0] * args.fl_high_weight\n low1_diff = diffs1[1] * args.fl_low_weight\n low2_diff = diffs2[1] * args.fl_low_weight\n\n\n #self.hist_high_diff.append(high_diff)\n self.hist_low1_diff.append(low1_diff)\n self.hist_low2_diff.append(low2_diff)\n\n #g1_loss = g1_loss + high_diff\n g1_loss = g1_loss + low1_diff\n #g2_loss = g2_loss + high_diff\n g2_loss = g2_loss + low2_diff\n\n #g1_loss = g1_loss + high_diff + low1_diff\n #g2_loss = g2_loss + high_diff + low2_diff\n\n if args.perceptual_loss:\n fake1_content, fake1_style = self.StyleContentModel(gen1_fake[-1])\n fake2_content, fake2_style = self.StyleContentModel(gen2_fake[-1])\n real1_content, real1_style = self.StyleContentModel(batch1)\n real2_content, real2_style = self.StyleContentModel(batch2)\n\n g1_style_loss, g1_content_loss = self.StyleContentLoss(fake1_style, real1_style, fake1_content, fake2_content, args)\n g2_style_loss, g2_content_loss = self.StyleContentLoss(fake2_style, real2_style, fake2_content, fake1_content, args)\n\n g1_loss = (g1_loss) + g1_style_loss + g1_content_loss\n g2_loss = (g2_loss) + g2_style_loss + g2_content_loss\n\n self.hist_style1_loss.append(g1_style_loss)\n self.hist_style2_loss.append(g2_style_loss)\n self.hist_content_loss.append(g1_content_loss)\n\n if args.use_cycle:\n # Recon loss\n noise_recon1 = self.encoder(gen1_fake[-1])\n noise_recon2 = self.encoder(gen2_fake[-1])\n\n #fake_recon1 = self.g1(noise_recon1, training=False)\n #fake_recon2 = self.g2(noise_recon2, training=False)\n\n noise_recon_loss1 = l.recon_criterion(noise_recon1, noise)\n noise_recon_loss2 = l.recon_criterion(noise_recon2, noise)\n\n #fake_recon_loss1 = l.recon_criterion(fake_recon1[-1], gen1_fake[-1])\n #fake_recon_loss2 = l.recon_criterion(fake_recon2[-1], gen2_fake[-1])\n\n total_recon_loss = noise_recon_loss1 + noise_recon_loss2\n\n # log cycle loss\n self.hist_cycle_loss.append(total_recon_loss)\n\n g1_loss = g1_loss + (total_recon_loss * args.cycle_weight)\n g2_loss = g2_loss + (total_recon_loss * args.cycle_weight)\n\n gradients_of_generator1 = tape1.gradient(g1_loss, self.g1.trainable_variables)\n args.gen_optimizer.apply_gradients(zip(gradients_of_generator1, self.g1.trainable_variables))\n gradients_of_generator2 = tape2.gradient(g2_loss, self.g2.trainable_variables)\n args.gen_optimizer.apply_gradients(zip(gradients_of_generator2, self.g2.trainable_variables))\n if args.use_cycle:\n gradients_of_encoder = tape3.gradient(total_recon_loss, self.encoder.trainable_variables)\n args.gen_optimizer.apply_gradients(zip(gradients_of_encoder, self.encoder.trainable_variables))\n weight_sim = self.genPenal.weight_regularizer(self.g1, self.g2, 21)\n self.full_training_time += time.time() - start\n\n '''\n # Check if shared weights are equal between generators\n a = self.g1.trainable_variables\n b = self.g2.trainable_variables\n mask = []\n\n for 
i in range(8):\n if np.array_equal(a[i].numpy(), b[i].numpy()):\n mask.append(1)\n else:\n mask.append(0)\n if 0 in mask:\n print(\"ERROR - weight sharing failure:\" + mask)\n '''\n\n # Collect loss values\n self.hist_d1.append(d1_loss)\n self.hist_d2.append(d2_loss)\n self.hist_g1.append(g1_loss)\n self.hist_g2.append(g2_loss)\n self.hist_weight_similarity.append(weight_sim)\n\n print(\"%d [D1 loss: %f] [D2 loss: %f] [G1 loss: %f] [G2 loss: %f] [WeightSim: %f]}\" % (epoch, d1_loss, d2_loss, g1_loss, g2_loss, weight_sim))\n\n # If at save interval => save generated image samples\n if epoch % args.images_while_training == 0:\n self.sample_images(epoch, args.seed, args.dir, args.dataset_dim[3])\n self.plot_losses(args.dir)\n self.sample_images(epoch, args.seed, args.dir, args.dataset_dim[3])\n return self.full_training_time\n\n def sample_images(self, epoch, seed, dir, channels):\n r, c = 4, 4\n gen_batch1 = self.g1.predict(seed)[-1]\n gen_batch2 = self.g2.predict(seed)[-1]\n\n gen_imgs = np.concatenate([gen_batch1, gen_batch2])\n\n # Rescale images 0 - 1\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n fig, axs = plt.subplots(r, c)\n cnt = 0\n if channels == 1:\n for i in range(r):\n for j in range(c):\n axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')\n axs[i, j].axis('off')\n cnt += 1\n fig.savefig(os.path.join(dir, \"images/%d.png\" % epoch))\n plt.close()\n # color images\n else:\n for i in range(r):\n for j in range(c):\n axs[i, j].imshow(gen_imgs[cnt, :, :, :])\n axs[i, j].axis('off')\n cnt += 1\n fig.savefig(os.path.join(dir, \"images/%d.png\" % epoch))\n plt.close()\n\n def plot_losses(self, dir):\n plt.plot(self.hist_g1, label='Generator 1 loss')\n plt.plot(self.hist_g2, label='Generator 2 loss')\n plt.xlabel('Iterations')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig(os.path.join(dir, 'losses/gen_loss.png'))\n np.save(os.path.join(dir, 'losses/g1_loss.npy'),self.hist_g1)\n np.save(os.path.join(dir, 'losses/g2_loss.npy'),self.hist_g2)\n plt.close()\n\n plt.plot(self.hist_d1, label='Discriminator 1 loss')\n plt.plot(self.hist_d2, label='Discriminator 2 loss')\n plt.xlabel('Iterations')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig(os.path.join(dir, 'losses/disc_loss.png'))\n np.save(os.path.join(dir, 'losses/d1_loss.npy'),self.hist_d1)\n np.save(os.path.join(dir, 'losses/d2_loss.npy'),self.hist_d2)\n plt.close()\n\n plt.plot(self.hist_discpenalty1, label='DiscPenalty 1')\n plt.plot(self.hist_discpenalty2, label='DiscPenalty 2')\n plt.xlabel('Iterations')\n plt.ylabel('Penalty Value')\n plt.legend()\n plt.savefig(os.path.join(dir, 'losses/disc_penalty.png'))\n np.save(os.path.join(dir, 'losses/d1_penalty.npy'),self.hist_discpenalty1)\n np.save(os.path.join(dir, 'losses/d2_penalty.npy'),self.hist_discpenalty2)\n plt.close()\n\n\n plt.plot(self.hist_weight_similarity, label='weight differences')\n plt.xlabel('Iterations')\n plt.ylabel('Difference')\n plt.legend()\n plt.savefig(os.path.join(dir, 'losses/weight_diff.png'))\n np.save(os.path.join(dir, 'losses/weight_diff.npy'),self.hist_weight_similarity)\n plt.close()\n\n plt.plot(self.hist_semantic_loss, label='semantic loss')\n plt.xlabel('Iterations')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig(os.path.join(dir, 'losses/semantic_loss.png'))\n np.save(os.path.join(dir, 'losses/semantic_loss.npy'),self.hist_semantic_loss)\n plt.close()\n\n plt.plot(self.hist_cycle_loss, label='Cycle loss')\n plt.xlabel('Iterations')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig(os.path.join(dir, 'losses/cycle_loss.png'))\n 
np.save(os.path.join(dir, 'losses/cycle_loss.npy'),self.hist_cycle_loss)\n plt.close()\n\n #plt.plot(self.hist_high_diff, label='high_features')\n plt.plot(self.hist_low1_diff, label='low_features_gen1')\n plt.plot(self.hist_low2_diff, label='low_features_gen2')\n plt.xlabel('Iterations')\n plt.ylabel('Difference')\n plt.legend()\n plt.savefig(os.path.join(dir, 'losses/feature_loss.png'))\n plt.close()\n\n plt.plot(self.hist_style1_loss, label='style1_loss')\n plt.plot(self.hist_style2_loss, label='style2_loss')\n plt.plot(self.hist_content_loss, label='content_loss')\n plt.xlabel('Iterations')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig(os.path.join(dir, 'losses/perceptual_loss.png'))\n plt.close()\n\n\n\n def clip_weights(self, clip):\n for i, var in enumerate(self.d1.trainable_variables):\n self.d1.trainable_variables[i].assign(tf.clip_by_value(var, -clip, clip))\n #if not np.array_equiv(self.d1.trainable_variables[i].numpy(), self.d2.trainable_variables[i].numpy()):\n #print(i)\n for i, var in enumerate(self.d2.trainable_variables[6:]):\n self.d2.trainable_variables[i + 6].assign(tf.clip_by_value(var, -clip, clip))\n #if not np.array_equiv(self.d1.trainable_variables[i].numpy(), self.d2.trainable_variables[i].numpy()):\n #print(i)\n"
] | [
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.initializers.Constant",
"tensorflow.keras.layers.PReLU",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.BatchNormalization",
"numpy.prod",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.backend.clip",
"tensorflow.keras.layers.Input"
],
[
"tensorflow.clip_by_value",
"matplotlib.pyplot.legend",
"tensorflow.keras.models.load_model",
"tensorflow.keras.applications.vgg19.preprocess_input",
"tensorflow.reduce_mean",
"tensorflow.math.l2_normalize",
"tensorflow.math.squared_difference",
"matplotlib.pyplot.subplots",
"tensorflow.keras.Model",
"tensorflow.keras.applications.VGG19",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"tensorflow.linalg.einsum",
"tensorflow.GradientTape",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
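The row above bundles CoGAN generator/discriminator builders whose defining trait is a shared trunk applied behind two domain-specific branches. A minimal sketch of that weight-sharing pattern follows; it is an illustration under assumed layer sizes, not the repo's exact architecture (the real models also use PReLU, BatchNormalization, dropout and custom initializers taken from `args`).

    import tensorflow as tf

    def build_discriminator_pair(input_shape=(32, 32, 3)):
        # Shared trunk: the same layer objects (and hence the same weights)
        # are reused by both discriminators, mirroring the CoGAN pattern.
        shared = tf.keras.Sequential([
            tf.keras.layers.Conv2D(64, 3, strides=2, padding='same', activation='relu'),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(1),
        ])

        # Domain-specific first layers, one per domain.
        img1 = tf.keras.Input(shape=input_shape)
        img2 = tf.keras.Input(shape=input_shape)
        x1 = tf.keras.layers.Conv2D(32, 3, strides=2, padding='same', activation='relu')(img1)
        x2 = tf.keras.layers.Conv2D(32, 3, strides=2, padding='same', activation='relu')(img2)

        d1 = tf.keras.Model(img1, shared(x1))
        d2 = tf.keras.Model(img2, shared(x2))
        return d1, d2

    d1, d2 = build_discriminator_pair()
    # The trunk variables are literally the same objects in both models.
    shared_ids = {id(v) for v in d1.trainable_variables} & {id(v) for v in d2.trainable_variables}
    print(len(shared_ids))  # > 0: those are the shared weights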
oke-aditya/fashion_intel | [
"8add9a94c6a7f30cc1c70a99c2e83860b2204f11"
] | [
"fashion_intel/pytorch_cnn_trainer/utils.py"
] | [
"import numpy as np\nimport torch\nimport random\nimport matplotlib.pyplot as plt\nimport os\n\n__all__ = [\n \"seed_everything\",\n \"AverageMeter\",\n \"accuracy\",\n \"EarlyStopping\",\n \"matplotlib_imshow\",\n \"print_size_of_model\",\n]\n\n\ndef seed_everything(seed):\n \"\"\"\n Makes code deterministic using a given seed. Internally sets all seeds of torch, numpy and random.\n \"\"\"\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n return [correct[:k].view(-1).float().sum(0) * 100.0 / batch_size for k in topk]\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print(\"\\t\".join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = \"{:\" + str(num_digits) + \"d}\"\n return \"[\" + fmt + \"/\" + fmt.format(num_batches) + \"]\"\n\n\nclass EarlyStopping:\n \"\"\"Early stops the training if validation loss doesn't improve after a given patience.\"\"\"\n\n def __init__(self, patience=7, verbose=False, delta=0.0001, path=\"checkpoint.pt\"):\n \"\"\"\n Args:\n patience (int): How long to wait after last time validation loss improved.\n Default: 7\n verbose (bool): If True, prints a message for each validation loss improvement. \n Default: False\n delta (float): Minimum change in the monitored quantity to qualify as an improvement.\n Default: 0\n path (str): Path for the checkpoint to be saved to.\n Default: 'checkpoint.pt'\n \"\"\"\n self.patience = patience\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop = False\n self.val_loss_min = np.Inf\n self.delta = delta\n self.path = path\n\n def __call__(self, val_loss, model):\n\n score = -val_loss\n\n if self.best_score is None:\n self.best_score = score\n self.save_checkpoint(val_loss, model)\n elif score < self.best_score + self.delta:\n self.counter += 1\n print(f\"EarlyStopping counter: {self.counter} out of {self.patience}\")\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n self.best_score = score\n self.save_checkpoint(val_loss, model)\n self.counter = 0\n\n def save_checkpoint(self, val_loss, model):\n \"\"\"Saves model when validation loss decrease.\"\"\"\n if self.verbose:\n print(\n f\"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). 
Saving model ...\"\n )\n torch.save(model.state_dict(), self.path)\n self.val_loss_min = val_loss\n\n\ndef print_size_of_model(model):\n torch.save(model.state_dict(), \"temp.p\")\n print(\"Size (MB):\", os.path.getsize(\"temp.p\") / 1e6)\n os.remove(\"temp.p\")\n\n\ndef matplotlib_imshow(img, one_channel=False):\n if one_channel:\n img = img.mean(dim=0)\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n if one_channel:\n plt.imshow(npimg, cmap=\"Greys\")\n else:\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n"
] | [
[
"matplotlib.pyplot.imshow",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
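The utilities above are driven from a training loop elsewhere in the repo; a small usage sketch of `EarlyStopping` and `AverageMeter` is given below. The import path is inferred from the row's file_path and is an assumption about how the package is installed, and the model plus the validation losses are stand-ins. One hedged side note: the file's `seed_everything` sets `torch.backends.cudnn.benchmark = True`, while PyTorch's reproducibility guidance normally pairs `deterministic = True` with `benchmark = False`.

    # Usage sketch only; the import path below is assumed from the file_path.
    import torch.nn as nn
    from fashion_intel.pytorch_cnn_trainer.utils import EarlyStopping, AverageMeter

    model = nn.Linear(10, 2)                        # stand-in for a real CNN
    stopper = EarlyStopping(patience=3, verbose=True, path="best.pt")
    loss_meter = AverageMeter()

    # Fake validation losses standing in for real epochs.
    for epoch, val_loss in enumerate([0.90, 0.85, 0.86, 0.87, 0.88]):
        loss_meter.update(val_loss)
        stopper(val_loss, model)                    # checkpoints on improvement
        if stopper.early_stop:
            print(f"stopping at epoch {epoch}, best val loss {stopper.val_loss_min:.4f}")
            break

    print(f"mean tracked val loss: {loss_meter.avg:.4f}")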
zzing0907/Tensorflow | [
"f0d66b2674fecc0f2be1423cf696a7e6e7c7a39d"
] | [
"tensorflow/python/ops/structured/structured_tensor_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for StructuredTensor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.structured import structured_tensor\nfrom tensorflow.python.platform import googletest\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass StructuredTensorTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def assertAllEqual(self, a, b, msg=None):\n if not (isinstance(a, structured_tensor.StructuredTensor) or\n isinstance(b, structured_tensor.StructuredTensor)):\n return super(StructuredTensorTest, self).assertAllEqual(a, b, msg)\n if not (isinstance(a, structured_tensor.StructuredTensor) and\n isinstance(b, structured_tensor.StructuredTensor)):\n # TODO(edloper) Add support for this once structured_factory_ops is added.\n raise ValueError(\"Not supported yet\")\n\n self.assertEqual(repr(a.shape), repr(b.shape))\n self.assertEqual(set(a.field_names()), set(b.field_names()))\n for field in a.field_names():\n self.assertAllEqual(a.field_value(field), b.field_value(field))\n\n @parameterized.parameters([\n {\n \"shape\": [],\n \"fields\": {},\n },\n {\n \"shape\": [None],\n \"fields\": {},\n },\n {\n \"shape\": [1, 5, 3],\n \"fields\": {},\n },\n {\n \"shape\": [],\n \"fields\": {\"Foo\": 5, \"Bar\": [1, 2, 3]},\n },\n {\n \"shape\": [2],\n \"fields\": {\"x\": [1, 2], \"y\": [[1, 2], [3, 4]]},\n },\n {\n \"shape\": [None],\n \"fields\": {\"x\": [1, 2], \"y\": [[1, 2], [3, 4]]},\n \"expected_shape\": [2], # inferred from field values.\n },\n {\n \"shape\": [],\n \"fields\": {\n \"r\": ragged_factory_ops.constant_value([[1, 2], [3]]),\n },\n },\n {\n \"shape\": [2],\n \"fields\": {\n \"r\": ragged_factory_ops.constant_value([[1, 2], [3]]),\n },\n },\n {\n \"shape\": [2, None],\n \"fields\": {\n \"r\": ragged_factory_ops.constant_value(\n [[[1, 2], [3]], [[4, 5, 6], [7], [8, 9]]]),\n },\n },\n {\n # Note: fields must have identical row_splits.\n \"shape\": [2, None],\n \"fields\": {\n \"a\": ragged_factory_ops.constant_value([[1, 2], [3]]),\n \"b\": ragged_factory_ops.constant_value([[4, 5], [6]]),\n },\n },\n {\n # Note: fields must have identical outer row_splits.\n \"shape\": [2, None],\n \"fields\": {\n \"a\": ragged_factory_ops.constant_value(\n [[[1, 2], [3]], [[4, 5, 6], [7], [8, 9]]]),\n \"b\": ragged_factory_ops.constant_value(\n [[[1], []], [[2, 3], [4, 5, 6], [7, 8]]]),\n },\n },\n ]) # pyformat: disable\n def 
testFromFields(self, shape, fields, expected_shape=None):\n struct = structured_tensor.StructuredTensor.from_fields(shape, fields)\n if expected_shape is None:\n expected_shape = shape\n self.assertEqual(struct.shape.as_list(), expected_shape)\n self.assertLen(expected_shape, struct.rank)\n self.assertEqual(struct.field_names(), tuple(fields.keys()))\n for field, value in fields.items():\n self.assertIsInstance(\n struct.field_value(field),\n (ops.Tensor, structured_tensor.StructuredTensor,\n ragged_tensor.RaggedTensor))\n self.assertAllEqual(struct.field_value(field), value)\n\n def testNestedStructConstruction(self):\n rt = ragged_factory_ops.constant([[1, 2], [3]])\n struct1 = structured_tensor.StructuredTensor.from_fields([], {\"x\": [1, 2]})\n struct2 = structured_tensor.StructuredTensor.from_fields([2], {\"x\": [1, 2]})\n struct3 = structured_tensor.StructuredTensor.from_fields([], {\n \"r\": rt,\n \"s\": struct1\n })\n struct4 = structured_tensor.StructuredTensor.from_fields([2], {\n \"r\": rt,\n \"s\": struct2\n })\n\n self.assertEqual(struct3.shape.as_list(), [])\n self.assertEqual(struct3.rank, 0)\n self.assertEqual(set(struct3.field_names()), set([\"r\", \"s\"]))\n self.assertAllEqual(struct3.field_value(\"r\"), rt)\n self.assertAllEqual(struct3.field_value(\"s\"), struct1)\n\n self.assertEqual(struct4.shape.as_list(), [2])\n self.assertEqual(struct4.rank, 1)\n self.assertEqual(set(struct4.field_names()), set([\"r\", \"s\"]))\n self.assertAllEqual(struct4.field_value(\"r\"), rt)\n self.assertAllEqual(struct4.field_value(\"s\"), struct2)\n\n @parameterized.parameters([\n (object(), {}, TypeError),\n ([], object(), TypeError, \"fields must be a dictionary\"),\n ([], {1: 2}, TypeError, \"Unexpected type for key\"),\n ([], {\"x\": object()}, TypeError, \"Unexpected type for value\"),\n (None, {}, ValueError, \"StructuredTensor's shape must have known rank\"),\n ([5], {\"f\": 5}, ValueError, r\"Shapes \\(5,\\) and \\(\\) are not compatible\"),\n ([None], {\"x\": [1], \"y\": []}, ValueError,\n r\"Shapes \\([01],\\) and \\([01],\\) are not compatible\"),\n ([], {\"\": 5}, ValueError, \"Field name '' is not currently allowed.\"),\n ([], {\"_\": 5}, ValueError, \"Field name '_' is not currently allowed.\"),\n {\n # Note: fields must have identical outer row_splits.\n \"shape\": [2, None],\n \"fields\": {\n \"r1\": ragged_factory_ops.constant_value(\n [[1, 2], [3]]),\n \"r2\": ragged_factory_ops.constant_value(\n [[1, 2, 3], [4]]),\n },\n \"err\": errors.InvalidArgumentError,\n \"msg\": r\"`fields` are not consistent in the outer 2 dimension\"\n },\n ]) # pyformat: disable\n def testFromFieldsErrors(self, shape, fields, err, msg=None):\n with self.assertRaisesRegexp(err, msg):\n struct = structured_tensor.StructuredTensor.from_fields(shape, fields)\n self.evaluate(struct.field_value(struct.field_names()[0]))\n\n @parameterized.parameters([\n {\n \"shape\": [3],\n \"fields\": {\"x\": [1, 2, 3], \"y\": [[1, 2], [3, 4], [5, 6]]},\n \"row_splits\": [0, 2, 3],\n },\n ]) # pyformat: disable\n def testFromRowSplits(self, shape, fields, row_splits, expected_shape=None):\n values = structured_tensor.StructuredTensor.from_fields(shape, fields)\n struct = structured_tensor.StructuredTensor.from_row_splits(\n values, row_splits)\n if expected_shape is None:\n expected_shape = tensor_shape.TensorShape([None,\n None]).concatenate(shape[1:])\n struct.shape.assert_is_compatible_with(expected_shape)\n else:\n self.assertEqual(struct.shape.as_list(), expected_shape)\n self.assertEqual(struct.shape.rank, 
struct.rank)\n self.assertEqual(struct.field_names(), tuple(fields.keys()))\n for field, value in fields.items():\n self.assertIsInstance(\n struct.field_value(field),\n (ops.Tensor, structured_tensor.StructuredTensor,\n ragged_tensor.RaggedTensor))\n self.assertAllEqual(\n struct.field_value(field),\n ragged_tensor.RaggedTensor.from_row_splits(value, row_splits))\n\n @parameterized.parameters([\n ([], {}, [\"x\"], ValueError,\n r\"Shape \\(\\) must have rank at least 1\"),\n ([0], {}, [\"x\"], ValueError,\n r\"Row-partitioning tensors must have dtype int32 or int64\"),\n ([0], {}, [[0]], ValueError,\n r\"Shape \\(1, 1\\) must have rank 1\"),\n ([0], {}, np.array([], np.int32), ValueError,\n r\"row_splits may not be empty\"),\n ]) # pyformat: disable\n def testFromRowSplitsErrors(self, shape, fields, row_splits, err, msg=None):\n with self.assertRaisesRegexp(err, msg):\n values = structured_tensor.StructuredTensor.from_fields(shape, fields)\n structured_tensor.StructuredTensor.from_row_splits(values, row_splits)\n\n def testFromRowSplitsBadValueType(self):\n with self.assertRaisesRegexp(TypeError,\n \"values must be a StructuredTensor\"):\n structured_tensor.StructuredTensor.from_row_splits([1, 2], [0, 2])\n\n\nif __name__ == \"__main__\":\n googletest.main()\n"
] | [
[
"tensorflow.python.ops.structured.structured_tensor.StructuredTensor.from_fields",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant_value",
"tensorflow.python.platform.googletest.main",
"numpy.array",
"tensorflow.python.ops.structured.structured_tensor.StructuredTensor.from_row_splits",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
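The test file above constructs StructuredTensors through TensorFlow-internal modules. The sketch below mirrors that construction; note that the import path is the same internal, non-public one the test uses, and the positional order of `from_fields` appears to differ across TensorFlow releases, so both arguments are passed by keyword here.

    from tensorflow.python.ops.ragged import ragged_factory_ops
    from tensorflow.python.ops.structured import structured_tensor

    rt = ragged_factory_ops.constant([[1, 2], [3]])
    st = structured_tensor.StructuredTensor.from_fields(
        shape=[2], fields={"x": [10, 20], "r": rt})

    print(st.rank)              # outer rank of the struct (1 here)
    print(st.field_names())     # the registered field names
    print(st.field_value("r"))  # the ragged field, row-aligned with the struct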
krsmith/DS-Unit-3-Sprint-2-SQL-and-Databases | [
"9617528ad5fd23354623926b819f98f9a063d252"
] | [
"demo_data.py"
] | [
"import sqlite3\nimport pandas as pd\n\nconn = sqlite3.connect('demo_data.sqlite3')\ncurs = conn.cursor()\n\ncreate_demo_table = \"\"\"\nCREATE TABLE demo (\n s varchar(5),\n x int,\n y int\n );\"\"\"\n\ncurs.execute(create_demo_table)\nconn.commit()\n\ncurs.execute(\"\"\"INSERT INTO demo (\n s, x, y) VALUES\"\"\" + str(('g', 3, 9)))\nconn.commit()\n\ncurs.execute(\"\"\"INSERT INTO demo (\n s, x, y) VALUES\"\"\" + str(('v', 5, 7)))\nconn.commit()\n\ncurs.execute(\"\"\"INSERT INTO demo (\n s, x, y) VALUES\"\"\" + str(('f', 8, 7)))\nconn.commit()\n\n# Queries for SC questions\n\n\n# Count how many rows you have - it should be 3!\ndef row_count():\n print(pd.read_sql_query(\"\"\"SELECT COUNT(*) as row_count\n FROM demo;\"\"\", conn))\n# row_count\n# 0 3\n\n\n# How many rows are there where both x and y are at least 5?\ndef row_xy5():\n print(pd.read_sql_query(\"\"\"SELECT COUNT(*) as row_count\n FROM demo\n WHERE x >= 5\n AND y >= 5;\"\"\", conn))\n# row_count\n# 0 2\n\n\n# How many unique values of y are there (hint - COUNT() can accept\n# a keyword DISTINCT)?\ndef y_values():\n print(pd.read_sql_query(\"\"\"SELECT COUNT(distinct y) as y_values\n FROM demo;\"\"\", conn))\n# y_values\n# 0 2\n"
] | [
[
"pandas.read_sql_query"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
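The demo above assembles its INSERT statements by concatenating `str()`-formatted tuples, which happens to work for these literals. The sketch below shows the parameterized form the sqlite3 module provides (`?` placeholders plus `executemany`), using an in-memory database so it leaves no file behind; it is an alternative illustration, not the author's script.

    import sqlite3
    import pandas as pd

    conn = sqlite3.connect(":memory:")          # throwaway database for the sketch
    curs = conn.cursor()
    curs.execute("CREATE TABLE demo (s varchar(5), x int, y int);")

    # Parameterized inserts: sqlite3 substitutes the ? placeholders safely.
    rows = [("g", 3, 9), ("v", 5, 7), ("f", 8, 7)]
    curs.executemany("INSERT INTO demo (s, x, y) VALUES (?, ?, ?);", rows)
    conn.commit()

    print(pd.read_sql_query("SELECT COUNT(*) AS row_count FROM demo;", conn))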
fpmosley/advent-of-code | [
"507bd89795ff6a0824284c3c8d2123cf19a932a3"
] | [
"2021/day09/part01/smoke_basin.py"
] | [
"#!/usr/bin/env python\n\n'''\nAdvent of Code 2021 - Day 9: Smoke Basin (Part 1)\nhttps://adventofcode.com/2021/day/9\n'''\n\nimport numpy as np\n\n\nclass HeightMap():\n def __init__(self) -> None:\n self._grid = np.array([])\n\n def add_row(self, row):\n np_row = np.array(row)\n if self._grid.size != 0:\n self._grid = np.vstack([self._grid, np_row])\n else:\n self._grid = np_row\n\n def find_low_points(self, radius=1):\n low_points = []\n for index, point in np.ndenumerate(self._grid):\n neighbor_points = self._neighbors(radius, coordinates=index)\n\n if point < min(neighbor_points):\n low_points.append(point)\n\n return low_points\n\n def _neighbors(self, radius, coordinates=(0, 0)):\n neighbors = []\n row = coordinates[0]\n column = coordinates[1]\n\n # Get UP neighbor value\n if row >= 1:\n neighbors.append(self._grid[row - radius, column])\n\n # Get LEFT neighbor value\n if column >= 1:\n neighbors.append(self._grid[row, column - radius])\n\n # Get RIGHT neighbor value\n if column < len(self._grid[0]) - radius:\n neighbors.append(self._grid[row, column + radius])\n\n # Get DOWN neighbor value\n if row < len(self._grid) - radius:\n neighbors.append(self._grid[row + radius, column])\n\n return neighbors\n\n def __str__(self) -> str:\n output = \"\"\n for row in self._grid:\n for elem in row:\n output = output + f\"{elem:>3}\"\n output = output + \"\\n\"\n return output\n\n\ndef calculate_risk(heights):\n # Risk is 1 plus the height\n return sum([height + 1 for height in heights])\n\n\ndef main():\n\n filename = input(\"What is the input file name? \")\n\n try:\n with open(filename, \"r\") as file:\n\n # Create a new board\n area = HeightMap()\n\n # Read the rows and setup the HeightMap\n for line in file:\n line = line.strip()\n\n input_row = [int(x) for x in str(line)]\n area.add_row(input_row)\n\n print(\"The input grid: \")\n print(area)\n low_points = area.find_low_points()\n sum_risk_levels = calculate_risk(\n low_points) if low_points else None\n\n if sum_risk_levels:\n low_points_str = [str(point) for point in low_points]\n print(f\"Number of low points: {len(low_points)}\")\n print(f\"Low points: {', '.join(low_points_str)}\")\n print(\n f\"\\nThe sum of the risk levels of all low points is: {sum_risk_levels}\\n\")\n else:\n print(\"The sum of the risk levels of all low points not found.\\n\")\n\n except FileNotFoundError:\n print(f\"No such file or directory: '{filename}'\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.ndenumerate",
"numpy.array",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
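The HeightMap above finds low points by iterating with `np.ndenumerate` and checking up to four neighbours per cell. An equivalent vectorized check, shown below on a small made-up grid, pads the array with a sentinel height and compares shifted views; this is a sketch of the same low-point rule, not the repo's code.

    import numpy as np

    grid = np.array([[2, 1, 9, 9, 9],
                     [3, 9, 8, 7, 8],
                     [9, 8, 5, 6, 7]])

    # Pad with a value higher than any height so edge cells never "lose" to the border.
    padded = np.pad(grid, 1, constant_values=10)
    up, down = padded[:-2, 1:-1], padded[2:, 1:-1]
    left, right = padded[1:-1, :-2], padded[1:-1, 2:]

    # A low point is strictly lower than all of its orthogonal neighbours.
    low_mask = (grid < up) & (grid < down) & (grid < left) & (grid < right)
    low_points = grid[low_mask]
    print(low_points, (low_points + 1).sum())   # risk = height + 1, summed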
Mohammed-Abbass/DeepEI | [
"6466556e529afd9ef747105c21cba51cbac890fe",
"6466556e529afd9ef747105c21cba51cbac890fe"
] | [
"Retention/multi_cnn.py",
"Discussion/Reply_Comments/NEIMS_A_B_comparison.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 9 09:22:42 2020\n\n@author: hcji\n\"\"\"\n\n\nimport numpy as np\nimport tensorflow.keras.backend as K\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Input, Flatten, Conv1D, MaxPooling1D, concatenate\nfrom tensorflow.keras import optimizers\nfrom sklearn.metrics import mean_absolute_error, r2_score\nfrom smiles_to_onehot.encoding import get_dict, one_hot_coding\n\nclass multi_CNN:\n def __init__(self, X, Y):\n self.X = X\n self.Y = Y\n self.X_tr, self.X_ts, self.Y_tr, self.Y_ts = train_test_split(X, Y, test_size=0.1)\n \n inp = Input(shape=(X.shape[1:3]))\n n = X.shape[1]\n\n hidv1 = Conv1D(n, kernel_size=2, activation='relu')(inp)\n # hidv1 = MaxPooling1D(pool_size=2)(hidv1)\n hidv1 = Conv1D(n, kernel_size=2, activation='relu')(hidv1)\n # hidv1 = MaxPooling1D(pool_size=2)(hidv1)\n hidv1 = Flatten()(hidv1)\n \n hidv2 = Conv1D(n, kernel_size=3, activation='relu')(inp)\n # hidv2 = MaxPooling1D(pool_size=3)(hidv2)\n hidv2 = Conv1D(n, kernel_size=3, activation='relu')(hidv2)\n # hidv2 = MaxPooling1D(pool_size=3)(hidv2)\n hidv2 = Flatten()(hidv2)\n \n hidv3 = Conv1D(n, kernel_size=4, activation='relu')(inp)\n # hidv3 = MaxPooling1D(pool_size=4)(hidv3)\n hidv3 = Conv1D(n, kernel_size=4, activation='relu')(hidv3)\n # hidv3 = MaxPooling1D(pool_size=4)(hidv3)\n hidv3 = Flatten()(hidv3)\n\n hid = concatenate([hidv1, hidv2, hidv3], axis=-1)\n hid = Dense(32, activation=\"relu\")(hid)\n hid = Dense(32, activation=\"relu\")(hid)\n \n prd = Dense(1, activation=\"linear\")(hid)\n opt = optimizers.Adam(lr=0.001)\n model = Model(inp, prd)\n model.compile(optimizer=opt, loss='mse', metrics=['mae'])\n self.model = model\n \n def train(self, epochs=20):\n history = self.model.fit(self.X_tr, self.Y_tr, epochs=epochs, validation_split = 0.1)\n plt.cla()\n plt.plot(history.history['val_loss'], alpha= 0.8)\n plt.plot(history.history['val_mean_absolute_error'], alpha= 0.8)\n plt.legend(['loss', 'accuracy'], loc=\"lower left\")\n plt.xlabel('epoch')\n return history\n \n def test(self):\n Y_test = self.Y_ts\n Y_pred = np.round(self.model.predict(self.X_ts))\n r2 = round(r2_score(Y_pred, Y_test), 4)\n mae = round(mean_absolute_error(Y_pred, Y_test), 4)\n\n plt.cla()\n plt.plot(Y_test, Y_pred, '.', color = 'blue')\n plt.plot([0,4500], [0,4500], color ='red')\n plt.ylabel('Predicted RI')\n plt.xlabel('Experimental RI') \n plt.text(0, 4000, 'R2='+str(r2), fontsize=12)\n plt.text(0, 3600, 'MAE='+str(mae), fontsize=12)\n plt.show()\n return r2, mae\n \n def save(self, path):\n self.model.save(path)\n K.clear_session()\n \n\nif __name__ == '__main__':\n \n import json\n \n with open('DeepEI/data/split.json', 'r') as js:\n keep = np.array(json.load(js)['keep'])\n \n smiles = np.array(json.load(open('DeepEI/data/all_smiles.json')))[keep]\n rindex = np.load('DeepEI/data/retention.npy')[keep,:]\n \n words = get_dict(smiles, save_path='DeepEI/data/words.json')\n smiles = [one_hot_coding(smi, words, max_len=100).todense() for smi in smiles]\n smiles = np.array(smiles)\n \n # simipolar\n i = np.where(~ np.isnan(rindex[:,0]))[0]\n mod = multi_CNN(smiles[i], rindex[i,0])\n mod.train()\n mod.test()\n mod.save('Retention/models/SimiStdNP_CNN_multi_model.h5')\n \n # nonpolar\n i = np.where(~ np.isnan(rindex[:,1]))[0]\n mod = multi_CNN(smiles[i], rindex[i,1])\n mod.train()\n mod.test()\n mod.save('Retention/models/StdNP_CNN_multi_model.h5')\n\n # polar\n i = 
np.where(~ np.isnan(rindex[:,2]))[0]\n mod = multi_CNN(smiles[i], rindex[i,2])\n mod.train()\n mod.test()\n mod.save('Retention/models/StdPolar_CNN_multi_model.h5')\n ",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 7 09:04:52 2020\n\n@author: hcji\n\"\"\"\n\n\nimport json\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom scipy.sparse import load_npz\nfrom DeepEI.utils import get_score\n\nwith open('DeepEI/data/split.json', 'r') as js:\n split = json.load(js)\nkeep = np.array(split['keep'])\n\nnist_smiles = np.array(json.load(open('DeepEI/data/all_smiles.json')))[keep]\nnist_masses = np.load('DeepEI/data/molwt.npy')[keep]\nnist_spec = load_npz('DeepEI/data/peakvec.npz').todense()[keep,:]\nneims_nist_spec = load_npz('DeepEI/data/neims_spec_nist.npz').todense()[keep,:]\n\nneims_msbk_smiles = np.array(json.load(open('DeepEI/data/neims_msbk_smiles.json')))\nneims_msbk_masses = np.load('DeepEI/data/neims_msbk_masses.npy')\nneims_msbk_spec = load_npz('DeepEI/data/neims_spec_msbk.npz').todense()\n\nmsbk_smiles = np.array(json.load(open('DeepEI/data/msbk_smiles.json')))\nmsbk_masses = np.load('DeepEI/data/msbk_masses.npy')\nmsbk_spec = load_npz('DeepEI/data/msbk_spec.npz').todense()\n\ndb_smiles = np.array(list(nist_smiles) + list(neims_msbk_smiles))\ndb_masses = np.append(nist_masses, neims_msbk_masses)\ndb_spec_a = np.append(nist_spec, neims_msbk_spec, axis=0)\ndb_spec_b = np.append(neims_nist_spec, neims_msbk_spec, axis=0)\n\ni = 70\nsmi = msbk_smiles[i]\nspecr = msbk_spec[i]\nmass = msbk_masses[i]\ncandidate = np.where(np.abs(db_masses - mass) < 5)[0]\ncand_smi = db_smiles[candidate]\n\nscores_a = get_score(specr, db_spec_a[candidate,:], m='wdp')\nscores_b = get_score(specr, db_spec_b[candidate,:], m='wdp')\n\nwh_true = np.where(cand_smi == smi)[0][0]\ntrue_score_a = scores_a[wh_true]\ntrue_score_b = scores_b[wh_true]\nrank_a = len(np.where(scores_a > true_score_a)[0]) + 1\nrank_b = len(np.where(scores_b > true_score_b)[0]) + 1\n\ntrue = candidate[wh_true]\nj = candidate[435]\n\ndecoy_smi = db_smiles[j]\n\nplt.figure(figsize=(6, 6))\nplt.vlines(np.arange(0, 2000), np.zeros(2000), db_spec_a[j], 'red', label='NIST_decoy')\nplt.vlines(np.arange(0, 2000), np.zeros(2000), -db_spec_b[j], 'blue', label='NEIMS_decoy')\nplt.axhline(0, color='black')\nplt.xlim(0, 500)\nplt.legend()\n\nplt.figure(figsize=(6, 6))\nplt.vlines(np.arange(0, 2000), np.zeros(2000), specr, 'green', label='MassBank_true')\nplt.axhline(0, color='black')\nplt.vlines(np.arange(0, 2000), np.zeros(2000), -db_spec_b[j], 'blue', label='NEIMS_decoy')\nplt.xlim(0, 500)\nplt.legend()\n\nplt.figure(figsize=(6, 6))\nplt.vlines(np.arange(0, 2000), np.zeros(2000), specr, 'green', label='MassBank_true')\nplt.axhline(0, color='black')\nplt.vlines(np.arange(0, 2000), np.zeros(2000), -db_spec_a[j], 'red', label='NIST_decoy')\nplt.xlim(0, 500)\nplt.legend()\n\nplt.figure(figsize=(6, 6))\nplt.vlines(np.arange(0, 2000), np.zeros(2000), specr, 'green', label='MassBank_true')\nplt.axhline(0, color='black')\nplt.vlines(np.arange(0, 2000), np.zeros(2000), -db_spec_a[true], 'purple', label='NEIMS_true')\nplt.xlim(0, 500)\nplt.legend()\n"
] | [
[
"matplotlib.pyplot.legend",
"sklearn.metrics.r2_score",
"sklearn.metrics.mean_absolute_error",
"matplotlib.pyplot.plot",
"tensorflow.keras.backend.clear_session",
"numpy.load",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.models.Model",
"numpy.isnan",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.cla",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.layers.Input"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axhline",
"numpy.abs",
"numpy.arange",
"scipy.sparse.load_npz",
"numpy.append",
"matplotlib.pyplot.xlim",
"numpy.load",
"numpy.array",
"numpy.zeros",
"numpy.where",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
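In the first file of the row above, the test metrics are computed as `r2_score(Y_pred, Y_test)` and `mean_absolute_error(Y_pred, Y_test)`. scikit-learn treats the first argument as the ground truth, and R^2 is not symmetric in its arguments, so the swapped order reports a slightly different value than the conventional R^2 (MAE is unaffected). The snippet below, on made-up numbers, shows the asymmetry. A related version note: with `metrics=['mae']`, newer Keras records the history key as `val_mae` rather than `val_mean_absolute_error`, so the plotting call may need the other name depending on the installed TensorFlow.

    import numpy as np
    from sklearn.metrics import r2_score, mean_absolute_error

    y_true = np.array([100., 200., 300., 400.])
    y_pred = np.array([110., 190., 320., 370.])

    # r2_score is not symmetric: the first argument is treated as ground truth.
    print(r2_score(y_true, y_pred))             # conventional R^2
    print(r2_score(y_pred, y_true))             # what the swapped call computes
    print(mean_absolute_error(y_true, y_pred))  # MAE is symmetric, so order is harmless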
matthewclso/ivy | [
"b297fd89812ec96212ef6996a82c65fe3aab9d3c",
"b297fd89812ec96212ef6996a82c65fe3aab9d3c"
] | [
"ivy/functional/backends/mxnet/old/general.py",
"ivy/functional/backends/tensorflow/linear_algebra.py"
] | [
"\"\"\"\nCollection of MXNet general functions, wrapped to fit Ivy syntax and signature.\n\"\"\"\n\n# global\nimport ivy\n_round = round\nimport logging\nimport mxnet as _mx\nimport numpy as _np\nimport math as _math\nfrom numbers import Number\nfrom operator import mul as _mul\nfrom functools import reduce as _reduce\nimport multiprocessing as _multiprocessing\n\n# local\nfrom ivy.functional.ivy.old import default_device, default_dtype\nfrom ivy.functional.backends.mxnet.old.device import _callable_dev\nfrom ivy.functional.backends.mxnet import _handle_flat_arrays_in_out, _mxnet_init_context,\\\n _scalar_or_flat_array_to_scalar, _handle_flat_arrays_in\n\n\nDTYPE_TO_STR = {_np.dtype('int8'): 'int8',\n _np.dtype('int16'): 'int16',\n _np.dtype('int32'): 'int32',\n _np.dtype('int64'): 'int64',\n _np.dtype('uint8'): 'uint8',\n _np.dtype('uint16'): 'uint16',\n _np.dtype('uint32'): 'uint32',\n _np.dtype('uint64'): 'uint64',\n 'bfloat16': 'bfloat16',\n _np.dtype('float16'): 'float16',\n _np.dtype('float32'): 'float32',\n _np.dtype('float64'): 'float64',\n _np.dtype('bool'): 'bool',\n\n _np.int8: 'int8',\n _np.int16: 'int16',\n _np.int32: 'int32',\n _np.int64: 'int64',\n _np.uint8: 'uint8',\n _np.uint16: 'uint16',\n _np.uint32: 'uint32',\n _np.uint64: 'uint64',\n _np.float16: 'float16',\n _np.float32: 'float32',\n _np.float64: 'float64',\n _np.bool_: 'bool'}\n\nDTYPE_FROM_STR = {'int8': _np.int8,\n 'int16': _np.int16,\n 'int32': _np.int32,\n 'int64': _np.int64,\n 'uint8': _np.uint8,\n 'uint16': _np.uint16,\n 'uint32': _np.uint32,\n 'uint64': _np.uint64,\n 'bfloat16': 'bfloat16',\n 'float16': _np.float16,\n 'float32': _np.float32,\n 'float64': _np.float64,\n 'bool': _np.bool_}\n\n\n# API #\n# ----#\n\ndef array(object_in, dtype=None, dev=None):\n cont = _mxnet_init_context(default_device(dev))\n return _mx.nd.array(object_in, cont, dtype=default_dtype(dtype, object_in))\n\n\nasarray = array\n\n\ndef is_array(x, exclusive=False):\n if isinstance(x, _mx.ndarray.ndarray.NDArray):\n if exclusive and x.grad is not None:\n return False\n return True\n return False\n\n\ncopy_array = lambda x: x.copy()\n\n\n@_handle_flat_arrays_in_out\ndef array_equal(x0, x1):\n if ivy.dtype(x0, as_str=True) == 'bool':\n x0 = x0.astype('int32')\n if ivy.dtype(x1, as_str=True) == 'bool':\n x1 = x1.astype('int32')\n return _mx.nd.min(_mx.nd.broadcast_equal(x0, x1)) == 1\n\n\ndef dtype_bits(dtype_in):\n dtype_str = dtype_to_str(dtype_in)\n if 'bool' in dtype_str:\n return 1\n return int(dtype_str.replace(\"<class 'numpy.\", '').replace(\"'>\", '').replace('uint', '').replace(\n 'int', '').replace('bfloat', '').replace('float', ''))\n\n\nequal = lambda x1, x2: x1 == x2\nequal.__name__ = 'equal'\nto_numpy = lambda x: x if isinstance(x, _np.ndarray) else (_np.array(x) if isinstance(x, (int, float)) else x.asnumpy())\nto_numpy.__name__ = 'to_numpy'\nto_scalar = lambda x: x if isinstance(x, Number) else x.asscalar().item()\nto_scalar.__name__ = 'to_scalar'\nto_list = lambda x: to_numpy(x).tolist()\nto_list.__name__ = 'to_list'\nshape = lambda x, as_tensor=False: _mx.nd.shape_array(x) if as_tensor else x.shape\nshape.__name__ = 'shape'\nget_num_dims = lambda x, as_tensor=False:\\\n _mx.nd.shape_array(_mx.nd.shape_array(x)).reshape([]) if as_tensor else len(x.shape)\nminimum = lambda x, y: _mx.nd.array(_mx.nd.minimum(_scalar_or_flat_array_to_scalar(x), _scalar_or_flat_array_to_scalar(y)))\nmaximum = lambda x, y: _mx.nd.array(_mx.nd.maximum(_scalar_or_flat_array_to_scalar(x), 
_scalar_or_flat_array_to_scalar(y)))\n\n\n@_handle_flat_arrays_in_out\ndef clip(x, x_min, x_max):\n return _mx.nd.clip(_mx.nd.array(x), x_min, x_max)\n\n\n@_handle_flat_arrays_in_out\ndef round(x):\n return _mx.nd.round(x)\n\n\n@_handle_flat_arrays_in_out\ndef floormod(x, y):\n return x % y\n\n\n@_handle_flat_arrays_in_out\ndef floor(x):\n return _mx.nd.floor(x)\n\n\n# noinspection PyShadowingBuiltins\n@_handle_flat_arrays_in_out\ndef abs(x):\n return _mx.nd.abs(x)\n\nargmin = lambda x, axis=0: _mx.nd.argmin(x, axis)\n\n\n@_handle_flat_arrays_in_out\ndef cast(x, dtype):\n return x.astype(dtype)\n\n\nastype = cast\n\n\n# noinspection PyUnresolvedReferences\ndef arange(stop, start=0, step=1, dtype=None, dev=None):\n cont = _mxnet_init_context(default_device(dev))\n stop = stop if isinstance(stop, Number) else stop.asscalar()\n start = start if isinstance(start, Number) else start.asscalar()\n step = step if isinstance(step, Number) else step.asscalar()\n return _mx.nd.arange(start, stop, ctx=cont, step=step, dtype=dtype)\n\n\ndef _linspace(start, stop, num, cont):\n if num == 1:\n return start\n start = _mx.nd.array(start).reshape((1,)).astype('float32')\n stop = _mx.nd.array(stop).reshape((1,)).astype('float32')\n n_m_1 = _mx.nd.array(num - 1).reshape((1,)).astype('float32')\n increment = (stop - start)/n_m_1\n increment_tiled = _mx.nd.tile(increment, num - 1)\n increments = increment_tiled * _mx.nd.array(_mx.nd.np.linspace(1, num - 1, num - 1).tolist(), ctx=cont)\n ret = _mx.nd.concat(start, start + increments, dim=0)\n return ret\n\n\ndef linspace(start, stop, num, axis=None, dev=None):\n cont = _mxnet_init_context(default_device(dev))\n num = num.asnumpy()[0] if isinstance(num, _mx.nd.NDArray) else num\n start_is_array = isinstance(start, _mx.nd.NDArray)\n stop_is_array = isinstance(stop, _mx.nd.NDArray)\n start_shape = []\n if start_is_array:\n start_shape = list(start.shape)\n start = start.reshape((-1,))\n if stop_is_array:\n start_shape = list(stop.shape)\n stop = stop.reshape((-1,))\n if start_is_array and stop_is_array:\n res = [_linspace(strt, stp, num, cont) for strt, stp in zip(start, stop)]\n elif start_is_array and not stop_is_array:\n res = [_linspace(strt, stop, num, cont) for strt in start]\n elif not start_is_array and stop_is_array:\n res = [_linspace(start, stp, num, cont) for stp in stop]\n else:\n return _linspace(start, stop, num, cont)\n new_shape = start_shape + [num]\n res = _mx.nd.concat(*res, dim=-1).reshape(new_shape)\n if axis is not None:\n res = _mx.nd.swapaxes(res, axis, -1)\n return res\n\n\ndef logspace(start, stop, num, base=10., axis=None, dev=None):\n power_seq = linspace(start, stop, num, axis, default_device(dev))\n return base ** power_seq\n\n\n@_handle_flat_arrays_in_out\ndef concatenate(xs, axis=-1):\n return _mx.nd.concat(*xs, dim=axis)\n\n\ndef stack(xs, axis=0):\n if xs[0].shape == ():\n return _mx.nd.reshape(_mx.nd.stack(*[_flat_array_to_1_dim_array(x) for x in xs], axis=axis), -1)\n return _mx.nd.stack(*xs, axis=axis)\n\n\ndef unstack(x, axis, keepdims=False):\n if x.shape == ():\n return [x]\n num_outputs = x.shape[axis]\n ret = _mx.nd.split(x, num_outputs, axis, squeeze_axis=not keepdims)\n return ret if isinstance(ret, list) else [ret]\n\n\ndef split(x, num_or_size_splits=None, axis=0, with_remainder=False):\n if x.shape == ():\n if num_or_size_splits is not None and num_or_size_splits != 1:\n raise Exception('input array had no shape, but num_sections specified was {}'.format(num_or_size_splits))\n return [x]\n if num_or_size_splits == 
1:\n return [x]\n elif with_remainder and isinstance(num_or_size_splits, int):\n num_or_size_splits = x.shape[axis] if not num_or_size_splits else num_or_size_splits\n num_chunks = x.shape[axis] / num_or_size_splits\n num_chunks_int = _math.floor(num_chunks)\n remainder_size = int((num_chunks - num_chunks_int) * num_or_size_splits)\n num_or_size_splits = [num_or_size_splits]*num_chunks_int + [remainder_size]\n if isinstance(num_or_size_splits, (list, tuple)):\n csum = [0] + _np.cumsum(num_or_size_splits).tolist()\n starts = csum[:-1]\n ends = csum[1:]\n if axis < 0:\n slices = [tuple([Ellipsis, slice(s, e, 1)] + [slice(None, None, None)]*int(abs(axis)-1))\n for s, e in zip(starts, ends)]\n else:\n slices = [tuple([slice(None, None, None)]*axis + [slice(s, e, 1)])\n for s, e in zip(starts, ends)]\n return [x[so] for so in slices]\n return _mx.nd.split(x, x.shape[axis] if not num_or_size_splits else num_or_size_splits, axis)\n\n\n@_handle_flat_arrays_in_out\ndef repeat(x, repeats, axis=None):\n return _mx.nd.repeat(x, repeats, axis)\n\n\ndef tile(x, reps):\n if isinstance(reps, _mx.nd.ndarray.NDArray):\n reps = reps.asnumpy().tolist()\n return _mx.nd.tile(_flat_array_to_1_dim_array(x), reps)\n\n\n@_handle_flat_arrays_in\ndef constant_pad(x, pad_width, value=0):\n if isinstance(pad_width, _mx.ndarray.ndarray.NDArray):\n pad_width = pad_width.asnumpy().tolist()\n x_shape = list(x.shape)\n num_dims = len(x_shape)\n if num_dims > 3:\n raise Exception('Invalid inputs. Pad for mxnet only supports inputs with 3 dimensions or smaller.')\n num_dims_to_add = 4 - num_dims\n new_shape = tuple([1] * num_dims_to_add + x_shape)\n mat_expanded_dims = _mx.nd.reshape(x, new_shape)\n pad_width_flat = [0]*num_dims_to_add*2 + [item for sublist in pad_width for item in sublist]\n pad_expanded_dims = _mx.nd.pad(mat_expanded_dims, mode=\"constant\", pad_width=tuple(pad_width_flat),\n constant_value=value)\n new_shape = [orig_dim + pad_width_item[0] + pad_width_item[1] for orig_dim, pad_width_item in zip(x_shape, pad_width)]\n res = _mx.nd.reshape(pad_expanded_dims, tuple(new_shape))\n return res\n\n\ndef zero_pad(x, pad_width):\n return constant_pad(x, pad_width, 0)\n\n\nswapaxes = _mx.nd.swapaxes\n\n\ndef transpose(x, axes=None):\n if axes is None:\n num_dims = len(x.shape)\n axes = list(range(num_dims))\n axes.reverse()\n return _mx.nd.transpose(x, axes)\n\n\ndef expand_dims(x, axis):\n if x.shape == ():\n return _flat_array_to_1_dim_array(x)\n return _mx.nd.expand_dims(x, axis)\n\n\n@_handle_flat_arrays_in_out\ndef where(condition, x1, x2):\n x_shape = list(x1.shape)\n condition_shape = list(condition.shape)\n if x_shape == condition_shape:\n res = _mx.nd.where(condition, x1, x2)\n return res\n tile_reps = [int(x / c) for x, c in zip(x_shape, condition_shape)]\n tiled_condition = _mx.nd.tile(condition, tile_reps)\n return _mx.nd.where(tiled_condition, x1, x2)\n\n\ndef indices_where(x):\n x_shape = x.shape\n x_flat = x.reshape((1, -1,))\n flat_indices = x_flat.astype('int32').tostype('csr').indices\n if flat_indices.shape == (0,):\n res = flat_indices.reshape((0, len(x_shape)))\n return res\n res = _mx.nd.swapaxes(_mx.nd.unravel_index(flat_indices, x_shape), 0, 1)\n return res\n\n\n@_handle_flat_arrays_in_out\ndef isinf(x):\n return _mx.nd.contrib.isinf(x).astype('bool')\n\n\nreshape = lambda x, new_shape: x.reshape(new_shape)\n\n\ndef broadcast_to(x, new_shape):\n x_shape = list(x.shape)\n num_x_dims = len(x_shape)\n num_shape_dims = len(new_shape)\n diff = num_shape_dims - num_x_dims\n if diff == 0:\n return 
_mx.nd.broadcast_to(x, new_shape)\n x = _mx.nd.reshape(x, [1]*diff + x_shape)\n return _mx.nd.broadcast_to(x, new_shape)\n\n\ndef squeeze(x, axis=None):\n if x.shape == ():\n if axis is None or axis == 0 or axis == -1:\n return x\n raise Exception('tried to squeeze a zero-dimensional input by axis {}'.format(axis))\n res = _mx.nd.squeeze(x, axis)\n if axis is None:\n return _1_dim_array_to_flat_array(res)\n return res\n\n\n# noinspection PyShadowingNames\n\n\n\ndef zeros_like(x, dtype=None, dev=None):\n if x.shape == ():\n return _mx.nd.array(0., ctx=_mxnet_init_context(default_device(dev)))\n mx_zeros = _mx.nd.zeros_like(x, ctx=_mxnet_init_context(default_device(dev)))\n return mx_zeros if not dtype else mx_zeros.astype(dtype)\n\n\ndef full(shape, fill_value, dtype=None, device=None):\n shape = ivy.shape_to_tuple(shape)\n cont = _mxnet_init_context(default_device(device))\n if len(shape) == 0 or 0 in shape:\n return _1_dim_array_to_flat_array(\n _mx.nd.full((1,), fill_value, cont, dtype_from_str(default_dtype(dtype, fill_value))))\n return _mx.nd.full(shape, fill_value, cont, dtype_from_str(default_dtype(dtype, fill_value)))\n\n# noinspection PyUnusedLocal\none_hot = lambda indices, depth, dev=None: _mx.nd.one_hot(indices, depth)\n\n\ndef cross(x1, x2):\n a1 = x1[..., 0:1]\n a2 = x1[..., 1:2]\n a3 = x1[..., 2:3]\n b1 = x2[..., 0:1]\n b2 = x2[..., 1:2]\n b3 = x2[..., 2:3]\n res1 = a2*b3 - a3*b2\n res2 = a3*b1 - a1*b3\n res3 = a1*b2 - a2*b1\n res = _mx.nd.concat(res1, res2, res3, dim=-1)\n return res\n\n\ndef matmul(x1, x2):\n expanded = False\n x1_shape = list(x1.shape)\n x2_shape = list(x2.shape)\n if len(x1_shape) != 3:\n num_x1_dims = len(x1_shape)\n x1 = _mx.nd.reshape(x1, [1]*max(2-num_x1_dims, 0) + [-1] + x1_shape[-min(num_x1_dims, 2):])\n expanded = True\n if len(x2_shape) != 3:\n num_x2_dims = len(x2_shape)\n x2 = _mx.nd.reshape(x2, [1]*max(2-num_x2_dims, 0) + [-1] + x2_shape[-min(num_x2_dims, 2):])\n expanded = True\n x1_batch_size = x1.shape[0]\n x2_batch_size = x2.shape[0]\n if x1_batch_size > x2_batch_size:\n x2 = _mx.nd.tile(x2, (int(x1_batch_size/x2_batch_size), 1, 1))\n elif x2_batch_size > x1_batch_size:\n x1 = _mx.nd.tile(x1, (int(x2_batch_size / x1_batch_size), 1, 1))\n res = _mx.nd.batch_dot(x1, x2)\n if expanded:\n return _mx.nd.reshape(res, list(x1_shape[:-1]) + [res.shape[-1]])\n return res\n\n\ncumsum = lambda x, axis=0: _mx.nd.cumsum(x, axis if axis >= 0 else axis % len(x.shape))\n\n\ndef cumprod(x, axis=0, exclusive=False):\n array_stack = [_mx.nd.expand_dims(chunk, axis) for chunk in unstack(x, axis)]\n if exclusive:\n array_stack = [_mx.nd.ones_like(array_stack[0])] + array_stack[:-1]\n new_array_list = [array_stack[0]]\n for array_chunk in array_stack[1:]:\n new_array_list.append(new_array_list[-1] * array_chunk)\n return _mx.nd.concat(*new_array_list, dim=axis)\n\n\ndef identity(n, dtype='float32', batch_shape=None, dev=None):\n mat = _mx.nd.eye(n, dtype=dtype).copyto(_mxnet_init_context(default_device(dev)))\n if batch_shape is None:\n return mat\n else:\n reshape_dims = [1]*len(batch_shape) + [n, n]\n tile_dims = list(batch_shape) + [1, 1]\n res = _mx.nd.tile(_mx.nd.reshape(mat, reshape_dims), tile_dims)\n return res\n\n\ndef meshgrid(*xs, indexing='ij'):\n # ToDo: implement this without reliance on NumPy backend\n xs_np = [x.as_np_ndarray() for x in xs]\n return tuple([item.as_nd_ndarray() for item in _mx.np.meshgrid(*xs_np, indexing=indexing)])\n\n\n# noinspection PyShadowingNames\ndef scatter_flat(indices, updates, size=None, tensor=None, reduction='sum', 
dev=None):\n if ivy.exists(tensor):\n raise Exception('MXNet scatter_flat does not support scattering into an pre-existing tensor.')\n if reduction == 'replace':\n return _mx.nd.scatter_nd(updates, _mx.nd.expand_dims(indices, 0), [size]).copyto(_mxnet_init_context(default_device(dev)))\n else:\n raise Exception('MXNet scatter_flat currently only supports reduction mode \"replace\", but {} selected.'.\n format(reduction))\n\n\n# noinspection PyShadowingNames\ndef scatter_nd(indices, updates, shape=None, tensor=None, reduction='sum', dev=None):\n if ivy.exists(tensor):\n raise Exception('MXNet scatter_flat does not support scattering into an pre-existing tensor.')\n if dev is None:\n dev = _callable_dev(indices)\n shape = list(shape)\n num_idx_dims = len(indices.shape)\n transpose_order = [num_idx_dims-1] + list(range(num_idx_dims-1))\n indices = _mx.nd.transpose(indices, transpose_order)\n shape = shape if type(shape) is list else shape.asnumpy().astype(_np.int32).tolist()\n if reduction == 'replace':\n return _mx.nd.scatter_nd(updates, indices, shape).copyto(_mxnet_init_context(dev))\n else:\n raise Exception('MXNet scatter_nd currently only supports reduction mode \"replace\", but {} selected.'.\n format(reduction))\n\n\ndef gather(params, indices, axis=-1, dev=None):\n if dev is None:\n dev = _callable_dev(params)\n index_slices = unstack(indices, -1)\n res = _mx.nd.concat(\n *[_mx.nd.expand_dims(_mx.nd.pick(params, idx_slice, axis), -1) for idx_slice in index_slices], dim=-1)\n res = _mx.nd.reshape(res, indices.shape)\n return res.copyto(_mxnet_init_context(dev))\n\n\ndef gather_nd(params, indices, dev=None):\n if dev is None:\n dev = _callable_dev(params)\n indices_shape = indices.shape\n num_idx_dims = len(indices_shape)\n transpose_order = [num_idx_dims-1] + list(range(num_idx_dims-1))\n indices = _mx.nd.transpose(indices, transpose_order)\n return _mx.nd.gather_nd(params, indices).copyto(_mxnet_init_context(dev))\n\n\ndef linear_resample(x, num_samples, axis=-1):\n x_shape = list(x.shape)\n num_x_dims = len(x_shape)\n axis = axis % num_x_dims\n x_pre_shape = x_shape[0:axis]\n x_pre_size = _reduce(_mul, x_pre_shape) if x_pre_shape else 1\n num_pre_dims = len(x_pre_shape)\n num_vals = x.shape[axis]\n x_post_shape = x_shape[axis+1:]\n x_post_size = _reduce(_mul, x_post_shape) if x_post_shape else 1\n num_post_dims = len(x_post_shape)\n xp = _mx.nd.reshape(_mx.nd.arange(num_vals*x_pre_size*x_post_size), x_shape)\n x_coords = _mx.nd.arange(num_samples) * ((num_vals-1)/(num_samples-1)) * x_post_size\n x_coords = _mx.nd.reshape(x_coords, [1]*num_pre_dims + [num_samples] + [1]*num_post_dims)\n x_coords = _mx.nd.broadcast_to(x_coords, x_pre_shape + [num_samples] + x_post_shape)\n slc = [slice(None)] * num_x_dims\n slc[axis] = slice(0, 1, 1)\n x_coords = x_coords + xp[tuple(slc)]\n x = _mx.nd.reshape(x, (-1,))\n xp = _mx.nd.reshape(xp, (-1,))\n x_coords = _mx.nd.reshape(x_coords, (-1,))\n ret = _mx.nd.array(_mx.np.interp(x_coords.asnumpy(), xp.asnumpy(), x.asnumpy()))\n return _mx.nd.reshape(ret, x_pre_shape + [num_samples] + x_post_shape)\n\n\ndef dtype(x, as_str=False):\n dt = x.dtype\n if as_str:\n return dtype_to_str(dt)\n return x.dtype\n\n\ndef dtype_to_str(dtype_in):\n if isinstance(dtype_in, str):\n return dtype_in\n return DTYPE_TO_STR[dtype_in]\n\n\ndef dtype_from_str(dtype_in):\n if not isinstance(dtype_in, str):\n return dtype_in\n return DTYPE_FROM_STR[dtype_in]\n\n\n# noinspection PyUnusedLocal\ndef compile(func, dynamic=True, example_inputs=None, static_argnums=None, 
static_argnames=None):\n logging.warning('MXnet does not support compiling arbitrary functions, '\n 'consider writing a function using MXNet Symbolic backend instead for compiling.\\n'\n 'Now returning the unmodified function.')\n return func\n\n\ncurrent_framework_str = lambda: 'mxnet'\ncurrent_framework_str.__name__ = 'current_framework_str'\nmultiprocessing = lambda context=None: _multiprocessing if context is None else _multiprocessing.get_context(context)\ncontainer_types = lambda: []\n\n\ndef inplace_update(x, val):\n if x.shape == ():\n raise Exception('MXNet does not support inplace updates of 0-dimensional arrays')\n x[:] = val\n return x\n\n\ndef inplace_decrement(x, val):\n if x.shape == ():\n raise Exception('MXNet does not support inplace updates of 0-dimensional arrays')\n x -= val\n return x\n\n\ndef inplace_increment(x, val):\n if x.shape == ():\n raise Exception('MXNet does not support inplace updates of 0-dimensional arrays')\n x += val\n return x\n\ninplace_arrays_supported = lambda: True\ninplace_variables_supported = lambda: True\n",
"# global\nimport tensorflow as tf\nfrom tensorflow.python.types.core import Tensor\nfrom typing import Union, Optional, Tuple, Literal\nfrom collections import namedtuple\n\n# local\nfrom ivy import inf\nimport ivy as _ivy\nfrom collections import namedtuple\n\n\n\n# noinspection PyUnusedLocal,PyShadowingBuiltins\ndef vector_norm(x: Tensor,\n axis: Optional[Union[int, Tuple[int]]] = None,\n keepdims: bool = False,\n ord: Union[int, float, Literal[inf, - inf]] = 2)\\\n -> Tensor:\n\n if ord == -float('inf'):\n tn_normalized_vector = tf.reduce_min(tf.abs(x), axis, keepdims)\n elif ord == -1:\n tn_normalized_vector = tf.reduce_sum(tf.abs(x)**ord, axis, keepdims)**(1./ord)\n\n elif ord == 0:\n tn_normalized_vector = tf.reduce_sum(tf.cast(x != 0, 'float32'), axis, keepdims).numpy()\n\n else:\n tn_normalized_vector = tf.linalg.norm(x, ord, axis, keepdims)\n\n if tn_normalized_vector.shape == tuple():\n return tf.expand_dims(tn_normalized_vector, 0)\n return tn_normalized_vector\n\n# noinspection PyPep8Naming\ndef svd(x:Tensor,full_matrices: bool = True) -> Union[Tensor, Tuple[Tensor,...]]:\n results=namedtuple(\"svd\", \"U S Vh\")\n\n batch_shape = tf.shape(x)[:-2]\n num_batch_dims = len(batch_shape)\n transpose_dims = list(range(num_batch_dims)) + [num_batch_dims + 1, num_batch_dims]\n D, U, V = tf.linalg.svd(x,full_matrices=full_matrices)\n VT = tf.transpose(V, transpose_dims)\n res=results(U, D, VT)\n return res\n\ndef diagonal(x: tf.Tensor,\n offset: int = 0,\n axis1: int = -2,\n axis2: int = -1) -> tf.Tensor:\n return tf.experimental.numpy.diagonal(x, offset, axis1=axis1, axis2=axis2)\n\n\ndef qr(x: tf.Tensor,\n mode: str = 'reduced') -> namedtuple('qr', ['Q', 'R']):\n res = namedtuple('qr', ['Q', 'R'])\n if mode == 'reduced':\n q, r = tf.linalg.qr(x, full_matrices=False)\n return res(q, r)\n elif mode == 'complete':\n q, r = tf.linalg.qr(x, full_matrices=True)\n return res(q, r)\n else:\n raise Exception(\"Only 'reduced' and 'complete' qr modes are allowed for the tensorflow backend.\")\n\n\ndef matmul(x1: tf.Tensor,\n x2: tf.Tensor) -> tf.Tensor:\n dtype_from = tf.experimental.numpy.promote_types(x1.dtype.as_numpy_dtype, x2.dtype.as_numpy_dtype)\n dtype_from = tf.as_dtype(dtype_from)\n if dtype_from.is_unsigned or dtype_from==tf.int8 or dtype_from==tf.int16:\n x1 = tf.cast(x1, tf.int64)\n x2 = tf.cast(x2, tf.int64)\n if x1.dtype != x2.dtype:\n x1 = tf.cast(x1, dtype_from)\n x2 = tf.cast(x2, dtype_from)\n\n if (x1.shape == () or x2.shape == ()\n or (len(x1.shape) == len(x2.shape) == 1 and x1.shape != x2.shape)\n or (len(x1.shape) == len(x2.shape) == 1 and x1.shape != x2.shape)\n or (len(x1.shape) == 1 and len(x2.shape) >= 2 and x1.shape[0] != x2.shape[-2])\n or (len(x2.shape) == 1 and len(x1.shape) >= 2 and x2.shape[0] != x1.shape[-1])\n or (len(x1.shape) >= 2 and len(x2.shape) >= 2 and x1.shape[-1] != x2.shape[-2])):\n raise Exception('Error,shapes not compatible')\n\n if len(x1.shape) == len(x2.shape) == 1:\n if x1.shape == 0:\n ret = tf.constant(0)\n else:\n\n ret = tf.math.multiply(x1, x2)[0]\n ret = tf.cast(ret, dtype=dtype_from)\n return ret\n\n x1_padded = False\n x1_padded_2 = False\n x2_padded = False\n\n if len(x1.shape) == 1:\n if len(x2.shape) == 2:\n x1_padded_2 = True\n elif len(x2.shape) > 2:\n x1_padded = True\n x1 = tf.expand_dims(x1, axis=0)\n\n elif len(x2.shape) == 1 and len(x1.shape) >= 2:\n x2 = tf.expand_dims(x2, axis=1)\n x2_padded = True\n\n ret = tf.matmul(x1, x2)\n\n ret = tf.cast(ret, dtype=dtype_from)\n if x1_padded_2:\n return ret[0]\n elif x1_padded:\n 
return tf.squeeze(ret, axis=-2)\n elif x2_padded:\n return tf.squeeze(ret, axis=-1)\n\n return ret\n\n\ndef slogdet(x:Union[_ivy.Array,_ivy.NativeArray],full_matrices: bool = True) -> Union[_ivy.Array, Tuple[_ivy.Array,...]]:\n results = namedtuple(\"slogdet\", \"sign logabsdet\")\n sign, logabsdet = tf.linalg.slogdet(x)\n res = results(sign, logabsdet)\n return res\n\n\ndef trace(x: tf.Tensor,\n offset: int = 0)\\\n -> tf.Tensor:\n return tf.trace(x, offset)\n"
] | [
[
"numpy.array",
"numpy.cumsum",
"numpy.dtype"
],
[
"tensorflow.matmul",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.as_dtype",
"tensorflow.linalg.svd",
"tensorflow.trace",
"tensorflow.shape",
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.linalg.qr",
"tensorflow.math.multiply",
"tensorflow.experimental.numpy.diagonal",
"tensorflow.linalg.slogdet",
"tensorflow.linalg.norm",
"tensorflow.abs",
"tensorflow.experimental.numpy.promote_types"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
datacrisis/BOBBY2 | [
"0c88d7906acb9d46929a6f220b857d358518edf0"
] | [
"src/data_utils.py"
] | [
"import torch, torchvision\nimport os, PIL, random, csv\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom PIL import Image, ImageDraw\nfrom matplotlib import pyplot as plt\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom pathlib import Path\n\n\n\ndef compile_imgs(root_dir):\n \"\"\"\n Deprecated. Used previously when custom Dataset compiles sample paths on instatiation.\n Current custom dataset instead accepts a pre-cooked path list to pos/neg samples.\n \n Use compile_data.\n \"\"\"\n\n _ = [root_dir/i for i in os.listdir(root_dir)]\n heap_main = [root_dir/j/i for j in _ for i in os.listdir(j)] #These are folders for 3862 train seqs\n heap_main.sort()\n heap = [i/j for i in heap_main for j in os.listdir(i)]\n heap.sort()\n \n return heap\n\n\ndef compile_annots(root_dir):\n \"\"\"\n Deprecated. Used previously when custom Dataset compiles sample paths on instatiation.\n Current custom dataset instead accepts a pre-cooked path list to pos/neg samples.\n \n Use compile_data.\n \"\"\"\n \n _ = [root_dir/i for i in os.listdir(root_dir)]\n heap_main = [root_dir/j/i for j in _ for i in os.listdir(j)] #These are folders for 3862 train seqs\n heap_main.sort()\n heap = [i/j for i in heap_main for j in os.listdir(i)]\n heap.sort()\n \n return heap\n\n\ndef compile_data(img_root,ant_root,posneg_ls,pos_ls,neg_ls,neg_ls1,seed=5):\n \"\"\"\n Function that returns a dataset (hardcoded list) of pos and neg samples.\n Returns 2 lists: img_ls and annot_ls.\n \n Pos Sample: Translate and map idx from posneg.csv to \n \"\"\"\n \n ant_heap = []\n img_heap = []\n \n #Read csv\n posneg = parse_csv(posneg_ls)\n vanilla_pos = parse_csv(pos_ls)\n vanilla_neg = parse_csv(neg_ls)\n gen_neg = parse_csv(neg_ls1)\n \n #Random shuffle custom to be generated negative samples for representation.\n random.seed(seed)\n random.shuffle(gen_neg)\n \n #Idx for counting \n vp,vn,gn = 0,0,0\n \n #Parse main list\n for i in posneg:\n \n #If it's neg\n if i == 0 and vn <= len(vanilla_neg)-1:\n _ = [0,Path(vanilla_neg[vn])]\n vn += 1\n \n #If it's neg exceeding vanilla neg\n if i == 0 and vn > len(vanilla_neg)-1:\n _ = [0,Path(gen_neg[gn])]\n gn += 1\n \n #If it's pos\n if i == 1:\n _ = [1,Path(vanilla_pos[vp])]\n vp += 1\n \n ant_heap.append(_)\n \n \n #Compute equal for imgs list\n ant_base = Path(_[1])\n ant_parts = ant_base.parts[-4:-1]\n name = ant_base.stem + '.JPEG'\n img = img_root/Path(*ant_parts)/Path(name)\n \n img_heap.append([i,img])\n \n \n return img_heap,ant_heap\n \n \ndef parse_csv(file):\n \"\"\"\n Helper function that takes a csv path and return lists with csv content.\n \"\"\"\n heap = []\n \n with open(file) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n line_count += 1\n \n try:\n heap.append(int(float(*row)))\n except:\n heap.append(*row)\n print(f'Processed {file} lines.')\n \n return heap\n \n\n\ndef check_trans(img):\n \"\"\"\n Process and displays a batch of transformed scene and exems.\n \"\"\"\n simg = img.permute(1,2,0).cpu() \n\n #Plotting\n plt.imshow(simg)\n plt.show()\n\n\n\ndef rszasneed(full_img,annot):\n \"\"\"\n Helper function that takes a fullscene and the annotations to resize the scene randomly for augmentation and\n returns the proper annotations.\n\n Also accounted for the cases where exem (defined by annot) is larger than the current scene patch dimension.\n \"\"\"\n\n #Set patch size\n patch_sz = (360,360)\n \n #Size it\n img_w, img_h = full_img.size\n ex_tw, ex_th, ex_bw, ex_bh = 
annot[0], annot[1], annot[0] + annot[2], annot[1] + annot[3]\n \n #Setup resize range\n ratio_w = annot[2]/patch_sz[0]\n ratio_h = annot[3]/patch_sz[1]\n sz_max = max(ratio_w,ratio_h) #See which dim is the largest, that'll be the max resize up.\n \n if ratio_w <1 and ratio_h < 1: #If the exem is by default smaller than patch\n #Random resize that zooms and shrinks\n sz_fc = random.uniform(sz_max+0.5,1.5) #Make sure exem won't be larger than patch. +0.1 buffer\n new_w = img_w / sz_fc\n new_h = img_h / sz_fc\n\n elif ratio_w >= 1 or ratio_h >= 1: #If exem is larger than patch in any dim at all\n #Resize so sz of exem < sz of patch\n sz_fc = random.uniform(sz_max+0.1,sz_max+0.5) #Shrink more (max 3) since exem is large\n new_w = img_w / sz_fc\n new_h = img_h / sz_fc\n \n #Resize img and annot\n img = full_img.resize((round(new_w),round(new_h)),resample=PIL.Image.BICUBIC)\n ex_tw = ex_tw / sz_fc\n ex_th = ex_th / sz_fc\n ex_bw = ex_bw / sz_fc\n ex_bh = ex_bh / sz_fc\n annot = (ex_tw,ex_th,ex_bw,ex_bh)\n\n #Checks\n w = ex_bw - ex_tw\n h = ex_bh - ex_th\n \n assert w < patch_sz[0], \"Error! The exem w is larger than patch_w | w: {}, patch_w: {}\".format(w,patch_sz[0])\n assert h < patch_sz[1], \"Error! The exem h is larger than patch_h | h: {}, patch_h: {}\".format(h,patch_sz[1])\n\n return img, annot\n\n\ndef scene_crop_neg(full_scene,annot,scene_dim=360):\n \"\"\"\n Helper function used in gen_dt to extracte a negative 360x360 patch from full scene.\n Uses the to_square_neg function since it'll work; for both vanilla and custom negatives.\n \"\"\"\n #Crop square. Scene_dim dictates the shape of scene and the GAP on each size of a scene needed.\n scene,ant = to_square_neg(annot,full_scene,scene_dim)\n \n #Resize scene (360x360) crop to 224x224 as needed by net.\n scene = scene.resize((224,224),resample=PIL.Image.BICUBIC)\n \n #No need to compensate ant since negative smp has (0,0,0,0) ants.\n # \n return scene,ant\n\n\n\ndef scene_crop(full_scene,annot,scene_dim=360):\n \"\"\"\n Helper function used in gen_dt to extracte a positive 360x360 patch from full scene.\n \"\"\"\n\n #Normalize dim and exem location in scene. Determine the gap on each side before crop. \n full_scene,annot = rszasneed(full_scene,annot)\n img_w, img_h = full_scene.size\n ex_tw, ex_th, ex_bw, ex_bh = annot[0], annot[1], annot[2], annot[3] #Already added up in rszasneed\n nex_tw, nex_th, nex_bw, nex_bh = ex_tw/img_w, ex_th/img_h, ex_bw/img_w, ex_bh/img_h #normalized exem \n \n ###Required scene patch\n req = (scene_dim/img_w, scene_dim/img_h)\n \n #Only do compute_cc padding if needed patch sz fits in the full scene\n if req[0] <= 1 and req[1] <= 1:\n tw_n,th_n,bw_n,bh_n = compute_cc(nex_tw,nex_th,nex_bw,nex_bh,req)\n \n #Compensate \n tw = tw_n * img_w\n th = th_n * img_h\n bw = bw_n * img_w\n bh = bh_n * img_h\n\n #Crop. Needs to be PIL image.\n cropped = full_scene.crop((tw,th,bw,bh))\n rsz_fc1 = cropped.size[0]/224 #Need to return a 224 img anyhow\n rsz_fc2 = cropped.size[1]/224\n cropped = cropped.resize((224,224),resample=PIL.Image.BICUBIC) #Resize\n \n #Compensate annotations. Clip.\n ant_tw = annot[0] - tw\n ant_th = annot[1] - th\n ant_bw = annot[2] - tw\n ant_bh = annot[3] - th\n\n #Compensate annotations. 
Resize val.\n ant_tw,ant_bw = ant_tw/rsz_fc1, ant_bw/rsz_fc1\n ant_th,ant_bh = ant_th/rsz_fc2, ant_bh/rsz_fc2 \n ant_ = [i if i <= 224 else 224 for i in [ant_tw,ant_th,ant_bw,ant_bh]] \n\n else:\n #Otherwise use backup pseudo-optimal strat of max square cut with min scretching.\n cropped, ant_ = to_square_scene(to_visfmt(annot),full_scene)\n \n return cropped, ant_\n\n\n\ndef compute_cc(nex_tw,nex_th,nex_bw,nex_bh,req):\n \"\"\"\n Computes the spacing on each side of an exemplar for cropping. \n Returns normalized coordinates to crop with.\n\n If overflows happens in two sides of a same dimension (e.g. scene size req is larger than entire full scene)\n the function will return the largest square image possible covering the exemplar. Make sure to have a resize\n catching such cases on the return of this function.\n \"\"\"\n\n scene_w, scene_h = req[0], req[1]\n\n #Compute exem dim\n exem_w = nex_bw - nex_tw\n exem_h = nex_bh - nex_th\n \n #Catch problematic inputs\n assert scene_w > exem_w, \"Error! The scene patch asked for is smaller than the exemplar. scene_w:{},exem_w:{}\".format(scene_w,exem_w)\n assert scene_h > exem_h, \"Error! The scene patch asked for is smaller than the exemplar. scene_h:{},exem_h:{}\".format(scene_h,exem_h)\n assert req[0] <= 1, \"Error! Patch size asked for is bigger than the actual pic. req[0]: {}\".format(req[0])\n assert req[1] <= 1, \"Error! Patch size asked for is bigger than the actual pic. req[1]: {}\".format(req[1])\n\n #Size the gap needed\n req_w = scene_w - exem_w\n req_h = scene_h - exem_h\n\n #Randomize translation\n spf1 = random.uniform(0,1) #Split factor\n req_w1 = req_w * spf1\n req_w2 = req_w - req_w1\n\n spf2 = random.uniform(0,1)\n req_h1 = req_h * spf2\n req_h2 = req_h - req_h1\n\n #Check which side overflows\n ov_left = True if nex_tw < req_w1 else False \n ov_right = True if (nex_bw + req_w2) > 1 else False\n ov_top = True if nex_th < req_h1 else False\n ov_bottom = True if (nex_bh + req_h2) > 1 else False\n\n ov_FLAGS = [ov_left,ov_top,ov_right,ov_bottom]\n ov = [req_w1-nex_tw, req_h1-nex_th, (nex_bw + req_w2)-1,(nex_bh + req_h2)-1] #How much spill over\n\n need_comp = True if any(ov_FLAGS) else False\n \n #Default cropping with no spillage\n new_th = nex_th - (req_h1)\n new_bh = nex_bh + (req_h2)\n new_tw = nex_tw - (req_w1)\n new_bw = nex_bw + (req_w2)\n output = [new_tw,new_th,new_bw,new_bh]\n\n #Comp needed\n if need_comp:\n ncomp = ov_FLAGS.count(True) #How many sides\n \n #If overflow on single side only\n if ncomp == 1:\n comp_dim = ov_FLAGS.index(True)\n comp_dim_ = (comp_dim-2) if comp_dim > 1 else (comp_dim+2) #Find the opposing dim to add gap to\n comp = abs(ov[comp_dim])\n\n output[comp_dim] = 1 if comp_dim in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim_] = (output[comp_dim_]-comp) if comp_dim in (2,3) else (output[comp_dim_]+comp)\n\n return output\n\n #If overflow on more than one side.\n if ncomp > 1:\n\n #Check which sides spills\n comp_dims = []\n for i,j in enumerate(ov_FLAGS):\n if j is True:\n comp_dims.append(i)\n \n #If spill over both side of a single dimension\n if (0 in comp_dims and 2 in comp_dims) or (1 in comp_dims and 3 in comp_dims):\n raise Exception(\"Not implemented since this does not happen for the VisDrone2018-SOT dataset.\")\n \n #If spill over in sides of different dim\n else:\n comp_dim1 = comp_dims[0]\n comp_dim2 = comp_dims[1]\n comp_dim1_ = (comp_dim1-2) if comp_dim1 > 1 else (comp_dim1+2) #Find the opposing dim to add gap to\n comp_dim2_ = (comp_dim2-2) if comp_dim2 > 
1 else (comp_dim2+2) #Find the opposing dim to add gap to\n comp1 = abs(ov[comp_dim1])\n comp2 = abs(ov[comp_dim2])\n\n output[comp_dim1] = 1 if comp_dim1 in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim1_] = (output[comp_dim1_]-comp1) if comp_dim1 in (2,3) else (output[comp_dim1_]+comp1)\n output[comp_dim2] = 1 if comp_dim2 in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim2_] = (output[comp_dim2_]-comp2) if comp_dim2 in (2,3) else (output[comp_dim2_]+comp2) \n\n return output\n\n\n else: #If no need comp\n return output\n\n\ndef compute_excc(nex_tw,nex_th,nex_bw,nex_bh,req):\n \"\"\"\n Computes the spacing on each side of an exemplar for cropping. \n Returns normalized coordinates to crop with.\n\n If overflows happens in two sides of a same dimension (e.g. scene size req is larger than entire full scene)\n the function will return the largest square image possible covering the exemplar. Make sure to have a resize\n catching such cases on the return of this function.\n \"\"\"\n\n scene_w, scene_h = req[0], req[1]\n\n #Compute exem dim\n exem_w = nex_bw - nex_tw\n exem_h = nex_bh - nex_th\n \n #Catch problematic inputs\n assert req[0] <= 1, \"Error! Patch size asked for is bigger than the actual pic. req[0]: {}\".format(req[0])\n assert req[1] <= 1, \"Error! Patch size asked for is bigger than the actual pic. req[1]: {}\".format(req[1])\n\n #Size the gap needed\n req_w = scene_w - exem_w\n req_h = scene_h - exem_h\n\n #Randomize translation\n spf1 = random.uniform(0,1) #Split factor\n req_w1 = req_w * spf1\n req_w2 = req_w - req_w1\n\n spf2 = random.uniform(0,1)\n req_h1 = req_h * spf2\n req_h2 = req_h - req_h1\n\n #Check which side overflows\n ov_left = True if nex_tw < req_w1 else False \n ov_right = True if (nex_bw + req_w2) > 1 else False\n ov_top = True if nex_th < req_h1 else False\n ov_bottom = True if (nex_bh + req_h2) > 1 else False\n\n ov_FLAGS = [ov_left,ov_top,ov_right,ov_bottom]\n ov = [req_w1-nex_tw, req_h1-nex_th, (nex_bw + req_w2)-1,(nex_bh + req_h2)-1] #How much spill over\n\n need_comp = True if any(ov_FLAGS) else False\n \n #Default cropping with no spillage\n new_th = nex_th - (req_h1)\n new_bh = nex_bh + (req_h2)\n new_tw = nex_tw - (req_w1)\n new_bw = nex_bw + (req_w2)\n output = [new_tw,new_th,new_bw,new_bh]\n\n #Comp needed\n if need_comp:\n ncomp = ov_FLAGS.count(True) #How many sides\n \n #If overflow on single side only\n if ncomp == 1:\n comp_dim = ov_FLAGS.index(True)\n comp_dim_ = (comp_dim-2) if comp_dim > 1 else (comp_dim+2) #Find the opposing dim to add gap to\n comp = abs(ov[comp_dim])\n\n output[comp_dim] = 1 if comp_dim in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim_] = (output[comp_dim_]-comp) if comp_dim in (2,3) else (output[comp_dim_]+comp)\n\n return output\n\n #If overflow on more than one side.\n if ncomp > 1:\n\n #Check which sides spills\n comp_dims = []\n for i,j in enumerate(ov_FLAGS):\n if j is True:\n comp_dims.append(i)\n \n #If spill over both side of a single dimension\n if (0 in comp_dims and 2 in comp_dims) or (1 in comp_dims and 3 in comp_dims):\n raise Exception(\"Not implemented since this does not happen for the VisDrone2018-SOT dataset.\")\n \n #If spill over in sides of different dim\n else:\n comp_dim1 = comp_dims[0]\n comp_dim2 = comp_dims[1]\n comp_dim1_ = (comp_dim1-2) if comp_dim1 > 1 else (comp_dim1+2) #Find the opposing dim to add gap to\n comp_dim2_ = (comp_dim2-2) if comp_dim2 > 1 else (comp_dim2+2) #Find the opposing dim to add gap to\n 
comp1 = abs(ov[comp_dim1])\n comp2 = abs(ov[comp_dim2])\n\n output[comp_dim1] = 1 if comp_dim1 in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim1_] = (output[comp_dim1_]-comp1) if comp_dim1 in (2,3) else (output[comp_dim1_]+comp1)\n output[comp_dim2] = 1 if comp_dim2 in (2,3) else 0 #Check which opposing side of a dim it is\n output[comp_dim2_] = (output[comp_dim2_]-comp2) if comp_dim2 in (2,3) else (output[comp_dim2_]+comp2) \n\n return output\n\n\n else: #If no need comp\n return output\n\n \n \n \n\ndef fetch_exem(img_dir,full_imgs,ex_int,annotls,percent_neg=.5):\n \"\"\"\n Generates exem dynamically. Grab 4 exem frames preceeding the current frame at a given interval.\n Exems taken with a square crop and resized to 224x224.\n \n Capable of returning a given percentage of postive/negative exem samples wrt percent_neg.\n\n [Need optimization and cleaning].\n \"\"\"\n exem_dim = 224\n \n #Parse dir\n name,suffix = img_dir.stem, img_dir.suffix\n \n buffer_size = 4 \n curr_idx = parse_idx(name) + 1 #Compensate for parse_idx which is used to find annotations for an image and hence is 1-indexed.\n to_fetch = []\n imgs_buffer = []\n \n #Check posneg\n neg_req = round(percent_neg*buffer_size)\n pos_req = buffer_size - neg_req\n posneg_FLAG = [] #Need to ensure percent negatives are enforced\n \n assert neg_req + pos_req == buffer_size, \"Error in fetch_exem posneg buffer computation!\"\n \n #Gen exem idx to fetch\n while len(to_fetch) < buffer_size:\n n_name = namify(curr_idx)\n fname = n_name + suffix\n exem_dir = Path(str(img_dir.parent) + '/' + fname)\n \n #Validate if imgs is present in dataset.\n try:\n _ = PIL.Image.open(exem_dir)\n \n except:\n \n #If it fails. Wiggle curr_idx and continue.\n if curr_idx == 0:\n curr_idx += 5 #Just picked 5 so as to not overlap with -1.\n \n else:\n curr_idx -= 1\n continue\n \n #If it passes, append and update curr_idx.\n to_fetch.append(curr_idx)\n curr_idx = max(0,curr_idx - ex_int)\n \n\n for n in to_fetch:\n n_name = namify(n)\n fname = n_name + suffix\n exem_dir = Path(str(img_dir.parent) + '/' + fname)\n \n #Annotation to crop exem from. Hardcode ilsvr ant dir format\n annot_base = Path(*annotls.parts[:-4]) \n annot_dir = annot_base/Path(*exem_dir.parts[-4:-1])/Path(n_name+'.xml')\n annot = to_visfmt(parse_xml(annot_dir)[0][0])\n posneg = 1 if any(annot) == True else 0 #For vanilla neg sample, annots are all 0\n img_ = PIL.Image.open(exem_dir)\n \n \n if neg_req > 0 and posneg == 0:\n\n #Open image and adjust bbox. Then crop, resize and cache.\n img_,comp_annot = to_square_neg(annot,img_,exem_dim)\n\n imgs_buffer.append(img_)\n neg_req -= 1\n posneg_FLAG.append(0)\n \n \n elif pos_req > 0 and posneg == 1:\n #Open image and adjust bbox. Then crop, resize and cache.\n img_,comp_annot = to_square(annot,img_)\n\n imgs_buffer.append(img_)\n pos_req -= 1 \n posneg_FLAG.append(1)\n \n \n elif neg_req > 0 and pos_req == 0 and posneg == 1:\n #Open image and adjust bbox. 
Then crop, resize and cache.\n img_,comp_annot = to_square_neg(annot,img_,exem_dim)\n\n imgs_buffer.append(img_)\n neg_req -= 1\n posneg_FLAG.append(0)\n\n \n elif pos_req > 0 and neg_req == 0 and posneg == 0:\n \n #If it encounters a vanilla neg sample (no target in scene) just replicate from previous exem\n if len(imgs_buffer) > 0:\n \n #May have no pos in FLAG thus exception may be raised in .index(1) below.\n try:\n idx_to_comp = posneg_FLAG.index(1)\n imgs_buffer.append(imgs_buffer[idx_to_comp])\n pos_req -= 1\n posneg_FLAG.append(1)\n \n except:\n pass\n \n elif len(imgs_buffer) == 0:\n pass\n #No pos_req is deducted here so it'll just loop and find one later.\n \n else:\n raise Exception('Encountered an unforseen and unimplemented check for posneg in fetch_exem.')\n \n \n #Check if we're short on exems for rare case where first exem is vanilla negative.\n if len(imgs_buffer) !=4:\n \n #Check if it's all empty or pos_req > 0 and no pos in posneg_FLAG\n if not posneg_FLAG or (pos_req > 0 and 1 not in posneg_FLAG):\n \n #Manually seek pos sample in sequence and call fetch_exem again.\n manual_img_dir = greedy_posseek(img_dir,full_imgs)\n imgs_buffer = fetch_exem(manual_img_dir,full_imgs,ex_int,annotls,percent_neg)\n \n \n else:\n #If there's sufficient sample in buffer\n while len(imgs_buffer) < 4:\n posneg_ = 1 if pos_req != 0 else 0\n posneg_ = 0 if neg_req != 0 else 1\n idx_to_comp = posneg_FLAG.index(posneg_)\n imgs_buffer.append(imgs_buffer[idx_to_comp])\n\n #Compensate indices\n if posneg_ ==1:\n pos_req -= 1\n elif posneg_ ==0:\n neg_req -= 1\n\n \n return imgs_buffer\n\n\n\ndef greedy_posseek(neg_img,img_ls):\n \"\"\"\n Helper function that takes an negative sample and try to find a positive one in the same sequence, \n obtained by greedily searching through the list of images.\n \"\"\"\n \n seq = neg_img.parts[-2] #Seq dir\n \n #Search\n for i in img_ls:\n stat = i[0] #pos/neg\n img_dir = str(i[1])\n \n if stat == 1 and seq in img_dir:\n target = i[1]\n break\n \n \n return target\n\n\n\n\ndef namify(idx):\n \"\"\"\n Helper function that pads a given file number and return it as per the dataset image name format.\n \"\"\"\n len_data = 6 #Ilsvr images are in the form of 000000.JPEG\n len_ = len(str(idx))\n need = len_data - len_\n\n assert len_data >= len_, \"Error! Image idx being fetched is incorrect. 
Invalid value.\"\n\n pad = '0'*need\n\n return pad+str(idx) \n\n\n\ndef parse_idx(img_name):\n \"\"\"\n Simple helper function that takes an image name and return the index position of the image.\n \"\"\"\n bk = 0\n\n #Find where the significant digit appears\n prefix = img_name.split('.')[0][3:]\n\n for idx,alpha in enumerate(prefix):\n if int(alpha) == 0:\n continue\n else:\n bk = idx\n break\n\n num = int(prefix[bk:]) - 1 #Since image names start from 1\n\n return num\n\n\ndef parse_ant(ant):\n \"\"\"\n Helper function used to parse the labels returned by dataloader (stringified).\n Returns a list of float.\n \"\"\"\n parsed = []\n \n for a in ant:\n i = a.strip('()').split(',')\n i = [float(j) for j in i]\n parsed.append(i)\n \n return torch.tensor(parsed)\n\n\n\ndef parse_xml(path,args=None):\n orig_shape = None\n new_shape = None\n\n if args is not None:\n orig_shape = args[0]\n new_shape = args[1]\n\n bboxes = []\n track_id = 0\n occ = 0\n w,h = 0,0\n \n tree = ET.parse(path)\n root = tree.getroot()\n\n if root.findall('object'):\n for obj in root.findall('object'):\n #Read w-h\n track_id = float(obj.find('trackid').text)\n occ = float(obj.find('occluded').text)\n w = float(root.find('size').find('width').text)\n h = float(root.find('size').find('height').text)\n\n # Read the bbox\n bbox = obj.find('bndbox')\n x_left = float(bbox.find('xmin').text)\n y_top = float(bbox.find('ymin').text)\n x_right = float(bbox.find('xmax').text)\n y_bottom = float(bbox.find('ymax').text)\n\n if orig_shape is not None and new_shape is not None:\n x_left = x_left*new_shape[1]/orig_shape[1]\n y_top = y_top*new_shape[0]/orig_shape[0]\n x_right = x_right*new_shape[1]/orig_shape[1]\n y_bottom = y_bottom*new_shape[0]/orig_shape[0]\n\n bbox = [int(x_left),int(y_top),int(x_right),int(y_bottom)]\n bboxes.append(bbox)\n \n else:\n bboxes = [[0]]\n\n\n return(bboxes,track_id,occ,w,h)\n \n \n\ndef to_visfmt(annot):\n \"\"\"\n Helper function that changes (tw,th,bw,bh) -> (tw,th,w,h).\n Used to convert annotations from ilsvr to visdrone's since most scripts are already\n written in visdrone's format.\n \"\"\"\n \n #Check if it's an empty bbox (neg scene)\n if len(annot) > 1:\n load = [annot[0], annot[1], annot[2]-annot[0], annot[3]-annot[1]]\n else:\n load = [0,0,0,0]\n \n return load\n\n\n\ndef to_square(annot,img):\n \"\"\"\n Helper function that takes in a target's bbox and convert it to a larger bbox that is square.\n Compensated annot is not needed for exemplars, but implemented as an extra.\n Used for exems.\n \"\"\"\n \n #Check needed dim\n need = max(annot[2],annot[3]) #See if it's taller or wider\n img_sz = min(img.size)\n\n #Compute center\n cw = annot[0] + (annot[2]/2)\n ch = annot[1] + (annot[3]/2)\n \n #Normalize annot\n img_w, img_h = img.size[0], img.size[1] \n annot = [annot[0],annot[1],annot[2]+annot[0],annot[3]+annot[1]] #Format needed by compute_cc\n annot_norm = [annot[0]/img_w,annot[1]/img_h,annot[2]/img_w,annot[3]/img_h]\n \n #If the req bbox to be square is > than img_sz\n if need > img_sz: \n #Squash it as little as possible by making it as square as possible\n req = (img_sz/img_w, img_sz/img_h)\n tw_n,th_n,bw_n,bh_n = compute_excc(*annot_norm,req) #Key step; compute comp \n \n #If it's within the image\n else:\n #Compute compensation when needed \n req = (need/img_w, need/img_h)\n tw_n,th_n,bw_n,bh_n = compute_excc(*annot_norm,req) #Key step; compute comp\n \n \n #Unnormalize\n tw = tw_n * img_w\n th = th_n * img_h\n bw = bw_n * img_w\n bh = bh_n * img_h\n\n #Crop and resize\n cropped = 
img.crop((annot[0],annot[1],annot[2],annot[3])) #Stretch\n # cropped = img.crop((tw,th,bw,bh)) #No stretch\n rsz_fc1 = cropped.size[0]/224\n rsz_fc2 = cropped.size[1]/224\n cropped = cropped.resize((224,224),resample=PIL.Image.BICUBIC) #Resize\n \n #Compensate annotations. Clip vals.\n ant_tw = max(0,annot[0] - tw)\n ant_th = max(0,annot[1] - th)\n ant_bw = max(0,(bw-tw)-(bw - annot[2])) #basically, scene_w(or h) - gap_scene_exem = exem_anot\n ant_bh = max(0,(bh-th)-(bh - annot[3])) \n \n #Compensate annotations. Resize val.\n ant_tw,ant_bw = ant_tw/rsz_fc1, ant_bw/rsz_fc1\n ant_th,ant_bh = ant_th/rsz_fc2, ant_bh/rsz_fc2 \n compensated_ant = [i if i <= 224 else 224 for i in [ant_tw,ant_th,ant_bw,ant_bh]] #Only clip max after scaling\n \n \n return cropped, compensated_ant\n \n\n \ndef to_square_neg(annot,img,size=224):\n \"\"\"\n Helper function that takes in a target's bbox and convert it to a larger bbox that is square.\n Compensated annot is not needed for exemplars, but implemented as an extra.\n Used for negative exems and scenes.\n \"\"\"\n \n #Conv annot from (tw,th,w,h) -> (tw,th,bw,bh)\n annot_ = [annot[0],annot[1],annot[2]+annot[0],annot[3]+annot[1]]\n \n #Check for empty spaces surrounding annot\n w,h = img.size[0],img.size[1]\n rw = w - annot_[2]\n bh = h - annot_[3]\n lw = annot_[0] - 0\n th = annot_[1] - 0\n \n #Det which space to take\n spaces = [lw,th,rw,bh]\n gap1 = max(spaces)\n gap1_idx = spaces.index(gap1)\n \n #Check counter dim\n if gap1_idx in [0,2]:\n gap2 = h #It's w, check h.\n \n elif gap1_idx in [1,3]:\n gap2 = w #It's h, check w.\n \n #Compute neg exem bbox\n gap_min = min(gap1,gap2)\n gap_min_idx = [gap1,gap2].index(gap_min)\n \n #If it fits\n if gap_min > size:\n \n gap_gap = gap_min - size\n begin = random.randint(0,gap_gap) #Pick random point to start exem square\n end = begin + size \n \n #If it doesn't fit in \n else:\n gap_gap = 0\n begin = random.randint(0,gap_gap) #Pick random point to start exem square\n end = begin + gap_min\n \n \n #Compensate absolute bbox val\n if gap1_idx == 0 or gap1_idx == 1:\n\n #Gap at left of bbox\n crop_ant = [begin,begin,end,end]\n\n elif gap1_idx == 2:\n\n #Gap at right of bbox\n crop_ant = [annot_[2]+begin,begin,annot_[2]+end,end] \n\n elif gap1_idx == 3:\n\n #Gap at bottom of bbox\n crop_ant = [begin,annot_[3]+begin,end,annot_[3]+end]\n \n \n #Crop img and resize if needed\n cropped = img.crop(crop_ant)\n \n if cropped.size[0] != size or cropped.size[1] != size:\n cropped = cropped.resize((size,size),resample=PIL.Image.BICUBIC) #Resize\n \n compensated_ant = [0,0,0,0] #No target in negative sample\n \n \n return cropped, compensated_ant\n\n \n \ndef to_square_scene(annot,img):\n \"\"\"\n Helper function that takes in a target's bbox and convert it to a larger bbox that is square. 
\n Used for scene.\n \"\"\"\n \n #Check needed dim\n need = 360 #Patch size is fixed at 360x360\n img_sz = min(img.size)\n\n #Normalize annot\n img_w, img_h = img.size[0], img.size[1] \n annot = [annot[0],annot[1],annot[2]+annot[0],annot[3]+annot[1]] #Format needed by compute_cc\n annot_norm = [annot[0]/img_w,annot[1]/img_h,annot[2]/img_w,annot[3]/img_h]\n \n #If the req bbox to be square is > than img_sz\n if need > img_sz:\n #Squash it as little as possible by making it as square as possible\n req = (img_sz/img_w, img_sz/img_h)\n tw_n,th_n,bw_n,bh_n = compute_excc(*annot_norm,req) #Key step; compute comp \n \n #If it's within the image\n else:\n #Compute compensation when needed \n req = (need/img_w, need/img_h)\n tw_n,th_n,bw_n,bh_n = compute_excc(*annot_norm,req) #Key step; compute comp\n \n #Unnormalize\n tw = tw_n * img_w\n th = th_n * img_h\n bw = bw_n * img_w\n bh = bh_n * img_h\n\n #Crop and resize\n cropped = img.crop((tw,th,bw,bh))\n rsz_fc1 = cropped.size[0]/224\n rsz_fc2 = cropped.size[1]/224\n cropped = cropped.resize((224,224),resample=PIL.Image.BICUBIC) #Resize\n\n #Compensate annotations. Clip vals.\n ant_tw = max(0,annot[0] - tw)\n ant_th = max(0,annot[1] - th)\n ant_bw = max(0,(bw-tw)-(bw - annot[2])) #basically, scene_w(or h) - gap_scene_exem = exem_anot\n ant_bh = max(0,(bh-th)-(bh - annot[3])) \n \n #Compensate annotations. Resize val.\n ant_tw,ant_bw = ant_tw/rsz_fc1, ant_bw/rsz_fc1\n ant_th,ant_bh = ant_th/rsz_fc2, ant_bh/rsz_fc2 \n compensated_ant = [i if i <= 224 else 224 for i in [ant_tw,ant_th,ant_bw,ant_bh]] #Only clip max after scaling\n \n return cropped, compensated_ant\n \n\n\ndef dt_trans(trans,scene,exems,buffer_size):\n \"\"\"\n Function to enclose the transformation sequence used in dataset.\n \"\"\"\n\n norm_trans = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n #If transforms\n if trans:\n #Make sure trans applied on exem and scene are similar\n vals = [random.uniform(0.85,1.15) for i in range(4)]\n val_hue = random.uniform(-0.1,0.1)\n scene = transforms.functional.adjust_brightness(scene,vals[0])\n scene = transforms.functional.adjust_contrast(scene,vals[1])\n scene = transforms.functional.adjust_gamma(scene,vals[2])\n scene = transforms.functional.adjust_saturation(scene,vals[3])\n scene = transforms.functional.adjust_hue(scene,val_hue)\n scene = norm_trans(scene)\n\n for i,exem_ in enumerate(exems):\n exem_ = transforms.functional.adjust_brightness(exem_,vals[0])\n exem_ = transforms.functional.adjust_contrast(exem_,vals[1])\n exem_ = transforms.functional.adjust_gamma(exem_,vals[2])\n exem_ = transforms.functional.adjust_saturation(exem_,vals[3])\n exem_ = transforms.functional.adjust_hue(exem_,val_hue)\n exem_ = norm_trans(exem_)\n exems[i] = exem_\n exems = torch.stack(exems)\n\n else:\n scene = norm_trans(scene)\n exems = [norm_trans(exem_) for exem_ in exems]\n exems = torch.stack(exems)\n\n return scene,exems\n\n\n\n\nclass gen_dt(Dataset):\n\n def __init__(self, img_root, ant_root, posneg_ls, pos_ls, neg_ls, cusneg_ls, \n percent_neg = 0,ex_int = 16, transform = True):\n \"\"\"\n General buffer dataset class.\n\n Rather than taking in img_dir, feed in instead a csv that can be decoded to return\n the paths for the negative and positive samples respectively.Instantiate different \n dataset for train and valid.The annotations for the entire train/valid set will be \n loaded on instantiation to prevent read/write at every sample.\n\n Input:\n img_root: Root to 
ImageNet-VID.\n ant_root: Root to annotations of corresponding img_root.\n posneg_ls: Path to posneg.csv. Determines the positivity-negativity of sample.\n pos_ls: Path to ilsvrc_vanilla_pos.csv. Used to fetch positive samples as dictated by posneg_ls.\n neg_ls: Path to ilsvrc_vanilla_neg.csv. Used to fetch negative samples as dictated by posneg_ls.\n cusneg_ls: Path to neg_below08.csv. Used to generate custom negative samples.\n percent_neg: Valid values are [0,0.25,0.5,0.75]. Used to control percentage of distractor exemplars in buffer.\n ex_int: Time interval (frames) between exemplars in buffer.\n transform: Should always be TRUE in usage. Used to transform images for PyTorch.\n\n Return:\n Img/scene: A portion of the scene with/without a target. Size of [3x224x224]. \n Img/exems: Collection of exemplars taken from the same sequence as the scene. Size of [buffer_sizex3x224x224].\n Img/pth_full: Full path for the scene.\n Img/seq_name: Name of sequence for the scene.\n Img/img_name: Name of image for the scene.\n Annot/bbox: Ground truth bounding box coordinates. Quick-fixed by stringify to prevent jumbling.\n Annot/obj: Ground truth objectness score of scene. Quick-fixed by stringify to prevent jumbling\n \"\"\"\n\n #Setup\n self.ex_int = ex_int\n self.transform = transform\n self.posneg_ls = posneg_ls\n self.pos_ls = pos_ls\n self.neg_ls = neg_ls\n self.cusneg_ls = cusneg_ls\n self.img_root = img_root\n self.ant_root = ant_root\n self.percent_neg = percent_neg \n self.transform = transform\n self.data, self.annot = compile_data(self.img_root,self.ant_root,self.posneg_ls,\n self.pos_ls,self.neg_ls,self.cusneg_ls)\n \n assert len(self.annot) == len(self.data), \"Error! The len(annot) != len(imgs)\"\n\n self.data, self.annot = np.array(self.data),np.array(self.annot)\n\n \n def __len__(self):\n return len(self.data)\n\n\n def __getitem__(self, idx):\n \n ###Parse path\n img_full = self.data[idx][1] #self.data is a list in form [[pos/neg,path],...]\n img_name = img_full.parts[-1]\n seq_name = img_full.parts[-2]\n \n ###Open Image\n full_scene = PIL.Image.open(img_full)\n \n ###Fetch label. \n #Parse_xml returns extra info. (w,h,occ,extra bboxes if >1 target)\n #self.annot is a list in form [[pos/neg,path],...] \n annot = parse_xml(self.annot[idx][1])[0][0]\n \n #convert (tw,th,bw,bh) -> (tw,th,w,h) ; (ilsvr) -> (visdrone) default format\n annot = to_visfmt(annot)\n \n ###Fetch Positive or Negative sample\n stat = self.data[idx][0]\n assert stat == self.annot[idx][0], \"Error! Both img and ant should be equal in pos/neg, not diff.\"\n \n #If it's a positive sample\n if stat == 1:\n \n #Fetch, crop and transform scene with compensated annot\n scene,annot = scene_crop(full_scene,annot) \n \n #If it's a negative sample\n elif stat == 0:\n\n #Fetch, crop and transform scene with compensated annot\n scene,annot = scene_crop_neg(full_scene,annot) \n \n else:\n raise Exception(\"Error! Invalid stat value\")\n\n \n #Fetch exemplars\n exems = fetch_exem(img_full,self.data,self.ex_int,self.annot[0][1],self.percent_neg)\n \n ###Transforms\n scene,exems = dt_trans(self.transform,scene,exems,4)\n \n \n ###Package\n load = {\"Img\":{\"scene\":scene,\n \"exem\":exems,\n \"pth_full\":str(img_full),\n \"seq_name\":str(seq_name),\n \"img_name\":str(img_name)},\n \"Annot\":{\"bbox\":str(annot),\n \"obj\":str(stat)}}\n \n \n return load\n \n"
] | [
[
"matplotlib.pyplot.imshow",
"torch.tensor",
"torch.stack",
"numpy.array",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jumpst3r/mine-pytorch | [
"41c68d1388664561996300a15e43e8cc4d805ded",
"41c68d1388664561996300a15e43e8cc4d805ded"
] | [
"mine/utils/helpers.py",
"mine/models/layers.py"
] | [
"import numpy as np\nimport torch\n\ndef batch(x, y, batch_size=1, shuffle=True):\n assert len(x) == len(\n y), \"Input and target data must contain same number of elements\"\n if isinstance(x, np.ndarray):\n x = torch.from_numpy(x).float()\n if isinstance(y, np.ndarray):\n y = torch.from_numpy(y).float()\n\n n = len(x)\n\n if shuffle:\n rand_perm = torch.randperm(n)\n x = x[rand_perm]\n y = y[rand_perm]\n\n batches = []\n for i in range(n // batch_size):\n x_b = x[i * batch_size: (i + 1) * batch_size]\n y_b = y[i * batch_size: (i + 1) * batch_size]\n\n batches.append((x_b, y_b))\n return batches\n\n",
"\nimport torch\nimport torch.nn as nn\n\n\nclass ConcatLayer(nn.Module):\n def __init__(self, dim=1):\n super().__init__()\n self.dim = dim\n\n def forward(self, x, y):\n return torch.cat((x, y), self.dim)\n\n\nclass CustomSequential(nn.Sequential):\n def forward(self, *input):\n for module in self._modules.values():\n if isinstance(input, tuple):\n input = module(*input)\n else:\n input = module(input)\n return input\n"
] | [
[
"torch.randperm",
"torch.from_numpy"
],
[
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
guci314/Chatbot_CN | [
"a0f7194252a189f8bc2b62fd16eb2abe432c0bf9",
"a0f7194252a189f8bc2b62fd16eb2abe432c0bf9"
] | [
"Chatbot_Model/Question_Pairs_Matching/train.py",
"Chatbot_Model/Text_Generator/word_sequence.py"
] | [
"import tensorflow as tf\nfrom Chatbot_Model.Question_Pairs_Matching import data_prepare\nfrom tensorflow.contrib import learn\nimport numpy as np\nfrom Chatbot_Model.Question_Pairs_Matching import esim_model\nimport Chatbot_Model.Question_Pairs_Matching.config as config\nfrom tqdm import tqdm\nfrom sklearn.metrics import f1_score\nfrom sklearn import metrics\nimport os\n\ncon = config.Config()\nparent_path = os.path.dirname(os.getcwd())\ndata_pre = data_prepare.Data_Prepare()\n\n\nclass TrainModel(object):\n '''\n 训练模型\n 保存模型\n '''\n def pre_processing(self):\n train_texta, train_textb, train_tag = data_pre.readfile(parent_path+'/data/train.txt')\n data = []\n data.extend(train_texta)\n data.extend(train_textb)\n data_pre.build_vocab(data, parent_path+'/save_model/esim' + '/vocab.pickle')\n # 加载词典\n self.vocab_processor = learn.preprocessing.VocabularyProcessor.restore(parent_path+'/save_model/esim' +\n '/vocab.pickle')\n train_texta_embedding = np.array(list(self.vocab_processor.transform(train_texta)))\n train_textb_embedding = np.array(list(self.vocab_processor.transform(train_textb)))\n\n dev_texta, dev_textb, dev_tag = data_pre.readfile(parent_path+'/data/dev.txt')\n dev_texta_embedding = np.array(list(self.vocab_processor.transform(dev_texta)))\n dev_textb_embedding = np.array(list(self.vocab_processor.transform(dev_textb)))\n return train_texta_embedding, train_textb_embedding, np.array(train_tag), \\\n dev_texta_embedding, dev_textb_embedding, np.array(dev_tag)\n\n def get_batches(self, texta, textb, tag):\n num_batch = int(len(texta) / con.Batch_Size)\n for i in range(num_batch):\n a = texta[i*con.Batch_Size:(i+1)*con.Batch_Size]\n b = textb[i*con.Batch_Size:(i+1)*con.Batch_Size]\n t = tag[i*con.Batch_Size:(i+1)*con.Batch_Size]\n yield a, b, t\n\n def get_length(self, trainX_batch):\n # sentence length\n lengths = []\n for sample in trainX_batch:\n count = 0\n for index in sample:\n if index != 0:\n count += 1\n else:\n break\n lengths.append(count)\n return lengths\n\n def trainModel(self):\n train_texta_embedding, train_textb_embedding, train_tag, \\\n dev_texta_embedding, dev_textb_embedding, dev_tag = self.pre_processing()\n # 定义训练用的循环神经网络模型\n with tf.variable_scope('esim_model', reuse=None):\n # esim model\n model = esim_model.ESIM(True, seq_length=len(train_texta_embedding[0]),\n class_num=len(train_tag[0]),\n vocabulary_size=len(self.vocab_processor.vocabulary_),\n embedding_size=con.embedding_size,\n hidden_num=con.hidden_num,\n l2_lambda=con.l2_lambda,\n learning_rate=con.learning_rate)\n\n # 训练模型\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver()\n best_f1 = 0.0\n for time in range(con.epoch):\n print(\"training \" + str(time + 1) + \">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n model.is_trainning = True\n loss_all = []\n accuracy_all = []\n for texta, textb, tag in tqdm(\n self.get_batches(train_texta_embedding, train_textb_embedding, train_tag)):\n feed_dict = {\n model.text_a: texta,\n model.text_b: textb,\n model.y: tag,\n model.dropout_keep_prob: con.dropout_keep_prob,\n model.a_length: np.array(self.get_length(texta)),\n model.b_length: np.array(self.get_length(textb))\n }\n _, cost, accuracy = sess.run([model.train_op, model.loss, model.accuracy], feed_dict)\n loss_all.append(cost)\n accuracy_all.append(accuracy)\n\n print(\"第\" + str((time + 1)) + \"次迭代的损失为:\" + str(np.mean(np.array(loss_all))) + \";准确率为:\" +\n str(np.mean(np.array(accuracy_all))))\n\n def dev_step():\n \"\"\"\n Evaluates model on a dev set\n \"\"\"\n 
loss_all = []\n accuracy_all = []\n predictions = []\n for texta, textb, tag in tqdm(\n self.get_batches(dev_texta_embedding, dev_textb_embedding, dev_tag)):\n feed_dict = {\n model.text_a: texta,\n model.text_b: textb,\n model.y: tag,\n model.dropout_keep_prob: 1.0,\n model.a_length: np.array(self.get_length(texta)),\n model.b_length: np.array(self.get_length(textb))\n }\n dev_cost, dev_accuracy, prediction = sess.run([model.loss, model.accuracy,\n model.prediction], feed_dict)\n loss_all.append(dev_cost)\n accuracy_all.append(dev_accuracy)\n predictions.extend(prediction)\n y_true = [np.nonzero(x)[0][0] for x in dev_tag]\n y_true = y_true[0:len(loss_all)*con.Batch_Size]\n f1 = f1_score(np.array(y_true), np.array(predictions), average='weighted')\n print('分类报告:\\n', metrics.classification_report(np.array(y_true), predictions))\n print(\"验证集:loss {:g}, acc {:g}, f1 {:g}\\n\".format(np.mean(np.array(loss_all)),\n np.mean(np.array(accuracy_all)), f1))\n return f1\n\n model.is_trainning = False\n f1 = dev_step()\n\n if f1 > best_f1:\n best_f1 = f1\n saver.save(sess, parent_path + \"/save_model/esim/model.ckpt\")\n print(\"Saved model success\\n\")\n\n\nif __name__ == '__main__':\n train = TrainModel()\n train.trainModel()",
"# -*- coding: utf-8 -*-\n\n\"\"\"\n-------------------------------------------------\n File Name: WordSequence.py\n Description : 维护一个字典,把一个list(或者字符串)编码化\n Author : charl\n date: 2018/12/28\n-------------------------------------------------\n Change Activity: 2018/12/28:\n-------------------------------------------------\n\"\"\"\n\n\nimport numpy as np\n\n\nclass WordSequence(object):\n \"\"\"\n 把句子编码化(index)\n \"\"\"\n\n PAD_TAG = '<pad>'\n UNK_TAG = '<unk>'\n START_TAG = '<s>'\n END_TAG = '</s>'\n PAD = 0\n UNK = 1\n START = 2\n END = 3\n\n\n def __init__(self):\n \"\"\"\n 初始化基本的dict\n \"\"\"\n self.dict = {\n WordSequence.PAD_TAG: WordSequence.PAD,\n WordSequence.UNK_TAG: WordSequence.UNK,\n WordSequence.START_TAG: WordSequence.START,\n WordSequence.END_TAG: WordSequence.END,\n }\n self.fited = False\n\n\n def to_index(self, word):\n \"\"\"\n 把一个单字转换为index\n \"\"\"\n assert self.fited, 'WordSequence 尚未 fit'\n if word in self.dict:\n return self.dict[word]\n return WordSequence.UNK\n\n\n def to_word(self, index):\n \"\"\"\n 把一个index转换为单字\n \"\"\"\n assert self.fited, 'WordSequence 尚未 fit'\n for k, v in self.dict.items():\n if v == index:\n return k\n return WordSequence.UNK_TAG\n\n\n def size(self):\n \"\"\"\n 返回字典大小\n \"\"\"\n assert self.fited, 'WordSequence 尚未 fit'\n return len(self.dict) + 1\n\n def __len__(self):\n \"\"\"\n 返回字典大小\n \"\"\"\n return self.size()\n\n\n def fit(self, sentences, min_count=5, max_count=None, max_features=None):\n \"\"\"\n 训练 WordSequence\n Args:\n min_count 最小出现次数\n max_count 最大出现次数\n max_features 最大特征数\n\n ws = WordSequence()\n ws.fit([['hello', 'world']])\n \"\"\"\n assert not self.fited, 'WordSequence 只能 fit 一次'\n\n count = {}\n for sentence in sentences:\n arr = list(sentence)\n for a in arr:\n if a not in count:\n count[a] = 0\n count[a] += 1\n\n if min_count is not None:\n count = {k: v for k, v in count.items() if v >= min_count}\n\n if max_count is not None:\n count = {k: v for k, v in count.items() if v <= max_count}\n\n self.dict = {\n WordSequence.PAD_TAG: WordSequence.PAD,\n WordSequence.UNK_TAG: WordSequence.UNK,\n WordSequence.START_TAG: WordSequence.START,\n WordSequence.END_TAG: WordSequence.END,\n }\n\n if isinstance(max_features, int):\n count = sorted(list(count.items()), key=lambda x: x[1])\n if max_features is not None and len(count) > max_features:\n count = count[-int(max_features):]\n for w, _ in count:\n self.dict[w] = len(self.dict)\n else:\n for w in sorted(count.keys()):\n self.dict[w] = len(self.dict)\n\n self.fited = True\n\n\n def transform(self, sentence, max_len=None):\n \"\"\"\n 把句子转换为向量\n 例如输入 ['a', 'b', 'c']\n 输出 [1, 2, 3] 这个数字是字典里的编号,顺序没有意义\n \"\"\"\n assert self.fited, 'WordSequence 尚未 fit'\n\n # if max_len is not None:\n # r = [self.PAD] * max_len\n # else:\n # r = [self.PAD] * len(sentence)\n\n if max_len is not None:\n r = [self.PAD] * max_len\n else:\n r = [self.PAD] * len(sentence)\n\n for index, a in enumerate(sentence):\n if max_len is not None and index >= len(r):\n break\n r[index] = self.to_index(a)\n\n return np.array(r)\n\n\n def inverse_transform(self, indices,\n ignore_pad=False, ignore_unk=False,\n ignore_start=False, ignore_end=False):\n \"\"\"\n 把向量转换为句子,和上面的相反\n \"\"\"\n ret = []\n for i in indices:\n word = self.to_word(i)\n if word == WordSequence.PAD_TAG and ignore_pad:\n continue\n if word == WordSequence.UNK_TAG and ignore_unk:\n continue\n if word == WordSequence.START_TAG and ignore_start:\n continue\n if word == WordSequence.END_TAG and ignore_end:\n continue\n ret.append(word)\n\n return 
ret\n\n\ndef test():\n \"\"\"\n 测试\n \"\"\"\n ws = WordSequence()\n ws.fit([\n ['第', '一', '句', '话'],\n ['第', '二', '句', '话']\n ])\n\n indice = ws.transform(['第', '三'])\n print(indice)\n\n back = ws.inverse_transform(indice)\n print(back)\n\nif __name__ == '__main__':\n test()\n"
] | [
[
"tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore",
"numpy.nonzero",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.variable_scope",
"tensorflow.train.Saver",
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ValeriaTelles/Physics-Programs | [
"9fdd1b60ad5dd9c6750855bf63c2aa89383a0b1a"
] | [
"Scalar and Vector Fields/src/divCurl.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\n\n# deriv computes 1D derivative, dF/dr, using central difference method\ndef deriv(F,r):\n\t# get the length of array F (assume same length for r)\n L = F.size\n\t\n\t# create empty array to store results\n result= np.empty(L)\n\t\n\t# use central diff method for all interior points (we will build in tutorial)\n for i in range(L-2):\n result[i+1] = (F[i+2] - F[i]) / (r[i+2] - r[i])\n \n result[0] = (F[1] - F[0]) / (r[1] - r[0])\n result[L-1] = (F[L-1] - F[L-2]) / (r[L-1] - r[L-2])\n\n return result\n\n# read in the files \"vFieldX.csv\" and \"vFieldY.csv\"\nvFieldX= np.loadtxt( 'vFieldX.csv', delimiter = ',' )\nvFieldY = np.loadtxt( 'vFieldY.csv', delimiter = ',' )\n\n# Create a 2D grid of x, y points using numpy's meshgrid function (see Exercise 1)\nnx, ny = 100,100\nx = np.linspace(-5,5,nx)\ny = np.linspace(-5,5,ny)\nX, Y = np.meshgrid(x,y)\n\n# Divergence \ndivX = np.empty(X.shape)\n\nfor j in range(ny):\n divX[j,:] = deriv(vFieldX[j,:],x)\n\ndivY = np.empty(Y.shape)\n\nfor i in range(nx): \n divY[:,i] = deriv(vFieldY[:,i], y)\n\ntotalDiv = divX + divY\n\n# Curl\ncurlX = np.empty(X.shape)\n\nfor j in range(ny):\n curlX[j,:] = deriv(vFieldY[j,:], x)\n\ncurlY = np.empty(Y.shape)\n\nfor i in range(nx): \n curlY[:,i] = deriv(vFieldX[:,i], y)\n\ntotalCurl = curlX - curlY\n\n# Plotting the Divergence and Curl using subplots\nlines = 10**np.linspace(10, 12, 11)\nlines = sorted(list(-lines)+list(lines))\nfig, (ax1, ax2) = plt.subplots( nrows = 1, ncols = 2, sharex = False, sharey = False )\n\nax1.contourf(X, Y, totalDiv) #levels = Lines, colors = 'k', linewidths = 1)\nCS = ax2.contour(x, y, totalCurl, ) #levels = Lines, colors = 'k', linewidths = 1)\n\nax1.set_title('Divergence of a Vector Field', fontweight = 'bold' )\nax2.set_title('Curl of a Vector Field', fontweight = 'bold' )\n\nax1.set(xlabel = \"X\", ylabel = \"Y\")\nax2.set(xlabel = \"X\", ylabel = \"Y\")\n\nax2.clabel(CS, inline = 1, fontsize = 8)\n\nfig.set_size_inches(9, 5)\n\nplt.savefig('divCurlPlot.png', dpi=300)"
] | [
[
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.loadtxt",
"numpy.meshgrid",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ascend/pytorch | [
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc"
] | [
"test/test_npu/test_network_ops/test_norm_except_dim.py",
"test/test_npu/test_network_ops/test_upsample_bicubic2d_backward.py",
"test/test_npu/test_conv1d.py",
"test/test_npu/test_onnx/torch.onnx/export/export_onnx.py",
"test/test_npu/test_network_ops/test_tril.py",
"test/test_npu/test_dynamic_ops/test_network_topK.py",
"test/test_npu/test_onnx/torch.onnx/export/model_export-npu.py",
"test/test_npu/test_network_ops/test_logical_not.py",
"test/test_npu/test_network_ops/test_tanh.py",
"test/test_npu/test_network_ops/test_pdist.py",
"test/test_npu/test_network_ops/test_eye.py",
"test/test_npu/test_network_ops/test_addmv.py"
] | [
"# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport math\nimport random\nfrom torch._six import nan\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\n\n\nclass TestNormExceptDim(TestCase):\n def generate_data(self, min, max, shape, dtype):\n input1 = np.random.uniform(min, max, shape).astype(dtype)\n input2 = np.random.uniform(min, max, shape).astype(dtype)\n npu_input1 = torch.from_numpy(input1)\n npu_input2 = torch.from_numpy(input2)\n\n return npu_input1, npu_input2\n\n def generate_single_data(self, min, max, shape, dtype):\n input = np.random.uniform(min, max, shape).astype(dtype)\n npu_input = torch.from_numpy(input)\n return npu_input\n\n def generate_int_dim(self, max):\n dim = np.random.randint(0, max)\n return dim\n\n def generate_bool_keepdim(self):\n keepdim = random.choice([True, False])\n return keepdim\n\n def test_norm_except_dim_type(self, device):\n def cpu_op_exec(input1, pow):\n output = torch.norm_except_dim(input1, pow=pow, dim=0)\n output = output.numpy()\n return output\n\n def npu_op_exec(input1, pow):\n print(input1.shape)\n input1 = input1.to(\"npu\")\n output = torch.norm_except_dim(input1, pow=pow, dim=0)\n output = output.to(\"cpu\")\n output = output.numpy()\n print(output.shape)\n return output\n\n def test_norm_except_dim_exec(input_type):\n input1 = self.generate_single_data(0, 100, (5, 3), input_type)\n pow = self.generate_int_dim(10)\n cpu_output = cpu_op_exec(input1, pow)\n npu_output = npu_op_exec(input1, pow)\n return cpu_output, npu_output\n\n for dtype in [np.float32]:\n cpu_output, npu_output = test_norm_except_dim_exec(dtype)\n self.assertRtolEqual(cpu_output, npu_output)\n\n \ninstantiate_device_type_tests(TestNormExceptDim, globals(), except_for=\"cpu\")\n\nif __name__ == \"__main__\":\n run_tests()",
"# Copyright (c) 2020 Huawei Technologies Co., Ltd\n# Copyright (c) 2019, Facebook CORPORATION. \n# All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport sys\nimport copy\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\nclass TestUpsampleBicubic2dBackward(TestCase):\n\n def cpu_op_exec(self, input1, output_size, align_corners, scale_h, scale_w):\n input1.requires_grad = True\n output = torch._C._nn.upsample_bicubic2d(input1, output_size, align_corners, scale_h, scale_w)\n output.backward(torch.ones_like(output))\n output_grad = input1.grad\n output_grad = output_grad.detach().numpy()\n return output_grad\n\n def npu_op_exec(self, input1, output_size, align_corners, scale_h, scale_w):\n input1.requires_grad = True\n output = torch._C._nn.upsample_bicubic2d(input1, output_size, align_corners, scale_h, scale_w)\n output.backward(torch.ones_like(output))\n output_grad = input1.grad\n output_grad = output_grad.to(\"cpu\").detach().numpy()\n return output_grad\n\n\n def test_upsample_bicubic2d_common_shape_format(self, device):\n shape_format = [\n [[np.float32, -1, (1, 1, 1, 1)], (1, 1), True, 0, 0, 0, 255],\n [[np.float32, -1, (2, 65535, 2, 2)], (2, 2), True, 0, 0, 0, 255],\n [[np.float32, -1, (10, 10, 786432, 8)], (786432, 8), False, 0, 0, 0, 255],\n [[np.float32, -1, (1, 1, 1, 1)], (2, 2), True, 0, 0, 0, 255],\n [[np.float32, -1, (1, 1, 2, 2)], (4, 4), True, 0, 0, 0, 255],\n [[np.float32, -1, (1, 1, 1, 1)], (2, 2), False, 0.5, 0.5, 0, 255],\n [[np.float32, -1, (1, 1, 2, 2)], (4, 4), False, 0.5, 0.5, 0, 255],\n [[np.float32, -1, (32, 32, 32, 32)], (64, 64), False, 0.5, 0.5, 0, 3402823500.0]\n ]\n for item in shape_format:\n cpu_input1, npu_input1 = create_common_tensor(item[0], item[5], item[6])\n cpu_output = self.cpu_op_exec(cpu_input1, item[1], item[2], item[3], item[4])\n npu_output = self.npu_op_exec(npu_input1, item[1], item[2], item[3], item[4])\n self.assertRtolEqual(cpu_output, npu_output)\n\n\n def test_upsample_bicubic2d_float16_shape_format(self, device):\n def cpu_op_exec_fp16(input1, output_size, align_corners, scale_h, scale_w):\n input1 = input1.to(torch.float32)\n input1.requires_grad = True\n output = torch._C._nn.upsample_bicubic2d(input1, output_size, align_corners, scale_h, scale_w)\n output.backward(torch.ones_like(output))\n output_grad = input1.grad\n output_grad = output_grad.detach().numpy()\n output_grad = output_grad.astype(np.float16)\n return output_grad\n \n shape_format = [\n [[np.float16, -1, (1, 1, 1, 1)], (1, 1), True, 0, 0, 0, 255],\n [[np.float16, -1, (2, 65535, 2, 2)], (2, 2), True, 0, 0, 0, 255],\n [[np.float16, -1, (32, 32, 32, 32)], (32, 32), False, 0, 0, 0, 6550.0],\n [[np.float16, -1, (1, 1, 1, 1)], (2, 2), True, 0, 0, 0, 255],\n [[np.float16, -1, (1, 1, 1, 1)], (2, 2), False, 0.5, 0.5, 0, 255],\n [[np.float16, -1, (1, 1, 2, 2)], (4, 4), False, 0.5, 0.5, 0, 255],\n [[np.float16, -1, 
(32, 32, 32, 32)], (64, 64), False, 0.5, 0.5, 0, 6550.0]\n ]\n \n for item in shape_format:\n cpu_input1, npu_input1 = create_common_tensor(item[0], item[5], item[6])\n cpu_output = cpu_op_exec_fp16(cpu_input1, item[1], item[2], item[3], item[4])\n npu_output = self.npu_op_exec(npu_input1, item[1], item[2], item[3], item[4])\n self.assertRtolEqual(cpu_output, npu_output)\n\ninstantiate_device_type_tests(TestUpsampleBicubic2dBackward, globals(), except_for='cpu')\nif __name__ == \"__main__\":\n run_tests()\n",
"# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport copy\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\nclass TestConv1d(TestCase):\n def op_exec_cpu(self, input, weight, in_channels, out_channels, kernel_size, padding=0, stride=1, dilation=1, bias=True):\n m = torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias, groups=1)\n m.weight.data = weight\n output = m(input)\n output = output.detach().numpy()\n return output\n\n def op_exec_npu(self, input, weight, in_channels, out_channels, kernel_size, padding=0, stride=1, dilation=1, bias=True):\n m = torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias, groups=1)\n m.weight.data = weight\n m = m.to(\"npu\")\n output = m(input)\n output = output.to(\"cpu\")\n output = output.detach().numpy()\n return output\n\n def test_conv1d_shape_format(self, device):\n shape_format = [ \n [[np.float32, 3, (256, 32, 1, 1)], [np.float32, 3, (8, 32, 1, 1)], 0, (1, 1), (1, 1), (8)],\n [[np.float32, 3, [256, 32, 112, 112]], [np.float32, 0, [16, 32, 1, 1]], 0, 1, 1, None],\n [[np.float32, 0, [256, 3, 224, 224]], [np.float32, 0, [32, 3, 3, 3]], 0, [2, 2], 1, None],\n [[np.float32, 3, (2, 3, 3, 3)], [np.float32, 0, (3, 1, 3, 3)], 3, 1, 1, 1],\n [[np.float32, 3, [1024, 232, 7, 7]], [np.float32, 4, [232, 232, 1, 1]], 0, 1, 1, True],\n ]\n\n for item in shape_format:\n input_cpu, input_npu = create_common_tensor(item[0], -2, 2)\n weight_cpu, weight_npu = create_common_tensor(item[1], -2, 2)\n kernel_size = (item[1][2][2], item[1][2][3])\n cpu_output = self.op_exec_cpu(input_cpu, weight_cpu, item[0][2][1], item[1][2][0], kernel_size=kernel_size,\n padding=item[2], stride=item[3], dilation=item[4], bias=item[5])\n weight_npu = weight_npu.to(\"cpu\")\n npu_output = self.op_exec_npu(input_npu, weight_npu, item[0][2][1], item[1][2][0], kernel_size=kernel_size,\n padding=item[2], stride=item[3], dilation=item[4], bias=item[5]) \n self.assertRtolEqual(cpu_output, npu_output)\n \n def test_conv1d_shape_format_float16(self, device):\n def cpu_op_exec_fp16(input, weight, in_channels, out_channels, kernel_size, padding=0, stride=1, dilation=1, bias=True):\n input = input.to(torch.float32)\n weight = weight.to(torch.float32)\n m = torch.nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias, groups=1)\n m.weight.data = weight\n output = m(input)\n output = output.detach().numpy()\n output = output.astype(np.float16)\n return output\n\n shape_format = [ \n [[np.float16, 3, (256, 32, 1, 1)], [np.float16, 3, (8, 32, 1, 1)], 0, (1, 1), (1, 1), (8)],\n [[np.float16, 3, [256, 32, 112, 112]], [np.float16, 0, [16, 32, 1, 1]], 0, 1, 1, None],\n [[np.float16, 0, [256, 3, 224, 224]], [np.float16, 0, [32, 3, 3, 3]], 0, [2, 2], 1, None],\n [[np.float16, 3, (2, 3, 3, 3)], 
[np.float16, 0, (3, 1, 3, 3)], 3, 1, 1, 1],\n [[np.float16, 3, [1024, 232, 7, 7]], [np.float16, 4, [232, 232, 1, 1]], 0, 1, 1, True],\n ]\n\n for item in shape_format:\n input_cpu, input_npu = create_common_tensor(item[0], -2, 2)\n weight_cpu, weight_npu = create_common_tensor(item[1], -2, 2)\n kernel_size = (item[1][2][2], item[1][2][3])\n cpu_output = cpu_op_exec_fp16(input_cpu, weight_cpu, item[0][2][1], item[1][2][0], kernel_size=kernel_size,\n padding=item[2], stride=item[3], dilation=item[4], bias=item[5])\n weight_npu = weight_npu.to(\"cpu\")\n npu_output = self.op_exec_npu(input_npu, weight_npu, item[0][2][1], item[1][2][0], kernel_size=kernel_size,\n padding=item[2], stride=item[3], dilation=item[4], bias=item[5]) \n self.assertRtolEqual(cpu_output, npu_output) \n\ninstantiate_device_type_tests(TestConv1d, globals(), except_for='cpu')\nif __name__ == \"__main__\":\n torch.npu.set_device(\"npu:6\")\n run_tests()",
"# Copyright (c) 2020 Huawei Technologies Co., Ltd\n# Copyright (c) 2019, Facebook CORPORATION. \n# All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport torchvision\nfrom export.cp_parser import *\n\ndef getDeviceStr(deviceStr, DeviceNo):\n #print(\"cp_getDeviceId test device : \",\"(\", deviceStr,\" \", DeviceNo, \")\")\n if DeviceNo == None:\n return deviceStr\n if deviceStr == 'cpu':\n return deviceStr\n elif deviceStr == 'npu' or deviceStr == 'cuda':\n loc = '{}:{}'.format(deviceStr, DeviceNo)\n return loc\n else: \n return deviceStr\n\n\ndef cp2onnx(model,cpfile,onnxfile, input_data, ispth=False,device=\"cpu\",dno=None):\n if os.path.isfile(cpfile):\n #model = torchvision.models.resnet50(pretrained=False)\n model = cp_load(model,cpfile,ispth=ispth,device=device,dno=dno)\n else :\n print(\"warning : \\\"\",cpfile,\"\\\"not exist!\")\n model.state_dict()\n deviceStr = getDeviceStr(device,dno)\n print(\"cp2onnx device: \",deviceStr,\"(\",device,\" \",dno,\")\")\n #torch.npu.set_device(\"npu:0\")\n #dummy_input = torch.randn(10, 3, 224, 224, device='npu:0')\n dummy_input = input_data.to(deviceStr)\n\n # Providing input and output names sets the display names for values\n # within the model's graph. Setting these does not change the semantics\n # of the graph; it is only for readability.\n #\n # The inputs to the network consist of the flat list of inputs (i.e.\n # the values you would pass to the forward() method) followed by the\n # flat list of parameters. You can partially specify names, i.e. provide\n # a list here shorter than the number of inputs to the model, and we will\n # only set that subset of names, starting from the beginning.\n input_names = [ \"actual_input_1\" ] #+ [ \"learned_%d\" % i for i in range(16) ]\n output_names = [ \"output1\" ]\n model = model.to(deviceStr)\n torch.onnx.export(model, dummy_input, onnxfile, verbose=True, input_names=input_names, output_names=output_names,opset_version=11)\n\n\ndef cp2onnx_dynamic_axes(model,cpfile,onnxfile,device=\"cuda\",dno=None):\n if os.path.isfile(cpfile):\n #model = torchvision.models.resnet50(pretrained=False)\n model = cp_load(model,cpfile)\n else :\n print(\"warning : \\\"\",cpfile,\"\\\"not exist!\")\n model.state_dict()\n deviceStr = getDeviceStr(device,dno)\n #torch.npu.set_device(\"npu:0\")\n #dummy_input = torch.randn(10, 3, 224, 224, device='npu:0')\n dummy_input = torch.randn(10, 3, 224, 224)\n dummy_input = dummy_input.to(deviceStr)\n\n # Providing input and output names sets the display names for values\n # within the model's graph. Setting these does not change the semantics\n # of the graph; it is only for readability.\n #\n # The inputs to the network consist of the flat list of inputs (i.e.\n # the values you would pass to the forward() method) followed by the\n # flat list of parameters. You can partially specify names, i.e. 
provide\n # a list here shorter than the number of inputs to the model, and we will\n # only set that subset of names, starting from the beginning.\n input_names = [ \"actual_input_1\" ] #+ [ \"learned_%d\" % i for i in range(16) ]\n output_names = [ \"output1\" ]\n model = model.to(deviceStr)\n dynamic_axes = {'actual_input_1': {0: '-1'}, 'output1': {0: '-1'}}\n torch.onnx.export(model, dummy_input, onnxfile, verbose=True, input_names=input_names, output_names=output_names,dynamic_axes=dynamic_axes,opset_version=11)\n\n\n",
"# Copyright (c) 2020 Huawei Technologies Co., Ltd\n# Copyright (c) 2019, Facebook CORPORATION. \n# All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport sys\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\nclass TestTril(TestCase):\n def test_tril(self, device):\n dtype_list = [np.float32, np.float16]\n format_list = [0, 3, 4]\n shape_list = [[5, 5],[4, 5, 6]]\n diagonal_list = [-1, 0, 1]\n shape_format = [\n [i, j, k, l] for i in dtype_list for j in format_list for k in shape_list for l in diagonal_list\n ]\n for item in shape_format:\n cpu_input, npu_input = create_common_tensor(item[:-1], 0, 100)\n cpu_output = self.cpu_op_exec(cpu_input, item[-1])\n npu_output = self.npu_op_exec(npu_input, item[-1])\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_tril_inplace(self, device):\n dtype_list = [np.float32, np.float16]\n format_list = [0, 3, 4]\n shape_list = [[5, 5], [4, 5, 6]]\n diagonal_list = [-1, 0, 1]\n shape_format = [\n [i, j, k, l] for i in dtype_list for j in format_list for k in shape_list for l in diagonal_list\n ]\n for item in shape_format:\n cpu_input, npu_input = create_common_tensor(item[:-1], 0, 100)\n cpu_output = self.cpu_op_inplace_exec(cpu_input, item[-1])\n npu_output = self.npu_op_inplace_exec(npu_input, item[-1])\n self.assertRtolEqual(cpu_output, npu_output)\n\n def cpu_op_exec(self, input, diagonal=0):\n output = torch.tril(input, diagonal)\n output = output.numpy()\n return output\n\n def npu_op_exec(self, input, diagonal=0):\n output = torch.tril(input, diagonal)\n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n\n def cpu_op_inplace_exec(self, input, diagonal=0):\n output = input.tril_(diagonal)\n output = output.numpy()\n return output\n\n def npu_op_inplace_exec(self, input, diagonal=0):\n output = input.tril_(diagonal)\n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n\ninstantiate_device_type_tests(TestTril, globals(), except_for=\"cpu\")\nif __name__ == \"__main__\":\n run_tests()\n",
"# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\nfrom common_utils import TestCase, run_tests\nimport time\nimport os\nimport copy\n# Need export DYNAMIC_COMPILE_ENABLE=1 and export EXPERIMENTAL_DYNAMIC_PARTITION=1\n\n\nclass TopkNet(torch.nn.Module):\n def __init__(self):\n super(TopkNet, self).__init__()\n\n def forward(self, x, k):\n if x.device == torch.device(\"cpu\") and x.dtype == torch.float16:\n x = x.to(torch.float32)\n out = torch.topk(x, k)\n if x.device == torch.device(\"cpu\") and x.dtype == torch.float16:\n out = out.to(torch.float16)\n return out\n\n\nclass TestShape(TestCase):\n def create_random_shape_tensor(self, item, min_value, max_value):\n npu_format = item[0]\n dtype = item[1]\n dim = item[2]\n shape = np.random.randint(5, 10, dim)\n input_tensor = np.random.uniform(min_value, max_value, shape).astype(dtype)\n cpu_input = torch.from_numpy(input_tensor)\n npu_input = torch.from_numpy(input_tensor).npu()\n if npu_format not in (-1, 0):\n npu_input = npu_input.npu_format_cast(npu_format)\n return cpu_input, npu_input\n\n def test_dynamic_threads_support_op(self, device):\n format_list = [0]\n dtype_list = [np.float16]\n dim_list = [3, 4]\n net = TopkNet()\n net_npu = copy.deepcopy(net).to(\"npu\")\n items = [\n [i, j, k] for i in format_list for j in dtype_list for k in dim_list\n ]\n for item in items:\n if item[0] == 29 and item[2] == 1:\n continue\n for _ in range(100):\n cpu_tensor, npu_tensor = self.create_random_shape_tensor(item, -10, 10)\n k = 5\n cpu_output,cpu_indice = net(cpu_tensor, k)\n npu_output,npu_indice = net_npu(npu_tensor, k)\n self.assertRtolEqual(cpu_output.to(npu_output.dtype).numpy(), npu_output.cpu().numpy())\n self.assertRtolEqual(cpu_indice.to(torch.int32).numpy(), npu_indice.to(torch.int32).cpu().numpy())\n \n\n\ninstantiate_device_type_tests(TestShape, globals(), except_for=\"cpu\")\nif __name__ == \"__main__\":\n run_tests()\n",
"# Copyright (c) 2020 Huawei Technologies Co., Ltd\n# Copyright (c) 2019, Facebook CORPORATION. \n# All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport torchvision\ntorch.npu.set_device(\"npu:0\")\n#dummy_input = torch.randn(10, 3, 224, 224, device='npu:0')\ndummy_input = torch.randn(10, 3, 224, 224)\ndummy_input = dummy_input.to(\"npu\")\nmodel = torchvision.models.resnet50(pretrained=False)\n\n# Providing input and output names sets the display names for values\n# within the model's graph. Setting these does not change the semantics\n# of the graph; it is only for readability.\n#\n# The inputs to the network consist of the flat list of inputs (i.e.\n# the values you would pass to the forward() method) followed by the\n# flat list of parameters. You can partially specify names, i.e. provide\n# a list here shorter than the number of inputs to the model, and we will\n# only set that subset of names, starting from the beginning.\ninput_names = [ \"actual_input_1\" ] + [ \"learned_%d\" % i for i in range(16) ]\noutput_names = [ \"output1\" ]\nmodel = model.to(\"npu\")\ntorch.onnx.export(model, dummy_input, \"resnet50.onnx\", verbose=True, input_names=input_names, output_names=output_names)\n\n\n\n# 有坑 会提示下载不下来 修改下resnet.py,手动下载下来,然后放到 D:/Pytorch/models 目录下。\ndef _resnet(arch, block, layers, pretrained, progress, **kwargs):\n model = ResNet(block, layers, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch], model_dir=\"D:/Pytorch/models\",\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n",
"# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport sys\nimport copy\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\nclass TestLogicalNot(TestCase):\n def cpu_op_exec(self, input):\n output = torch.logical_not(input)\n output = output.numpy()\n return output\n\n def npu_op_exec(self, input):\n output = torch.logical_not(input) \n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n\n def test_logical_not_common_shape_format(self, device):\n shape_format = [\n [[np.int8, -1, 1]],\n [[np.int8, -1, (64, 10)]],\n [[np.int8, -1, (256, 2048, 7, 7)]],\n [[np.int8, -1, (32, 1, 3, 3)]],\n [[np.int32, -1, (64, 10)]],\n [[np.int32, -1, (256, 2048, 7, 7)]],\n [[np.int32, -1, (32, 1, 3, 3)]],\n [[np.uint8, -1, (64, 10)]],\n [[np.uint8, -1, (256, 2048, 7, 7)]],\n [[np.uint8, -1, (32, 1, 3, 3)]],\n [[np.float16, -1, (64, 10)]],\n [[np.float16, -1, (256, 2048, 7, 7)]],\n [[np.float16, -1, (32, 1, 3, 3)]],\n [[np.float32, -1, (64, 10)]],\n [[np.float32, -1, (256, 2048, 7, 7)]],\n [[np.float32, -1, (32, 1, 3, 3)]],\n [[np.bool, -1, (64, 10)]],\n [[np.bool, -1, (256, 2048, 7, 7)]]\n ]\n for item in shape_format:\n cpu_input, npu_input = create_common_tensor(item[0], 1, 10)\n cpu_output = self.cpu_op_exec(cpu_input)\n npu_output = self.npu_op_exec(npu_input)\n self.assertRtolEqual(cpu_output, npu_output) \n\n\n\ninstantiate_device_type_tests(TestLogicalNot, globals(), except_for=\"cpu\")\nif __name__ == \"__main__\":\n run_tests()\n \n",
"# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport torch\nimport numpy as np\nimport sys\nimport copy\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\n\nclass TestTanh(TestCase):\n def cpu_op_exec(self, input1):\n output = torch.tanh(input1)\n output = output.numpy()\n return output\n\n def npu_op_exec(self, input1):\n input1 = input1.to(\"npu\")\n output = torch.tanh(input1)\n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n\n def test_tanh_common_shape_format(self, device):\n shape_format = [\n [[np.float32, -1, (4, 3, 3)], 1, 100],\n [[np.float32, -1, (7,5,5)], 21474836,21474837],\n [[np.float32, -1, (4, 44, 44)], 3450,34020],\n [[np.float32, -1, (65500,3,3)], -214748,-214746],\n [[np.float32, -1, (1024, 448, 448)], 200, 300],\n [[np.float32, -1, (128, 3, 5)], 0.3219780311757745 , 92 ],\n [[np.float32, -1, (8, 7, 7)], 0.4820305734500543 , 28],\n [[np.float32, -1, (15, 8, 8)],0.8563874665918477 , 98],\n [[np.float32, -1, (11, 6, 6)], 0.0694198357720135 , 50],\n [[np.float32, -1, (24, 24, 3)], -2,-2],\n [[np.float32, -1, (6, 10, 10)], 0.6447298684351989 , 95],\n [[np.float32, -1, (3, 9, 9)], 0.8723538084975545 , 85],\n [[np.float32, -1, (5, 5, 5)], 0.8283759153463854 , 71],\n [[np.float32, -1, (5, 1, 1)], 0.24718684227306953 , 25],\n [[np.float32, -1, (14, 7, 7)], 0.3989186243492233 , 7 ],\n [[np.float32, -1, (4, 10, 10)], 0.7866457165672994 , 5],\n [[np.float32, -1, (3, 7, 7)], 0.3793216987112159 , 39],\n [[np.float32, -1, (2, 8, 8)], 0.9662927186969077 , 5 ],\n [[np.float32, -1, (3, 7, 7)], 0.9956475043306917 , 28],\n [[np.float32, -1, (7, 10, 10)], 0.769565434387681 , 9],\n [[np.float32, -1, (54, 93, 3)],0.6447298684351989 , 95],\n [[np.float32, -1, (6, 3, 3)], 0.03133650248813469 , 37 ],\n [[np.float32, -1, (65500, 1, 1)], 95, 100],\n [[np.float32, -1, (6, 3, 10)], 0.03133650248813469 , 37],\n\n ]\n for item in shape_format:\n cpu_input1, npu_input1 = create_common_tensor(item[0], item[1],item[2])\n cpu_output = self.cpu_op_exec(cpu_input1)\n npu_output = self.npu_op_exec(npu_input1)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_tanh_float16_shape_format(self, device):\n def cpu_op_exec_fp16(input1):\n input1 = input1.to(torch.float32)\n output = torch.tanh(input1)\n output = output.numpy()\n output = output.astype(np.float16)\n return output\n\n shape_format = [\n [[np.float16, -1, (65500, 1)], 212,225],\n [[np.float16, -1, (1024,448,448)], 200, 300],\n [[np.float16, -1, (16,16)], -1000, -100],\n [[np.float16, -1, (4,1)], -1.1754943508e-38,-1.1754943508e-38],\n [[np.float16, -1, (7, 5, 5)], 21474836,21474837],\n [[np.float16, -1, (4, 44, 44)], 3450,34020],\n [[np.float16, -1, (65500, 3, 3)], -214748,-214746],\n [[np.float16, -1, (64, 4, 4)], -9.313225746154785e-10,9.313225746154785e-10],\n [[np.float16, -1, (128, 3, 5)], 
-0.000000000000000000000000000000000000011754943508,0.000000000000000000000000000000000000011754943508],\n [[np.float16, -1, (1, 1, 1)], 0.9283381566708346 , 16],\n [[np.float16, -1, (6, 3, 10)], 0.03133650248813469 , 37],\n [[np.float16, -1, (65500, 1, 1)], 95, 100 ],\n [[np.float16, -1, (13, 5, 5)], 0.9790231845699171 , 41],\n [[np.float16, -1, (5, 7, 7)], 0.7852605507867441 , 87 ],\n [[np.float16, -1, (13, 2, 2)],0.8758750778305631 , 82],\n [[np.float16, -1, (14, 6, 6)],0.6963691068720794 , 92],\n [[np.float16, -1, (5, 6, 6)], 0.7570129172808612 , 21],\n [[np.float16, -1, (1, 10, 10)], 0.990800730328874 , 86],\n [[np.float16, -1, (4, 5, 5)], 0.7349293532899402 , 35],\n [[np.float16, -1, (6, 4, 4)], 0.7349293532899402, 35],\n [[np.float16, -1, (5, 8, 8)],0.9583309378850908 , 60],\n\n ]\n\n for item in shape_format:\n cpu_input1, npu_input1 = create_common_tensor(item[0], item[1],item[2])\n cpu_output = cpu_op_exec_fp16(cpu_input1)\n npu_output = self.npu_op_exec(npu_input1)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_tanh_inplace_common_shape_format(self, device):\n def cpu_op_inplace_exec(input1):\n output = torch.tanh_(input1)\n output = output.numpy()\n return output\n\n def npu_op_inplace_exec(input1):\n input1 = input1.to(\"npu\")\n output = torch.tanh_(input1)\n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n\n shape_format = [\n [[np.float32, -1, (4, 3, 3)], 1, 100],\n [[np.float32, -1, (7,5,5)], 21474836,21474837],\n [[np.float32, -1, (4, 44, 44)], 3450,34020],\n [[np.float32, -1, (65500,3,3)], -214748,-214746]\n\n ]\n for item in shape_format:\n cpu_input1, npu_input1 = create_common_tensor(item[0], item[1],item[2])\n cpu_output = cpu_op_inplace_exec(cpu_input1)\n npu_output = npu_op_inplace_exec(npu_input1)\n self.assertRtolEqual(cpu_output, npu_output)\n\n\ninstantiate_device_type_tests(TestTanh, globals(), except_for='cpu')\n\nif __name__ == \"__main__\":\n run_tests()",
"# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport sys\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\nclass TestPdist(TestCase):\n def generate_data(self, min_d, max_d, shape, dtype):\n input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)\n npu_input1 = torch.from_numpy(input1)\n return npu_input1\n\n def cpu_op_exec_default(self, input1):\n stype = input1.dtype\n if stype == torch.float16:\n input1 = input1.float() \n output = torch.nn.functional.pdist(input1)\n if stype == torch.float16:\n output = output.half() \n output = output.numpy()\n return output\n \n def npu_op_exec_default(self, input1):\n input1 = input1.to(\"npu\")\n output = torch.nn.functional.pdist(input1)\n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n\n def cpu_op_exec(self, input1, p):\n stype = input1.dtype\n if stype == torch.float16:\n input1 = input1.float() \n output = torch.nn.functional.pdist(input1, p)\n if stype == torch.float16:\n output = output.half() \n output = output.numpy()\n return output\n\n def npu_op_exec(self, input1, p):\n input1 = input1.to(\"npu\")\n output = torch.nn.functional.pdist(input1, p)\n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n \n def test_pdist__5_360_float16(self, device):\n npu_input1 = self.generate_data(-2, 2, (5, 360), np.float16)\n cpu_output = self.cpu_op_exec_default(npu_input1)\n npu_output = self.npu_op_exec_default(npu_input1)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_pdist_10_3600_float32(self, device):\n npu_input1 =self.generate_data(-2, 2, (10, 3600), np.float32)\n cpu_output = self.cpu_op_exec_default(npu_input1)\n npu_output = self.npu_op_exec_default(npu_input1)\n self.assertRtolEqual(cpu_output, npu_output) \n\n def test_pdist_30_301_0_float16(self, device):\n npu_input1 = self.generate_data(-2, 2, (30, 301), np.float16)\n cpu_output = self.cpu_op_exec(npu_input1, 0.0)\n npu_output = self.npu_op_exec(npu_input1, 0.0)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_pdist_10_256_0_float32(self, device):\n npu_input1 = self.generate_data(-2, 2, (10, 256), np.float32)\n cpu_output = self.cpu_op_exec(npu_input1, 0.0)\n npu_output = self.npu_op_exec(npu_input1, 0.0)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_pdist_20_234_1_float32(self, device):\n npu_input1 = self.generate_data(-2, 2, (20, 234), np.float32)\n cpu_output = self.cpu_op_exec(npu_input1, 1.0)\n npu_output = self.npu_op_exec(npu_input1, 1.0)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_pdist_10_1600_1_float16(self, device):\n npu_input1 = self.generate_data(-2, 2, (10, 1600), np.float16)\n cpu_output = self.cpu_op_exec(npu_input1, 1.0)\n npu_output = self.npu_op_exec(npu_input1, 1.0)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_pdist_8_1025_2_float32(self, 
device):\n npu_input1 = self.generate_data(-2, 2, (8, 1025), np.float32)\n cpu_output = self.cpu_op_exec(npu_input1, 2.0)\n npu_output = self.npu_op_exec(npu_input1, 2.0)\n self.assertRtolEqual(cpu_output, npu_output) \n\n def test_pdist_9_10250_2_float16(self, device):\n npu_input1 = self.generate_data(-2, 2, (9, 10250), np.float16)\n cpu_output = self.cpu_op_exec(npu_input1, 2.0)\n npu_output = self.npu_op_exec(npu_input1, 2.0)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_pdist_100_7025_10_float32(self, device):\n npu_input1 = self.generate_data(-2, 2, (100, 7025), np.float32)\n cpu_output = self.cpu_op_exec(npu_input1, 10.0)\n npu_output = self.npu_op_exec(npu_input1, 10.0)\n self.assertRtolEqual(cpu_output, npu_output) \n\n def test_pdist_111_10025_10_float16(self, device):\n npu_input1 = self.generate_data(-2, 2, (111, 10025), np.float16)\n cpu_output = self.cpu_op_exec(npu_input1, 10.0)\n npu_output = self.npu_op_exec(npu_input1, 10.0)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_pdist__50_0_float32(self, device):\n npu_input1 = self.generate_data(-2, 2, (50, 0), np.float32)\n cpu_output = self.cpu_op_exec_default(npu_input1)\n npu_output = self.npu_op_exec_default(npu_input1)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_pdist__1_110_float32(self, device):\n npu_input1 = self.generate_data(-2, 2, (1, 110), np.float32)\n cpu_output = self.cpu_op_exec_default(npu_input1)\n npu_output = self.npu_op_exec_default(npu_input1)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_pdist__0_0_float32(self, device):\n npu_input1 = self.generate_data(-2, 2, (0, 0), np.float32)\n cpu_output = self.cpu_op_exec_default(npu_input1)\n npu_output = self.npu_op_exec_default(npu_input1)\n self.assertRtolEqual(cpu_output, npu_output) \n\ninstantiate_device_type_tests(TestPdist, globals(), except_for='cpu')\nif __name__ == \"__main__\":\n run_tests()",
"# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport torch\nimport numpy as np\nimport sys\nimport copy\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\n\nclass TestEye(TestCase):\n\n def cpu_op_exec(self, shapes):\n if shapes[0] == shapes[1]:\n output = torch.eye(shapes[0])\n else:\n output = torch.eye(shapes[0], shapes[1])\n output = output.numpy()\n return output\n \n def npu_op_exec(self, shapes):\n if shapes[0] == shapes[1]:\n output = torch.eye(shapes[0], device=\"npu\")\n else:\n output = torch.eye(shapes[0], shapes[1], device=\"npu\")\n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n\n def cpu_op_out_exec(self, shapes, out):\n if shapes[0] == shapes[1]:\n torch.eye(shapes[0], out=out)\n else:\n torch.eye(shapes[0], shapes[1], out=out)\n output = out.numpy()\n return output\n\n def npu_op_out_exec(self, shapes, out):\n out = out.to(\"npu\")\n if shapes[0] == shapes[1]:\n torch.eye(shapes[0], out=out)\n else:\n torch.eye(shapes[0], shapes[1], out=out)\n output = out.to(\"cpu\")\n output = output.numpy()\n return output \n\n def test_eye_int32_common_shape_format(self, device):\n shape_format = [\n [np.int32, 0, (3563, 4000)],\n [np.int32, 0, (1350, 1762)],\n ]\n for item in shape_format:\n cpu_output = self.cpu_op_exec(item[2])\n npu_output = self.npu_op_exec(item[2])\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_eye_float32_common_shape_format(self, device):\n shape_format = [\n [np.float32, 0, (5, 5)],\n [np.float32, 0, (15, 15)],\n [np.float32, 0, (3, 5)],\n [np.float32, 0, (40, 5)],\n [np.float32, 0, (16480, 25890)],\n [np.float32, 0, (1350, 1762)],\n [np.float32, 0, (352, 4000)],\n [np.float32, 0, (3563, 4000)],\n [np.float32, 0, (1, 51)],\n [np.float32, 0, (1, 173)],\n [np.float32, 0, (1, 45000)],\n [np.float32, 0, (1, 100000)],\n ]\n for item in shape_format:\n cpu_output = self.cpu_op_exec(item[2])\n npu_output = self.npu_op_exec(item[2])\n self.assertRtolEqual(cpu_output, npu_output)\n \n def test_eye_out_float32_common_shape_format(self, device):\n shape_format = [\n [np.float32, 0, (5, 5)],\n [np.float32, 0, (3, 5)],\n [np.float32, 0, (1350, 1762)],\n [np.float32, 0, (352, 4000)],\n [np.float32, 0, (3563, 4000)],\n [np.float32, 0, (40000, 40000)]\n ]\n for item in shape_format:\n cpu_input1, npu_input1 = create_common_tensor(item, 1, 100)\n cpu_output = self.cpu_op_out_exec(item[2], cpu_input1)\n npu_output = self.npu_op_out_exec(item[2], npu_input1)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_eye_out_float32_different_shape_format(self, device):\n shape_1 = [np.float32, 0, (4000, 400)]\n shape_2 = [np.float32, 0, (4000, 4000)]\n cpu_input1 = torch.randn(shape_1[2][0], shape_1[2][1], dtype=torch.float32)\n cpu_output = self.cpu_op_out_exec(shape_2[2], cpu_input1)\n npu_input1 = torch.randn(shape_2[2][0], shape_2[2][1], dtype=torch.float32)\n 
npu_output = self.npu_op_out_exec(shape_2[2], npu_input1)\n self.assertRtolEqual(cpu_output, npu_output)\n\n def test_eye_float16_shape_format(self, device):\n def cpu_op_exec_fp16(shapes):\n output = torch.eye(shapes[0], shapes[1])\n output = output.numpy()\n output = output.astype(np.float16)\n return output\n \n def npu_op_exec_fp16(shapes):\n output = torch.eye(shapes[0], shapes[1], device=\"npu\", dtype=torch.float16)\n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n\n shape_format = [\n [np.float16, 0, (5, 5)], \n [np.float16, 0, (3, 5)],\n [np.float32, 0, (1350, 1762)],\n [np.float32, 0, (352, 4000)],\n [np.float32, 0, (3563, 4000)]\n ]\n\n for item in shape_format:\n cpu_output = cpu_op_exec_fp16(item[2])\n npu_output = npu_op_exec_fp16(item[2])\n self.assertRtolEqual(cpu_output, npu_output) \n\ninstantiate_device_type_tests(TestEye, globals(), except_for='cpu')\nif __name__ == \"__main__\":\n run_tests()",
"# Copyright (c) 2020, Huawei Technologies.All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport sys\nimport copy\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\nclass TestAddmv(TestCase):\n\n def cpu_op_exec(self, a, b, c, alpha, beta):\n '''output = alpha * a @ b + beta * c'''\n output = torch.addmv(c, a, b, alpha=alpha, beta=beta)\n output = output.numpy()\n return output\n\n def npu_op_exec(self, a, b, c, alpha, beta):\n output = torch.addmv(c, a, b, alpha=alpha, beta=beta)\n output = output.to('cpu')\n output = output.numpy()\n return output\n \n def npu_op_exec_out(self,a, b, c, beta, alpha, input):\n torch.addmv(c, a, b, alpha=alpha, beta=beta, out=input)\n output = input.to(\"cpu\")\n output = output.numpy()\n return output;\n\n def test_addmv_fp16(self, device):\n shape_format = [\n #[[np.float16, 3, (4, 6)], [np.float16, 3, (6,)], [np.float16, 3, (4, )]],\n #[[np.float16, 3, (2112, 2976)], [np.float16, 3, (2976,)], [np.float16, 3, (2112, )]],\n [[np.float16, 3, (2, 3)], [np.float16, 3, (3,)], [np.float16, 3, (2, )]]\n \n ]\n for item in shape_format:\n\n input_a, npu_input_a = create_common_tensor(item[0], -2, 2)\n input_b, npu_input_b= create_common_tensor(item[1], -2, 2)\n input_c, npu_input_c= create_common_tensor(item[2], -2, 2)\n\n input_a = input_a.to(torch.float32)\n input_b = input_b.to(torch.float32)\n input_c = input_c.to(torch.float32)\n\n cpu_output = self.cpu_op_exec(input_a, input_b, input_c, 1, 1)\n npu_output = self.npu_op_exec(npu_input_a, npu_input_b, npu_input_c, 1, 1)\n\n cpu_output = cpu_output.astype(np.float16)\n self.assertRtolEqual(cpu_output, npu_output)\n \n def test_addmv_out_fp16(self, device):\n shape_format = [\n #[[np.float16, 3, (4, 6)], [np.float16, 3, (6,)], [np.float16, 3, (4, )], [np.float16, 3, (10,)]],\n #[[np.float16, 3, (2112, 2976)], [np.float16, 3, (2976,)], [np.float16, 3, (2112, )], [np.float16, 3, (4000, )]],\n [[np.float16, 3, (2, 3)], [np.float16, 3, (3,)], [np.float16, 3, (2, )], [np.float16, 3, (10,)]]\n \n ]\n for item in shape_format:\n\n input_a, npu_input_a = create_common_tensor(item[0], -2, 2)\n input_b, npu_input_b= create_common_tensor(item[1], -2, 2)\n input_c, npu_input_c= create_common_tensor(item[2], -2, 2)\n input, npu_input= create_common_tensor(item[3], -2, 2)\n\n input_a = input_a.to(torch.float32)\n input_b = input_b.to(torch.float32)\n input_c = input_c.to(torch.float32)\n\n cpu_output = self.cpu_op_exec(input_a, input_b, input_c, 1, 1)\n npu_output= self.npu_op_exec_out(npu_input_a, npu_input_b, npu_input_c, 1, 1, npu_input)\n cpu_output = cpu_output.astype(np.float16)\n \n self.assertRtolEqual(cpu_output, npu_output)\n \n def test_addmv_fp32(self, device):\n shape_format = [\n [[np.float32, 0, (2, 3)], [np.float32, 0, (3,)], [np.float32, 0, (2, )]],\n [[np.float32, 0, (3168, 320)], [np.float32, 0, (320,)], [np.float32, 0, (3168, )]],\n 
#[[np.float32, 0, (1696, 2560)], [np.float32, 0, (2560,)], [np.float32, 0, (1696, )]]\n ]\n for item in shape_format:\n\n input_a, npu_input_a = create_common_tensor(item[0], -2, 2)\n input_b, npu_input_b= create_common_tensor(item[1], -2, 2)\n input_c, npu_input_c= create_common_tensor(item[2], -2, 2)\n\n\n cpu_output = self.cpu_op_exec(input_a, input_b, input_c, 1, 1)\n npu_output = self.npu_op_exec(npu_input_a, npu_input_b, npu_input_c, 1, 1)\n\n self.assertRtolEqual(cpu_output, npu_output) \n \n\ninstantiate_device_type_tests(TestAddmv, globals(), except_for=\"cpu\")\nif __name__ == \"__main__\":\n run_tests()\n\n\n"
] | [
[
"numpy.random.uniform",
"torch.norm_except_dim",
"torch.from_numpy",
"numpy.random.randint"
],
[
"torch.ones_like",
"torch._C._nn.upsample_bicubic2d"
],
[
"torch.npu.set_device",
"torch.nn.Conv1d"
],
[
"torch.randn",
"torch.onnx.export"
],
[
"torch.tril"
],
[
"torch.from_numpy",
"numpy.random.uniform",
"torch.topk",
"torch.device",
"numpy.random.randint"
],
[
"torch.onnx.export",
"torch.randn",
"torch.npu.set_device"
],
[
"torch.logical_not"
],
[
"torch.tanh",
"torch.tanh_"
],
[
"numpy.random.uniform",
"torch.from_numpy",
"torch.nn.functional.pdist"
],
[
"torch.randn",
"torch.eye"
],
[
"torch.addmv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NeuroDataDesign/kdg | [
"510e27973779a59cc310e1eb7497bc29699f5e4e"
] | [
"benchmarks/spiral_exp.py"
] | [
"#%%\nfrom kdg.utils import generate_spirals, generate_gaussian_parity\nfrom kdg import kdf,kdn\nfrom keras import layers\nimport keras\n# %%\nnetwork = keras.Sequential()\n#network.add(layers.Dense(2, activation=\"relu\", input_shape=(2)))\nnetwork.add(layers.Dense(3, activation='relu', input_shape=(2,)))\nnetwork.add(layers.Dense(3, activation='relu'))\nnetwork.add(layers.Dense(units=2, activation = 'softmax'))\n\n#%%\nn_estimators = 200\nX, y = generate_gaussian_parity(sample, cluster_std=0.5)#generate_spirals(5000, noise=.8, n_class=2)\n\nmodel_kdf = kdn(network,fit_kwargs = {\n \"epochs\": 100,\n \"batch_size\": 32,\n \"verbose\": False\n }) #kdf(k=1/2.5, kwargs={'n_estimators':n_estimators})\nmodel_kdf.fit(X, y)\n# %%\nimport seaborn as sns\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\n\np = np.arange(-2,2,step=0.006)\nq = np.arange(-2,2,step=0.006)\nxx, yy = np.meshgrid(p,q)\ntmp = np.ones(xx.shape)\n\ngrid_samples = np.concatenate(\n (\n xx.reshape(-1,1),\n yy.reshape(-1,1)\n ),\n axis=1\n ) \n \nproba_kdf = model_kdf.predict_proba(grid_samples)\nproba_rf = model_kdf.network.predict_proba(grid_samples)\n\ndata = pd.DataFrame(data={'x':grid_samples[:,0], 'y':grid_samples[:,1], 'z':proba_kdf[:,0]})\ndata = data.pivot(index='x', columns='y', values='z')\n\ndata_rf = pd.DataFrame(data={'x':grid_samples[:,0], 'y':grid_samples[:,1], 'z':proba_rf[:,0]})\ndata_rf = data_rf.pivot(index='x', columns='y', values='z')\n#%%\nsns.set_context(\"talk\")\nfig, ax = plt.subplots(2,2, figsize=(16,16))\ncmap= sns.diverging_palette(240, 10, n=9)\nax1 = sns.heatmap(data, ax=ax[0][0], vmin=0, vmax=1,cmap=cmap)\nax1.set_xticklabels(['-2','' , '', '', '', '', '','','','','0','','','','','','','','','2'])\nax1.set_yticklabels(['-2','' , '', '', '', '', '','','','','','','0','','','','','','','','','','','','','2'])\n#ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','' , '', '', '', '', '', '','','','','', '0','','' , '', '', '', '', '','','','','','','','','','','','','1'])\nax[0][0].set_title('KDF',fontsize=24)\n#ax[0][0].invert_yaxis()\n\n\nax1 = sns.heatmap(data_rf, ax=ax[0][1], vmin=0, vmax=1,cmap=cmap)\nax1.set_xticklabels(['-2','' , '', '', '', '', '','','','','0','','','','','','','','','2'])\nax1.set_yticklabels(['-2','' , '', '', '', '', '','','','','','','0','','','','','','','','','','','','','2'])\n#ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','' , '', '', '', '', '', '','','','','', '0','','' , '', '', '', '', '','','','','','','','','','','','','1'])\nax[0][1].set_title('RF',fontsize=24)\n#ax[0][1].invert_yaxis()\n\ncolors = sns.color_palette(\"Dark2\", n_colors=2)\nclr = [colors[i] for i in y]\nax[1][0].scatter(X[:, 0], X[:, 1], c=clr, s=50)\n\nplt.savefig('plots/spiral_pdf_kdn.pdf')\nplt.show()\n# %%\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.ones",
"matplotlib.pyplot.savefig",
"numpy.meshgrid",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ChangHoon-Sung/streamlit | [
"d153db37d97faada87bf88972886cda5a624f8c8",
"83e0b80d2fa13e29e83d092a9fc4d946460bbf73",
"d153db37d97faada87bf88972886cda5a624f8c8"
] | [
"lib/tests/streamlit/help_test.py",
"lib/tests/streamlit/legacy_vega_lite_test.py",
"lib/tests/streamlit/arrow_vega_lite_test.py"
] | [
"# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"st.help unit test.\"\"\"\n\nfrom tests import testutil\nimport streamlit as st\nimport numpy as np\n\n\nclass StHelpTest(testutil.DeltaGeneratorTestCase):\n \"\"\"Test st.help.\"\"\"\n\n def test_basic_func_with_doc(self):\n \"\"\"Test basic function with docstring.\"\"\"\n\n def my_func(some_param, another_param=123):\n \"\"\"This is the doc\"\"\"\n pass\n\n st.help(my_func)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"my_func\", ds.name)\n self.assertEqual(\"help_test\", ds.module)\n self.assertEqual(\"<class 'function'>\", ds.type)\n self.assertEqual(\"(some_param, another_param=123)\", ds.signature)\n self.assertEqual(\"This is the doc\", ds.doc_string)\n\n def test_basic_func_without_doc(self):\n \"\"\"Test basic function without docstring.\"\"\"\n\n def my_func(some_param, another_param=123):\n pass\n\n st.help(my_func)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"my_func\", ds.name)\n self.assertEqual(\"help_test\", ds.module)\n self.assertEqual(\"<class 'function'>\", ds.type)\n self.assertEqual(\"(some_param, another_param=123)\", ds.signature)\n self.assertEqual(\"No docs available.\", ds.doc_string)\n\n def test_deltagenerator_func(self):\n \"\"\"Test Streamlit DeltaGenerator function.\"\"\"\n\n st.help(st.audio)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"audio\", ds.name)\n self.assertEqual(\"streamlit\", ds.module)\n self.assertEqual(\"<class 'method'>\", ds.type)\n self.assertEqual(\"(data, format='audio/wav', start_time=0)\", ds.signature)\n self.assertTrue(ds.doc_string.startswith(\"Display an audio player\"))\n\n def test_unwrapped_deltagenerator_func(self):\n \"\"\"Test unwrapped Streamlit DeltaGenerator function.\"\"\"\n st.help(st.dataframe)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"dataframe\", ds.name)\n self.assertEqual(\"streamlit\", ds.module)\n self.assertEqual(\"<class 'method'>\", ds.type)\n self.assertEqual(\"(data=None, width=None, height=None)\", ds.signature)\n self.assertTrue(ds.doc_string.startswith(\"Display a dataframe\"))\n\n def test_st_cache(self):\n \"\"\"Test st.cache function (since it's from the 'caching' module).\"\"\"\n st.help(st.cache)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"cache\", ds.name)\n self.assertEqual(\"streamlit\", ds.module)\n self.assertEqual(\"<class 'function'>\", ds.type)\n self.assertEqual(\n ds.signature,\n (\n \"(func=None, \"\n \"persist=False, \"\n \"allow_output_mutation=False, \"\n \"show_spinner=True, \"\n \"suppress_st_warning=False, \"\n \"hash_funcs=None, \"\n \"max_entries=None, \"\n \"ttl=None)\"\n ),\n )\n self.assertTrue(ds.doc_string.startswith(\"Function decorator to\"))\n\n def test_st_echo(self):\n \"\"\"Test st.echo function (since it's from __init__).\"\"\"\n st.help(st.echo)\n\n ds = 
self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"echo\", ds.name)\n self.assertEqual(\"streamlit\", ds.module)\n self.assertEqual(\"<class 'function'>\", ds.type)\n self.assertEqual(\"(code_location='above')\", ds.signature)\n self.assertTrue(ds.doc_string.startswith(\"Use in a `with` block\"))\n\n def test_builtin_func(self):\n \"\"\"Test a built-in function.\"\"\"\n st.help(dir)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"dir\", ds.name)\n self.assertEqual(\"builtins\", ds.module)\n self.assertEqual(\"<class 'builtin_function_or_method'>\", ds.type)\n self.assertEqual(\"\", ds.signature)\n self.assertTrue(len(ds.doc_string) > 0)\n\n def test_builtin_obj(self):\n \"\"\"Test a built-in function.\"\"\"\n st.help(123)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"\", ds.name)\n self.assertEqual(\"\", ds.module)\n self.assertEqual(\"<class 'int'>\", ds.type)\n self.assertEqual(\"\", ds.signature)\n self.assertTrue(len(ds.doc_string) > 0)\n\n def test_doc_defined_for_type(self):\n \"\"\"When the docs are defined for the type on an object, but not\n the object, we expect the docs of the type. This is the case\n of ndarray generated as follow.\n \"\"\"\n\n array = np.arange(1)\n\n st.help(array)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(\"\", ds.name)\n self.assertTrue(\"ndarray\" in ds.doc_string)\n\n def test_doc_type_is_type(self):\n \"\"\"When the type of the object is type and no docs are defined,\n we expect docs are not available\"\"\"\n\n class MyClass(object):\n pass\n\n st.help(MyClass)\n\n ds = self.get_delta_from_queue().new_element.doc_string\n self.assertEqual(type(MyClass), type)\n self.assertEqual(\"MyClass\", ds.name)\n self.assertEqual(\"help_test\", ds.module)\n self.assertEqual(\"No docs available.\", ds.doc_string)\n",
"# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"st._legacy_vega_lite unit test.\"\"\"\n\nimport pandas as pd\nimport pyarrow as pa\nimport json\n\nfrom tests import testutil\nimport streamlit as st\nfrom streamlit.errors import StreamlitAPIException\n\n\ndf1 = pd.DataFrame([[\"A\", \"B\", \"C\", \"D\"], [28, 55, 43, 91]], index=[\"a\", \"b\"]).T\n\ndf2 = pd.DataFrame([[\"E\", \"F\", \"G\", \"H\"], [11, 12, 13, 14]], index=[\"a\", \"b\"]).T\n\nautosize_spec = {\"autosize\": {\"type\": \"fit\", \"contains\": \"padding\"}}\n\n\nclass LegacyVegaLiteTest(testutil.DeltaGeneratorTestCase):\n \"\"\"Test ability to marshall vega_lite_chart protos.\"\"\"\n\n def test_no_args(self):\n \"\"\"Test that an error is raised when called with no args.\"\"\"\n with self.assertRaises(ValueError):\n st._legacy_vega_lite_chart()\n\n def test_none_args(self):\n \"\"\"Test that an error is raised when called with args set to None.\"\"\"\n with self.assertRaises(ValueError):\n st._legacy_vega_lite_chart(None, None)\n\n def test_pyarrow_table_data(self):\n \"\"\"Test that an error is raised when called with `pyarrow.Table` data.\"\"\"\n with self.assertRaises(StreamlitAPIException):\n st._legacy_vega_lite_chart(pa.Table.from_pandas(df1), {\"mark\": \"rect\"})\n\n def test_spec_but_no_data(self):\n \"\"\"Test that it can be called with only data set to None.\"\"\"\n st._legacy_vega_lite_chart(None, {\"mark\": \"rect\"})\n\n c = self.get_delta_from_queue().new_element.vega_lite_chart\n self.assertEqual(c.HasField(\"data\"), False)\n self.assertDictEqual(\n json.loads(c.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n def test_spec_in_arg1(self):\n \"\"\"Test that it can be called spec as the 1st arg.\"\"\"\n st._legacy_vega_lite_chart({\"mark\": \"rect\"})\n\n c = self.get_delta_from_queue().new_element.vega_lite_chart\n self.assertEqual(c.HasField(\"data\"), False)\n self.assertDictEqual(\n json.loads(c.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n def test_data_in_spec(self):\n \"\"\"Test passing data=df inside the spec.\"\"\"\n st._legacy_vega_lite_chart({\"mark\": \"rect\", \"data\": df1})\n\n c = self.get_delta_from_queue().new_element.vega_lite_chart\n self.assertEqual(c.HasField(\"data\"), True)\n self.assertDictEqual(\n json.loads(c.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n def test_data_values_in_spec(self):\n \"\"\"Test passing data={values: df} inside the spec.\"\"\"\n st._legacy_vega_lite_chart({\"mark\": \"rect\", \"data\": {\"values\": df1}})\n\n c = self.get_delta_from_queue().new_element.vega_lite_chart\n self.assertEqual(c.HasField(\"data\"), True)\n self.assertDictEqual(\n json.loads(c.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n def test_datasets_in_spec(self):\n \"\"\"Test passing datasets={foo: df} inside the spec.\"\"\"\n st._legacy_vega_lite_chart({\"mark\": \"rect\", \"datasets\": {\"foo\": df1}})\n\n c = self.get_delta_from_queue().new_element.vega_lite_chart\n 
self.assertEqual(c.HasField(\"data\"), False)\n self.assertDictEqual(\n json.loads(c.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n def test_datasets_correctly_in_spec(self):\n \"\"\"Test passing datasets={foo: df}, data={name: 'foo'} in the spec.\"\"\"\n st._legacy_vega_lite_chart(\n {\"mark\": \"rect\", \"datasets\": {\"foo\": df1}, \"data\": {\"name\": \"foo\"}}\n )\n\n c = self.get_delta_from_queue().new_element.vega_lite_chart\n self.assertEqual(c.HasField(\"data\"), False)\n self.assertDictEqual(\n json.loads(c.spec),\n merge_dicts(autosize_spec, {\"data\": {\"name\": \"foo\"}, \"mark\": \"rect\"}),\n )\n\n def test_dict_unflatten(self):\n \"\"\"Test passing a spec as keywords.\"\"\"\n st._legacy_vega_lite_chart(df1, x=\"foo\", boink_boop=100, baz={\"boz\": \"booz\"})\n\n c = self.get_delta_from_queue().new_element.vega_lite_chart\n self.assertEqual(c.HasField(\"data\"), True)\n self.assertDictEqual(\n json.loads(c.spec),\n merge_dicts(\n autosize_spec,\n {\n \"baz\": {\"boz\": \"booz\"},\n \"boink\": {\"boop\": 100},\n \"encoding\": {\"x\": \"foo\"},\n },\n ),\n )\n\n def test_use_container_width(self):\n \"\"\"Test that use_container_width=True autosets to full width.\"\"\"\n st._legacy_vega_lite_chart(df1, {\"mark\": \"rect\"}, use_container_width=True)\n\n c = self.get_delta_from_queue().new_element.vega_lite_chart\n self.assertDictEqual(\n json.loads(c.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n self.assertEqual(c.use_container_width, True)\n\n def test_width_inside_spec(self):\n \"\"\"Test the width up to Vega-Lite.\"\"\"\n st._legacy_vega_lite_chart(df1, {\"mark\": \"rect\", \"width\": 200})\n\n c = self.get_delta_from_queue().new_element.vega_lite_chart\n self.assertDictEqual(\n json.loads(c.spec),\n merge_dicts(autosize_spec, {\"mark\": \"rect\", \"width\": 200}),\n )\n\n\ndef merge_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z\n",
"# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport pandas as pd\nimport pyarrow as pa\nfrom tests import testutil\n\nimport streamlit as st\nfrom streamlit.type_util import bytes_to_data_frame, pyarrow_table_to_bytes\n\ndf1 = pd.DataFrame([[\"A\", \"B\", \"C\", \"D\"], [28, 55, 43, 91]], index=[\"a\", \"b\"]).T\ndf2 = pd.DataFrame([[\"E\", \"F\", \"G\", \"H\"], [11, 12, 13, 14]], index=[\"a\", \"b\"]).T\nautosize_spec = {\"autosize\": {\"type\": \"fit\", \"contains\": \"padding\"}}\n\n\nclass ArrowVegaLiteTest(testutil.DeltaGeneratorTestCase):\n \"\"\"Test ability to marshall arrow_vega_lite_chart protos.\"\"\"\n\n def test_no_args(self):\n \"\"\"Test that an error is raised when called with no args.\"\"\"\n with self.assertRaises(ValueError):\n st._arrow_vega_lite_chart()\n\n def test_none_args(self):\n \"\"\"Test that an error is raised when called with args set to None.\"\"\"\n with self.assertRaises(ValueError):\n st._arrow_vega_lite_chart(None, None)\n\n def test_spec_but_no_data(self):\n \"\"\"Test that it can be called with only data set to None.\"\"\"\n st._arrow_vega_lite_chart(None, {\"mark\": \"rect\"})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n self.assertEqual(proto.HasField(\"data\"), False)\n self.assertDictEqual(\n json.loads(proto.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n def test_spec_in_arg1(self):\n \"\"\"Test that it can be called with spec as the 1st arg.\"\"\"\n st._arrow_vega_lite_chart({\"mark\": \"rect\"})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n self.assertEqual(proto.HasField(\"data\"), False)\n self.assertDictEqual(\n json.loads(proto.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n def test_data_in_spec(self):\n \"\"\"Test passing data=df inside the spec.\"\"\"\n st._arrow_vega_lite_chart({\"mark\": \"rect\", \"data\": df1})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n pd.testing.assert_frame_equal(\n bytes_to_data_frame(proto.data.data), df1, check_dtype=False\n )\n self.assertDictEqual(\n json.loads(proto.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n def test_data_values_in_spec(self):\n \"\"\"Test passing data={values: df} inside the spec.\"\"\"\n st._arrow_vega_lite_chart({\"mark\": \"rect\", \"data\": {\"values\": df1}})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n pd.testing.assert_frame_equal(\n bytes_to_data_frame(proto.data.data), df1, check_dtype=False\n )\n self.assertDictEqual(\n json.loads(proto.spec),\n merge_dicts(autosize_spec, {\"mark\": \"rect\"}),\n )\n\n def test_datasets_in_spec(self):\n \"\"\"Test passing datasets={foo: df} inside the spec.\"\"\"\n st._arrow_vega_lite_chart({\"mark\": \"rect\", \"datasets\": {\"foo\": df1}})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n self.assertEqual(proto.HasField(\"data\"), False)\n self.assertDictEqual(\n json.loads(proto.spec), 
merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n def test_datasets_correctly_in_spec(self):\n \"\"\"Test passing datasets={foo: df}, data={name: 'foo'} in the spec.\"\"\"\n st._arrow_vega_lite_chart(\n {\"mark\": \"rect\", \"datasets\": {\"foo\": df1}, \"data\": {\"name\": \"foo\"}}\n )\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n self.assertEqual(proto.HasField(\"data\"), False)\n self.assertDictEqual(\n json.loads(proto.spec),\n merge_dicts(autosize_spec, {\"data\": {\"name\": \"foo\"}, \"mark\": \"rect\"}),\n )\n\n def test_dict_unflatten(self):\n \"\"\"Test passing a spec as keywords.\"\"\"\n st._arrow_vega_lite_chart(df1, x=\"foo\", boink_boop=100, baz={\"boz\": \"booz\"})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n pd.testing.assert_frame_equal(\n bytes_to_data_frame(proto.data.data), df1, check_dtype=False\n )\n self.assertDictEqual(\n json.loads(proto.spec),\n merge_dicts(\n autosize_spec,\n {\n \"baz\": {\"boz\": \"booz\"},\n \"boink\": {\"boop\": 100},\n \"encoding\": {\"x\": \"foo\"},\n },\n ),\n )\n\n def test_pyarrow_table_data(self):\n \"\"\"Test that you can pass pyarrow.Table as data.\"\"\"\n table = pa.Table.from_pandas(df1)\n st._arrow_vega_lite_chart(table, {\"mark\": \"rect\"})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n\n self.assertEqual(proto.HasField(\"data\"), True)\n self.assertEqual(proto.data.data, pyarrow_table_to_bytes(table))\n\n def test_arrow_add_rows(self):\n \"\"\"Test that you can call _arrow_add_rows on arrow_vega_lite_chart (with data).\"\"\"\n chart = st._arrow_vega_lite_chart(df1, {\"mark\": \"rect\"})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n self.assertEqual(proto.HasField(\"data\"), True)\n\n chart._arrow_add_rows(df2)\n\n proto = self.get_delta_from_queue().arrow_add_rows\n pd.testing.assert_frame_equal(\n bytes_to_data_frame(proto.data.data), df2, check_dtype=False\n )\n\n def test_no_args_add_rows(self):\n \"\"\"Test that you can call _arrow_add_rows on a arrow_vega_lite_chart (without data).\"\"\"\n chart = st._arrow_vega_lite_chart({\"mark\": \"rect\"})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n self.assertEqual(proto.HasField(\"data\"), False)\n\n chart._arrow_add_rows(df1)\n\n proto = self.get_delta_from_queue().arrow_add_rows\n pd.testing.assert_frame_equal(\n bytes_to_data_frame(proto.data.data), df1, check_dtype=False\n )\n\n def test_use_container_width(self):\n \"\"\"Test that use_container_width=True autosets to full width.\"\"\"\n st._arrow_vega_lite_chart(df1, {\"mark\": \"rect\"}, use_container_width=True)\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n self.assertDictEqual(\n json.loads(proto.spec), merge_dicts(autosize_spec, {\"mark\": \"rect\"})\n )\n\n self.assertEqual(proto.use_container_width, True)\n\n def test_width_inside_spec(self):\n \"\"\"Test that Vega-Lite sets the width.\"\"\"\n st._arrow_vega_lite_chart(df1, {\"mark\": \"rect\", \"width\": 200})\n\n proto = self.get_delta_from_queue().new_element.arrow_vega_lite_chart\n self.assertDictEqual(\n json.loads(proto.spec),\n merge_dicts(autosize_spec, {\"mark\": \"rect\", \"width\": 200}),\n )\n\n\ndef merge_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z\n"
] | [
[
"numpy.arange"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
RobbiePerrone320/onnx-mlir | [
"2127e9177f4cbc28b7e860b0876af936ddae80bc"
] | [
"utils/gen_onnx_mlir.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict, OrderedDict\nfrom io import StringIO\nimport io\nimport os\nimport sys\nimport datetime\nimport argparse\n\nimport numpy as np # type: ignore\n\nfrom onnx import defs, FunctionProto, helper, OperatorStatus\nfrom onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN\nfrom onnx.backend.test.case import collect_snippets\nfrom onnx.backend.sample.ops import collect_sample_implementations\nfrom typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple\n\nimport pprint\nimport onnx\n\n# change this variable only when upgrading the ONNX support within ONNX-MLIR\ncurrent_onnx_version = \"1.9.0\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dry-run-onnx-ops\",\n help=\"Output ONNXOps.td.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--dry-run-op-build-table\",\n help=\"Output OpBuildTable.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--check-operation-version\",\n help=\"check whether the imported onnx package has new operation or \"\n \" newer version of operation compared with version stored in version_dicts\",\n action=\"store_true\",\n default=False)\n\nargs = parser.parse_args()\n\ncheck_operation_version = args.check_operation_version\ncurrent_onnx_version = \"1.11.0\"\n# check the version of onnx package being used\nif (not check_operation_version) and current_onnx_version != onnx.__version__ :\n print(\"version of expected onnx is {}, \".format(current_onnx_version)+\n \"while onnx package being used is {}\".format(onnx.__version__))\n quit()\n\n# Record the version of each operation that is treated as the current version.\n# To check whether the onnx package being used has newer version operation,\n# run this script with --check-operation-version flag.\n# Update this dictionary when a newer version is implemented\n# TODO: how to keep the old version\nversion_dict = {\n 'Abs': [13],\n 'Acos': [7],\n 'Acosh': [9],\n 'Adagrad': [1],\n 'Adam': [1],\n 'Add': [13],\n 'And': [7],\n 'ArgMax': [13],\n 'ArgMin': [13],\n 'ArrayFeatureExtractor': [1],\n 'Asin': [7],\n 'Asinh': [9],\n 'Atan': [7],\n 'Atanh': [9],\n 'AveragePool': [11],\n 'BatchNormalization': [9],\n 'Binarizer': [1],\n 'BitShift': [11],\n 'Cast': [13],\n 'CastMap': [1],\n 'CategoryMapper': [1],\n 'Ceil': [13],\n 'Celu': [12],\n 'Clip': [13, 12, 11, 6],\n 'Compress': [11],\n 'Concat': [13],\n 'ConcatFromSequence': [11],\n 'Constant': [13],\n 'ConstantOfShape': [9],\n 'Conv': [11],\n 'ConvInteger': [10],\n 'ConvTranspose': [11],\n 'Cos': [7],\n 'Cosh': [9],\n 'CumSum': [11],\n 'DepthToSpace': [13],\n 'DequantizeLinear': [13],\n 'Det': [11],\n 'DictVectorizer': [1],\n 'Div': [13],\n 'Dropout': [13],\n 'DynamicQuantizeLinear': [11],\n 'Einsum': [12],\n 'Elu': [6],\n 'Equal': [13],\n 'Erf': [13],\n 'Exp': [13],\n 'Expand': [13],\n 'EyeLike': [9],\n 'FeatureVectorizer': [1],\n 'Flatten': [13],\n 'Floor': [13],\n 'GRU': [7],\n 'Gather': [13],\n 'GatherElements': [13],\n 'GatherND': [13],\n 'Gemm': [13],\n 'GlobalAveragePool': [1],\n 'GlobalLpPool': [2],\n 'GlobalMaxPool': [1],\n 'Gradient': [1],\n 'Greater': [13],\n 'GreaterOrEqual': [12],\n 'HardSigmoid': [6],\n 'Hardmax': [13],\n 'Identity': [13],\n 'If': [13],\n 'Imputer': [1],\n 'InstanceNormalization': [6],\n 'IsInf': [10],\n 'IsNaN': [13],\n 'LRN': [13],\n 
'LSTM': [7],\n 'LabelEncoder': [2],\n 'LeakyRelu': [6],\n 'Less': [13],\n 'LessOrEqual': [12],\n 'LinearClassifier': [1],\n 'LinearRegressor': [1],\n 'Log': [13],\n 'LogSoftmax': [13],\n 'Loop': [13],\n 'LpNormalization': [1],\n 'LpPool': [11],\n 'MatMul': [13],\n 'MatMulInteger': [10],\n 'Max': [13],\n 'MaxPool': [12],\n 'MaxRoiPool': [1],\n 'MaxUnpool': [11],\n 'Mean': [13],\n 'MeanVarianceNormalization': [13],\n 'Min': [13],\n 'Mod': [13],\n 'Momentum': [1],\n 'Mul': [13],\n 'Multinomial': [7],\n 'Neg': [13],\n 'NegativeLogLikelihoodLoss': [13],\n 'NonMaxSuppression': [11],\n 'NonZero': [13],\n 'Normalizer': [1],\n 'Not': [1],\n 'OneHot': [11],\n 'OneHotEncoder': [1],\n 'Or': [7],\n 'PRelu': [9],\n 'Pad': [13, 11, 2],\n 'Pow': [13],\n 'QLinearConv': [10],\n 'QLinearMatMul': [10],\n 'QuantizeLinear': [13],\n 'RNN': [7],\n 'RandomNormal': [1],\n 'RandomNormalLike': [1],\n 'RandomUniform': [1],\n 'RandomUniformLike': [1],\n 'Range': [11],\n 'Reciprocal': [13],\n 'ReduceL1': [13],\n 'ReduceL2': [13],\n 'ReduceLogSum': [13],\n 'ReduceLogSumExp': [13],\n 'ReduceMax': [13],\n 'ReduceMean': [13],\n 'ReduceMin': [13],\n 'ReduceProd': [13],\n 'ReduceSum': [13, 11],\n 'ReduceSumSquare': [13],\n 'Relu': [13],\n 'Reshape': [13],\n 'Resize': [13, 11, 10],\n 'ReverseSequence': [10],\n 'RoiAlign': [10],\n 'Round': [11],\n 'SVMClassifier': [1],\n 'SVMRegressor': [1],\n 'Scaler': [1],\n 'Scan': [11],\n 'Scatter': [11],\n 'ScatterElements': [13],\n 'ScatterND': [13],\n 'Selu': [6],\n 'SequenceAt': [11],\n 'SequenceConstruct': [11],\n 'SequenceEmpty': [11],\n 'SequenceErase': [11],\n 'SequenceInsert': [11],\n 'SequenceLength': [11],\n 'Shape': [13], # When going to 15, rewrite rules must also be changed for start/end\n 'Shrink': [9],\n 'Sigmoid': [13],\n 'Sign': [13],\n 'Sin': [7],\n 'Sinh': [9],\n 'Size': [13],\n 'Slice': [13],\n 'Softmax': [13],\n 'SoftmaxCrossEntropyLoss': [13],\n 'Softplus': [1],\n 'Softsign': [1],\n 'SpaceToDepth': [13],\n 'Split': [13, 11],\n 'SplitToSequence': [11],\n 'Sqrt': [13],\n 'Squeeze': [13, 11],\n 'StringNormalizer': [10],\n 'Sub': [13],\n 'Sum': [13],\n 'Tan': [7],\n 'Tanh': [13],\n 'TfIdfVectorizer': [9],\n 'ThresholdedRelu': [10],\n 'Tile': [13],\n 'TopK': [11],\n 'Transpose': [13],\n 'TreeEnsembleClassifier': [1],\n 'TreeEnsembleRegressor': [1],\n 'Unique': [11],\n 'Unsqueeze': [13, 11],\n 'Upsample': [10, 9, 7],\n 'Where': [9],\n 'Xor': [7],\n 'ZipMap': [1]}\n\n# Manual specification of attribute type.\nspecial_attr_types = dict([(\"Cast.to\", 'type')])\n\n# Special operation importing handlers.\nspecial_op_handler = dict([\n (\"BatchNormalization\", \"ImportNodeBatchNormalization\"),\n (\"Dropout\", \"ImportNodeDropout\"),\n (\"Cast\", \"ImportNodeCast\"),\n (\"MaxPool\", \"ImportNodeMaxPool\"),\n (\"Pad\", \"ImportNodePad\"),\n (\"Slice\", \"ImportNodeSlice\"),\n (\"Softmax\", \"ImportNodeSoftmax\"),\n #(\"Transpose\", \"ImportNodeTranspose\")\n])\n\n# Operations supporting canonicalization (alphabetical order).\nOpsWithCanonicalizer = [\n 'Add',\n 'Cast',\n 'Constant',\n 'Dropout',\n 'GlobalAveragePool',\n 'GlobalMaxPool',\n 'Identity',\n 'Reshape',\n 'Shape',\n 'Size',\n 'Squeeze',\n 'SqueezeV11',\n 'Transpose',\n 'Unsqueeze',\n 'UnsqueezeV11',\n]\n\n# Operations with custom verifiers (alphabetical order).\nOpsWithVerifier = [\n 'AveragePool',\n 'ArgMax',\n 'ArgMin',\n 'CategoryMapper', \n 'Compress',\n 'Concat',\n 'ConstantOfShape',\n 'Conv',\n 'DepthToSpace',\n 'Expand',\n 'Flatten',\n 'Hardmax',\n 'InstanceNormalization',\n 'Mod',\n 'NonMaxSuppression',\n 
'OneHot',\n \"PRelu\",\n 'OneHotEncoder',\n 'Pow',\n 'RandomNormalLike',\n 'ReverseSequence',\n \"RoiAlign\",\n \"ScatterElements\",\n 'ScatterND',\n 'SequenceEmpty',\n 'SequenceInsert',\n 'SpaceToDepth',\n 'TopK',\n]\n\nOpsWithHelpers = {\n \"Loop\": \"\"\"\n mlir::Operation::result_range v_final();\n mlir::Operation::result_range scan_outputs();\n \"\"\",\n \"Scan\": \"\"\"\n mlir::Operation::operand_range v_initial();\n mlir::Operation::result_range v_final();\n mlir::Operation::operand_range scan_inputs();\n mlir::Operation::result_range scan_outputs();\n \"\"\"\n}\n# Interface for special handling of type inference\n# The common code are put into get_type_inference_func\nOpsWithResultTypeInference = {\n \"Constant\":\n '''if (auto attr = valueAttr()) {\n resultTypes.push_back(attr.getType());\n } else if (auto attr = sparse_valueAttr()) {\n resultTypes.push_back(attr.getType());\n }''',\n \"Cast\":\n '''// ae auto builder = mlir::OpBuilder(getContext());\n resultTypes.push_back(mlir::UnrankedTensorType::get(to()));''',\n \"ConstantOfShape\":\n '''if (auto attr = valueAttr()) {\n resultTypes.push_back(mlir::UnrankedTensorType::get(\n attr.getType().cast<ShapedType>().getElementType()));\n } else {\n resultTypes.push_back(mlir::UnrankedTensorType::get(\n FloatType::getF32(getContext())));\n }'''\n}\n\n# Add an Op in this list if the Op needs result type deduction which is required\n# when writing declarative rewriting rules. Deduced type is always\n# an UnrankedTensorType whose element type is the same as the first operand's\n# element type.\n#\n# Currently, there are only two build methods generated:\n# - one with operands and attributes having a separate parameter, and\n# - one with operands and attributes having aggregated parameters.\ncustom_builder_unranked_ops_list = [\n 'Abs',\n 'Exp',\n 'Identity',\n 'Neg',\n 'Pad',\n 'ReduceLogSum',\n 'ReduceMax',\n 'ReduceSum',\n 'ReduceSumSquare',\n 'ReduceSumV11',\n 'Softmax',\n 'Split',\n 'Sqrt',\n 'SqueezeV11',\n 'UnsqueezeV11',\n]\n# Custom builder op list for operations with broadcast; we can deduce the right\n# output type, no need to leave it undef as in the above list.\n# Ops must have two operands, not one, not three... 
And there shall be two.\n# TODO: handle variadic ops omitted here: Max, Min, Min, Sum.\ncustom_builder_broadcast_to_same_type_ops_list = [\n 'Add',\n 'And',\n 'Div',\n 'Mul',\n 'Or',\n 'Pow',\n 'Sub',\n 'Xor',\n]\ncustom_builder_broadcast_to_bool_ops_list = [\n 'Equal',\n 'Greater',\n 'Less',\n]\ncustom_builder_broadcast_ops_list = custom_builder_broadcast_to_same_type_ops_list + \\\n custom_builder_broadcast_to_bool_ops_list\n# union of both\ncustom_builder_ops_list = custom_builder_unranked_ops_list + custom_builder_broadcast_ops_list\n\n#a dictionary to add any special definition for an operation\ncustom_definition_misc = dict([ ('Constant',\n ''' let builders = [\n OpBuilder<(ins \"Attribute\":$sparse_value, \"Attribute\":$value), [{\n if (value) {\n auto tensorType = value.getType();\n build($_builder, $_state, tensorType, sparse_value, value,\n FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n } else {\n auto tensorType = sparse_value.getType();\n build($_builder, $_state, tensorType, sparse_value, value,\n FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n }\n }]>\n ];'''),\n ('Cast',\n ''' let builders = [\n OpBuilder<(ins \"Value\":$input, \"TypeAttr\":$to), [{\n auto resultType = mlir::UnrankedTensorType::get(to.getValue());\n build($_builder, $_state, resultType, input, to);\n }] >\n ];'''\n )])\n\nonnx_types = (\n 'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',\n 'float', 'double', 'complex64', 'complex128', 'string'\n)\ntblgen_types = ('AnyI1', 'AnyI8', 'AnyI16', 'AnyI32', 'AnyI64',\n 'BF16', 'F16', 'F32', 'F64', 'Complex<F32>', 'Complex<F64>',\n 'StringType'\n)\n\nMAX_NUM_TYPES=20\n\ndef should_render_domain(domain): # type: (Text) -> bool\n return True\n\n\ndef display_attr_type(v): # type: (OpSchema.AttrType) -> Text\n assert isinstance(v, OpSchema.AttrType)\n s = Text(v)\n s = s[s.rfind('.') + 1:].lower()\n if s[-1] == 's':\n s = 'list of ' + s\n return s\n\n\ndef get_unique_output_name(schema, name):\n for input in schema.inputs:\n if input.name == name:\n return 'out_' + name\n return name\n\n\ndef onnx_attr_type_to_mlir_attr_type(t):\n onnx_attr_type = Text(t)\n onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()\n\n if onnx_attr_type == 'int':\n mlir_attr_type = 'SI64Attr'\n elif onnx_attr_type == 'float':\n mlir_attr_type = 'F32Attr'\n elif onnx_attr_type == 'ints':\n mlir_attr_type = 'I64ArrayAttr'\n elif onnx_attr_type == 'floats':\n mlir_attr_type = 'F32ArrayAttr'\n elif onnx_attr_type == \"string\":\n mlir_attr_type = 'StrAttr'\n elif onnx_attr_type == \"strings\":\n mlir_attr_type = 'StrArrayAttr'\n elif onnx_attr_type == 'type':\n mlir_attr_type = 'TypeAttr'\n else:\n mlir_attr_type = 'AnyAttr'\n #TODO: tensor and sparse tensor\n return mlir_attr_type\n\n\n#TODO: any better way to do this.\ndef tblgen_attr_type_to_cpp_type(t):\n if 'I64Attr' in t:\n cpp_type = 'IntegerAttr'\n elif 'F32Attr' in t:\n cpp_type = 'FloatAttr'\n elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:\n cpp_type = 'ArrayAttr'\n elif 'StrAttr' in t:\n cpp_type = 'StringAttr'\n elif 'strings' in t:\n cpp_type = 'ArrayAttr'\n else:\n cpp_type = 'Attribute'\n return cpp_type\n\n\ndef tblgen_operand_type_to_cpp_type(op_type):\n if op_type.startswith('Variadic'):\n mytype = 'ValueRange'\n else:\n mytype = 'Value'\n return mytype\n\n\ndef np_type_to_tblgen_attr_type(tstr):\n index = -1\n for i in range(len(onnx_types)):\n if onnx_types[i] in tstr:\n index = i\n break\n if index == -1:\n return 
None\n else:\n return tblgen_types[i]\n\ndef get_tblgen_type_index(type_str):\n return tblgen_types.index(type_str)\n\n#the possible data structures are tensor, map and seq(tensor())\ndef get_data_structure_element(allowed_type_str):\n structure_list = ['tensor', 'seq', 'map']\n for structure in structure_list:\n if allowed_type_str.startswith(structure) :\n element = allowed_type_str.replace(\n structure+'(', '', 1).replace(')', '', 1)\n return (structure, element)\n return (None, None)\n\ndef get_allowed_elem_types(schema, input):\n #allowed_types_str = None\n # return allowed_types_str\n # TODO: enable type constraints.\n if input.typeStr :\n tstr = input.typeStr\n structure, element = get_data_structure_element(tstr);\n # In case the type is directly specified\n if structure and element :\n t = np_type_to_tblgen_attr_type(element)\n if t == None :\n return allowed_structure, None\n else :\n return structure, [t]\n else :\n return None\n if schema.type_constraints:\n for type_constraint in schema.type_constraints:\n if type_constraint.type_param_str != tstr :\n continue\n allowed_type_list=[]\n allowedTypes = type_constraint.allowed_type_strs\n allowed_structure = None\n for allowedType in allowedTypes:\n structure, element = get_data_structure_element(allowedType);\n if structure == None or element == None:\n return None, None\n\n if allowed_structure != None and allowed_structure != structure :\n return None, None\n allowed_structure = structure\n t = np_type_to_tblgen_attr_type(element)\n if t == None :\n return allowed_structure, None\n if not t in allowed_type_list :\n allowed_tyoe_list = allowed_type_list.append(t)\n\n return allowed_structure,allowed_type_list\n\n return None, None\n\n\ndef inc_indent(indent=None):\n return \"\" if indent is None else indent + ' ' * 2\n\n\ndef dec_indent(indent):\n return indent[:-2]\n\n\ndef join_args(args):\n return \", \".join(args)\n\ndef get_operands_or_results(schema, type_str_dict, is_input):\n value_list = schema.inputs if is_input else schema.outputs\n if not value_list:\n return OrderedDict()\n\n def any_type_of(types):\n assert isinstance(types, list)\n if len(types) == 1:\n return types[0]\n else:\n return \"AnyTypeOf<[{}]>\".format(\", \".join(types))\n\n name_to_types = OrderedDict()\n for i, value in enumerate(value_list):\n types = get_onnx_mlir_types(schema, type_str_dict, value)\n\n '''\n structure, elem_types = get_allowed_elem_types(schema, type_str_dict, value)\n\n if structure == 'tensor' :\n if elem_types is None:\n types = [\"AnyMemRef\", \"AnyTensor\"]\n else:\n elem_types_str = ','.join(elem_types)\n types = [\"TensorOf<[{}]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types_str), types))\n elif structure == 'seq' :\n # Seq is not supported yet.\n # Use of TensorOf<[AnyTensor]> as a placeholder for tablegen.\n # When the Operation is used, warning/error will be generated at runtime.\n if elem_types is None:\n types = [\"AnyMemRef\", \"TensorOf<[AnyTensor]>\"]\n else:\n elem_types_str = ','.join(elem_types)\n types = [\"TensorOf<[TensorOf<[{}]>]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types_str), types))\n elif structure == 'map' :\n # Map is not supported yet.\n # Use of TupleOf as a placeholder for tablegen.\n # When the Operation is used, warning/error will be generated at runtime.\n if elem_types is None:\n types = [\"AnyMemRef\", \"TupleOf<[AnyTensor]>\"]\n else:\n elem_types_str = ','.join(elem_types)\n types = [\"TupleOf<[TensorOf<[{}]>]>\", \"MemRefOf<[{}]>\"]\n 
types = list(map(lambda x: x.format(elem_types_str), types))\n else:\n types = [\"AnyMemRef\", \"AnyTensor\"]\n '''\n\n if OpSchema.FormalParameterOption.Optional == value.option:\n types.append(\"NoneType\")\n elif OpSchema.FormalParameterOption.Variadic == value.option:\n if value.isHomogeneous:\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n else:\n #TODO handle(variadic, heterogeneous) \"\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n sys.stderr.write(\"warning: (variadic, heterogeneous) for \" + schema.name +\n ' ' + value.name + \"\\n\")\n\n # Since output name can coincide with that of an input, we explicitly\n # append a suffix \"_out\" to such names for disambiguation.\n if is_input:\n value_name = value.name\n else:\n value_name = get_unique_output_name(schema, value.name)\n\n name_to_types[value_name] = any_type_of(types)\n return name_to_types\n\n\ndef get_attrs(schema):\n def get_attr_type_optional(attr_type):\n return 'OptionalAttr<{}>'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type))\n\n def get_attr_type_with_default(attr_type, attr_default):\n if attr_type == OpSchema.AttrType.STRING:\n return 'DefaultValuedStrAttr<{}, \"{}\">'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)\n else:\n return 'DefaultValuedAttr<{}, \"{}\">'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)\n\n if not schema.attributes:\n return OrderedDict()\n\n name_to_type = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n if attr.type == OpSchema.AttrType.GRAPH:\n continue\n\n qualified_attr_name = \"{}.{}\".format(schema.name, attr.name)\n if qualified_attr_name in special_attr_types:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n special_attr_types[qualified_attr_name])\n # option holds either required or default value\n elif attr.required:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n attr.type)\n elif attr.default_value.name:\n\n def format_value(value): # type: (Any) -> Text\n if isinstance(value, float):\n formatted = str(np.round(value, 5))\n # use default formatting, unless too long.\n if (len(formatted) > 10):\n formatted = str(\"({:e})\".format(value))\n return formatted\n elif isinstance(\n value,\n (bytes, bytearray)) and sys.version_info[0] == 3:\n return str(value.decode('utf-8'))\n return str(value)\n\n default_value = helper.get_attribute_value(attr.default_value)\n if isinstance(default_value, list):\n default_value = [format_value(val) for val in default_value]\n default_value_str = '{}'.format(default_value)\n default_value_str = default_value_str.replace('[', '{', 1)\n default_value_str = default_value_str.replace(']', '}', 1)\n if Text(attr.type) == \"AttrType.STRINGS\":\n default_value_str = default_value_str.replace(\"'\", '\\\\\"')\n else:\n default_value_str = default_value_str.replace(\"'\", '')\n else:\n default_value = format_value(default_value)\n default_value_str = default_value\n\n name_to_type[attr.name] = get_attr_type_with_default(\n attr.type, default_value_str)\n else:\n name_to_type[attr.name] = get_attr_type_optional(attr.type)\n return name_to_type\n\ndef get_numberof_list(mylist):\n expected_num = len(mylist)\n for element in mylist :\n if OpSchema.FormalParameterOption.Variadic == element.option:\n expected_num = -1\n return expected_num\n\ndef get_output_type_mapping(schema):\n mapping=[]\n for output in schema.outputs :\n #if only one type is allowed, just set that\n structure, allowed_elem_types = get_allowed_elem_types(schema, output)\n if 
allowed_elem_types != None and len(allowed_elem_types) == 1 :\n mapping.append(str(get_tblgen_type_index(allowed_elem_types[0])))\n continue\n\n #map the type string\n if output.typeStr :\n tstr = output.typeStr\n found = False\n for i, input in enumerate(schema.inputs):\n if input.typeStr and input.typeStr == tstr:\n mapping.append(str(i+MAX_NUM_TYPES))\n found = True\n break\n if found:\n continue\n\n #unknown output type\n mapping.append(str(-1))\n\n return mapping\n\ndef get_numberof_inout(s, indent, schema):\n expected_num_operands = get_numberof_list(schema.inputs)\n indent = inc_indent(indent)\n s += indent + \"static int getNumberOfOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(expected_num_operands)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n expected_num_results = get_numberof_list(schema.outputs)\n s += indent + \"static int getNumberOfResults() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(expected_num_results)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n s += indent + \"static std::vector<int> getTypeMap() {\\n\"\n mapping = get_output_type_mapping(schema)\n indent = inc_indent(indent)\n s += indent + \"return {\" + \",\".join(mapping) + \"};\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n return s\n\n\ndef get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):\n cpp_name_to_idx_literal = \"{\" + \", \".join([\n \"{{\\\"{}\\\", {}}}\".format(*name_to_idx)\n for name_to_idx in const_operands_name_to_idx\n ]) + \"}\"\n\n #s += indent + \"let extraClassDeclaration = [{\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::map<std::string, size_t> promotableConstOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(cpp_name_to_idx_literal)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n #indent = dec_indent(indent)\n #s += indent + \"}];\\n\"\n\n return s\n\ndef get_type_inference_func(s, indent, type_inference_code):\n indent = inc_indent(indent)\n\n s += indent + \"std::vector<mlir::Type> resultTypeInference() {\" + \"\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::vector<mlir::Type> resultTypes;\" + \"\\n\"\n\n s += indent + type_inference_code + '\\n'\n\n s += indent + \"return resultTypes;\" + \"\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\" + \"\\n\"\n\n indent = dec_indent(indent)\n return s\n\ndef parse_type_str(allowedType):\n # AnyI may be used for uint because the onnx_mlir is not generating uint output\n # This will be fixed later and UI will be replace AnyI\n onnx_to_mlir_type_dict = { '(': '<[',\n ')': ']>',\n 'tensor' : 'TensorOf',\n 'seq' : 'SeqOf',\n 'map' : 'TupleOf',\n 'bool': 'I1',\n #'uint8' : 'AnyI8',\n #uint16' : 'AnyI16',\n #uint32' : 'AnyI32',\n #uint64' : 'AnyI64',\n 'uint8' : 'UI8',\n 'uint16' : 'UI16',\n 'uint32' : 'UI32',\n 'uint64' : 'UI64',\n 'int8' : 'I8',\n 'int16' : 'I16',\n 'int32' : 'I32',\n 'int64' : 'I64',\n 'float16' : 'F16',\n 'bfloat16' : 'BF16',\n 'float' : 'F32',\n 'double' : 'F64',\n 'unkown' : 'BF16',\n 'complex64' : 'Complex<F32>',\n 'complex128' : 'Complex<F64>',\n 'string' : 'StringType'}\n\n # Apply substitutions in decreasing order of key-length, so that float16 is replaced\n # before float, and uint16 is replaced before int16, etc.\n mapping = list(onnx_to_mlir_type_dict.items())\n mapping.sort(key=lambda pair:len(pair[0]), reverse=True)\n for key, item in mapping:\n allowedType = allowedType.replace(key, item)\n return 
allowedType\n\ndef parse_a_type_constraint(constraint):\n allowedTypes = constraint.allowed_type_strs\n mlirTypes = []\n for allowedType in allowedTypes:\n mlirType = parse_type_str(allowedType)\n mlirTypes.append(mlirType)\n # Remove redundant and sort.\n # However onnx keeps a consitently meaningful order\n # There is no redundancy as long as each onnx type is mapped uniquely\n # mlirTypes = sorted(list(set(mlirTypes)))\n\n # MemRef is always needed\n mlirTypes.append(\"AnyMemRef\")\n return mlirTypes\n\ndef parse_type_constraints(schema):\n type_str_dict = dict()\n for type_constraint in schema.type_constraints:\n type_str_dict[type_constraint.type_param_str] = parse_a_type_constraint(type_constraint)\n return type_str_dict\n\ndef get_onnx_mlir_types(schema, type_str_dict, input):\n if input.typeStr :\n if not input.typeStr in type_str_dict :\n # some arguments use type description directly\n # instead of constraint\n return [parse_type_str(input.typeStr), \"AnyMemRef\"]\n else :\n return type_str_dict[input.typeStr]\n else :\n print('No typeStr ', schema.name)\n return []\n\ndef gen_op_def(schema, with_version = False):\n indent = inc_indent()\n if with_version :\n opName = schema.name+\"V\"+str(schema.since_version)\n else :\n opName = schema.name\n s = 'def ONNX{0}Op:ONNX_Op<\"{0}\",\\n'.format(opName)\n\n regions = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n if attr.type == OpSchema.AttrType.GRAPH:\n if attr.required:\n regions[attr.name] = \"SizedRegion<1>\"\n else:\n regions[attr.name] = \"AnyRegion\"\n\n # Generate decl for op traits.\n traits = [\"NoSideEffect\"]\n # OpsWithShapeInference:\n # Now the ShapeInference traits are added to all operation\n # Dummy implementations are added to ONNXOps.cpp\n # Error will be report if these operations are encountered at runtime\n traits.append(\"DeclareOpInterfaceMethods<ShapeInferenceOpInterface>\")\n if opName in OpsWithResultTypeInference.keys():\n traits.append(\"OpInterface<\\\"ResultTypeInferenceOpInterface\\\">\")\n if len(regions):\n traits.append(\"OpInterface<\\\"HasOnnxSubgraphOpInterface\\\">\")\n s += inc_indent(indent) + '[{}]> {{\\n'.format(join_args(traits))\n\n # Generate decl for canonicalizer.\n indent = inc_indent(indent)\n if opName in OpsWithCanonicalizer:\n s += indent + 'let hasCanonicalizer = 1;\\n'\n\n # Generate decl for summary.\n s += indent + 'let summary = \"ONNX {} operation\";\\n'.format(schema.name)\n\n # Generate description.\n s += indent + 'let description = [{\\n'\n if schema.doc:\n lines = schema.doc.lstrip().splitlines()\n for line in lines:\n escaped_line = line.replace('\"', '\\\\\"')\\\n .replace('}]', '\\\\}\\\\]')\n s += indent + '\"{}\"\\n'.format(escaped_line)\n s += indent + '}];\\n'\n\n # handle the type constraint for input and output\n # parse type constraint into onnx-mlir type string list\n type_str_dict = parse_type_constraints(schema)\n\n # Generate ins (consisting of operands and attributes).\n ins = get_operands_or_results(schema, type_str_dict, is_input=True)\n ins.update(get_attrs(schema))\n\n ins_strs = [\"{1}:${0}\".format(*i) for i in ins.items()]\n s += indent + 'let arguments = (ins {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(ins_strs))\n\n # Generate outs (operation results).\n outs = get_operands_or_results(schema, type_str_dict, is_input=False)\n outs_strs = [\"{1}:${0}\".format(*i) for i in outs.items()]\n s += indent + 'let results = (outs {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(outs_strs))\n\n regions_strs = 
[\"{1}:${0}\".format(*i) for i in regions.items()]\n\n if len(regions):\n s += indent + 'let regions = (region {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(regions_strs))\n\n # custom_builder_broadcast_ops_list\n\n # add custom builders\n # use element type of the first operand to construct an UnrankedTensorType for the output.\n if opName in custom_builder_ops_list:\n if len(ins) == 0:\n raise RuntimeWarning(\n \"warning: not generate custom build methods for \" +\n schema.name + \" since it does not have operands.\")\n else:\n s += indent + 'let builders = [\\n'\n # Custom builders with operands and attributes having a separate parameter.\n # E.g. OpBuilder<(ins \"Value\":$X, \"Value\":$Y, \"Attribute\":$A), [{}]>\n indent = inc_indent(indent)\n s += indent + 'OpBuilder<(ins '\n operands_dict = get_operands_or_results(schema, type_str_dict, is_input=True)\n attrs_dict = get_attrs(schema)\n s += ', '.join('\"{}\":${}'.format(tblgen_operand_type_to_cpp_type(ty),\n name) for name, ty in operands_dict.items())\n if operands_dict and attrs_dict:\n s += ', '\n s += ', '.join('\"{}\":${}'.format(tblgen_attr_type_to_cpp_type(ty),\n name) for name, ty in attrs_dict.items())\n s += '), [{\\n'\n indent = inc_indent(indent)\n\n # Get output type from first operand's type.\n first_operand_name = list(ins.items())[0][0]\n build_type_name = ''\n bool_type = \"$_builder.getI1Type()\"\n oTy = \"nullptr\"\n if opName in custom_builder_broadcast_to_bool_ops_list:\n oTy = bool_type\n if opName in custom_builder_broadcast_ops_list:\n second_operand_name = list(ins.items())[1][0]\n s += indent + 'auto lhsTy = {}.getType();\\n'. \\\n format(first_operand_name)\n s += indent + 'auto rhsTy = {}.getType();\\n'. \\\n format(second_operand_name)\n s += indent + 'auto oTy = {};\\n'.format(oTy)\n s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy, oTy);\\n'\n s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\\n';\n s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\\n';\n if opName in custom_builder_broadcast_to_bool_ops_list:\n s += indent + indent + 'elementType = {};\\n'.format(bool_type)\n else:\n s += indent + indent + 'elementType = {}'.format(first_operand_name) + \\\n '.getType().cast<ShapedType>().getElementType();\\n';\n s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\\n'\n s += indent + '}\\n';\n build_type_name = 'elementType'\n else:\n s += indent + 'auto elementType = {}'.format(first_operand_name) + \\\n '.getType().cast<ShapedType>().getElementType();\\n'\n build_type_name = 'UnrankedTensorType::get(elementType)'\n s += indent + 'build($_builder, $_state, {}'.format(build_type_name)\n for name, _ in ins.items():\n s += ', ' + name\n s += ');\\n'\n indent = dec_indent(indent)\n s += indent + '}]>,\\n'\n\n # Custom builders with all operands and attributes having aggregate parameters.\n # E.g. 
OpBuilder<(ins \"ValueRange operands,\n # ArrayRef<NamedAttribute> attributes\", [{}]>'\n s += indent + 'OpBuilder<(ins ' + \\\n '\"ValueRange\":$operands, \"ArrayRef<NamedAttribute>\":$attributes), [{\\n'\n indent = inc_indent(indent)\n if opName in custom_builder_broadcast_ops_list:\n s += indent + 'auto lhsTy = operands[0].getType();\\n'\n s += indent + 'auto rhsTy = operands[1].getType();\\n'\n s += indent + 'auto oTy = {};\\n'.format(oTy)\n s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy, oTy);\\n'\n s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\\n';\n s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\\n';\n if opName in custom_builder_broadcast_to_bool_ops_list:\n s += indent + indent + 'elementType = {};\\n'.format(bool_type)\n else:\n s += indent + indent + 'elementType = operands[0]' + \\\n '.getType().cast<ShapedType>().getElementType();\\n';\n s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\\n'\n s += indent + '}\\n';\n else:\n s += indent + 'auto elementType = operands[0].getType().' + \\\n 'cast<ShapedType>().getElementType();\\n'\n s += indent + 'std::vector<mlir::Type> outputTypes;\\n'\n s += indent + 'outputTypes.emplace_back({});\\n'.format(build_type_name)\n s += indent + 'build($_builder, $_state, outputTypes, operands, attributes);\\n'\n indent = dec_indent(indent)\n s += indent + '}]>'\n\n s += '\\n' + indent + '];\\n'\n\n # Generate extraClassDeclaration.\n s += indent + \"let extraClassDeclaration = [{\\n\"\n #indent = inc_indent(indent)\n\n # Generate input/output number.\n s = get_numberof_inout(s, indent, schema)\n\n if opName in OpsWithResultTypeInference:\n s = get_type_inference_func(\n s, indent, OpsWithResultTypeInference[opName])\n\n if opName in OpsWithHelpers:\n s += OpsWithHelpers[opName]\n\n if len(regions):\n s += indent + \"int64_t getSubgraphRegionIdx(const std::string& name) {\\n\"\n indent = inc_indent(indent)\n for idx, region_name in enumerate(regions.keys()):\n s += indent + \"if (name == \\\"{}\\\") return {};\\n\".format(region_name, idx)\n s += indent + \"llvm_unreachable(\\\"region with the specified name does not exist\\\");\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n s += indent + '}];\\n'\n\n if ( opName in custom_definition_misc) :\n s += custom_definition_misc[opName] + '\\n'\n\n # Generate decl for verifier.\n if opName in OpsWithVerifier:\n s += indent + 'let hasVerifier = 1;\\n'\n\n s += '}\\n\\n'\n return s\n\n\ndef gen_op_versions(file) :\n indent = inc_indent()\n s = \"\"\n for key, item in version_dict.items() :\n s += indent + 'op_dialect_version_map_[\"' + key +'\"] = '\n s += \"{\" + \"{}\".format(\", \".join(str(x) for x in item)) + \"};\\n\"\n file.write(s)\n\n\"\"\"\nspecial cases:\n* Split: attr split default value: sizeof(output1) namely 1\n* Conv: attr dilations default value is {num_dim of first input - 2, 1}\n* Conv: attr kernel_shape type is ints\n* Transpose: attr perm default value is {} empty int list\n\"\"\"\n\n\ndef gen_op_importer(schema, file, with_version=False):\n indent = inc_indent()\n if with_version :\n opName = schema.name + \"V\"+str(schema.since_version)\n else :\n opName = schema.name\n s = indent + 'import_handler_map_[\"' + opName +'\"] = \\n '\n\n expected_num_operands = len(schema.inputs)\n expected_num_results = len(schema.outputs)\n for input in schema.inputs:\n if OpSchema.FormalParameterOption.Variadic == input.option:\n expected_num_operands = -1\n for output in 
schema.outputs:\n if OpSchema.FormalParameterOption.Variadic == output.option:\n expected_num_results = -1\n\n # Only support special op handler for the op without version.\n if with_version:\n handler_func = \"buildOperation<mlir::ONNX{}Op>\".format(opName)\n else:\n handler_func = special_op_handler.get(\n schema.name, \"buildOperation<mlir::ONNX{}Op>\".format(opName))\n\n # Special handlers currently require expected num operands/results to be specified.\n # TODO: remove special handlers.\n args = [\"node\"]\n \"\"\"\n if expected_num_operands != -1 or expected_num_results != -1 or \"buildOperation\" not in handler_func:\n args.append(\n \"/* expected_num_operands = */ {}\".format(expected_num_operands))\n args.append(\n '/* expected_num_results = */ {}'.format(expected_num_results))\n \"\"\"\n s += inc_indent(indent) + '&onnx_mlir::detail::FrontendGenImpl::'\n s += handler_func+';\\n'\n\n file.write(s)\n\n\ndef build_operator_schemas():\n # domain -> support level -> name -> [schema]\n index = defaultdict(lambda: defaultdict(lambda: defaultdict(\n list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]\n for schema in defs.get_all_schemas_with_history():\n index[schema.domain][int(\n schema.support_level)][schema.name].append(schema)\n\n # Preprocess the Operator Schemas\n # [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]\n operator_schemas = list(\n ) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]\n exsting_ops = set() # type: Set[Text]\n for domain, _supportmap in sorted(index.items()):\n if not should_render_domain(domain):\n continue\n processed_supportmap = list()\n for _support, _namemap in sorted(_supportmap.items()):\n processed_namemap = list()\n for n, unsorted_versions in sorted(_namemap.items()):\n versions = sorted(unsorted_versions,\n key=lambda s: s.since_version)\n schema = versions[-1]\n if schema.name in exsting_ops:\n continue\n\n if check_operation_version :\n # Generate operation of the latest version of your onnx.\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n\n # Add checks against version_dict\n if schema.name not in version_dict :\n print(\"Check-operation-version: Operation {} is new with version {}\"\n .format(schema.name, schema.since_version))\n elif schema.since_version > version_dict[schema.name][0]:\n print(\"Check-operation-version: Operation {}\"\n .format(schema.name)+\n \" has a newer version {} over old version {}\"\n .format(schema.since_version, version_dict[schema.name][0]))\n else:\n # Generate operation according to the version in version_dict.\n if schema.name not in version_dict :\n continue\n found = False\n vcounter = 0\n for schema in reversed(versions):\n # Check the version number against the version_dict\n specified_version = version_dict[schema.name][vcounter]\n if schema.since_version == specified_version:\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n found = True\n vcounter += 1\n if len(version_dict[schema.name]) == vcounter :\n break\n if not found:\n print(\"Your onnx installation may be too old. 
\"\n \"The desired version for operation {} is not found.\".format(\n schema.name))\n sys.exit()\n processed_supportmap.append((_support, processed_namemap))\n operator_schemas.append((domain, processed_supportmap))\n return operator_schemas\n\n\ndef main(args): # type: (Type[Args]) -> None\n curr_utc_time = datetime.datetime.now(\n datetime.timezone.utc).strftime(\"%m/%d/%Y, %H:%M:%S\")\n autogen_warning = (\n '//********************************************************\\n'\n '// Do not modify this file directly.\\n'\n '// This file is automatically generated via script.\\n'\n '// Details can be found in docs/ImportONNXDefs.md .\\n'\n '//********************************************************\\n\\n')\n autogen_warning = autogen_warning.format(curr_utc_time)\n\n op_def = args.op_def\n op_def.write(autogen_warning)\n\n op_importer = args.op_importer\n op_importer.write(autogen_warning)\n gen_op_versions(op_importer)\n\n new_version_dict = dict()\n for domain, supportmap in build_operator_schemas():\n for _, namemap in supportmap:\n # Generate Op with version number if not the latest version\n previous_name = \"\"\n for op_type, schema, versions in namemap:\n if check_operation_version:\n new_version_dict[schema.name] = [schema.since_version]\n else:\n with_version = previous_name == schema.name\n gen_op_importer(schema, op_importer, with_version)\n r = gen_op_def(schema, with_version)\n op_def.write(r)\n previous_name = schema.name\n if check_operation_version :\n for key in version_dict :\n if not key in new_version_dict :\n print(\"op {} is not in the version\".format(key))\n # Assume the top version will be upgreaded to the latest version\n # The existing extra version (from index 1) will be kept\n for x in version_dict[key][1:] :\n new_version_dict[key].append(x)\n pprint.pprint(new_version_dict)\n\nif __name__ == '__main__':\n curr_dir = os.path.dirname(os.path.realpath(__file__))\n\n class Args(object):\n dry_run = args.dry_run_onnx_ops or args.dry_run_op_build_table\n\n # If either dry_run_onnx_ops or dry_run_op_build_table is true, then treat both of them\n # as true. Otherwise, one of them runs as a dry-run and one of them runs as a real run\n # creating unnecessary artifacts in the wrong locations in the build tree.\n if dry_run:\n op_def = StringIO()\n op_importer = StringIO()\n else:\n op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')\n op_def = io.open(op_def_file_path, 'w', newline='')\n op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')\n op_importer = io.open(op_importer_file_path, 'w', newline='')\n main(Args)\n\n # This is based on diff.py from llvm-project (llvm\\utils\\lit\\lit\\builtin_commands\\diff.py).\n # On Windows, by default, stdout uses \\r\\n for newlines, however, all the files we compare against\n # use \\n. This piece of code forces the windows stdout to use \\n for newlines.\n if sys.platform == \"win32\":\n if hasattr(sys.stdout, 'buffer'):\n # python 3\n sys.stdout = io.TextIOWrapper(sys.stdout.buffer, newline='\\n')\n else:\n # python 2.7\n import msvcrt\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n\n # Only output the generated values for the specifically requested dry run.\n if args.dry_run_onnx_ops:\n sys.stdout.write(Args.op_def.getvalue())\n if args.dry_run_op_build_table:\n sys.stdout.write(Args.op_importer.getvalue())\n"
] | [
[
"numpy.round"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YLFF/2004P_Pytorch-Networks | [
"2d84fe1d904b17d0c55aa2a7a7dba82dea3dae05"
] | [
"3DCNN.py"
] | [
"# --------------------------------------------------------------------------- #\n# ResNet, CVPR2016 bestpaper, https://arxiv.org/abs/1512.03385\n# pytorch implementation by Haiyang Liu ([email protected])\n# --------------------------------------------------------------------------- #\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom config import cfg\nfrom utils import load_cfg,model_complexity\n\n\n__all__ = ['ResNet18','ResNet34','ResNet50','ResNet101','ResNet152']\n\n\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n\n def forward(self, x):\n return self.lambd(x)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self,in_dim,out_dim,stride=1,op=\"A\"):\n super(BasicBlock,self).__init__()\n self.subconv_1 = nn.Sequential(\n nn.Conv2d(in_dim,out_dim,3,stride,1,bias=False),\n nn.BatchNorm2d(out_dim),\n nn.ReLU(inplace=True),)\n self.subconv_2 = nn.Sequential(\n nn.Conv2d(out_dim,out_dim,3,1,1,bias=False),\n nn.BatchNorm2d(out_dim))\n if in_dim == out_dim and stride == 1:\n self.downsample = nn.Sequential()\n elif op == 'A':\n self.downsample =LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, out_dim//4, out_dim//4), \"constant\", 0))\n else:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_dim,out_dim,1,stride,0,bias=False),\n nn.BatchNorm2d(out_dim),\n )\n \n def forward(self,input_):\n x_0 = self.subconv_1(input_)\n x_1 = self.subconv_2(x_0)\n x_input = self.downsample(input_) \n x_final = F.relu(x_input + x_1,inplace=True)\n return x_final\n\n\nclass BottleNeck(nn.Module):\n expansion = 4\n def __init__(self,in_dim,out_dim,stride=1):\n super(BottleNeck,self).__init__()\n self.subconv_1 = nn.Sequential(\n nn.Conv2d(in_dim,int(out_dim/self.expansion),1,stride,0,bias=False),\n nn.BatchNorm2d(int(out_dim/self.expansion)),\n nn.ReLU(inplace=True),)\n self.subconv_2 = nn.Sequential(\n nn.Conv2d(int(out_dim/self.expansion),\n int(out_dim/self.expansion),3,1,1,bias=False),\n nn.BatchNorm2d(int(out_dim/self.expansion)),\n nn.ReLU(inplace=True),)\n self.subconv_3 = nn.Sequential(\n nn.Conv2d(int(out_dim/self.expansion),out_dim,1,1,0,bias=False),\n nn.BatchNorm2d(out_dim),)\n if in_dim == out_dim and stride == 1:\n self.downsample = None\n else:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_dim,out_dim,1,stride,0,bias=False),\n nn.BatchNorm2d(out_dim),\n )\n\n def forward(self,input_):\n x_input = input_\n x_0 = self.subconv_1(input_)\n x_1 = self.subconv_2(x_0)\n x_2 = self.subconv_3(x_1)\n if self.downsample is not None:\n x_input = self.downsample(input_)\n print(x_input.shape)\n x_final = F.relu(x_input+x_2,inplace=True)\n return x_final\n \n\nclass ResNet(nn.Module):\n def __init__(self, cfg, logger):\n '''\n block, BLOCK_LIST, in_dim, \n class_num, BASE=64, use_fc=True, CONV1=(7,2,3),\n MAX_POOL=True, pretrained=False\n '''\n super(ResNet,self).__init__()\n self.head_conv = nn.Sequential(\n nn.Conv2d(cfg.IN_DIM,cfg.BASE,cfg.CONV1[0],cfg.CONV1[1],cfg.CONV1[2],bias=False),\n nn.BatchNorm2d(cfg.BASE),\n nn.ReLU(inplace=True),)\n if cfg.MAX_POOL:\n self.maxpool_1 = nn.MaxPool2d(3,2,1)\n else:\n self.maxpool_1 = nn.Sequential()\n block = BottleNeck if cfg.BLOCK == 'bottleneck' else BasicBlock\n b_ = block.expansion\n self.layer_1 = self._make_layer(block,cfg.BASE,cfg.BASE*b_,cfg.BLOCK_LIST[0],1)\n self.layer_2 = self._make_layer(block,cfg.BASE*b_,cfg.BASE*2*b_,cfg.BLOCK_LIST[1],2)\n self.layer_3 = 
self._make_layer(block,cfg.BASE*2*b_,cfg.BASE*4*b_,cfg.BLOCK_LIST[2],2)\n self.layer_4 = self._make_layer(block,cfg.BASE*4*b_,cfg.BASE*8*b_,cfg.BLOCK_LIST[3],2)\n\n final_feature = cfg.BASE*4*b_ if cfg.BLOCK_LIST[3] == 0 else cfg.BASE*8*b_\n if cfg.USE_FC:\n self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))\n self.fc_1 = nn.Sequential(\n nn.Flatten(),\n nn.Linear(final_feature,cfg.CLASS_NUM),)\n else:\n self.avgpool_1 = nn.Sequential()\n self.fc_1 = nn.Sequential()\n self.logger = logger\n self.pretrained = cfg.PRETRAINED\n self._initialization()\n \n def _initialization(self):\n if self.pretrained is not False:\n self.modules.load_state_dict(model_zoo.load_url(model_urls[self.pretrained]))\n #TODO(liu):check it correct or not.\n else:\n for name, sub_module in self.named_modules():\n if isinstance(sub_module, nn.Conv2d) or isinstance(sub_module, nn.ConvTranspose2d) or \\\n isinstance(sub_module, nn.Linear):\n nn.init.kaiming_normal_(sub_module.weight)\n # nn.init.kaiming_normal_(sub_module.weight,mode='fan_out'\n # ,nonlinearity='relu')\n if self.logger is not None:\n self.logger.info('init {}.weight as kaiming_normal_'.format(name))\n if sub_module.bias is not None:\n nn.init.constant_(sub_module.bias, 0.0)\n if self.logger is not None:\n self.logger.info('init {}.bias as 0'.format(name))\n # elif isinstance(sub_module, nn.BatchNorm2d):\n # nn.init.constant_(sub_module.weight,1)\n # nn.init.constant_(sub_module.bias,0)\n # if self.logger is not None:\n # self.logger.info('init {}.weight as constant_ 1'.format(name))\n # self.logger.info('init {}.bias as constant_ 0'.format(name))\n \n def _make_layer(self,block,in_dim,out_dim,layer_num,stride):\n net_layers = []\n if layer_num == 0:\n return nn.Sequential()\n else: \n for layer in range(layer_num):\n if layer == 0:\n net_layers.append(block(in_dim,out_dim,stride))\n else:\n net_layers.append(block(out_dim,out_dim,1))\n return nn.Sequential(*net_layers)\n \n def forward(self,input_):\n x = self.head_conv(input_)\n x = self.maxpool_1(x)\n \n x = self.layer_1(x)\n \n x = self.layer_2(x)\n \n x = self.layer_3(x)\n \n x = self.layer_4(x)\n x = self.avgpool_1(x)\n x = self.fc_1(x)\n \n return x \n\n\nclass ThreeDCNN(nn.Module):\n def __init__(self,cfg,logger):\n super(ThreeDCNN,self).__init__()\n self.res1 = ResNet(cfg,logger)\n self.res2 = ResNet(cfg,logger)\n self.res3 = ResNet(cfg,logger)\n self.getheatmap_1 = nn.Conv2d(128,19,1,1,0)\n self.getheatmap_2 = nn.Conv2d(128,19,1,1,0)\n self.getheatmap_3 = nn.Conv2d(128,19,1,1,0)\n\n self.getdepth_1 = nn.Conv2d(128,1,1,1,0)\n self.getdepth_2 = nn.Conv2d(128,1,1,1,0)\n self.getdepth_3 = nn.Conv2d(128,1,1,1,0)\n\n self.tdcnn1 = nn.Conv3d(19,128,3,1,1)#b,in,d,h,w,\n self.tdcnn2 = nn.Conv3d(128,128,3,1,1)\n self.maxpool3d_1 = nn.MaxPool3d(3,1,0)\n self.tdcnn3 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn331 = nn.Conv3d(128,128,3,1,1)\n\n\n self.tdcnn332 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn333 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn334 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn335 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn336 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn337= nn.Conv3d(128,128,3,1,1)\n self.tdcnn338 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn339 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn3310 = nn.Conv3d(128,128,3,1,1)\n \n\n\n self.tdcnn4 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn5 = nn.Conv3d(128,19,3,1,1)\n\n\n self.tdcnn6 = nn.Conv3d(1,128,3,1,1)#b,in,d,h,w,\n self.tdcnn7 = nn.Conv3d(128,128,3,1,1)\n self.maxpool3d_2 = nn.MaxPool3d(3,1,0)\n self.tdcnn8 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn88 = 
nn.Conv3d(128,128,3,1,1)\n self.tdcnn9 = nn.Conv3d(128,128,3,1,1)\n self.tdcnn10 = nn.Conv3d(128,1,3,1,1)\n\n def forward(self,x):\n x1 = x[:,0,:,:,:]\n x2 = x[:,1,:,:,:]\n x3 = x[:,2,:,:,:]\n\n output1 = self.res1(x1)\n output2 = self.res2(x2)\n output3 = self.res3(x3)\n \n #print(output1.shape)\n de_output1 = self.getdepth_1(output1)\n de_output2 = self.getdepth_2(output2)\n de_output3 = self.getdepth_3(output3)\n\n he_output1 = self.getheatmap_1(output1)#(b,19,h,w)\n he_output2 = self.getheatmap_2(output2)\n he_output3 = self.getheatmap_3(output3)\n \n he_3d = torch.cat((he_output1.unsqueeze(2),\n he_output2.unsqueeze(2),\n he_output3.unsqueeze(2)),dim=2)#(b,19,3,h,w)\n de_3d = torch.cat((de_output1.unsqueeze(2),\n de_output2.unsqueeze(2),\n de_output3.unsqueeze(2)),dim=2)\n \n he_3d = self.tdcnn1(he_3d)\n he_3d = self.tdcnn2(he_3d)\n he_3d = self.maxpool3d_1(he_3d)\n he_3d = self.tdcnn3(he_3d)\n he_3d = self.tdcnn331(he_3d)\n\n he_3d = self.tdcnn332(he_3d)\n he_3d = self.tdcnn333(he_3d)\n he_3d = self.tdcnn334(he_3d)\n he_3d = self.tdcnn335(he_3d)\n he_3d = self.tdcnn336(he_3d)\n he_3d = self.tdcnn337(he_3d)\n he_3d = self.tdcnn338(he_3d)\n he_3d = self.tdcnn339(he_3d)\n he_3d = self.tdcnn3310(he_3d)\n\n he_3d = self.tdcnn4(he_3d)\n he_3d = self.tdcnn5(he_3d)\n\n de_3d = self.tdcnn6(de_3d)\n de_3d = self.tdcnn7(de_3d)\n de_3d = self.maxpool3d_2(de_3d)\n de_3d = self.tdcnn8(de_3d)\n de_3d = self.tdcnn88(de_3d)\n de_3d = self.tdcnn9(de_3d)\n de_3d = self.tdcnn10(de_3d)\n \n return de_3d, he_3d\n\n \nif __name__ == \"__main__\":\n logger = load_cfg(cfg)\n model = ThreeDCNN(cfg.MODEL,logger).cuda()\n from ptflops import get_model_complexity_info\n flops, params = get_model_complexity_info(model, (3,3,368,368), \n as_strings=True, print_per_layer_stat=True)\n logger.info('{:<30} {:<8}'.format('Computational complexity: ', flops))\n logger.info('{:<30} {:<8}'.format('Number of parameters: ', params))\n \n fakeinput = torch.ones((8,3,3,368,368)).cuda()\n output = model(fakeinput)\n mem = torch.cuda.memory_cached() / 1E9\n print(mem)\n\n \n\n\n\n\n\n\n\n\n\n\n# ------------------------------- mistakes ---------------------------------- #\n# downsample also need add batchnorm\n# add first, then relu\n# add input, not first conv output.\n# no bias for all conv layers\n# when using /, need add int()\n# usually we use fin_in for LeCun and he init, here we use fan_out\n# ---------------------------------- end ------------------------------------ #\n\n\n# ---------------------------------- notes ---------------------------------- #\n# main idea: short cut connection\n# parameters: 2.5M Res50, 6M Res152, 1.1M Res20, BN+ReLU\n# sgd+momentum 1e-1 0.9 divide 10 * 3 \n# batch size 256\n# weight decay 1e-4\n# input: resize and crop samll side to 256×256 then augment to 224\n# output: linear 1000 + softmax\n# TODO: Check details in training,testing. 
bn-relu-conv?\n# TODO: Training check: False\n# ---------------------------------- end ------------------------------------ #\n\n\n# ------------------------- resnet18 model summary -------------------------- #\n# Layer (type) Output Shape Param #\n# ================================================================\n# Conv2d-1 [-1, 64, 112, 112] 9,408\n# BatchNorm2d-2 [-1, 64, 112, 112] 128\n# ReLU-3 [-1, 64, 112, 112] 0\n# MaxPool2d-4 [-1, 64, 56, 56] 0\n# Conv2d-5 [-1, 64, 56, 56] 36,864\n# BatchNorm2d-6 [-1, 64, 56, 56] 128\n# ReLU-7 [-1, 64, 56, 56] 0\n# Conv2d-8 [-1, 64, 56, 56] 36,864\n# ...\n# BatchNorm2d-54 [-1, 512, 7, 7] 1,024\n# ReLU-55 [-1, 512, 7, 7] 0\n# Conv2d-56 [-1, 512, 7, 7] 2,359,296\n# BatchNorm2d-57 [-1, 512, 7, 7] 1,024\n# BasicBlock-58 [-1, 512, 7, 7] 0\n# AdaptiveAvgPool2d-59 [-1, 512, 1, 1] 0\n# Flatten-60 [-1, 512] 0\n# Linear-61 [-1, 1000] 513,000\n# Softmax-62 [-1, 1000] 0\n# ================================================================\n# Total params: 11,689,512\n# Trainable params: 11,689,512\n# Non-trainable params: 0\n# ----------------------------------------------------------------\n# Input size (MB): 0.57\n# Forward/backward pass size (MB): 57.06\n# Params size (MB): 44.59\n# Estimated Total Size (MB): 102.23\n# ---------------------------------- end ------------------------------------ #"
] | [
[
"torch.nn.Sequential",
"torch.ones",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Flatten",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.nn.functional.relu",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_",
"torch.nn.functional.pad",
"torch.cuda.memory_cached"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ravinsinghd/opencv-basic | [
"b3b59b8808c739bc403b2ef7b499b03225bfab5f",
"b3b59b8808c739bc403b2ef7b499b03225bfab5f"
] | [
"draw.py",
"matplot_image.py"
] | [
"import numpy as np\nimport cv2\nimg=np.zeros((512,512,3),np.uint8)\ncv2.line(img,(0,0),(511,511),(255,0,0),5)\ncv2.rectangle(img,(384,0),(510,218),(0,255,0),3)\ncv2.circle(img,(447,63),63,(0,0,255),9)\ncv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)\npts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)\npts = pts.reshape((-1,1,2))\ncv2.polylines(img,[pts],False,(0,255,255))\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)\ncv2.imshow('image',img)\nk=cv2.waitKey(0)",
"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nimg=cv2.imread('image.jpg',0)\nplt.imshow(img,cmap='gray',interpolation=\"bicubic\")\n#plt.xticks([]),plt.yticks([])\nplt.show()"
] | [
[
"numpy.array",
"numpy.zeros"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
w-kq/hhhh777 | [
"1be3a333128edf7ab50ef2dc3b281d6f561d9fc0"
] | [
"SLME.py"
] | [
"from numpy import *\r\nimport matplotlib.pyplot as plt\r\nfrom io import BytesIO\r\n\r\ndata_am15 = open(\"AM15G.dat\",'r', encoding='utf-8') # solar spectrum\r\ndata_alpha = open(\"absorption.dat\", 'r',encoding='utf-8') #光吸收系数\r\nEg = 1.40 #带隙,单位eV\r\nL = 3 #输入厚度,单位为μm\r\nf = 1 #f,直接带隙为1,间接带隙需要修改\r\nL_max = 3 #厚度最大值,单位微米\r\nT = 300 #温度,单位K\r\ne = ev = 1.60217648740E-19\r\nh = 6.626068E-34\r\nc = 299792458\r\nh_ev = h / ev\r\nc_nm = c * 1E9\r\nPi = 3.1415926\r\nk = 1.3806505E-23 #单位J/K\r\nk_ev = k / ev\r\nPin = 1000 #太阳光输入效率\r\n\r\n\r\n#将太阳光谱数据转换成二维列表\r\nam15 = []\r\nfor line in data_am15:\r\n s = line.strip().split('\\t')\r\n s1 = ' '.join(s) + '\\n'\r\n s2 = s1.split()\r\n if s2 != []:\r\n am15.append([s2[0],s2[1]])\r\ndata_am15.close()\r\n\r\n#将光吸收系数变为二维列表\r\nalpha = []\r\nfor line in data_alpha:\r\n s = line.strip().split('\\t')\r\n s1 = ' '.join(s) + '\\n'\r\n s2 = s1.split()\r\n if s2 != []:\r\n alpha.append([float(s2[0]), float(s2[1])])\r\n\r\n\r\n# preparing the data for calculating slme\r\n# 差值过程,思路就是将光吸收与SLME的横坐标对标\r\ndata_in = []\r\n\r\nfor l in range(1, len(am15)) : #am15为太阳光谱\r\n# x = am15[l].split()\r\n hv = float(am15[l][0]) #波长,nm\r\n nhv = float(am15[l][1]) #入射能量\r\n for ll in range(len(alpha)-1) :\r\n if alpha[ll][0] <= hv and alpha[ll+1][0] >= hv :\r\n fact = (hv - alpha[ll][0])/(alpha[ll+1][0] - alpha[ll][0])\r\n tmp1 = alpha[ll][1]*(1-fact) + fact*alpha[ll+1][1]\r\n data_in.append([hv, nhv, tmp1])\r\n #数据内容分别为波长,太阳光入射能量,tmp1为光吸收系数\r\n break\r\n\r\ndat = open('data_in_1.dat','w',encoding='utf-8')\r\nfor i in range(len(data_in)):\r\n string = str(data_in[i][0]) + '\\t' + str(data_in[i][1]) + '\\t' + str(data_in[i][2]) +'\\n'\r\n# print(string)\r\n dat.write(string)\r\ndat.close()\r\n\r\ndef get_I(l,f=1,data_in=data_in):\r\n#产生短路电流和暗电流的函数,需要修改的参数有:l,厚度,单位微米;f,直接带隙为1,间接带隙需要修改\r\n\r\n Isc = 0.0\r\n I0 = 0.0\r\n L = l * 1E-4 # 厚度,单位微米\r\n\r\n for l in range(len(data_in) - 1):\r\n hv0 = data_in[l][0] # 积分单元矩阵左横坐标\r\n hv1 = data_in[l + 1][0] # 积分单元矩阵右横坐标\r\n #\r\n des1 = hv1 - hv0\r\n #\r\n aE0 = 1.0 - exp(-2.0 * L * data_in[l][2])\r\n aE1 = 1.0 - exp(-2.0 * L * data_in[l + 1][2])\r\n\r\n is0 = data_in[l][1] * (hv0 / h / c_nm) * aE0\r\n is1 = data_in[l + 1][1] * (hv1 / h / c_nm) * aE1\r\n\r\n Isc = Isc + e * (is0 + is1) * des1 / 2.0\r\n\r\n hv_0 = 1240 / hv0\r\n hv_1 = 1240 / hv1\r\n des2 = hv_0 - hv_1\r\n\r\n irb0 = 2 * Pi * hv_0 ** 2 / h_ev ** 3 / c ** 2 * (exp(-1 * hv_0 / k_ev / T)) * aE0\r\n irb1 = 2 * Pi * hv_1 ** 2 / h_ev ** 3 / c ** 2 * (exp(-1 * hv_1 / k_ev / T)) * aE1\r\n\r\n I0 = I0 + e * Pi / f * (irb0 + irb1) * des2 / 2.0\r\n\r\n return Isc, I0\r\n\r\ndef get_JVcurve(Isc, I0, Eg):\r\n#产生JV曲线的函数,需要用到get_I输出的参数,Eg为带隙,单位为eV\r\n I = []\r\n V = []\r\n npts = int(Eg / 0.001)\r\n for ll in range(npts):\r\n Vap = ll * 0.001\r\n i = Isc - I0 * (exp(Vap / k_ev / T) - 1)\r\n # print(I)\r\n I.append(i)\r\n V.append(Vap)\r\n if i <= 0:\r\n break\r\n\r\n plt.plot(V,I,'r', label='J-V curve')\r\n plt.ylim(0,Isc+50) # xlim、ylim:分别设置X、Y轴的显示范围\r\n plt.xlim(0,Vap+0.05)\r\n plt.title(\"JV curve\") # title:设置子图的标题\r\n plt.savefig('JV-curve.png')\r\n plt.show()\r\n\r\n dat = open('JV-curve.dat', 'w', encoding='utf-8')\r\n for i in range(len(I)):\r\n string = str(V[i]) + '\\t' + str(I[i]) + '\\t' + str(I[i]*V[i]) +'\\n'\r\n # print(string)\r\n dat.write(string)\r\n dat.close()\r\n\r\n print('JV-curve中的信息:')\r\n print('开路电压 = ', Vap)\r\n print('短路电流 = ', Isc)\r\n print('SLME =' + str(get_slme(Isc,I0)) + '\\t' + '厚度 = ' + str(L) + 'μm')\r\n\r\n return 0\r\n\r\ndef 
get_slme(Isc,I0):\r\n#计算SLME的函数,会同时打印出短路电流,开路电压和SLME数据,需要用到get_I的输出参数\r\n npts = int(Eg / 0.001)\r\n maxIV = 0\r\n IVtmp = 0\r\n for ll in range(npts):\r\n Vap = ll * 0.001\r\n I = Isc - I0 * (exp(Vap / k_ev / T) - 1)\r\n IVtmp = Vap * I\r\n # print(I)\r\n if IVtmp >= maxIV:\r\n maxIV = IVtmp\r\n elif I <= 0:\r\n break\r\n# print(\"短路电流 = \", Isc, \"A/m2\")\r\n# print(\"开路电压 = \", Vap, \"V\")\r\n slme = maxIV / Pin\r\n# print(\"SLME = \", slme)\r\n return slme\r\n\r\n\r\n#主函数部分\r\n#第一部分是画给定厚度的JV曲线,同时给出开路电压,短路电流和SLME\r\n\r\n\r\nIsc,I0 = get_I(l=L, f=f)\r\n#print(I0)\r\nget_JVcurve(Isc, I0, Eg)\r\nget_slme(Isc,I0)\r\n\r\n\r\n#第二部分是画SLME随厚度变化曲线,需要输入曲线中厚度最大值和曲线撒点数\r\n\r\nn = 100 #曲线撒的点\r\n\r\nnpts = int(L_max*n)\r\nY = []\r\nX = []\r\ndat = open('SLME-curve.dat', 'w', encoding='utf-8')\r\nslme = 0\r\nslme_max = 0\r\nfor i in range(npts+1):\r\n l = i / n\r\n Isc, I0 = get_I(l=l)\r\n# print(\"厚度 =\", l,\"μm\")\r\n slme = get_slme(Isc, I0)\r\n Y.append(slme)\r\n X.append(l)\r\n dat.write(str(l) + '\\t' + str(slme) + '\\n')\r\n if slme >= slme_max:\r\n slme_max = slme\r\n l_max = l\r\ndat.close()\r\nprint('SLME-curve内信息:')\r\nprint('SLME_max = ' + str(slme_max) + '\\t' + '厚度 = ' + str(l_max) + 'μm')\r\n\r\nplt.plot(X,Y)\r\nplt.ylim(0,Y[-1]+0.025) # xlim、ylim:分别设置X、Y轴的显示范围\r\nplt.xlim(0,L_max)\r\nplt.title(\"SLME curve\") # title:设置子图的标题\r\nplt.savefig('SLME-curve.png')\r\nplt.show()\r\n\r\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WeiCheng302/image-segmentation-keras | [
"b34aef73f1a350cc0b4034eeb2dedd61642b9ccb"
] | [
"keras_segmentation/pretrained.py"
] | [
"import keras\nimport tensorflow as tf\nfrom .models.all_models import model_from_name\n\n\ndef model_from_checkpoint_path(model_config, latest_weights):\n\n model = model_from_name[model_config['model_class']](\n model_config['n_classes'], input_height=model_config['input_height'],\n input_width=model_config['input_width'])\n model.load_weights(latest_weights)\n return model\n\n\ndef resnet_pspnet_VOC12_v0_1():\n\n model_config = {\n \"output_height\": 96,\n \"input_height\": 384,\n \"input_width\": 576,\n \"n_classes\": 151,\n \"model_class\": \"resnet50_pspnet\",\n \"output_width\": 144\n }\n\n REPO_URL = \"https://github.com/divamgupta/image-segmentation-keras\"\n MODEL_PATH = \"pretrained_model_1/r2_voc12_resnetpspnet_384x576.24\"\n model_url = \"{0}/releases/download/{1}\".format(REPO_URL, MODEL_PATH)\n latest_weights = tf.keras.utils.get_file(model_url.split(\"/\")[-1], model_url)\n\n return model_from_checkpoint_path(model_config, latest_weights)\n\n\n# pretrained model converted from caffe by Vladkryvoruchko ... thanks !\ndef pspnet_50_ADE_20K():\n\n model_config = {\n \"input_height\": 473,\n \"input_width\": 473,\n \"n_classes\": 150,\n \"model_class\": \"pspnet_50\",\n }\n\n model_url = \"https://www.dropbox.com/s/\" \\\n \"0uxn14y26jcui4v/pspnet50_ade20k.h5?dl=1\"\n latest_weights = tf.keras.utils.get_file(\"pspnet50_ade20k.h5\", model_url)\n\n return model_from_checkpoint_path(model_config, latest_weights)\n\n\ndef pspnet_101_cityscapes():\n\n model_config = {\n \"input_height\": 713,\n \"input_width\": 713,\n \"n_classes\": 19,\n \"model_class\": \"pspnet_101\",\n }\n\n model_url = \"https://www.dropbox.com/s/\" \\\n \"c17g94n946tpalb/pspnet101_cityscapes.h5?dl=1\"\n latest_weights = tf.keras.utils.get_file(\"pspnet101_cityscapes.h5\", model_url)\n\n return model_from_checkpoint_path(model_config, latest_weights)\n\n\ndef pspnet_101_voc12():\n\n model_config = {\n \"input_height\": 473,\n \"input_width\": 473,\n \"n_classes\": 21,\n \"model_class\": \"pspnet_101\",\n }\n\n model_url = \"https://www.dropbox.com/s/\" \\\n \"uvqj2cjo4b9c5wg/pspnet101_voc2012.h5?dl=1\"\n latest_weights = tf.keras.utils.get_file(\"pspnet101_voc2012.h5\", model_url)\n\n return model_from_checkpoint_path(model_config, latest_weights)\n"
] | [
[
"tensorflow.keras.utils.get_file"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |