Dataset columns:
repo_name: string (lengths 6–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
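Each record ties a repository (repo_name) to parallel lists: the commit hashes (hexsha), the files taken from that commit (file_path), the source of each file (code), the library APIs each file calls (apis), and, per file, the library versions those calls appear compatible with (possible_versions). A minimal sketch of walking such records follows; the file name records.jsonl and the one-JSON-object-per-line layout are assumptions for illustration, not something the preview above specifies.

import json

# Assumption: the dump is stored as one JSON object per line (e.g. records.jsonl);
# the file name and layout are illustrative, not part of the preview above.
with open("records.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "icyyy/information_value"
        # file_path, code, apis and possible_versions are parallel lists, one entry per file
        for path, code, apis, versions in zip(
            record["file_path"],
            record["code"],
            record["apis"],
            record["possible_versions"],
        ):
            # versions maps each tracked library to the releases compatible with these calls
            print(repo, path, len(apis), "APIs, compatible scipy releases:", versions.get("scipy", []))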
icyyy/information_value
[ "8d71ab742e285f452e1dc0dc7952a924a27167ec" ]
[ "src/information_value.py" ]
[ "import numpy as np\nimport math\nfrom scipy import stats\nfrom sklearn.utils.multiclass import type_of_target\n\nclass WOE:\n def __init__(self):\n self._WOE_MIN = -20\n self._WOE_MAX = 20\n\n def woe(self, X, y, event=1):\n '''\n Calculate woe of each feature category and information value\n :param X: 2-D numpy array explanatory features which should be discreted already\n :param y: 1-D numpy array target variable which should be binary\n :param event: value of binary stands for the event to predict\n :return: numpy array of woe dictionaries, each dictionary contains woe values for categories of each feature\n numpy array of information value of each feature\n '''\n self.check_target_binary(y)\n X1 = self.feature_discretion(X)\n\n res_woe = []\n res_iv = []\n for i in range(0, X1.shape[-1]):\n x = X1[:, i]\n woe_dict, iv1 = self.woe_single_x(x, y, event)\n res_woe.append(woe_dict)\n res_iv.append(iv1)\n return np.array(res_woe), np.array(res_iv)\n\n def woe_single_x(self, x, y, event=1):\n '''\n calculate woe and information for a single feature\n :param x: 1-D numpy starnds for single feature\n :param y: 1-D numpy array target variable\n :param event: value of binary stands for the event to predict\n :return: dictionary contains woe values for categories of this feature\n information value of this feature\n '''\n self.check_target_binary(y)\n\n event_total, non_event_total = self.count_binary(y, event=event)\n x_labels = np.unique(x)\n woe_dict = {}\n iv = 0\n for x1 in x_labels:\n y1 = y[np.where(x == x1)[0]]\n event_count, non_event_count = self.count_binary(y1, event=event)\n rate_event = 1.0 * event_count / event_total\n rate_non_event = 1.0 * non_event_count / non_event_total\n if rate_event == 0:\n woe1 = self._WOE_MIN\n elif rate_non_event == 0:\n woe1 = self._WOE_MAX\n else:\n woe1 = math.log(rate_event / rate_non_event)\n woe_dict[x1] = woe1\n iv += (rate_event - rate_non_event) * woe1\n return woe_dict, iv\n\n def woe_replace(self, X, woe_arr):\n '''\n replace the explanatory feature categories with its woe value\n :param X: 2-D numpy array explanatory features which should be discreted already\n :param woe_arr: numpy array of woe dictionaries, each dictionary contains woe values for categories of each feature\n :return: the new numpy array in which woe values filled\n '''\n if X.shape[-1] != woe_arr.shape[-1]:\n raise ValueError('WOE dict array length must be equal with features length')\n\n res = np.copy(X).astype(float)\n idx = 0\n for woe_dict in woe_arr:\n for k in woe_dict.keys():\n woe = woe_dict[k]\n res[:, idx][np.where(res[:, idx] == k)[0]] = woe * 1.0\n idx += 1\n\n return res\n\n def combined_iv(self, X, y, masks, event=1):\n '''\n calcute the information vlaue of combination features\n :param X: 2-D numpy array explanatory features which should be discreted already\n :param y: 1-D numpy array target variable\n :param masks: 1-D numpy array of masks stands for which features are included in combination,\n e.g. 
np.array([0,0,1,1,1,0,0,0,0,0,1]), the length should be same as features length\n :param event: value of binary stands for the event to predict\n :return: woe dictionary and information value of combined features\n '''\n if masks.shape[-1] != X.shape[-1]:\n raise ValueError('Masks array length must be equal with features length')\n\n x = X[:, np.where(masks == 1)[0]]\n tmp = []\n for i in range(x.shape[0]):\n tmp.append(self.combine(x[i, :]))\n\n dumy = np.array(tmp)\n # dumy_labels = np.unique(dumy)\n woe, iv = self.woe_single_x(dumy, y, event)\n return woe, iv\n\n def combine(self, list):\n res = ''\n for item in list:\n res += str(item)\n return res\n\n def count_binary(self, a, event=1):\n event_count = (a == event).sum()\n non_event_count = a.shape[-1] - event_count\n return event_count, non_event_count\n\n def check_target_binary(self, y):\n '''\n check if the target variable is binary, raise error if not.\n :param y:\n :return:\n '''\n y_type = type_of_target(y)\n if y_type not in ['binary']:\n raise ValueError('Label type must be binary')\n\n def feature_discretion(self, X):\n '''\n Discrete the continuous features of input data X, and keep other features unchanged.\n :param X : numpy array\n :return: the numpy array in which all continuous features are discreted\n '''\n temp = []\n for i in range(0, X.shape[-1]):\n x = X[:, i]\n x_type = type_of_target(x)\n if x_type == 'continuous':\n x1 = self.discrete(x)\n temp.append(x1)\n else:\n temp.append(x)\n return np.array(temp).T\n\n def discrete(self, x):\n '''\n Discrete the input 1-D numpy array using 5 equal percentiles\n :param x: 1-D numpy array\n :return: discreted 1-D numpy array\n '''\n res = np.array([0] * x.shape[-1], dtype=int)\n for i in range(5):\n point1 = stats.scoreatpercentile(x, i * 20)\n point2 = stats.scoreatpercentile(x, (i + 1) * 20)\n x1 = x[np.where((x >= point1) & (x <= point2))]\n mask = np.in1d(x, x1)\n res[mask] = (i + 1)\n return res\n\n @property\n def WOE_MIN(self):\n return self._WOE_MIN\n @WOE_MIN.setter\n def WOE_MIN(self, woe_min):\n self._WOE_MIN = woe_min\n @property\n def WOE_MAX(self):\n return self._WOE_MAX\n @WOE_MAX.setter\n def WOE_MAX(self, woe_max):\n self._WOE_MAX = woe_max\n" ]
[ [ "numpy.unique", "numpy.in1d", "numpy.copy", "scipy.stats.scoreatpercentile", "sklearn.utils.multiclass.type_of_target", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
kylejn27/dask
[ "3327b2e158dbadf2057685fdb51b74ce3129416e" ]
[ "dask/array/core.py" ]
[ "import math\nimport operator\nimport os\nimport pickle\nimport re\nimport sys\nimport traceback\nimport uuid\nimport warnings\nfrom bisect import bisect\nfrom collections.abc import Iterable, Iterator, Mapping\nfrom functools import partial, wraps, reduce\nfrom itertools import product, zip_longest\nfrom numbers import Number, Integral\nfrom operator import add, getitem, mul\nfrom threading import Lock\n\nfrom tlz import partition, concat, first, groupby, accumulate, frequencies\nfrom tlz.curried import pluck\nimport numpy as np\n\nfrom . import chunk\nfrom .. import config, compute\nfrom ..base import (\n DaskMethodsMixin,\n tokenize,\n dont_optimize,\n compute_as_if_collection,\n persist,\n is_dask_collection,\n)\nfrom ..blockwise import broadcast_dimensions\nfrom ..context import globalmethod\nfrom ..utils import (\n ndeepmap,\n ignoring,\n concrete,\n derived_from,\n is_integer,\n IndexCallable,\n funcname,\n SerializableLock,\n Dispatch,\n factors,\n parse_bytes,\n has_keyword,\n M,\n ndimlist,\n format_bytes,\n typename,\n)\nfrom ..core import quote\nfrom ..delayed import delayed, Delayed\nfrom .. import threaded, core\nfrom ..sizeof import sizeof\nfrom ..highlevelgraph import HighLevelGraph\nfrom .numpy_compat import _Recurser, _make_sliced_dtype\nfrom .slicing import slice_array, replace_ellipsis, cached_cumsum\nfrom .blockwise import blockwise\nfrom .chunk_types import is_valid_array_chunk, is_valid_chunk_type\n\n\nconfig.update_defaults({\"array\": {\"chunk-size\": \"128MiB\", \"rechunk-threshold\": 4}})\n\n\nconcatenate_lookup = Dispatch(\"concatenate\")\ntensordot_lookup = Dispatch(\"tensordot\")\neinsum_lookup = Dispatch(\"einsum\")\nconcatenate_lookup.register((object, np.ndarray), np.concatenate)\ntensordot_lookup.register((object, np.ndarray), np.tensordot)\neinsum_lookup.register((object, np.ndarray), np.einsum)\n\nunknown_chunk_message = (\n \"\\n\\n\"\n \"A possible solution: \"\n \"https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\\n\"\n \"Summary: to compute chunks sizes, use\\n\\n\"\n \" x.compute_chunk_sizes() # for Dask Array `x`\\n\"\n \" ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`\"\n)\n\n\nclass PerformanceWarning(Warning):\n \"\"\" A warning given when bad chunking may cause poor performance \"\"\"\n\n\ndef getter(a, b, asarray=True, lock=None):\n if isinstance(b, tuple) and any(x is None for x in b):\n b2 = tuple(x for x in b if x is not None)\n b3 = tuple(\n None if x is None else slice(None, None)\n for x in b\n if not isinstance(x, Integral)\n )\n return getter(a, b2, asarray=asarray, lock=lock)[b3]\n\n if lock:\n lock.acquire()\n try:\n c = a[b]\n if asarray:\n c = np.asarray(c)\n finally:\n if lock:\n lock.release()\n return c\n\n\ndef getter_nofancy(a, b, asarray=True, lock=None):\n \"\"\"A simple wrapper around ``getter``.\n\n Used to indicate to the optimization passes that the backend doesn't\n support fancy indexing.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)\n\n\ndef getter_inline(a, b, asarray=True, lock=None):\n \"\"\"A getter function that optimizations feel comfortable inlining\n\n Slicing operations with this function may be inlined into a graph, such as\n in the following rewrite\n\n **Before**\n\n >>> a = x[:10] # doctest: +SKIP\n >>> b = a + 1 # doctest: +SKIP\n >>> c = a * 2 # doctest: +SKIP\n\n **After**\n\n >>> b = x[:10] + 1 # doctest: +SKIP\n >>> c = x[:10] * 2 # doctest: +SKIP\n\n This inlining can be relevant to operations when running off of disk.\n \"\"\"\n return getter(a, b, 
asarray=asarray, lock=lock)\n\n\nfrom .optimization import optimize, fuse_slice\n\n\n# __array_function__ dict for mapping aliases and mismatching names\n_HANDLED_FUNCTIONS = {}\n\n\ndef implements(*numpy_functions):\n \"\"\"Register an __array_function__ implementation for dask.array.Array\n\n Register that a function implements the API of a NumPy function (or several\n NumPy functions in case of aliases) which is handled with\n ``__array_function__``.\n\n Parameters\n ----------\n \\\\*numpy_functions : callables\n One or more NumPy functions that are handled by ``__array_function__``\n and will be mapped by `implements` to a `dask.array` function.\n \"\"\"\n\n def decorator(dask_func):\n for numpy_function in numpy_functions:\n _HANDLED_FUNCTIONS[numpy_function] = dask_func\n\n return dask_func\n\n return decorator\n\n\ndef check_if_handled_given_other(f):\n \"\"\"Check if method is handled by Dask given type of other\n\n Ensures proper deferral to upcast types in dunder operations without\n assuming unknown types are automatically downcast types.\n \"\"\"\n\n @wraps(f)\n def wrapper(self, other):\n if (\n is_valid_array_chunk(other)\n or isinstance(other, (self.__class__, list, tuple, np.generic))\n or \"dask.dataframe.core.Scalar\" in str(other.__class__)\n ):\n return f(self, other)\n else:\n return NotImplemented\n\n return wrapper\n\n\ndef slices_from_chunks(chunks):\n \"\"\"Translate chunks tuple to a set of slices in product order\n\n >>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE\n [(slice(0, 2, None), slice(0, 3, None)),\n (slice(0, 2, None), slice(3, 6, None)),\n (slice(0, 2, None), slice(6, 9, None)),\n (slice(2, 4, None), slice(0, 3, None)),\n (slice(2, 4, None), slice(3, 6, None)),\n (slice(2, 4, None), slice(6, 9, None))]\n \"\"\"\n cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]\n slices = [\n [slice(s, s + dim) for s, dim in zip(starts, shapes)]\n for starts, shapes in zip(cumdims, chunks)\n ]\n return list(product(*slices))\n\n\ndef getem(\n arr,\n chunks,\n getitem=getter,\n shape=None,\n out_name=None,\n lock=False,\n asarray=True,\n dtype=None,\n):\n \"\"\"Dask getting various chunks from an array-like\n\n >>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n\n >>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n \"\"\"\n out_name = out_name or arr\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n keys = product([out_name], *(range(len(bds)) for bds in chunks))\n slices = slices_from_chunks(chunks)\n\n if (\n has_keyword(getitem, \"asarray\")\n and has_keyword(getitem, \"lock\")\n and (not asarray or lock)\n ):\n values = [(getitem, arr, x, asarray, lock) for x in slices]\n else:\n # Common case, drop extra parameters\n values = [(getitem, arr, x) for x in slices]\n\n return dict(zip(keys, values))\n\n\ndef dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):\n \"\"\"Dot product of many aligned chunks\n\n >>> x = np.array([[1, 2], [1, 2]])\n >>> y = np.array([[10, 20], [10, 20]])\n >>> dotmany([x, x, x], [y, y, y])\n array([[ 90, 
180],\n [ 90, 180]])\n\n Optionally pass in functions to apply to the left and right chunks\n\n >>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)\n array([[150, 150],\n [150, 150]])\n \"\"\"\n if leftfunc:\n A = map(leftfunc, A)\n if rightfunc:\n B = map(rightfunc, B)\n return sum(map(partial(np.dot, **kwargs), A, B))\n\n\ndef _concatenate2(arrays, axes=[]):\n \"\"\"Recursively Concatenate nested lists of arrays along axes\n\n Each entry in axes corresponds to each level of the nested list. The\n length of axes should correspond to the level of nesting of arrays.\n If axes is an empty list or tuple, return arrays, or arrays[0] if\n arrays is a list.\n\n >>> x = np.array([[1, 2], [3, 4]])\n >>> _concatenate2([x, x], axes=[0])\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> _concatenate2([x, x], axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n >>> _concatenate2([[x, x], [x, x]], axes=[0, 1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4],\n [1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Supports Iterators\n >>> _concatenate2(iter([x, x]), axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Special Case\n >>> _concatenate2([x, x], axes=())\n array([[1, 2],\n [3, 4]])\n \"\"\"\n if axes == ():\n if isinstance(arrays, list):\n return arrays[0]\n else:\n return arrays\n\n if isinstance(arrays, Iterator):\n arrays = list(arrays)\n if not isinstance(arrays, (list, tuple)):\n return arrays\n if len(axes) > 1:\n arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]\n concatenate = concatenate_lookup.dispatch(\n type(max(arrays, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n return concatenate(arrays, axis=axes[0])\n\n\ndef apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=\"dtype\", nout=None):\n \"\"\"\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. Only attributes\n ``ndim`` and ``dtype`` are used.\n\n kwargs: dict\n Additional ``kwargs`` to the ``func``\n\n funcname: String\n Name of calling function to improve potential error messages\n\n suggest_dtype: None/False or String\n If not ``None`` adds suggestion to potential error message to specify a dtype\n via the specified kwarg. 
Defaults to ``'dtype'``.\n\n nout: None or Int\n ``None`` if function returns single output, integer if many.\n Deafults to ``None``.\n\n Returns\n -------\n : dtype or List of dtype\n One or many dtypes (depending on ``nout``)\n \"\"\"\n args = [\n np.ones((1,) * x.ndim, dtype=x.dtype) if isinstance(x, Array) else x\n for x in args\n ]\n try:\n with np.errstate(all=\"ignore\"):\n o = func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n suggest = (\n (\n \"Please specify the dtype explicitly using the \"\n \"`{dtype}` kwarg.\\n\\n\".format(dtype=suggest_dtype)\n )\n if suggest_dtype\n else \"\"\n )\n msg = (\n \"`dtype` inference failed in `{0}`.\\n\\n\"\n \"{1}\"\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n \"{2}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n \"{3}\"\n ).format(funcname, suggest, repr(e), tb)\n else:\n msg = None\n if msg is not None:\n raise ValueError(msg)\n return o.dtype if nout is None else tuple(e.dtype for e in o)\n\n\ndef normalize_arg(x):\n \"\"\"Normalize user provided arguments to blockwise or map_blocks\n\n We do a few things:\n\n 1. If they are string literals that might collide with blockwise_token then we\n quote them\n 2. IF they are large (as defined by sizeof) then we put them into the\n graph on their own by using dask.delayed\n \"\"\"\n if is_dask_collection(x):\n return x\n elif isinstance(x, str) and re.match(r\"_\\d+\", x):\n return delayed(x)\n elif isinstance(x, list) and len(x) >= 10:\n return delayed(x)\n elif sizeof(x) > 1e6:\n return delayed(x)\n else:\n return x\n\n\ndef _pass_extra_kwargs(func, keys, *args, **kwargs):\n \"\"\"Helper for :func:`map_blocks` to pass `block_info` or `block_id`.\n\n For each element of `keys`, a corresponding element of args is changed\n to a keyword argument with that key, before all arguments re passed on\n to `func`.\n \"\"\"\n kwargs.update(zip(keys, args))\n return func(*args[len(keys) :], **kwargs)\n\n\ndef map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n \"\"\"Map a function across all blocks of a dask array.\n\n Parameters\n ----------\n func : callable\n Function to apply to every block in the array.\n args : dask arrays or other objects\n dtype : np.dtype, optional\n The ``dtype`` of the output array. It is recommended to provide this.\n If not provided, will be inferred by applying the function to a small\n set of fake data.\n chunks : tuple, optional\n Chunk shape of resulting blocks if the function does not preserve\n shape. If not provided, the resulting array is assumed to have the same\n block structure as the first input array.\n drop_axis : number or iterable, optional\n Dimensions lost by the function.\n new_axis : number or iterable, optional\n New dimensions created by the function. Note that these are applied\n after ``drop_axis`` (if present).\n token : string, optional\n The key prefix to use for the output array. If not provided, will be\n determined from the function name.\n name : string, optional\n The key name to use for the output array. Note that this fully\n specifies the output key name, and must be unique. If not provided,\n will be determined by a hash of the arguments.\n **kwargs :\n Other keyword arguments to pass to function. 
Values must be constants\n (not dask.arrays)\n\n See Also\n --------\n dask.array.blockwise : Generalized operation with control over block alignment.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n\n >>> x.map_blocks(lambda x: x * 2).compute()\n array([ 0, 2, 4, 6, 8, 10])\n\n The ``da.map_blocks`` function can also accept multiple arrays.\n\n >>> d = da.arange(5, chunks=2)\n >>> e = da.arange(5, chunks=2)\n\n >>> f = map_blocks(lambda a, b: a + b**2, d, e)\n >>> f.compute()\n array([ 0, 2, 6, 12, 20])\n\n If the function changes shape of the blocks then you must provide chunks\n explicitly.\n\n >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))\n\n You have a bit of freedom in specifying chunks. If all of the output chunk\n sizes are the same, you can provide just that chunk size as a single tuple.\n\n >>> a = da.arange(18, chunks=(6,))\n >>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))\n\n If the function changes the dimension of the blocks you must specify the\n created or destroyed dimensions.\n\n >>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),\n ... new_axis=[0, 2])\n\n If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to\n add the necessary number of axes on the left.\n\n Map_blocks aligns blocks by block positions without regard to shape. In the\n following example we have two arrays with the same number of blocks but\n with different shape and chunk sizes.\n\n >>> x = da.arange(1000, chunks=(100,))\n >>> y = da.arange(100, chunks=(10,))\n\n The relevant attribute to match is numblocks.\n\n >>> x.numblocks\n (10,)\n >>> y.numblocks\n (10,)\n\n If these match (up to broadcasting rules) then we can map arbitrary\n functions across blocks\n\n >>> def func(a, b):\n ... return np.array([a.max(), b.max()])\n\n >>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')\n dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,\n 69, 799, 79, 899, 89, 999, 99])\n\n Your block function get information about where it is in the array by\n accepting a special ``block_info`` keyword argument.\n\n >>> def func(block, block_info=None):\n ... pass\n\n This will receive the following information:\n\n >>> block_info # doctest: +SKIP\n {0: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)]},\n None: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)],\n 'chunk-shape': (100,),\n 'dtype': dtype('float64')}}\n\n For each argument and keyword arguments that are dask arrays (the positions\n of which are the first index), you will receive the shape of the full\n array, the number of chunks of the full array in each dimension, the chunk\n location (for example the fourth chunk over in the first dimension), and\n the array location (for example the slice corresponding to ``40:50``). The\n same information is provided for the output, with the key ``None``, plus\n the shape and dtype that should be returned.\n\n These features can be combined to synthesize an array from scratch, for\n example:\n\n >>> def func(block_info=None):\n ... loc = block_info[None]['array-location'][0]\n ... 
return np.arange(loc[0], loc[1])\n\n >>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)\n dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([0, 1, 2, 3, 4, 5, 6, 7])\n\n You may specify the key name prefix of the resulting task in the graph with\n the optional ``token`` keyword argument.\n\n >>> x.map_blocks(lambda x: x + 1, name='increment') # doctest: +SKIP\n dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,), chunktype=numpy.ndarray>\n \"\"\"\n if not callable(func):\n msg = (\n \"First argument must be callable function, not %s\\n\"\n \"Usage: da.map_blocks(function, x)\\n\"\n \" or: da.map_blocks(function, x, y, z)\"\n )\n raise TypeError(msg % type(func).__name__)\n if token:\n warnings.warn(\"The token= keyword to map_blocks has been moved to name=\")\n name = token\n\n name = \"%s-%s\" % (name or funcname(func), tokenize(func, *args, **kwargs))\n new_axes = {}\n\n if isinstance(drop_axis, Number):\n drop_axis = [drop_axis]\n if isinstance(new_axis, Number):\n new_axis = [new_axis] # TODO: handle new_axis\n\n arrs = [a for a in args if isinstance(a, Array)]\n\n argpairs = [\n (a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)\n for a in args\n ]\n if arrs:\n out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]\n else:\n out_ind = ()\n\n original_kwargs = kwargs\n\n if dtype is None and meta is None:\n dtype = apply_infer_dtype(func, args, original_kwargs, \"map_blocks\")\n\n if drop_axis:\n out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)\n if new_axis is None and chunks is not None and len(out_ind) < len(chunks):\n new_axis = range(len(chunks) - len(out_ind))\n if new_axis:\n # new_axis = [x + len(drop_axis) for x in new_axis]\n out_ind = list(out_ind)\n for ax in sorted(new_axis):\n n = len(out_ind) + len(drop_axis)\n out_ind.insert(ax, n)\n if chunks is not None:\n new_axes[n] = chunks[ax]\n else:\n new_axes[n] = 1\n out_ind = tuple(out_ind)\n if max(new_axis) > max(out_ind):\n raise ValueError(\"New_axis values do not fill in all dimensions\")\n\n if chunks is not None:\n if len(chunks) != len(out_ind):\n raise ValueError(\n \"Provided chunks have {0} dims, expected {1} \"\n \"dims.\".format(len(chunks), len(out_ind))\n )\n adjust_chunks = dict(zip(out_ind, chunks))\n else:\n adjust_chunks = None\n\n out = blockwise(\n func,\n out_ind,\n *concat(argpairs),\n name=name,\n new_axes=new_axes,\n dtype=dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=adjust_chunks,\n meta=meta,\n **kwargs,\n )\n\n extra_argpairs = []\n extra_names = []\n # If func has block_id as an argument, construct an array of block IDs and\n # prepare to inject it.\n if has_keyword(func, \"block_id\"):\n block_id_name = \"block-id-\" + out.name\n block_id_dsk = {\n (block_id_name,) + block_id: block_id\n for block_id in product(*(range(len(c)) for c in out.chunks))\n }\n block_id_array = Array(\n block_id_dsk,\n block_id_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_id_array, out_ind))\n extra_names.append(\"block_id\")\n\n # If func has block_info as an argument, construct an array of block info\n # objects and prepare to inject it.\n if has_keyword(func, \"block_info\"):\n starts = {}\n num_chunks = {}\n shapes = {}\n\n for i, (arg, in_ind) in enumerate(argpairs):\n if in_ind is not None:\n shapes[i] = arg.shape\n if drop_axis:\n # We concatenate along dropped axes, so we need to treat them\n # as if 
there is only a single chunk.\n starts[i] = [\n (\n cached_cumsum(arg.chunks[j], initial_zero=True)\n if ind in out_ind\n else [0, arg.shape[j]]\n )\n for j, ind in enumerate(in_ind)\n ]\n num_chunks[i] = tuple(len(s) - 1 for s in starts[i])\n else:\n starts[i] = [\n cached_cumsum(c, initial_zero=True) for c in arg.chunks\n ]\n num_chunks[i] = arg.numblocks\n out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]\n\n block_info_name = \"block-info-\" + out.name\n block_info_dsk = {}\n for block_id in product(*(range(len(c)) for c in out.chunks)):\n # Get position of chunk, indexed by axis labels\n location = {out_ind[i]: loc for i, loc in enumerate(block_id)}\n info = {}\n for i, shape in shapes.items():\n # Compute chunk key in the array, taking broadcasting into\n # account. We don't directly know which dimensions are\n # broadcast, but any dimension with only one chunk can be\n # treated as broadcast.\n arr_k = tuple(\n location.get(ind, 0) if num_chunks[i][j] > 1 else 0\n for j, ind in enumerate(argpairs[i][1])\n )\n info[i] = {\n \"shape\": shape,\n \"num-chunks\": num_chunks[i],\n \"array-location\": [\n (starts[i][ij][j], starts[i][ij][j + 1])\n for ij, j in enumerate(arr_k)\n ],\n \"chunk-location\": arr_k,\n }\n\n info[None] = {\n \"shape\": out.shape,\n \"num-chunks\": out.numblocks,\n \"array-location\": [\n (out_starts[ij][j], out_starts[ij][j + 1])\n for ij, j in enumerate(block_id)\n ],\n \"chunk-location\": block_id,\n \"chunk-shape\": tuple(\n out.chunks[ij][j] for ij, j in enumerate(block_id)\n ),\n \"dtype\": dtype,\n }\n block_info_dsk[(block_info_name,) + block_id] = info\n\n block_info = Array(\n block_info_dsk,\n block_info_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_info, out_ind))\n extra_names.append(\"block_info\")\n\n if extra_argpairs:\n # Rewrite the Blockwise layer. It would be nice to find a way to\n # avoid doing it twice, but it's currently needed to determine\n # out.chunks from the first pass. 
Since it constructs a Blockwise\n # rather than an expanded graph, it shouldn't be too expensive.\n out = blockwise(\n _pass_extra_kwargs,\n out_ind,\n func,\n None,\n tuple(extra_names),\n None,\n *concat(extra_argpairs),\n *concat(argpairs),\n name=out.name,\n dtype=out.dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=dict(zip(out_ind, out.chunks)),\n meta=meta,\n **kwargs,\n )\n\n return out\n\n\ndef broadcast_chunks(*chunkss):\n \"\"\"Construct a chunks tuple that broadcasts many chunks tuples\n\n >>> a = ((5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((5, 5),)\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((1,), (5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((3, 3,), (5, 5),)\n >>> broadcast_chunks(a, b)\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]\n \"\"\"\n if not chunkss:\n return ()\n elif len(chunkss) == 1:\n return chunkss[0]\n n = max(map(len, chunkss))\n chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]\n result = []\n for i in range(n):\n step1 = [c[i] for c in chunkss2]\n if all(c == (1,) for c in step1):\n step2 = step1\n else:\n step2 = [c for c in step1 if c != (1,)]\n if len(set(step2)) != 1:\n raise ValueError(\"Chunks do not align: %s\" % str(step2))\n result.append(step2[0])\n return tuple(result)\n\n\ndef store(\n sources,\n targets,\n lock=True,\n regions=None,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\"Store dask arrays in array-like objects, overwrite data in target\n\n This stores dask arrays into object that supports numpy-style setitem\n indexing. It stores values chunk by chunk so that it does not have to\n fill up memory. For best performance you can align the block size of\n the storage target with the block size of your array.\n\n If your data fits in memory then you may prefer calling\n ``np.array(myarray)`` instead.\n\n Parameters\n ----------\n\n sources: Array or iterable of Arrays\n targets: array-like or Delayed or iterable of array-likes and/or Delayeds\n These should support setitem syntax ``target[10:20] = ...``\n lock: boolean or threading.Lock, optional\n Whether or not to lock the data stores while storing.\n Pass True (lock each file individually), False (don't lock) or a\n particular ``threading.Lock`` object to be shared among all writes.\n regions: tuple of slices or list of tuples of slices\n Each ``region`` tuple in ``regions`` should be such that\n ``target[region].shape = source.shape``\n for the corresponding source and target in sources and targets,\n respectively. If this is a tuple, the contents will be assumed to be\n slices, so do not provide a tuple of tuples.\n compute: boolean, optional\n If true compute immediately, return ``dask.delayed.Delayed`` otherwise\n return_stored: boolean, optional\n Optionally return the stored result (default False).\n\n Examples\n --------\n >>> x = ... # doctest: +SKIP\n\n >>> import h5py # doctest: +SKIP\n >>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP\n >>> dset = f.create_dataset('/data', shape=x.shape,\n ... chunks=x.chunks,\n ... 
dtype='f8') # doctest: +SKIP\n\n >>> store(x, dset) # doctest: +SKIP\n\n Alternatively store many arrays at the same time\n\n >>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP\n \"\"\"\n\n if isinstance(sources, Array):\n sources = [sources]\n targets = [targets]\n\n if any(not isinstance(s, Array) for s in sources):\n raise ValueError(\"All sources must be dask array objects\")\n\n if len(sources) != len(targets):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d]\"\n % (len(sources), len(targets))\n )\n\n if isinstance(regions, tuple) or regions is None:\n regions = [regions]\n\n if len(sources) > 1 and len(regions) == 1:\n regions *= len(sources)\n\n if len(sources) != len(regions):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d] than regions [%d]\"\n % (len(sources), len(targets), len(regions))\n )\n\n # Optimize all sources together\n sources_dsk = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])\n sources_dsk = Array.__dask_optimize__(\n sources_dsk, list(core.flatten([e.__dask_keys__() for e in sources]))\n )\n sources2 = [Array(sources_dsk, e.name, e.chunks, meta=e) for e in sources]\n\n # Optimize all targets together\n targets2 = []\n targets_keys = []\n targets_dsk = []\n for e in targets:\n if isinstance(e, Delayed):\n targets2.append(e.key)\n targets_keys.extend(e.__dask_keys__())\n targets_dsk.append(e.__dask_graph__())\n elif is_dask_collection(e):\n raise TypeError(\"Targets must be either Delayed objects or array-likes\")\n else:\n targets2.append(e)\n\n targets_dsk = HighLevelGraph.merge(*targets_dsk)\n targets_dsk = Delayed.__dask_optimize__(targets_dsk, targets_keys)\n\n load_stored = return_stored and not compute\n toks = [str(uuid.uuid1()) for _ in range(len(sources))]\n store_dsk = HighLevelGraph.merge(\n *[\n insert_to_ooc(s, t, lock, r, return_stored, load_stored, tok)\n for s, t, r, tok in zip(sources2, targets2, regions, toks)\n ]\n )\n store_keys = list(store_dsk.keys())\n\n store_dsk = HighLevelGraph.merge(store_dsk, targets_dsk, sources_dsk)\n\n if return_stored:\n load_store_dsk = store_dsk\n if compute:\n store_dlyds = [Delayed(k, store_dsk) for k in store_keys]\n store_dlyds = persist(*store_dlyds, **kwargs)\n store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])\n\n load_store_dsk = retrieve_from_ooc(store_keys, store_dsk, store_dsk_2)\n\n result = tuple(\n Array(load_store_dsk, \"load-store-%s\" % t, s.chunks, meta=s)\n for s, t in zip(sources, toks)\n )\n\n return result\n else:\n name = \"store-\" + str(uuid.uuid1())\n dsk = HighLevelGraph.merge({name: store_keys}, store_dsk)\n result = Delayed(name, dsk)\n\n if compute:\n result.compute(**kwargs)\n return None\n else:\n return result\n\n\ndef blockdims_from_blockshape(shape, chunks):\n \"\"\"\n\n >>> blockdims_from_blockshape((10, 10), (4, 3))\n ((4, 4, 2), (3, 3, 3, 1))\n >>> blockdims_from_blockshape((10, 0), (4, 0))\n ((4, 4, 2), (0,))\n \"\"\"\n if chunks is None:\n raise TypeError(\"Must supply chunks= keyword argument\")\n if shape is None:\n raise TypeError(\"Must supply shape= keyword argument\")\n if np.isnan(sum(shape)) or np.isnan(sum(chunks)):\n raise ValueError(\n \"Array chunk sizes are unknown. 
shape: %s, chunks: %s%s\"\n % (shape, chunks, unknown_chunk_message)\n )\n if not all(map(is_integer, chunks)):\n raise ValueError(\"chunks can only contain integers.\")\n if not all(map(is_integer, shape)):\n raise ValueError(\"shape can only contain integers.\")\n shape = tuple(map(int, shape))\n chunks = tuple(map(int, chunks))\n return tuple(\n ((bd,) * (d // bd) + ((d % bd,) if d % bd else ()) if d else (0,))\n for d, bd in zip(shape, chunks)\n )\n\n\ndef finalize(results):\n if not results:\n return concatenate3(results)\n results2 = results\n while isinstance(results2, (tuple, list)):\n if len(results2) > 1:\n return concatenate3(results)\n else:\n results2 = results2[0]\n return unpack_singleton(results)\n\n\nCHUNKS_NONE_ERROR_MESSAGE = \"\"\"\nYou must specify a chunks= keyword argument.\nThis specifies the chunksize of your array blocks.\n\nSee the following documentation page for details:\n https://docs.dask.org/en/latest/array-creation.html#chunks\n\"\"\".strip()\n\n\nclass Array(DaskMethodsMixin):\n \"\"\"Parallel Dask Array\n\n A parallel nd-array comprised of many numpy arrays arranged in a grid.\n\n This constructor is for advanced uses only. For normal use see the\n ``da.from_array`` function.\n\n Parameters\n ----------\n dask : dict\n Task dependency graph\n name : string\n Name of array in dask\n shape : tuple of ints\n Shape of the entire array\n chunks: iterable of tuples\n block sizes along each dimension\n dtype : str or dtype\n Typecode or data-type for the new Dask Array\n meta : empty ndarray\n empty ndarray created with same NumPy backend, ndim and dtype as the\n Dask Array being created (overrides dtype)\n\n See Also\n --------\n dask.array.from_array\n \"\"\"\n\n __slots__ = \"dask\", \"_name\", \"_cached_keys\", \"_chunks\", \"_meta\"\n\n def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):\n self = super(Array, cls).__new__(cls)\n assert isinstance(dask, Mapping)\n if not isinstance(dask, HighLevelGraph):\n dask = HighLevelGraph.from_collections(name, dask, dependencies=())\n self.dask = dask\n self.name = str(name)\n meta = meta_from_array(meta, dtype=dtype)\n\n if (\n isinstance(chunks, str)\n or isinstance(chunks, tuple)\n and chunks\n and any(isinstance(c, str) for c in chunks)\n ):\n dt = meta.dtype\n else:\n dt = None\n self._chunks = normalize_chunks(chunks, shape, dtype=dt)\n if self._chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n\n self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)\n\n for plugin in config.get(\"array_plugins\", ()):\n result = plugin(self)\n if result is not None:\n self = result\n\n return self\n\n def __reduce__(self):\n return (Array, (self.dask, self.name, self.chunks, self.dtype))\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_layers__(self):\n return (self.name,)\n\n def __dask_keys__(self):\n if self._cached_keys is not None:\n return self._cached_keys\n\n name, chunks, numblocks = self.name, self.chunks, self.numblocks\n\n def keys(*args):\n if not chunks:\n return [(name,)]\n ind = len(args)\n if ind + 1 == len(numblocks):\n result = [(name,) + args + (i,) for i in range(numblocks[ind])]\n else:\n result = [keys(*(args + (i,))) for i in range(numblocks[ind])]\n return result\n\n self._cached_keys = result = keys()\n return result\n\n def __dask_tokenize__(self):\n return self.name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"array_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def 
__dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return Array, (self.name, self.chunks, self.dtype, self._meta)\n\n @property\n def numblocks(self):\n return tuple(map(len, self.chunks))\n\n @property\n def npartitions(self):\n return reduce(mul, self.numblocks, 1)\n\n def compute_chunk_sizes(self):\n \"\"\"\n Compute the chunk sizes for a Dask array. This is especially useful\n when the chunk sizes are unknown (e.g., when indexing one Dask array\n with another).\n\n Notes\n -----\n This function modifies the Dask array in-place.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)\n >>> x.chunks\n ((2, 2, 1),)\n >>> y = x[x <= 0]\n >>> y.chunks\n ((nan, nan, nan),)\n >>> y.compute_chunk_sizes() # in-place computation\n dask.array<getitem, shape=(3,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n >>> y.chunks\n ((2, 1, 0),)\n\n \"\"\"\n x = self\n chunk_shapes = x.map_blocks(\n _get_chunk_shape,\n dtype=int,\n chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),\n new_axis=x.ndim,\n )\n\n c = []\n for i in range(x.ndim):\n s = x.ndim * [0] + [i]\n s[i] = slice(None)\n s = tuple(s)\n\n c.append(tuple(chunk_shapes[s]))\n\n # `map_blocks` assigns numpy dtypes\n # cast chunk dimensions back to python int before returning\n x._chunks = tuple(\n [tuple([int(chunk) for chunk in chunks]) for chunks in compute(tuple(c))[0]]\n )\n return x\n\n @property\n def shape(self):\n return tuple(cached_cumsum(c, initial_zero=True)[-1] for c in self.chunks)\n\n @property\n def chunksize(self):\n return tuple(max(c) for c in self.chunks)\n\n @property\n def dtype(self):\n return self._meta.dtype\n\n def _get_chunks(self):\n return self._chunks\n\n def _set_chunks(self, chunks):\n msg = (\n \"Can not set chunks directly\\n\\n\"\n \"Please use the rechunk method instead:\\n\"\n \" x.rechunk({})\\n\\n\"\n \"If trying to avoid unknown chunks, use\\n\"\n \" x.compute_chunk_sizes()\"\n )\n raise TypeError(msg.format(chunks))\n\n chunks = property(_get_chunks, _set_chunks, \"chunks property\")\n\n def __len__(self):\n if not self.chunks:\n raise TypeError(\"len() of unsized object\")\n return sum(self.chunks[0])\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n # Verify all arrays are properly handled by Dask\n if not isinstance(x, Array) and not is_valid_array_chunk(x):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc is np.matmul:\n from .routines import matmul\n\n # special case until apply_gufunc handles optional dimensions\n return matmul(*inputs, **kwargs)\n if numpy_ufunc.signature is not None:\n from .gufunc import apply_gufunc\n\n return apply_gufunc(\n numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs\n )\n if numpy_ufunc.nout > 1:\n from . import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc(*inputs, **kwargs)\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n elif method == \"outer\":\n from . 
import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc.outer(*inputs, **kwargs)\n else:\n return NotImplemented\n\n def __repr__(self):\n \"\"\"\n\n >>> import dask.array as da\n >>> da.ones((10, 10), chunks=(5, 5), dtype='i4')\n dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>\n \"\"\"\n chunksize = str(self.chunksize)\n name = self.name.rsplit(\"-\", 1)[0]\n return \"dask.array<%s, shape=%s, dtype=%s, chunksize=%s, chunktype=%s.%s>\" % (\n name,\n self.shape,\n self.dtype,\n chunksize,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n )\n\n def _repr_html_(self):\n table = self._repr_html_table()\n try:\n grid = self.to_svg(size=config.get(\"array.svg.size\", 120))\n except NotImplementedError:\n grid = \"\"\n\n both = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n table,\n \"</td>\",\n \"<td>\",\n grid,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n return \"\\n\".join(both)\n\n def _repr_html_table(self):\n if \"sparse\" in typename(type(self._meta)):\n nbytes = None\n cbytes = None\n elif not math.isnan(self.nbytes):\n nbytes = format_bytes(self.nbytes)\n cbytes = format_bytes(np.prod(self.chunksize) * self.dtype.itemsize)\n else:\n nbytes = \"unknown\"\n cbytes = \"unknown\"\n\n table = [\n \"<table>\",\n \" <thead>\",\n \" <tr><td> </td><th> Array </th><th> Chunk </th></tr>\",\n \" </thead>\",\n \" <tbody>\",\n \" <tr><th> Bytes </th><td> %s </td> <td> %s </td></tr>\"\n % (nbytes, cbytes)\n if nbytes is not None\n else \"\",\n \" <tr><th> Shape </th><td> %s </td> <td> %s </td></tr>\"\n % (str(self.shape), str(self.chunksize)),\n \" <tr><th> Count </th><td> %d Tasks </td><td> %d Chunks </td></tr>\"\n % (len(self.__dask_graph__()), self.npartitions),\n \" <tr><th> Type </th><td> %s </td><td> %s.%s </td></tr>\"\n % (\n self.dtype,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n ),\n \" </tbody>\",\n \"</table>\",\n ]\n return \"\\n\".join(table)\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def size(self):\n \"\"\" Number of elements in array \"\"\"\n return reduce(mul, self.shape, 1)\n\n @property\n def nbytes(self):\n \"\"\" Number of bytes in array \"\"\"\n return self.size * self.dtype.itemsize\n\n @property\n def itemsize(self):\n \"\"\" Length of one array element in bytes \"\"\"\n return self.dtype.itemsize\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, val):\n self._name = val\n # Clear the key cache when the name is reset\n self._cached_keys = None\n\n __array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix\n\n def __array__(self, dtype=None, **kwargs):\n x = self.compute()\n if dtype and x.dtype != dtype:\n x = x.astype(dtype)\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return x\n\n def __array_function__(self, func, types, args, kwargs):\n import dask.array as module\n\n def handle_nonmatching_names(func, args, kwargs):\n if func not in _HANDLED_FUNCTIONS:\n warnings.warn(\n \"The `{}` function is not implemented by Dask array. \"\n \"You may want to use the da.map_blocks function \"\n \"or something similar to silence this warning. \"\n \"Your code may stop working in a future release.\".format(\n func.__module__ + \".\" + func.__name__\n ),\n FutureWarning,\n )\n # Need to convert to array object (e.g. 
numpy.ndarray or\n # cupy.ndarray) as needed, so we can call the NumPy function\n # again and it gets the chance to dispatch to the right\n # implementation.\n args, kwargs = compute(args, kwargs)\n return func(*args, **kwargs)\n\n return _HANDLED_FUNCTIONS[func](*args, **kwargs)\n\n # First, verify that all types are handled by Dask. Otherwise, return NotImplemented.\n if not all(type is Array or is_valid_chunk_type(type) for type in types):\n return NotImplemented\n\n # Now try to find a matching function name. If that doesn't work, we may\n # be dealing with an alias or a function that's simply not in the Dask API.\n # Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.\n for submodule in func.__module__.split(\".\")[1:]:\n try:\n module = getattr(module, submodule)\n except AttributeError:\n return handle_nonmatching_names(func, args, kwargs)\n\n if not hasattr(module, func.__name__):\n return handle_nonmatching_names(func, args, kwargs)\n\n da_func = getattr(module, func.__name__)\n if da_func is func:\n return handle_nonmatching_names(func, args, kwargs)\n return da_func(*args, **kwargs)\n\n @property\n def _elemwise(self):\n return elemwise\n\n @wraps(store)\n def store(self, target, **kwargs):\n r = store([self], [target], **kwargs)\n\n if kwargs.get(\"return_stored\", False):\n r = r[0]\n\n return r\n\n def to_svg(self, size=500):\n \"\"\"Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Examples\n --------\n >>> x.to_svg(size=500) # doctest: +SKIP\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n \"\"\"\n from .svg import svg\n\n return svg(self.chunks, size=size)\n\n def to_hdf5(self, filename, datapath, **kwargs):\n \"\"\"Store array in HDF5 file\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n return to_hdf5(filename, datapath, self, **kwargs)\n\n def to_dask_dataframe(self, columns=None, index=None, meta=None):\n \"\"\"Convert dask Array to dask Dataframe\n\n Parameters\n ----------\n columns: list or string\n list of column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether the array has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). If all the chunks are known,\n a default index with known divsions is created.\n\n Specifying ``index`` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to use for partitions of\n the Dask dataframe. By default, pandas DataFrame is used.\n\n See Also\n --------\n dask.dataframe.from_dask_array\n \"\"\"\n from ..dataframe import from_dask_array\n\n return from_dask_array(self, columns=columns, index=index, meta=meta)\n\n def __bool__(self):\n if self.size > 1:\n raise ValueError(\n \"The truth value of a {0} is ambiguous. 
\"\n \"Use a.any() or a.all().\".format(self.__class__.__name__)\n )\n else:\n return bool(self.compute())\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n if self.size > 1:\n raise TypeError(\"Only length-1 arrays can be converted to Python scalars\")\n else:\n return cast_type(self.compute())\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __complex__(self):\n return self._scalarfunc(complex)\n\n def __setitem__(self, key, value):\n from .routines import where\n\n if isinstance(key, Array):\n if isinstance(value, Array) and value.ndim > 1:\n raise ValueError(\"boolean index array should have 1 dimension\")\n y = where(key, value, self)\n self._meta = y._meta\n self.dask = y.dask\n self.name = y.name\n self._chunks = y.chunks\n return self\n else:\n raise NotImplementedError(\n \"Item assignment with %s not supported\" % type(key)\n )\n\n def __getitem__(self, index):\n # Field access, e.g. x['a'] or x[['a', 'b']]\n if isinstance(index, str) or (\n isinstance(index, list) and index and all(isinstance(i, str) for i in index)\n ):\n if isinstance(index, str):\n dt = self.dtype[index]\n else:\n dt = _make_sliced_dtype(self.dtype, index)\n\n if dt.shape:\n new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))\n chunks = self.chunks + tuple((i,) for i in dt.shape)\n return self.map_blocks(\n getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis\n )\n else:\n return self.map_blocks(getitem, index, dtype=dt)\n\n if not isinstance(index, tuple):\n index = (index,)\n\n from .slicing import (\n normalize_index,\n slice_with_int_dask_array,\n slice_with_bool_dask_array,\n )\n\n index2 = normalize_index(index, self.shape)\n dependencies = {self.name}\n for i in index2:\n if isinstance(i, Array):\n dependencies.add(i.name)\n\n if any(isinstance(i, Array) and i.dtype.kind in \"iu\" for i in index2):\n self, index2 = slice_with_int_dask_array(self, index2)\n if any(isinstance(i, Array) and i.dtype == bool for i in index2):\n self, index2 = slice_with_bool_dask_array(self, index2)\n\n if all(isinstance(i, slice) and i == slice(None) for i in index2):\n return self\n\n out = \"getitem-\" + tokenize(self, index2)\n dsk, chunks = slice_array(out, self.name, self.chunks, index2, self.itemsize)\n\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])\n\n meta = meta_from_array(self._meta, ndim=len(chunks))\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return Array(graph, out, chunks, meta=meta)\n\n def _vindex(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n if any(k is None for k in key):\n raise IndexError(\n \"vindex does not support indexing with None (np.newaxis), \"\n \"got {}\".format(key)\n )\n if all(isinstance(k, slice) for k in key):\n if all(\n k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)\n ):\n return self\n raise IndexError(\n \"vindex requires at least one non-slice to vectorize over \"\n \"when the slices are not over the entire array (i.e, x[:]). \"\n \"Use normal slicing instead when only using slices. Got: {}\".format(key)\n )\n return _vindex(self, *key)\n\n @property\n def vindex(self):\n \"\"\"Vectorized indexing with broadcasting.\n\n This is equivalent to numpy's advanced indexing, using arrays that are\n broadcast against each other. 
This allows for pointwise indexing:\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> x = from_array(x, chunks=2)\n >>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()\n array([1, 5, 9])\n\n Mixed basic/advanced indexing with slices/arrays is also supported. The\n order of dimensions in the result follows those proposed for\n `ndarray.vindex <https://github.com/numpy/numpy/pull/6256>`_:\n the subspace spanned by arrays is followed by all slices.\n\n Note: ``vindex`` provides more general functionality than standard\n indexing, but it also has fewer optimizations and can be significantly\n slower.\n \"\"\"\n return IndexCallable(self._vindex)\n\n def _blocks(self, index):\n from .slicing import normalize_index\n\n if not isinstance(index, tuple):\n index = (index,)\n if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:\n raise ValueError(\"Can only slice with a single list\")\n if any(ind is None for ind in index):\n raise ValueError(\"Slicing with np.newaxis or None is not supported\")\n index = normalize_index(index, self.numblocks)\n index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n\n name = \"blocks-\" + tokenize(self, index)\n\n new_keys = np.array(self.__dask_keys__(), dtype=object)[index]\n\n chunks = tuple(\n tuple(np.array(c)[i].tolist()) for c, i in zip(self.chunks, index)\n )\n\n keys = product(*(range(len(c)) for c in chunks))\n\n layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}\n\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n return Array(graph, name, chunks, meta=self)\n\n @property\n def blocks(self):\n \"\"\"Slice an array by blocks\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.blocks[0].compute()\n array([0, 1])\n >>> x.blocks[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.blocks[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.blocks[[-1, 0]].compute()\n array([8, 9, 0, 1])\n\n Returns\n -------\n A Dask array\n \"\"\"\n return IndexCallable(self._blocks)\n\n @property\n def partitions(self):\n \"\"\"Slice an array by partitions. Alias of dask array .blocks attribute.\n\n This alias allows you to write agnostic code that works with both\n dask arrays and dask dataframes.\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). 
The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.partitions[0].compute()\n array([0, 1])\n >>> x.partitions[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.partitions[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.partitions[[-1, 0]].compute()\n array([8, 9, 0, 1])\n >>> all(x.partitions[:].compute() == x.blocks[:].compute())\n True\n\n Returns\n -------\n A Dask array\n \"\"\"\n return self.blocks\n\n @derived_from(np.ndarray)\n def dot(self, other):\n from .routines import tensordot\n\n return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))\n\n @property\n def A(self):\n return self\n\n @property\n def T(self):\n return self.transpose()\n\n @derived_from(np.ndarray)\n def transpose(self, *axes):\n from .routines import transpose\n\n if not axes:\n axes = None\n elif len(axes) == 1 and isinstance(axes[0], Iterable):\n axes = axes[0]\n if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):\n # no transpose necessary\n return self\n else:\n return transpose(self, axes=axes)\n\n @derived_from(np.ndarray)\n def ravel(self):\n from .routines import ravel\n\n return ravel(self)\n\n flatten = ravel\n\n @derived_from(np.ndarray)\n def choose(self, choices):\n from .routines import choose\n\n return choose(self, choices)\n\n @derived_from(np.ndarray)\n def reshape(self, *shape):\n from .reshape import reshape\n\n if len(shape) == 1 and not isinstance(shape[0], Number):\n shape = shape[0]\n return reshape(self, shape)\n\n def topk(self, k, axis=-1, split_every=None):\n \"\"\"The top k elements of an array.\n\n See ``da.topk`` for docstring\"\"\"\n from .reductions import topk\n\n return topk(self, k, axis=axis, split_every=split_every)\n\n def argtopk(self, k, axis=-1, split_every=None):\n \"\"\"The indices of the top k elements of an array.\n\n See ``da.argtopk`` for docstring\"\"\"\n from .reductions import argtopk\n\n return argtopk(self, k, axis=axis, split_every=split_every)\n\n def astype(self, dtype, **kwargs):\n \"\"\"Copy of the array, cast to a specified type.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'unsafe'\n for backwards compatibility.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n copy : bool, optional\n By default, astype always returns a newly allocated array. 
If this\n is set to False and the `dtype` requirement is satisfied, the input\n array is returned instead of a copy.\n \"\"\"\n # Scalars don't take `casting` or `copy` kwargs - as such we only pass\n # them to `map_blocks` if specified by user (different than defaults).\n extra = set(kwargs) - {\"casting\", \"copy\"}\n if extra:\n raise TypeError(\n \"astype does not take the following keyword \"\n \"arguments: {0!s}\".format(list(extra))\n )\n casting = kwargs.get(\"casting\", \"unsafe\")\n dtype = np.dtype(dtype)\n if self.dtype == dtype:\n return self\n elif not np.can_cast(self.dtype, dtype, casting=casting):\n raise TypeError(\n \"Cannot cast array from {0!r} to {1!r}\"\n \" according to the rule \"\n \"{2!r}\".format(self.dtype, dtype, casting)\n )\n return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)\n\n def __abs__(self):\n return elemwise(operator.abs, self)\n\n @check_if_handled_given_other\n def __add__(self, other):\n return elemwise(operator.add, self, other)\n\n @check_if_handled_given_other\n def __radd__(self, other):\n return elemwise(operator.add, other, self)\n\n @check_if_handled_given_other\n def __and__(self, other):\n return elemwise(operator.and_, self, other)\n\n @check_if_handled_given_other\n def __rand__(self, other):\n return elemwise(operator.and_, other, self)\n\n @check_if_handled_given_other\n def __div__(self, other):\n return elemwise(operator.div, self, other)\n\n @check_if_handled_given_other\n def __rdiv__(self, other):\n return elemwise(operator.div, other, self)\n\n @check_if_handled_given_other\n def __eq__(self, other):\n return elemwise(operator.eq, self, other)\n\n @check_if_handled_given_other\n def __gt__(self, other):\n return elemwise(operator.gt, self, other)\n\n @check_if_handled_given_other\n def __ge__(self, other):\n return elemwise(operator.ge, self, other)\n\n def __invert__(self):\n return elemwise(operator.invert, self)\n\n @check_if_handled_given_other\n def __lshift__(self, other):\n return elemwise(operator.lshift, self, other)\n\n @check_if_handled_given_other\n def __rlshift__(self, other):\n return elemwise(operator.lshift, other, self)\n\n @check_if_handled_given_other\n def __lt__(self, other):\n return elemwise(operator.lt, self, other)\n\n @check_if_handled_given_other\n def __le__(self, other):\n return elemwise(operator.le, self, other)\n\n @check_if_handled_given_other\n def __mod__(self, other):\n return elemwise(operator.mod, self, other)\n\n @check_if_handled_given_other\n def __rmod__(self, other):\n return elemwise(operator.mod, other, self)\n\n @check_if_handled_given_other\n def __mul__(self, other):\n return elemwise(operator.mul, self, other)\n\n @check_if_handled_given_other\n def __rmul__(self, other):\n return elemwise(operator.mul, other, self)\n\n @check_if_handled_given_other\n def __ne__(self, other):\n return elemwise(operator.ne, self, other)\n\n def __neg__(self):\n return elemwise(operator.neg, self)\n\n @check_if_handled_given_other\n def __or__(self, other):\n return elemwise(operator.or_, self, other)\n\n def __pos__(self):\n return self\n\n @check_if_handled_given_other\n def __ror__(self, other):\n return elemwise(operator.or_, other, self)\n\n @check_if_handled_given_other\n def __pow__(self, other):\n return elemwise(operator.pow, self, other)\n\n @check_if_handled_given_other\n def __rpow__(self, other):\n return elemwise(operator.pow, other, self)\n\n @check_if_handled_given_other\n def __rshift__(self, other):\n return elemwise(operator.rshift, self, other)\n\n 
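    # NOTE (editorial sketch, not part of the upstream source): the dunder
    # methods in this block all defer to ``elemwise``, so arithmetic between
    # dask arrays (or an array and a scalar) stays lazy and follows NumPy
    # broadcasting rules, while ``astype`` above checks the requested
    # ``casting`` rule eagerly via ``np.can_cast`` before building the graph.
    # Assuming ``dask.array`` is importable as ``da``, usage looks roughly
    # like:
    #
    #     >>> import dask.array as da
    #     >>> x = da.arange(6, chunks=3)
    #     >>> y = (x + 1) * 2        # dispatches to elemwise(operator.add / operator.mul)
    #     >>> y.compute()
    #     array([ 2,  4,  6,  8, 10, 12])
    #     >>> x.astype('float32').dtype
    #     dtype('float32')
    #     >>> da.ones(3).astype('int64', casting='safe')
    #     Traceback (most recent call last):
    #     ...
    #     TypeError: Cannot cast array from dtype('float64') to dtype('int64') according to the rule 'safe'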
@check_if_handled_given_other\n def __rrshift__(self, other):\n return elemwise(operator.rshift, other, self)\n\n @check_if_handled_given_other\n def __sub__(self, other):\n return elemwise(operator.sub, self, other)\n\n @check_if_handled_given_other\n def __rsub__(self, other):\n return elemwise(operator.sub, other, self)\n\n @check_if_handled_given_other\n def __truediv__(self, other):\n return elemwise(operator.truediv, self, other)\n\n @check_if_handled_given_other\n def __rtruediv__(self, other):\n return elemwise(operator.truediv, other, self)\n\n @check_if_handled_given_other\n def __floordiv__(self, other):\n return elemwise(operator.floordiv, self, other)\n\n @check_if_handled_given_other\n def __rfloordiv__(self, other):\n return elemwise(operator.floordiv, other, self)\n\n @check_if_handled_given_other\n def __xor__(self, other):\n return elemwise(operator.xor, self, other)\n\n @check_if_handled_given_other\n def __rxor__(self, other):\n return elemwise(operator.xor, other, self)\n\n @check_if_handled_given_other\n def __matmul__(self, other):\n from .routines import matmul\n\n return matmul(self, other)\n\n @check_if_handled_given_other\n def __rmatmul__(self, other):\n from .routines import matmul\n\n return matmul(other, self)\n\n @check_if_handled_given_other\n def __divmod__(self, other):\n from .ufunc import divmod\n\n return divmod(self, other)\n\n @check_if_handled_given_other\n def __rdivmod__(self, other):\n from .ufunc import divmod\n\n return divmod(other, self)\n\n @derived_from(np.ndarray)\n def any(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import any\n\n return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def all(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import all\n\n return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def min(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import min\n\n return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def max(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import max\n\n return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmin(self, axis=None, split_every=None, out=None):\n from .reductions import argmin\n\n return argmin(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmax(self, axis=None, split_every=None, out=None):\n from .reductions import argmax\n\n return argmax(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import sum\n\n return sum(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None):\n from .reductions import trace\n\n return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)\n\n @derived_from(np.ndarray)\n def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import prod\n\n return prod(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def mean(self, axis=None, dtype=None, 
keepdims=False, split_every=None, out=None):\n from .reductions import mean\n\n return mean(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def std(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import std\n\n return std(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def var(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import var\n\n return var(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n def moment(\n self,\n order,\n axis=None,\n dtype=None,\n keepdims=False,\n ddof=0,\n split_every=None,\n out=None,\n ):\n \"\"\"Calculate the nth centralized moment.\n\n Parameters\n ----------\n order : int\n Order of the moment that is returned, must be >= 2.\n axis : int, optional\n Axis along which the central moment is computed. The default is to\n compute the moment of the flattened array.\n dtype : data-type, optional\n Type to use in computing the moment. For arrays of integer type the\n default is float64; for arrays of float types it is the same as the\n array type.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. With this option, the result\n will broadcast correctly against the original array.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n N - ddof, where N represents the number of elements. By default\n ddof is zero.\n\n Returns\n -------\n moment : ndarray\n\n References\n ----------\n .. [1] Pebay, Philippe (2008), \"Formulas for Robust, One-Pass Parallel\n Computation of Covariances and Arbitrary-Order Statistical Moments\",\n Technical Report SAND2008-6212, Sandia National Laboratories.\n\n \"\"\"\n\n from .reductions import moment\n\n return moment(\n self,\n order,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @wraps(map_blocks)\n def map_blocks(self, func, *args, **kwargs):\n return map_blocks(func, self, *args, **kwargs)\n\n def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):\n \"\"\"Map a function over blocks of the array with some overlap\n\n We share neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block\n depth: int, tuple, or dict\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis\n boundary: str, tuple, dict\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n **kwargs:\n Other keyword arguments valid in ``map_blocks``\n\n Examples\n --------\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = from_array(x, chunks=5)\n >>> def derivative(x):\n ... 
return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> import dask.array as da\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> d.map_overlap(lambda x: x + x.size, depth=1).compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n \"\"\"\n from .overlap import map_overlap\n\n return map_overlap(\n func, self, depth=depth, boundary=boundary, trim=trim, **kwargs\n )\n\n @derived_from(np.ndarray)\n def cumsum(self, axis, dtype=None, out=None):\n from .reductions import cumsum\n\n return cumsum(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def cumprod(self, axis, dtype=None, out=None):\n from .reductions import cumprod\n\n return cumprod(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def squeeze(self, axis=None):\n from .routines import squeeze\n\n return squeeze(self, axis)\n\n def rechunk(self, chunks=\"auto\", threshold=None, block_size_limit=None):\n \"\"\" See da.rechunk for docstring \"\"\"\n from . import rechunk # avoid circular import\n\n return rechunk(self, chunks, threshold, block_size_limit)\n\n @property\n def real(self):\n from .ufunc import real\n\n return real(self)\n\n @property\n def imag(self):\n from .ufunc import imag\n\n return imag(self)\n\n def conj(self):\n from .ufunc import conj\n\n return conj(self)\n\n @derived_from(np.ndarray)\n def clip(self, min=None, max=None):\n from .ufunc import clip\n\n return clip(self, min, max)\n\n def view(self, dtype=None, order=\"C\"):\n \"\"\"Get a view of the array as a new data type\n\n Parameters\n ----------\n dtype:\n The dtype by which to view the array.\n The default, None, results in the view having the same data-type\n as the original array.\n order: string\n 'C' or 'F' (Fortran) ordering\n\n This reinterprets the bytes of the array under a new dtype. If that\n dtype does not have the same size as the original array then the shape\n will change.\n\n Beware that both numpy and dask.array can behave oddly when taking\n shape-changing views of arrays under Fortran ordering. Under some\n versions of NumPy this function will fail when taking shape-changing\n views of Fortran ordered arrays if the first dimension has chunks of\n size one.\n \"\"\"\n if dtype is None:\n dtype = self.dtype\n else:\n dtype = np.dtype(dtype)\n mult = self.dtype.itemsize / dtype.itemsize\n\n if order == \"C\":\n chunks = self.chunks[:-1] + (\n tuple(ensure_int(c * mult) for c in self.chunks[-1]),\n )\n elif order == \"F\":\n chunks = (\n tuple(ensure_int(c * mult) for c in self.chunks[0]),\n ) + self.chunks[1:]\n else:\n raise ValueError(\"Order must be one of 'C' or 'F'\")\n\n return self.map_blocks(\n chunk.view, dtype, order=order, dtype=dtype, chunks=chunks\n )\n\n @derived_from(np.ndarray)\n def swapaxes(self, axis1, axis2):\n from .routines import swapaxes\n\n return swapaxes(self, axis1, axis2)\n\n @derived_from(np.ndarray)\n def round(self, decimals=0):\n from .routines import round\n\n return round(self, decimals=decimals)\n\n def copy(self):\n \"\"\"\n Copy array. 
This is a no-op for dask.arrays, which are immutable\n \"\"\"\n if self.npartitions == 1:\n return self.map_blocks(M.copy)\n else:\n return Array(self.dask, self.name, self.chunks, meta=self)\n\n def __deepcopy__(self, memo):\n c = self.copy()\n memo[id(self)] = c\n return c\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into an array of ``dask.delayed`` objects, one per chunk.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n See Also\n --------\n dask.array.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n if optimize_graph:\n graph = self.__dask_optimize__(graph, keys) # TODO, don't collape graph\n name = \"delayed-\" + self.name\n graph = HighLevelGraph.from_collections(name, graph, dependencies=())\n L = ndeepmap(self.ndim, lambda k: Delayed(k, graph), keys)\n return np.array(L, dtype=object)\n\n @derived_from(np.ndarray)\n def repeat(self, repeats, axis=None):\n from .creation import repeat\n\n return repeat(self, repeats, axis=axis)\n\n @derived_from(np.ndarray)\n def nonzero(self):\n from .routines import nonzero\n\n return nonzero(self)\n\n def to_zarr(self, *args, **kwargs):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n See function ``to_zarr()`` for parameters.\n \"\"\"\n return to_zarr(self, *args, **kwargs)\n\n def to_tiledb(self, uri, *args, **kwargs):\n \"\"\"Save array to the TileDB storage manager\n\n See function ``to_tiledb()`` for argument documentation.\n\n See https://docs.tiledb.io for details about the format and engine.\n \"\"\"\n from .tiledb_io import to_tiledb\n\n return to_tiledb(self, uri, *args, **kwargs)\n\n\ndef ensure_int(f):\n i = int(f)\n if i != f:\n raise ValueError(\"Could not coerce %f to integer\" % f)\n return i\n\n\ndef normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):\n \"\"\"Normalize chunks to tuple of tuples\n\n This takes in a variety of input types and information and produces a full\n tuple-of-tuples result for chunks, suitable to be passed to Array or\n rechunk or any other operation that creates a Dask array.\n\n Parameters\n ----------\n chunks: tuple, int, dict, or string\n The chunks to be normalized. See examples below for more details\n shape: Tuple[int]\n The shape of the array\n limit: int (optional)\n The maximum block size to target in bytes,\n if freedom is given to choose\n dtype: np.dtype\n previous_chunks: Tuple[Tuple[int]] optional\n Chunks from a previous array that we should use for inspiration when\n rechunking auto dimensions. 
If not provided but auto-chunking exists\n then auto-dimensions will prefer square-like chunk shapes.\n\n Examples\n --------\n Specify uniform chunk sizes\n\n >>> normalize_chunks((2, 2), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Also passes through fully explicit tuple-of-tuples\n\n >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Cleans up lists to tuples\n\n >>> normalize_chunks([[2, 2], [3, 3]])\n ((2, 2), (3, 3))\n\n Expands integer inputs 10 -> (10, 10)\n\n >>> normalize_chunks(10, shape=(30, 5))\n ((10, 10, 10), (5,))\n\n Expands dict inputs\n\n >>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))\n ((2, 2, 2), (3, 3))\n\n The values -1 and None get mapped to full size\n\n >>> normalize_chunks((5, -1), shape=(10, 10))\n ((5, 5), (10,))\n\n Use the value \"auto\" to automatically determine chunk sizes along certain\n dimensions. This uses the ``limit=`` and ``dtype=`` keywords to\n determine how large to make the chunks. The term \"auto\" can be used\n anywhere an integer can be used. See array chunking documentation for more\n information.\n\n >>> normalize_chunks((\"auto\",), shape=(20,), limit=5, dtype='uint8')\n ((5, 5, 5, 5),)\n\n You can also use byte sizes (see ``dask.utils.parse_bytes``) in place of\n \"auto\" to ask for a particular size\n\n >>> normalize_chunks(\"1kiB\", shape=(2000,), dtype='float32')\n ((250, 250, 250, 250, 250, 250, 250, 250),)\n\n Respects null dimensions\n\n >>> normalize_chunks((), shape=(0, 0))\n ((0,), (0,))\n \"\"\"\n if dtype and not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n if chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n if isinstance(chunks, list):\n chunks = tuple(chunks)\n if isinstance(chunks, (Number, str)):\n chunks = (chunks,) * len(shape)\n if isinstance(chunks, dict):\n chunks = tuple(chunks.get(i, None) for i in range(len(shape)))\n if isinstance(chunks, np.ndarray):\n chunks = chunks.tolist()\n if not chunks and shape and all(s == 0 for s in shape):\n chunks = ((0,),) * len(shape)\n\n if (\n shape\n and len(shape) == 1\n and len(chunks) > 1\n and all(isinstance(c, (Number, str)) for c in chunks)\n ):\n chunks = (chunks,)\n\n if shape and len(chunks) != len(shape):\n raise ValueError(\n \"Chunks and shape must be of the same length/dimension. 
\"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))\n\n # If specifying chunk size in bytes, use that value to set the limit.\n # Verify there is only one consistent value of limit or chunk-bytes used.\n for c in chunks:\n if isinstance(c, str) and c != \"auto\":\n parsed = parse_bytes(c)\n if limit is None:\n limit = parsed\n elif parsed != limit:\n raise ValueError(\n \"Only one consistent value of limit or chunk is allowed.\"\n \"Used %s != %s\" % (parsed, limit)\n )\n # Substitute byte limits with 'auto' now that limit is set.\n chunks = tuple(\"auto\" if isinstance(c, str) and c != \"auto\" else c for c in chunks)\n\n if any(c == \"auto\" for c in chunks):\n chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)\n\n if shape is not None:\n chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))\n\n if chunks and shape is not None:\n chunks = sum(\n (\n blockdims_from_blockshape((s,), (c,))\n if not isinstance(c, (tuple, list))\n else (c,)\n for s, c in zip(shape, chunks)\n ),\n (),\n )\n for c in chunks:\n if not c:\n raise ValueError(\n \"Empty tuples are not allowed in chunks. Express \"\n \"zero length dimensions with 0(s) in chunks\"\n )\n\n if shape is not None:\n if len(chunks) != len(shape):\n raise ValueError(\n \"Input array has %d dimensions but the supplied \"\n \"chunks has only %d dimensions\" % (len(shape), len(chunks))\n )\n if not all(\n c == s or (math.isnan(c) or math.isnan(s))\n for c, s in zip(map(sum, chunks), shape)\n ):\n raise ValueError(\n \"Chunks do not add up to shape. \"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n\n return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)\n\n\ndef _compute_multiplier(limit: int, dtype, largest_block: int, result):\n \"\"\"\n Utility function for auto_chunk, to fin how much larger or smaller the ideal\n chunk size is relative to what we have now.\n \"\"\"\n return (\n limit\n / dtype.itemsize\n / largest_block\n / np.prod(list(r if r != 0 else 1 for r in result.values()))\n )\n\n\ndef auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):\n \"\"\"Determine automatic chunks\n\n This takes in a chunks value that contains ``\"auto\"`` values in certain\n dimensions and replaces those values with concrete dimension sizes that try\n to get chunks to be of a certain size in bytes, provided by the ``limit=``\n keyword. 
If multiple dimensions are marked as ``\"auto\"`` then they will\n all respond to meet the desired byte limit, trying to respect the aspect\n ratio of their dimensions in ``previous_chunks=``, if given.\n\n Parameters\n ----------\n chunks: Tuple\n A tuple of either dimensions or tuples of explicit chunk dimensions\n Some entries should be \"auto\"\n shape: Tuple[int]\n limit: int, str\n The maximum allowable size of a chunk in bytes\n previous_chunks: Tuple[Tuple[int]]\n\n See also\n --------\n normalize_chunks: for full docstring and parameters\n \"\"\"\n if previous_chunks is not None:\n previous_chunks = tuple(\n c if isinstance(c, tuple) else (c,) for c in previous_chunks\n )\n chunks = list(chunks)\n\n autos = {i for i, c in enumerate(chunks) if c == \"auto\"}\n if not autos:\n return tuple(chunks)\n\n if limit is None:\n limit = config.get(\"array.chunk-size\")\n if isinstance(limit, str):\n limit = parse_bytes(limit)\n\n if dtype is None:\n raise TypeError(\"DType must be known for auto-chunking\")\n\n if dtype.hasobject:\n raise NotImplementedError(\n \"Can not use auto rechunking with object dtype. \"\n \"We are unable to estimate the size in bytes of object data\"\n )\n\n for x in tuple(chunks) + tuple(shape):\n if (\n isinstance(x, Number)\n and np.isnan(x)\n or isinstance(x, tuple)\n and np.isnan(x).any()\n ):\n raise ValueError(\n \"Can not perform automatic rechunking with unknown \"\n \"(nan) chunk sizes.%s\" % unknown_chunk_message\n )\n\n limit = max(1, limit)\n\n largest_block = np.prod(\n [cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != \"auto\"]\n )\n\n if previous_chunks:\n # Base ideal ratio on the median chunk size of the previous chunks\n result = {a: np.median(previous_chunks[a]) for a in autos}\n\n ideal_shape = []\n for i, s in enumerate(shape):\n chunk_frequencies = frequencies(previous_chunks[i])\n mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])\n if mode > 1 and count >= len(previous_chunks[i]) / 2:\n ideal_shape.append(mode)\n else:\n ideal_shape.append(s)\n\n # How much larger or smaller the ideal chunk size is relative to what we have now\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n last_multiplier = 0\n last_autos = set()\n while (\n multiplier != last_multiplier or autos != last_autos\n ): # while things change\n last_multiplier = multiplier # record previous values\n last_autos = set(autos) # record previous values\n\n # Expand or contract each of the dimensions appropriately\n for a in sorted(autos):\n if ideal_shape[a] == 0:\n result[a] = 0\n continue\n proposed = result[a] * multiplier ** (1 / len(autos))\n if proposed > shape[a]: # we've hit the shape boundary\n autos.remove(a)\n largest_block *= shape[a]\n chunks[a] = shape[a]\n del result[a]\n else:\n result[a] = round_to(proposed, ideal_shape[a])\n\n # recompute how much multiplier we have left, repeat\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n for k, v in result.items():\n chunks[k] = v\n return tuple(chunks)\n\n else:\n size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))\n small = [i for i in autos if shape[i] < size]\n if small:\n for i in small:\n chunks[i] = (shape[i],)\n return auto_chunks(chunks, shape, limit, dtype)\n\n for i in autos:\n chunks[i] = round_to(size, shape[i])\n\n return tuple(chunks)\n\n\ndef round_to(c, s):\n \"\"\"Return a chunk dimension that is close to an even multiple or factor\n\n We want values for c that are nicely aligned with s.\n\n If c is 
smaller than s then we want the largest factor of s that is less than the\n desired chunk size, but not less than half, which is too much. If no such\n factor exists then we just go with the original chunk size and accept an\n uneven chunk at the end.\n\n If c is larger than s then we want the largest multiple of s that is still\n smaller than c.\n \"\"\"\n if c <= s:\n try:\n return max(f for f in factors(s) if c / 2 <= f <= c)\n except ValueError: # no matching factors within factor of two\n return max(1, int(c))\n else:\n return c // s * s\n\n\ndef _get_chunk_shape(a):\n s = np.asarray(a.shape, dtype=int)\n return s[len(s) * (None,) + (slice(None),)]\n\n\ndef from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n):\n \"\"\"Create dask array from something that looks like an array\n\n Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.\n\n Parameters\n ----------\n x : array_like\n chunks : int, tuple\n How to chunk the array. Must be one of the following forms:\n\n - A blocksize like 1000.\n - A blockshape like (1000, 1000).\n - Explicit sizes of all blocks along all dimensions like\n ((1000, 1000, 500), (400, 400)).\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n\n -1 or None as a blocksize indicate the size of the corresponding\n dimension.\n name : str, optional\n The key name to use for the array. Defaults to a hash of ``x``.\n By default, hash uses python's standard sha1. This behaviour can be\n changed by installing cityhash, xxhash or murmurhash. If installed,\n a large-factor speedup can be obtained in the tokenisation step.\n Use ``name=False`` to generate a random name instead of hashing (fast)\n\n .. note::\n\n Because this ``name`` is used as the key in task graphs, you should\n ensure that it uniquely identifies the data contained within. If\n you'd like to provide a descriptive name that is still unique, combine\n the descriptive name with :func:`dask.base.tokenize` of the\n ``array_like``. See :ref:`graphs` for more.\n\n lock : bool or Lock, optional\n If ``x`` doesn't support concurrent reads then provide a lock here, or\n pass in True to have dask.array create one for you.\n asarray : bool, optional\n If True then call np.asarray on chunks to convert them to numpy arrays.\n If False then chunks are passed through unchanged.\n If None (default) then we use True if the ``__array_function__`` method\n is undefined.\n fancy : bool, optional\n If ``x`` doesn't support fancy indexing (e.g. indexing with lists or\n arrays) then set to False. Default is True.\n meta : Array-like, optional\n The metadata for the resulting dask array. 
This is the kind of array\n that will result from slicing the input array.\n Defaults to the input array.\n\n Examples\n --------\n\n >>> x = h5py.File('...')['/data/path'] # doctest: +SKIP\n >>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP\n\n If your underlying datastore does not support concurrent reads then include\n the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple\n arrays to coordinate around the same lock.\n\n >>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP\n\n If your underlying datastore has a ``.chunks`` attribute (as h5py and zarr\n datasets do) then a multiple of that chunk shape will be used if you\n do not provide a chunk shape.\n\n >>> a = da.from_array(x, chunks='auto') # doctest: +SKIP\n >>> a = da.from_array(x, chunks='100 MiB') # doctest: +SKIP\n >>> a = da.from_array(x) # doctest: +SKIP\n\n If providing a name, ensure that it is unique\n\n >>> import dask.base\n >>> token = dask.base.tokenize(x) # doctest: +SKIP\n >>> a = da.from_array('myarray-' + token) # doctest: +SKIP\n\n Numpy ndarrays are eagerly sliced and then embedded in the graph.\n\n >>> import dask.array\n >>> a = dask.array.from_array(np.array([[1, 2], [3, 4]]), chunks=(1,1))\n >>> a.dask[a.name, 0, 0][0]\n array([1])\n\n \"\"\"\n if isinstance(x, Array):\n raise ValueError(\n \"Array is already a dask array. Use 'asarray' or \" \"'rechunk' instead.\"\n )\n elif is_dask_collection(x):\n warnings.warn(\n \"Passing an object to dask.array.from_array which is already a \"\n \"Dask collection. This can lead to unexpected behavior.\"\n )\n\n if isinstance(x, (list, tuple, memoryview) + np.ScalarType):\n x = np.array(x)\n\n if asarray is None:\n asarray = not hasattr(x, \"__array_function__\")\n\n previous_chunks = getattr(x, \"chunks\", None)\n\n chunks = normalize_chunks(\n chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks\n )\n\n if name in (None, True):\n token = tokenize(x, chunks)\n original_name = \"array-original-\" + token\n name = name or \"array-\" + token\n elif name is False:\n original_name = name = \"array-\" + str(uuid.uuid1())\n else:\n original_name = name\n\n if lock is True:\n lock = SerializableLock()\n\n is_ndarray = type(x) is np.ndarray\n is_single_block = all(len(c) == 1 for c in chunks)\n # Always use the getter for h5py etc. 
Not using isinstance(x, np.ndarray)\n # because np.matrix is a subclass of np.ndarray.\n if is_ndarray and not is_single_block and not lock:\n # eagerly slice numpy arrays to prevent memory blowup\n # GH5367, GH5601\n slices = slices_from_chunks(chunks)\n keys = product([name], *(range(len(bds)) for bds in chunks))\n values = [x[slc] for slc in slices]\n dsk = dict(zip(keys, values))\n\n elif is_ndarray and is_single_block:\n # No slicing needed\n dsk = {(name,) + (0,) * x.ndim: x}\n else:\n if getitem is None:\n if fancy:\n getitem = getter\n else:\n getitem = getter_nofancy\n\n dsk = getem(\n original_name,\n chunks,\n getitem=getitem,\n shape=x.shape,\n out_name=name,\n lock=lock,\n asarray=asarray,\n dtype=x.dtype,\n )\n dsk[original_name] = x\n\n # Workaround for TileDB, its indexing is 1-based,\n # and doesn't seems to support 0-length slicing\n if x.__class__.__module__.split(\".\")[0] == \"tiledb\" and hasattr(x, \"_ctx_\"):\n return Array(dsk, name, chunks, dtype=x.dtype)\n\n if meta is None:\n meta = x\n\n return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, \"dtype\", None))\n\n\ndef from_zarr(\n url, component=None, storage_options=None, chunks=None, name=None, **kwargs\n):\n \"\"\"Load array from the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be loaded, something like ``'foo/bar'``.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n chunks: tuple of ints or tuples of ints\n Passed to ``da.from_array``, allows setting the chunks on\n initialisation, if the chunking scheme in the on-disc dataset is not\n optimal for the calculations to follow.\n name : str, optional\n An optional keyname for the array. Defaults to hashing the input\n kwargs: passed to ``zarr.Array``.\n \"\"\"\n import zarr\n\n storage_options = storage_options or {}\n if isinstance(url, zarr.Array):\n z = url\n elif isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n else:\n mapper = url\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n chunks = chunks if chunks is not None else z.chunks\n if name is None:\n name = \"from-zarr-\" + tokenize(z, component, storage_options, chunks, **kwargs)\n return from_array(z, chunks, name=name)\n\n\ndef to_zarr(\n arr,\n url,\n component=None,\n storage_options=None,\n overwrite=False,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n arr: dask.array\n Data to store\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. 
Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be created/over-written.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n overwrite: bool\n If given array already exists, overwrite=False will cause an error,\n where overwrite=True will replace the existing data. Note that this\n check is done at computation time, not during graph creation.\n compute, return_stored: see ``store()``\n kwargs: passed to the ``zarr.create()`` function, e.g., compression options\n\n Raises\n ------\n ValueError\n If ``arr`` has unknown chunk sizes, which is not supported by Zarr.\n\n See Also\n --------\n dask.array.Array.compute_chunk_sizes\n\n \"\"\"\n import zarr\n\n if np.isnan(arr.shape).any():\n raise ValueError(\n \"Saving a dask array with unknown chunk sizes is not \"\n \"currently supported by Zarr.%s\" % unknown_chunk_message\n )\n\n if isinstance(url, zarr.Array):\n z = url\n if isinstance(z.store, (dict, zarr.DictStore)) and \"distributed\" in config.get(\n \"scheduler\", \"\"\n ):\n raise RuntimeError(\n \"Cannot store into in memory Zarr Array using \"\n \"the Distributed Scheduler.\"\n )\n arr = arr.rechunk(z.chunks)\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n if not _check_regular_chunks(arr.chunks):\n raise ValueError(\n \"Attempt to save array to zarr with irregular \"\n \"chunking, please call `arr.rechunk(...)` first.\"\n )\n\n storage_options = storage_options or {}\n\n if isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n else:\n # assume the object passed is already a mapper\n mapper = url\n\n chunks = [c[0] for c in arr.chunks]\n\n # The zarr.create function has the side-effect of immediately\n # creating metadata on disk. This may not be desired,\n # particularly if compute=False. The caller may be creating many\n # arrays on a slow filesystem, with the desire that any I/O be\n # sharded across workers (not done serially on the originating\n # machine). 
Or the caller may decide later to not to do this\n # computation, and so nothing should be written to disk.\n z = delayed(zarr.create)(\n shape=arr.shape,\n chunks=chunks,\n dtype=arr.dtype,\n store=mapper,\n path=component,\n overwrite=overwrite,\n **kwargs,\n )\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n\ndef _check_regular_chunks(chunkset):\n \"\"\"Check if the chunks are regular\n\n \"Regular\" in this context means that along every axis, the chunks all\n have the same size, except the last one, which may be smaller\n\n Parameters\n ----------\n chunkset: tuple of tuples of ints\n From the ``.chunks`` attribute of an ``Array``\n\n Returns\n -------\n True if chunkset passes, else False\n\n Examples\n --------\n >>> import dask.array as da\n >>> arr = da.zeros(10, chunks=(5, ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))\n >>> _check_regular_chunks(arr.chunks)\n False\n \"\"\"\n for chunks in chunkset:\n if len(chunks) == 1:\n continue\n if len(set(chunks[:-1])) > 1:\n return False\n if chunks[-1] > chunks[0]:\n return False\n return True\n\n\ndef from_delayed(value, shape, dtype=None, meta=None, name=None):\n \"\"\"Create a dask array from a dask delayed value\n\n This routine is useful for constructing dask arrays in an ad-hoc fashion\n using dask delayed, particularly when combined with stack and concatenate.\n\n The dask array will consist of a single chunk.\n\n Examples\n --------\n >>> import dask\n >>> import dask.array as da\n >>> value = dask.delayed(np.ones)(5)\n >>> array = da.from_delayed(value, (5,), dtype=float)\n >>> array\n dask.array<from-value, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>\n >>> array.compute()\n array([1., 1., 1., 1., 1.])\n \"\"\"\n from ..delayed import delayed, Delayed\n\n if not isinstance(value, Delayed) and hasattr(value, \"key\"):\n value = delayed(value)\n\n name = name or \"from-value-\" + tokenize(value, shape, dtype, meta)\n dsk = {(name,) + (0,) * len(shape): value.key}\n chunks = tuple((d,) for d in shape)\n # TODO: value._key may not be the name of the layer in value.dask\n # This should be fixed after we build full expression graphs\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[value])\n return Array(graph, name, chunks, dtype=dtype, meta=meta)\n\n\ndef from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):\n \"\"\"Create dask array in a single block by calling a function\n\n Calling the provided function with func(*args, **kwargs) should return a\n NumPy array of the indicated shape and dtype.\n\n Examples\n --------\n\n >>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))\n >>> a.compute()\n array([0, 1, 2])\n\n This works particularly well when coupled with dask.array functions like\n concatenate and stack:\n\n >>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]\n >>> stack(arrays).compute()\n array([0, 1, 2, 3, 4])\n \"\"\"\n name = name or \"from_func-\" + tokenize(func, shape, dtype, args, kwargs)\n if args or kwargs:\n func = partial(func, *args, **kwargs)\n dsk = {(name,) + (0,) * len(shape): (func,)}\n chunks = tuple((i,) for i in shape)\n return Array(dsk, name, chunks, dtype)\n\n\ndef common_blockdim(blockdims):\n \"\"\"Find the common block dimensions from the list of block dimensions\n\n Currently only implements the simplest possible heuristic: 
the common\n block-dimension is the only one that does not span fully span a dimension.\n This is a conservative choice that allows us to avoid potentially very\n expensive rechunking.\n\n Assumes that each element of the input block dimensions has all the same\n sum (i.e., that they correspond to dimensions of the same size).\n\n Examples\n --------\n >>> common_blockdim([(3,), (2, 1)])\n (2, 1)\n >>> common_blockdim([(1, 2), (2, 1)])\n (1, 1, 1)\n >>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align\n \"\"\"\n if not any(blockdims):\n return ()\n non_trivial_dims = set([d for d in blockdims if len(d) > 1])\n if len(non_trivial_dims) == 1:\n return first(non_trivial_dims)\n if len(non_trivial_dims) == 0:\n return max(blockdims, key=first)\n\n if np.isnan(sum(map(sum, blockdims))):\n raise ValueError(\n \"Arrays chunk sizes (%s) are unknown.\\n\\n\"\n \"A possible solution:\\n\"\n \" x.compute_chunk_sizes()\" % blockdims\n )\n\n if len(set(map(sum, non_trivial_dims))) > 1:\n raise ValueError(\"Chunks do not add up to same value\", blockdims)\n\n # We have multiple non-trivial chunks on this axis\n # e.g. (5, 2) and (4, 3)\n\n # We create a single chunk tuple with the same total length\n # that evenly divides both, e.g. (4, 1, 2)\n\n # To accomplish this we walk down all chunk tuples together, finding the\n # smallest element, adding it to the output, and subtracting it from all\n # other elements and remove the element itself. We stop once we have\n # burned through all of the chunk tuples.\n # For efficiency's sake we reverse the lists so that we can pop off the end\n rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]\n total = sum(first(non_trivial_dims))\n i = 0\n\n out = []\n while i < total:\n m = min(c[-1] for c in rchunks)\n out.append(m)\n for c in rchunks:\n c[-1] -= m\n if c[-1] == 0:\n c.pop()\n i += m\n\n return tuple(out)\n\n\ndef unify_chunks(*args, **kwargs):\n \"\"\"\n Unify chunks across a sequence of arrays\n\n This utility function is used within other common operations like\n ``map_blocks`` and ``blockwise``. 
It is not commonly used by end-users\n directly.\n\n Parameters\n ----------\n *args: sequence of Array, index pairs\n Sequence like (x, 'ij', y, 'jk', z, 'i')\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.ones(10, chunks=((5, 2, 3),))\n >>> y = da.ones(10, chunks=((2, 3, 5),))\n >>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')\n >>> chunkss\n {'i': (2, 3, 2, 3)}\n\n >>> x = da.ones((100, 10), chunks=(20, 5))\n >>> y = da.ones((10, 100), chunks=(4, 50))\n >>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)\n >>> chunkss # doctest: +SKIP\n {'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}\n\n >>> unify_chunks(0, None)\n ({}, [0])\n\n Returns\n -------\n chunkss : dict\n Map like {index: chunks}.\n arrays : list\n List of rechunked arrays.\n\n See Also\n --------\n common_blockdim\n \"\"\"\n if not args:\n return {}, []\n\n arginds = [\n (asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)\n ] # [x, ij, y, jk]\n args = list(concat(arginds)) # [(x, ij), (y, jk)]\n warn = kwargs.get(\"warn\", True)\n\n arrays, inds = zip(*arginds)\n if all(ind is None for ind in inds):\n return {}, list(arrays)\n if all(ind == inds[0] for ind in inds) and all(\n a.chunks == arrays[0].chunks for a in arrays\n ):\n return dict(zip(inds[0], arrays[0].chunks)), arrays\n\n nameinds = []\n blockdim_dict = dict()\n max_parts = 0\n for a, ind in arginds:\n if ind is not None:\n nameinds.append((a.name, ind))\n blockdim_dict[a.name] = a.chunks\n max_parts = max(max_parts, a.npartitions)\n else:\n nameinds.append((a, ind))\n\n chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)\n nparts = np.prod(list(map(len, chunkss.values())))\n\n if warn and nparts and nparts >= max_parts * 10:\n warnings.warn(\n \"Increasing number of chunks by factor of %d\" % (nparts / max_parts),\n PerformanceWarning,\n stacklevel=3,\n )\n\n arrays = []\n for a, i in arginds:\n if i is None:\n arrays.append(a)\n else:\n chunks = tuple(\n chunkss[j]\n if a.shape[n] > 1\n else a.shape[n]\n if not np.isnan(sum(chunkss[j]))\n else None\n for n, j in enumerate(i)\n )\n if chunks != a.chunks and all(a.chunks):\n arrays.append(a.rechunk(chunks))\n else:\n arrays.append(a)\n return chunkss, arrays\n\n\ndef unpack_singleton(x):\n \"\"\"\n\n >>> unpack_singleton([[[[1]]]])\n 1\n >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))\n array('2000-01-01', dtype='datetime64[D]')\n \"\"\"\n while isinstance(x, (list, tuple)):\n try:\n x = x[0]\n except (IndexError, TypeError, KeyError):\n break\n return x\n\n\ndef block(arrays, allow_unknown_chunksizes=False):\n \"\"\"\n Assemble an nd-array from nested lists of blocks.\n\n Blocks in the innermost lists are concatenated along the last\n dimension (-1), then these are concatenated along the second-last\n dimension (-2), and so on until the outermost list is reached\n\n Blocks can be of any dimension, but will not be broadcasted using the normal\n rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``\n the same for all blocks. 
This is primarily useful for working with scalars,\n and means that code like ``block([v, 1])`` is valid, where\n ``v.ndim == 1``.\n\n When the nested list is two levels deep, this allows block matrices to be\n constructed from their components.\n\n Parameters\n ----------\n arrays : nested list of array_like or scalars (but not tuples)\n If passed a single ndarray or scalar (a nested list of depth 0), this\n is returned unmodified (and not copied).\n\n Elements shapes must match along the appropriate axes (without\n broadcasting), but leading 1s will be prepended to the shape as\n necessary to make the dimensions match.\n\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Returns\n -------\n block_array : ndarray\n The array assembled from the given blocks.\n\n The dimensionality of the output is equal to the greatest of:\n * the dimensionality of all the inputs\n * the depth to which the input list is nested\n\n Raises\n ------\n ValueError\n * If list depths are mismatched - for instance, ``[[a, b], c]`` is\n illegal, and should be spelt ``[[a, b], [c]]``\n * If lists are empty - for instance, ``[[a, b], []]``\n\n See Also\n --------\n concatenate : Join a sequence of arrays together.\n stack : Stack arrays in sequence along a new dimension.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n vsplit : Split array into a list of multiple sub-arrays vertically.\n\n Notes\n -----\n\n When called with only scalars, ``block`` is equivalent to an ndarray\n call. So ``block([[1, 2], [3, 4]])`` is equivalent to\n ``array([[1, 2], [3, 4]])``.\n\n This function does not enforce that the blocks lie on a fixed grid.\n ``block([[a, b], [c, d]])`` is not restricted to arrays of the form::\n\n AAAbb\n AAAbb\n cccDD\n\n But is also allowed to produce, for some ``a, b, c, d``::\n\n AAAbb\n AAAbb\n cDDDD\n\n Since concatenation happens along the last axis first, `block` is _not_\n capable of producing the following directly::\n\n AAAbb\n cccbb\n cccDD\n\n Matlab's \"square bracket stacking\", ``[A, B, ...; p, q, ...]``, is\n equivalent to ``block([[A, B, ...], [p, q, ...]])``.\n \"\"\"\n\n # This was copied almost verbatim from numpy.core.shape_base.block\n # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n\n def atleast_nd(x, ndim):\n x = asanyarray(x)\n diff = max(ndim - x.ndim, 0)\n if diff == 0:\n return x\n else:\n return x[(None,) * diff + (Ellipsis,)]\n\n def format_index(index):\n return \"arrays\" + \"\".join(\"[{}]\".format(i) for i in index)\n\n rec = _Recurser(recurse_if=lambda x: type(x) is list)\n\n # ensure that the lists are all matched in depth\n list_ndim = None\n any_empty = False\n for index, value, entering in rec.walk(arrays):\n if type(value) is tuple:\n # not strictly necessary, but saves us from:\n # - more than one way to do things - no point treating tuples like\n # lists\n # - horribly confusing behaviour that results when tuples are\n # treated like ndarray\n raise TypeError(\n \"{} is a tuple. 
\"\n \"Only lists can be used to arrange blocks, and np.block does \"\n \"not allow implicit conversion from tuple to ndarray.\".format(\n format_index(index)\n )\n )\n if not entering:\n curr_depth = len(index)\n elif len(value) == 0:\n curr_depth = len(index) + 1\n any_empty = True\n else:\n continue\n\n if list_ndim is not None and list_ndim != curr_depth:\n raise ValueError(\n \"List depths are mismatched. First element was at depth {}, \"\n \"but there is an element at depth {} ({})\".format(\n list_ndim, curr_depth, format_index(index)\n )\n )\n list_ndim = curr_depth\n\n # do this here so we catch depth mismatches first\n if any_empty:\n raise ValueError(\"Lists cannot be empty\")\n\n # convert all the arrays to ndarrays\n arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)\n\n # determine the maximum dimension of the elements\n elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)\n ndim = max(list_ndim, elem_ndim)\n\n # first axis to concatenate along\n first_axis = ndim - list_ndim\n\n # Make all the elements the same dimension\n arrays = rec.map_reduce(\n arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list\n )\n\n # concatenate innermost lists on the right, outermost on the left\n return rec.map_reduce(\n arrays,\n f_reduce=lambda xs, axis: concatenate(\n list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes\n ),\n f_kwargs=lambda axis: dict(axis=(axis + 1)),\n axis=first_axis,\n )\n\n\ndef concatenate(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Concatenate arrays along an existing axis\n\n Given a sequence of dask Arrays form a new dask Array by stacking them\n along an existing dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.concatenate(data, axis=0)\n >>> x.shape\n (12, 4)\n\n >>> da.concatenate(data, axis=1).shape\n (4, 12)\n\n Result is a new dask Array\n\n See Also\n --------\n stack\n \"\"\"\n from . 
import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to concatenate\")\n\n seq_metas = [meta_from_array(s) for s in seq]\n _concatenate = concatenate_lookup.dispatch(\n type(max(seq_metas, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n meta = _concatenate(seq_metas, axis=axis)\n\n # Promote types to match meta\n seq = [a.astype(meta.dtype) for a in seq]\n\n # Find output array shape\n ndim = len(seq[0].shape)\n shape = tuple(\n sum((a.shape[i] for a in seq)) if i == axis else seq[0].shape[i]\n for i in range(ndim)\n )\n\n # Drop empty arrays\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n if axis < 0:\n axis = ndim + axis\n if axis >= ndim:\n msg = (\n \"Axis must be less than than number of dimensions\"\n \"\\nData has %d dimensions, but got axis=%d\"\n )\n raise ValueError(msg % (ndim, axis))\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n elif n == 1:\n return seq2[0]\n\n if not allow_unknown_chunksizes and not all(\n i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)\n for i in range(ndim)\n ):\n if any(map(np.isnan, seq2[0].shape)):\n raise ValueError(\n \"Tried to concatenate arrays with unknown\"\n \" shape %s.\\n\\nTwo solutions:\\n\"\n \" 1. Force concatenation pass\"\n \" allow_unknown_chunksizes=True.\\n\"\n \" 2. Compute shapes with \"\n \"[x.compute_chunk_sizes() for x in seq]\" % str(seq2[0].shape)\n )\n raise ValueError(\"Shapes do not align: %s\", [x.shape for x in seq2])\n\n inds = [list(range(ndim)) for i in range(n)]\n for i, ind in enumerate(inds):\n ind[axis] = -(i + 1)\n\n uc_args = list(concat(zip(seq2, inds)))\n _, seq2 = unify_chunks(*uc_args, warn=False)\n\n bds = [a.chunks for a in seq2]\n\n chunks = (\n seq2[0].chunks[:axis]\n + (sum([bd[axis] for bd in bds], ()),)\n + seq2[0].chunks[axis + 1 :]\n )\n\n cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))\n\n names = [a.name for a in seq2]\n\n name = \"concatenate-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n values = [\n (names[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[1 : axis + 1]\n + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[axis + 2 :]\n for key in keys\n ]\n\n dsk = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef load_store_chunk(x, out, index, lock, return_stored, load_stored):\n \"\"\"\n A function inserted in a Dask graph for storing a chunk.\n\n Parameters\n ----------\n x: array-like\n An array (potentially a NumPy one)\n out: array-like\n Where to store results too.\n index: slice-like\n Where to store result from ``x`` in ``out``.\n lock: Lock-like or False\n Lock to use before writing to ``out``.\n return_stored: bool\n Whether to return ``out``.\n load_stored: bool\n Whether to return the array stored in ``out``.\n Ignored if ``return_stored`` is not ``True``.\n\n Examples\n --------\n\n >>> a = np.ones((5, 6))\n >>> b = np.empty(a.shape)\n >>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)\n \"\"\"\n\n result = None\n if return_stored and not load_stored:\n result = out\n\n if lock:\n lock.acquire()\n try:\n if x is not None:\n out[index] = np.asanyarray(x)\n if return_stored and load_stored:\n result = out[index]\n 
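        # The ``finally`` clause below always releases ``lock`` (when one was
        # provided), so an exception while writing into ``out`` cannot leave
        # the lock held and block stores of other chunks.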
finally:\n if lock:\n lock.release()\n\n return result\n\n\ndef store_chunk(x, out, index, lock, return_stored):\n return load_store_chunk(x, out, index, lock, return_stored, False)\n\n\ndef load_chunk(out, index, lock):\n return load_store_chunk(None, out, index, lock, True, True)\n\n\ndef insert_to_ooc(\n arr, out, lock=True, region=None, return_stored=False, load_stored=False, tok=None\n):\n \"\"\"\n Creates a Dask graph for storing chunks from ``arr`` in ``out``.\n\n Parameters\n ----------\n arr: da.Array\n A dask array\n out: array-like\n Where to store results too.\n lock: Lock-like or bool, optional\n Whether to lock or with what (default is ``True``,\n which means a ``threading.Lock`` instance).\n region: slice-like, optional\n Where in ``out`` to store ``arr``'s results\n (default is ``None``, meaning all of ``out``).\n return_stored: bool, optional\n Whether to return ``out``\n (default is ``False``, meaning ``None`` is returned).\n load_stored: bool, optional\n Whether to handling loading from ``out`` at the same time.\n Ignored if ``return_stored`` is not ``True``.\n (default is ``False``, meaning defer to ``return_stored``).\n tok: str, optional\n Token to use when naming keys\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> insert_to_ooc(d, a) # doctest: +SKIP\n \"\"\"\n\n if lock is True:\n lock = Lock()\n\n slices = slices_from_chunks(arr.chunks)\n if region:\n slices = [fuse_slice(region, slc) for slc in slices]\n\n name = \"store-%s\" % (tok or str(uuid.uuid1()))\n func = store_chunk\n args = ()\n if return_stored and load_stored:\n name = \"load-%s\" % name\n func = load_store_chunk\n args = args + (load_stored,)\n\n dsk = {\n (name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args\n for t, slc in zip(core.flatten(arr.__dask_keys__()), slices)\n }\n\n return dsk\n\n\ndef retrieve_from_ooc(keys, dsk_pre, dsk_post=None):\n \"\"\"\n Creates a Dask graph for loading stored ``keys`` from ``dsk``.\n\n Parameters\n ----------\n keys: Sequence\n A sequence containing Dask graph keys to load\n dsk_pre: Mapping\n A Dask graph corresponding to a Dask Array before computation\n dsk_post: Mapping, optional\n A Dask graph corresponding to a Dask Array after computation\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> g = insert_to_ooc(d, a)\n >>> retrieve_from_ooc(g.keys(), g) # doctest: +SKIP\n \"\"\"\n\n if not dsk_post:\n dsk_post = {k: k for k in keys}\n\n load_dsk = {\n (\"load-\" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]\n for k in keys\n }\n\n return load_dsk\n\n\ndef asarray(a, **kwargs):\n \"\"\"Convert the input to a dask array.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asarray(a.data)\n elif isinstance(a, 
(list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asarray(a)\n return from_array(a, getitem=getter_inline, **kwargs)\n\n\ndef asanyarray(a):\n \"\"\"Convert the input to a dask array.\n\n Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asanyarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asanyarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asanyarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asanyarray(a)\n return from_array(a, chunks=a.shape, getitem=getter_inline, asarray=False)\n\n\ndef is_scalar_for_elemwise(arg):\n \"\"\"\n\n >>> is_scalar_for_elemwise(42)\n True\n >>> is_scalar_for_elemwise('foo')\n True\n >>> is_scalar_for_elemwise(True)\n True\n >>> is_scalar_for_elemwise(np.array(42))\n True\n >>> is_scalar_for_elemwise([1, 2, 3])\n True\n >>> is_scalar_for_elemwise(np.array([1, 2, 3]))\n False\n >>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))\n False\n >>> is_scalar_for_elemwise(np.dtype('i4'))\n True\n \"\"\"\n # the second half of shape_condition is essentially just to ensure that\n # dask series / frame are treated as scalars in elemwise.\n maybe_shape = getattr(arg, \"shape\", None)\n shape_condition = not isinstance(maybe_shape, Iterable) or any(\n is_dask_collection(x) for x in maybe_shape\n )\n\n return (\n np.isscalar(arg)\n or shape_condition\n or isinstance(arg, np.dtype)\n or (isinstance(arg, np.ndarray) and arg.ndim == 0)\n )\n\n\ndef broadcast_shapes(*shapes):\n \"\"\"\n Determines output shape from broadcasting arrays.\n\n Parameters\n ----------\n shapes : tuples\n The shapes of the arguments.\n\n Returns\n -------\n output_shape : tuple\n\n Raises\n ------\n ValueError\n If the input shapes cannot be successfully broadcast together.\n \"\"\"\n if len(shapes) == 1:\n return shapes[0]\n out = []\n for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):\n if np.isnan(sizes).any():\n dim = np.nan\n else:\n dim = 0 if 0 in sizes else np.max(sizes)\n if any(i not in [-1, 0, 1, dim] and not np.isnan(i) for i in sizes):\n raise ValueError(\n \"operands could not be broadcast together with \"\n \"shapes {0}\".format(\" \".join(map(str, shapes)))\n )\n out.append(dim)\n return tuple(reversed(out))\n\n\ndef elemwise(op, *args, **kwargs):\n \"\"\"Apply elementwise function across arguments\n\n Respects broadcasting rules\n\n Examples\n --------\n >>> elemwise(add, x, y) # doctest: +SKIP\n >>> elemwise(sin, x) # doctest: +SKIP\n\n See Also\n --------\n blockwise\n \"\"\"\n out = kwargs.pop(\"out\", None)\n if not set([\"name\", \"dtype\"]).issuperset(kwargs):\n msg = \"%s does not take the following keyword arguments %s\"\n raise TypeError(\n msg % (op.__name__, 
str(sorted(set(kwargs) - set([\"name\", \"dtype\"]))))\n )\n\n args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]\n\n shapes = []\n for arg in args:\n shape = getattr(arg, \"shape\", ())\n if any(is_dask_collection(x) for x in shape):\n # Want to exclude Delayed shapes and dd.Scalar\n shape = ()\n shapes.append(shape)\n\n shapes = [s if isinstance(s, Iterable) else () for s in shapes]\n out_ndim = len(\n broadcast_shapes(*shapes)\n ) # Raises ValueError if dimensions mismatch\n expr_inds = tuple(range(out_ndim))[::-1]\n\n need_enforce_dtype = False\n if \"dtype\" in kwargs:\n dt = kwargs[\"dtype\"]\n else:\n # We follow NumPy's rules for dtype promotion, which special cases\n # scalars and 0d ndarrays (which it considers equivalent) by using\n # their values to compute the result dtype:\n # https://github.com/numpy/numpy/issues/6240\n # We don't inspect the values of 0d dask arrays, because these could\n # hold potentially very expensive calculations. Instead, we treat\n # them just like other arrays, and if necessary cast the result of op\n # to match.\n vals = [\n np.empty((1,) * max(1, a.ndim), dtype=a.dtype)\n if not is_scalar_for_elemwise(a)\n else a\n for a in args\n ]\n try:\n dt = apply_infer_dtype(op, vals, {}, \"elemwise\", suggest_dtype=False)\n except Exception:\n return NotImplemented\n need_enforce_dtype = any(\n not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args\n )\n\n name = kwargs.get(\"name\", None) or \"%s-%s\" % (funcname(op), tokenize(op, dt, *args))\n\n blockwise_kwargs = dict(dtype=dt, name=name, token=funcname(op).strip(\"_\"))\n if need_enforce_dtype:\n blockwise_kwargs[\"enforce_dtype\"] = dt\n blockwise_kwargs[\"enforce_dtype_function\"] = op\n op = _enforce_dtype\n result = blockwise(\n op,\n expr_inds,\n *concat(\n (a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)\n for a in args\n ),\n **blockwise_kwargs,\n )\n\n return handle_out(out, result)\n\n\ndef handle_out(out, result):\n \"\"\"Handle out parameters\n\n If out is a dask.array then this overwrites the contents of that array with\n the result\n \"\"\"\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n if isinstance(out, Array):\n if out.shape != result.shape:\n raise ValueError(\n \"Mismatched shapes between result and out parameter. 
\"\n \"out=%s, result=%s\" % (str(out.shape), str(result.shape))\n )\n out._chunks = result.chunks\n out.dask = result.dask\n out._meta = result._meta\n out.name = result.name\n elif out is not None:\n msg = (\n \"The out parameter is not fully supported.\"\n \" Received type %s, expected Dask Array\" % type(out).__name__\n )\n raise NotImplementedError(msg)\n else:\n return result\n\n\ndef _enforce_dtype(*args, **kwargs):\n \"\"\"Calls a function and converts its result to the given dtype.\n\n The parameters have deliberately been given unwieldy names to avoid\n clashes with keyword arguments consumed by blockwise\n\n A dtype of `object` is treated as a special case and not enforced,\n because it is used as a dummy value in some places when the result will\n not be a block in an Array.\n\n Parameters\n ----------\n enforce_dtype : dtype\n Result dtype\n enforce_dtype_function : callable\n The wrapped function, which will be passed the remaining arguments\n \"\"\"\n dtype = kwargs.pop(\"enforce_dtype\")\n function = kwargs.pop(\"enforce_dtype_function\")\n\n result = function(*args, **kwargs)\n if hasattr(result, \"dtype\") and dtype != result.dtype and dtype != object:\n if not np.can_cast(result, dtype, casting=\"same_kind\"):\n raise ValueError(\n \"Inferred dtype from function %r was %r \"\n \"but got %r, which can't be cast using \"\n \"casting='same_kind'\"\n % (funcname(function), str(dtype), str(result.dtype))\n )\n if np.isscalar(result):\n # scalar astype method doesn't take the keyword arguments, so\n # have to convert via 0-dimensional array and back.\n result = result.astype(dtype)\n else:\n try:\n result = result.astype(dtype, copy=False)\n except TypeError:\n # Missing copy kwarg\n result = result.astype(dtype)\n return result\n\n\ndef broadcast_to(x, shape, chunks=None):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n x : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n chunks : tuple, optional\n If provided, then the result will use these chunks instead of the same\n chunks as the source array. Setting chunks explicitly as part of\n broadcast_to is more efficient than rechunking afterwards. 
Chunks are\n only allowed to differ from the original shape along dimensions that\n are new on the result or have size 1 the input array.\n\n Returns\n -------\n broadcast : dask array\n\n See Also\n --------\n :func:`numpy.broadcast_to`\n \"\"\"\n x = asarray(x)\n shape = tuple(shape)\n\n if x.shape == shape and (chunks is None or chunks == x.chunks):\n return x\n\n ndim_new = len(shape) - x.ndim\n if ndim_new < 0 or any(\n new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1\n ):\n raise ValueError(\"cannot broadcast shape %s to shape %s\" % (x.shape, shape))\n\n if chunks is None:\n chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(\n bd if old > 1 else (new,)\n for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])\n )\n else:\n chunks = normalize_chunks(\n chunks, shape, dtype=x.dtype, previous_chunks=x.chunks\n )\n for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):\n if old_bd != new_bd and old_bd != (1,):\n raise ValueError(\n \"cannot broadcast chunks %s to chunks %s: \"\n \"new chunks must either be along a new \"\n \"dimension or a dimension of size 1\" % (x.chunks, chunks)\n )\n\n name = \"broadcast_to-\" + tokenize(x, shape, chunks)\n dsk = {}\n\n enumerated_chunks = product(*(enumerate(bds) for bds in chunks))\n for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):\n old_index = tuple(\n 0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])\n )\n old_key = (x.name,) + old_index\n new_key = (name,) + new_index\n dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, dtype=x.dtype)\n\n\n@derived_from(np)\ndef broadcast_arrays(*args, **kwargs):\n subok = bool(kwargs.pop(\"subok\", False))\n\n to_array = asanyarray if subok else asarray\n args = tuple(to_array(e) for e in args)\n\n if kwargs:\n raise TypeError(\"unsupported keyword argument(s) provided\")\n\n # Unify uneven chunking\n inds = [list(reversed(range(x.ndim))) for x in args]\n uc_args = concat(zip(args, inds))\n _, args = unify_chunks(*uc_args, warn=False)\n\n shape = broadcast_shapes(*(e.shape for e in args))\n chunks = broadcast_chunks(*(e.chunks for e in args))\n\n result = [broadcast_to(e, shape=shape, chunks=chunks) for e in args]\n\n return result\n\n\ndef offset_func(func, offset, *args):\n \"\"\"Offsets inputs by offset\n\n >>> double = lambda x: x * 2\n >>> f = offset_func(double, (10,))\n >>> f(1)\n 22\n >>> f(300)\n 620\n \"\"\"\n\n def _offset(*args):\n args2 = list(map(add, args, offset))\n return func(*args2)\n\n with ignoring(Exception):\n _offset.__name__ = \"offset_\" + func.__name__\n\n return _offset\n\n\ndef chunks_from_arrays(arrays):\n \"\"\"Chunks tuple from nested list of arrays\n\n >>> x = np.array([1, 2])\n >>> chunks_from_arrays([x, x])\n ((2, 2),)\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x], [x]])\n ((1, 1), (2,))\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x, x]])\n ((1,), (2, 2))\n\n >>> chunks_from_arrays([1, 1])\n ((1, 1),)\n \"\"\"\n if not arrays:\n return ()\n result = []\n dim = 0\n\n def shape(x):\n try:\n return x.shape\n except AttributeError:\n return (1,)\n\n while isinstance(arrays, (list, tuple)):\n result.append(tuple([shape(deepfirst(a))[dim] for a in arrays]))\n arrays = arrays[0]\n dim += 1\n return tuple(result)\n\n\ndef deepfirst(seq):\n \"\"\"First element in a nested list\n\n >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])\n 1\n \"\"\"\n if not 
isinstance(seq, (list, tuple)):\n return seq\n else:\n return deepfirst(seq[0])\n\n\ndef shapelist(a):\n \"\"\" Get the shape of nested list \"\"\"\n if type(a) is list:\n return tuple([len(a)] + list(shapelist(a[0])))\n else:\n return ()\n\n\ndef reshapelist(shape, seq):\n \"\"\"Reshape iterator to nested shape\n\n >>> reshapelist((2, 3), range(6))\n [[0, 1, 2], [3, 4, 5]]\n \"\"\"\n if len(shape) == 1:\n return list(seq)\n else:\n n = int(len(seq) / shape[0])\n return [reshapelist(shape[1:], part) for part in partition(n, seq)]\n\n\ndef transposelist(arrays, axes, extradims=0):\n \"\"\"Permute axes of nested list\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1])\n [[[1, 1], [1, 1], [1, 1]]]\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)\n [[[[1], [1]], [[1], [1]], [[1], [1]]]]\n \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth of nested arrays\")\n if extradims < 0:\n raise ValueError(\"`newdims` should be positive\")\n if len(axes) > len(set(axes)):\n raise ValueError(\"`axes` should be unique\")\n\n ndim = max(axes) + 1\n shape = shapelist(arrays)\n newshape = [\n shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)\n ]\n\n result = list(core.flatten(arrays))\n return reshapelist(newshape, result)\n\n\ndef stack(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Stack arrays along a new axis\n\n Given a sequence of dask arrays, form a new dask array by stacking them\n along a new dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.stack(data, axis=0)\n >>> x.shape\n (3, 4, 4)\n\n >>> da.stack(data, axis=1).shape\n (4, 3, 4)\n\n >>> da.stack(data, axis=-1).shape\n (4, 4, 3)\n\n Result is a new dask Array\n\n See Also\n --------\n concatenate\n \"\"\"\n from . import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to stack\")\n if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):\n idx = first(i for i in enumerate(seq) if i[1].shape != seq[0].shape)\n raise ValueError(\n \"Stacked arrays must have the same shape. 
\"\n \"The first array had shape {0}, while array \"\n \"{1} has shape {2}.\".format(seq[0].shape, idx[0] + 1, idx[1].shape)\n )\n\n meta = np.stack([meta_from_array(a) for a in seq], axis=axis)\n seq = [x.astype(meta.dtype) for x in seq]\n\n ndim = meta.ndim - 1\n if axis < 0:\n axis = ndim + axis + 1\n shape = tuple(\n len(seq)\n if i == axis\n else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])\n for i in range(meta.ndim)\n )\n\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n\n ind = list(range(ndim))\n uc_args = list(concat((x, ind) for x in seq2))\n _, seq2 = unify_chunks(*uc_args)\n\n assert len(set(a.chunks for a in seq2)) == 1 # same chunks\n chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]\n\n names = [a.name for a in seq2]\n name = \"stack-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n inputs = [\n (names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys\n ]\n values = [\n (\n getitem,\n inp,\n (slice(None, None, None),) * axis\n + (None,)\n + (slice(None, None, None),) * (ndim - axis),\n )\n for inp in inputs\n ]\n\n layer = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef concatenate3(arrays):\n \"\"\"Recursive np.concatenate\n\n Input should be a nested list of numpy arrays arranged in the order they\n should appear in the array itself. Each array should have the same number\n of dimensions as the desired output and the nesting of the lists.\n\n >>> x = np.array([[1, 2]])\n >>> concatenate3([[x, x, x], [x, x, x]])\n array([[1, 2, 1, 2, 1, 2],\n [1, 2, 1, 2, 1, 2]])\n\n >>> concatenate3([[x, x], [x, x], [x, x]])\n array([[1, 2, 1, 2],\n [1, 2, 1, 2],\n [1, 2, 1, 2]])\n \"\"\"\n from .utils import IS_NEP18_ACTIVE\n\n # We need this as __array_function__ may not exist on older NumPy versions.\n # And to reduce verbosity.\n NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, \"__array_function__\", None)\n\n arrays = concrete(arrays)\n if not arrays:\n return np.empty(0)\n\n advanced = max(\n core.flatten(arrays, container=(list, tuple)),\n key=lambda x: getattr(x, \"__array_priority__\", 0),\n )\n\n if IS_NEP18_ACTIVE and not all(\n NDARRAY_ARRAY_FUNCTION\n is getattr(arr, \"__array_function__\", NDARRAY_ARRAY_FUNCTION)\n for arr in arrays\n ):\n try:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=tuple(range(x.ndim)))\n except TypeError:\n pass\n\n if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=list(range(x.ndim)))\n\n ndim = ndimlist(arrays)\n if not ndim:\n return arrays\n chunks = chunks_from_arrays(arrays)\n shape = tuple(map(sum, chunks))\n\n def dtype(x):\n try:\n return x.dtype\n except AttributeError:\n return type(x)\n\n result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))\n\n for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):\n if hasattr(arr, \"ndim\"):\n while arr.ndim < ndim:\n arr = arr[None, ...]\n result[idx] = arr\n\n return result\n\n\ndef concatenate_axes(arrays, axes):\n \"\"\" Recursively call np.concatenate along axes \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth 
of nested arrays\")\n\n extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))\n return concatenate3(transposelist(arrays, axes, extradims=extradims))\n\n\ndef to_hdf5(filename, *args, **kwargs):\n \"\"\"Store arrays in HDF5 file\n\n This saves several dask arrays into several datapaths in an HDF5 file.\n It creates the necessary datasets and handles clean file opening/closing.\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP\n\n or\n\n >>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP\n\n This can also be used as a method on a single Array\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n if len(args) == 1 and isinstance(args[0], dict):\n data = args[0]\n elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):\n data = {args[0]: args[1]}\n else:\n raise ValueError(\"Please provide {'/data/path': array} dictionary\")\n\n chunks = kwargs.pop(\"chunks\", True)\n\n import h5py\n\n with h5py.File(filename, mode=\"a\") as f:\n dsets = [\n f.require_dataset(\n dp,\n shape=x.shape,\n dtype=x.dtype,\n chunks=tuple([c[0] for c in x.chunks]) if chunks is True else chunks,\n **kwargs,\n )\n for dp, x in data.items()\n ]\n store(list(data.values()), dsets)\n\n\ndef interleave_none(a, b):\n \"\"\"\n\n >>> interleave_none([0, None, 2, None], [1, 3])\n (0, 1, 2, 3)\n \"\"\"\n result = []\n i = j = 0\n n = len(a) + len(b)\n while i + j < n:\n if a[i] is not None:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n i += 1\n j += 1\n return tuple(result)\n\n\ndef keyname(name, i, okey):\n \"\"\"\n\n >>> keyname('x', 3, [None, None, 0, 2])\n ('x', 3, 0, 2)\n \"\"\"\n return (name, i) + tuple(k for k in okey if k is not None)\n\n\ndef _vindex(x, *indexes):\n \"\"\"Point wise indexing with broadcasting.\n\n >>> x = np.arange(56).reshape((7, 8))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n\n >>> d = from_array(x, chunks=(3, 4))\n >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])\n >>> result.compute()\n array([ 0, 9, 48, 7])\n \"\"\"\n indexes = replace_ellipsis(x.ndim, indexes)\n\n nonfancy_indexes = []\n reduced_indexes = []\n for i, ind in enumerate(indexes):\n if isinstance(ind, Number):\n nonfancy_indexes.append(ind)\n elif isinstance(ind, slice):\n nonfancy_indexes.append(ind)\n reduced_indexes.append(slice(None))\n else:\n nonfancy_indexes.append(slice(None))\n reduced_indexes.append(ind)\n\n nonfancy_indexes = tuple(nonfancy_indexes)\n reduced_indexes = tuple(reduced_indexes)\n\n x = x[nonfancy_indexes]\n\n array_indexes = {}\n for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):\n if not isinstance(ind, slice):\n ind = np.array(ind, copy=True)\n if ind.dtype.kind == \"b\":\n raise IndexError(\"vindex does not support indexing with boolean arrays\")\n if ((ind >= size) | (ind < -size)).any():\n raise IndexError(\n \"vindex key has entries out of bounds for \"\n \"indexing along axis %s of size %s: %r\" % (i, size, ind)\n )\n ind %= size\n array_indexes[i] = ind\n\n if array_indexes:\n x = _vindex_array(x, array_indexes)\n\n return x\n\n\ndef _vindex_array(x, 
dict_indexes):\n \"\"\"Point wise indexing with only NumPy Arrays.\"\"\"\n\n try:\n broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())\n except ValueError as e:\n # note: error message exactly matches numpy\n shapes_str = \" \".join(str(a.shape) for a in dict_indexes.values())\n raise IndexError(\n \"shape mismatch: indexing arrays could not be \"\n \"broadcast together with shapes \" + shapes_str\n ) from e\n broadcast_shape = broadcast_indexes[0].shape\n\n lookup = dict(zip(dict_indexes, broadcast_indexes))\n flat_indexes = [\n lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)\n ]\n flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))\n\n flat_indexes = [\n list(index) if index is not None else index for index in flat_indexes\n ]\n bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]\n bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]\n axis = _get_axis(flat_indexes)\n token = tokenize(x, flat_indexes)\n out_name = \"vindex-merge-\" + token\n\n points = list()\n for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):\n block_idx = [\n np.searchsorted(b, ind, \"right\") - 1 for b, ind in zip(bounds2, idx)\n ]\n inblock_idx = [\n ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))\n ]\n points.append((i, tuple(block_idx), tuple(inblock_idx)))\n\n chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]\n chunks.insert(0, (len(points),) if points else (0,))\n chunks = tuple(chunks)\n\n if points:\n per_block = groupby(1, points)\n per_block = dict((k, v) for k, v in per_block.items() if v)\n\n other_blocks = list(\n product(\n *[\n list(range(len(c))) if i is None else [None]\n for i, c in zip(flat_indexes, x.chunks)\n ]\n )\n )\n\n full_slices = [slice(None, None) if i is None else None for i in flat_indexes]\n\n name = \"vindex-slice-\" + token\n vindex_merge_name = \"vindex-merge-\" + token\n dsk = {}\n for okey in other_blocks:\n for i, key in enumerate(per_block):\n dsk[keyname(name, i, okey)] = (\n _vindex_transpose,\n (\n _vindex_slice,\n (x.name,) + interleave_none(okey, key),\n interleave_none(\n full_slices, list(zip(*pluck(2, per_block[key])))\n ),\n ),\n axis,\n )\n dsk[keyname(vindex_merge_name, 0, okey)] = (\n _vindex_merge,\n [list(pluck(0, per_block[key])) for key in per_block],\n [keyname(name, i, okey) for i in range(len(per_block))],\n )\n\n result_1d = Array(\n HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),\n out_name,\n chunks,\n x.dtype,\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n # output has a zero dimension, just create a new zero-shape array with the\n # same dtype\n from .wrap import empty\n\n result_1d = empty(\n tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n\ndef _get_axis(indexes):\n \"\"\"Get axis along which point-wise slicing results lie\n\n This is mostly a hack because I can't figure out NumPy's rule on this and\n can't be bothered to go reading.\n\n >>> _get_axis([[1, 2], None, [1, 2], None])\n 0\n >>> _get_axis([None, [1, 2], [1, 2], None])\n 1\n >>> _get_axis([None, None, [1, 2], [1, 2]])\n 2\n \"\"\"\n ndim = len(indexes)\n indexes = [slice(None, None) if i is None else [0] for i in indexes]\n x = np.empty((2,) * ndim)\n x2 = x[tuple(indexes)]\n return x2.shape.index(1)\n\n\ndef _vindex_slice(block, points):\n \"\"\" Pull out point-wise slices from block \"\"\"\n points = [p if isinstance(p, 
slice) else list(p) for p in points]\n return block[tuple(points)]\n\n\ndef _vindex_transpose(block, axis):\n \"\"\" Rotate block so that points are on the first dimension \"\"\"\n axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))\n return block.transpose(axes)\n\n\ndef _vindex_merge(locations, values):\n \"\"\"\n\n >>> locations = [0], [2, 1]\n >>> values = [np.array([[1, 2, 3]]),\n ... np.array([[10, 20, 30], [40, 50, 60]])]\n\n >>> _vindex_merge(locations, values)\n array([[ 1, 2, 3],\n [40, 50, 60],\n [10, 20, 30]])\n \"\"\"\n locations = list(map(list, locations))\n values = list(values)\n\n n = sum(map(len, locations))\n\n shape = list(values[0].shape)\n shape[0] = n\n shape = tuple(shape)\n\n dtype = values[0].dtype\n\n x = np.empty(shape, dtype=dtype)\n\n ind = [slice(None, None) for i in range(x.ndim)]\n for loc, val in zip(locations, values):\n ind[0] = loc\n x[tuple(ind)] = val\n\n return x\n\n\ndef to_npy_stack(dirname, x, axis=0):\n \"\"\"Write dask array to a stack of .npy files\n\n This partitions the dask.array along one axis and stores each block along\n that axis as a single .npy file in the specified directory\n\n Examples\n --------\n >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP\n >>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP\n\n The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``\n respectively, as is specified by the chunk size along the zeroth axis::\n\n $ tree data/\n data/\n |-- 0.npy\n |-- 1.npy\n |-- 2.npy\n |-- info\n\n The ``info`` file stores the dtype, chunks, and axis information of the array.\n You can load these stacks with the ``da.from_npy_stack`` function.\n\n >>> y = da.from_npy_stack('data/') # doctest: +SKIP\n\n See Also\n --------\n from_npy_stack\n \"\"\"\n\n chunks = tuple((c if i == axis else (sum(c),)) for i, c in enumerate(x.chunks))\n xx = x.rechunk(chunks)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n meta = {\"chunks\": chunks, \"dtype\": x.dtype, \"axis\": axis}\n\n with open(os.path.join(dirname, \"info\"), \"wb\") as f:\n pickle.dump(meta, f)\n\n name = \"to-npy-stack-\" + str(uuid.uuid1())\n dsk = {\n (name, i): (np.save, os.path.join(dirname, \"%d.npy\" % i), key)\n for i, key in enumerate(core.flatten(xx.__dask_keys__()))\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])\n compute_as_if_collection(Array, graph, list(dsk))\n\n\ndef from_npy_stack(dirname, mmap_mode=\"r\"):\n \"\"\"Load dask array from stack of npy files\n\n See ``da.to_npy_stack`` for docstring\n\n Parameters\n ----------\n dirname: string\n Directory of .npy files\n mmap_mode: (None or 'r')\n Read data in memory map mode\n \"\"\"\n with open(os.path.join(dirname, \"info\"), \"rb\") as f:\n info = pickle.load(f)\n\n dtype = info[\"dtype\"]\n chunks = info[\"chunks\"]\n axis = info[\"axis\"]\n\n name = \"from-npy-stack-%s\" % dirname\n keys = list(product([name], *[range(len(c)) for c in chunks]))\n values = [\n (np.load, os.path.join(dirname, \"%d.npy\" % i), mmap_mode)\n for i in range(len(chunks[axis]))\n ]\n dsk = dict(zip(keys, values))\n\n return Array(dsk, name, chunks, dtype)\n\n\nfrom .utils import meta_from_array\n" ]
[ [ "numpy.can_cast", "numpy.asarray", "numpy.isnan", "numpy.median", "numpy.dtype", "numpy.ones", "numpy.max", "numpy.asanyarray", "numpy.isscalar", "numpy.searchsorted", "numpy.errstate", "numpy.prod", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ShamimSuf/AI_FlappyBird_GeneticAlgo
[ "438da76b1327ef3167dea42706dce4aaeccc8136" ]
[ "AI_Flappy.py" ]
[ "#This flappy will have 6 inputs (i1 to i6) : up, down, bird top-right to up-block-low-right, bird top-left to up-block-low-left, so on. \n\nimport numpy as np\nimport pygame\nimport time\nimport random\nfrom random import randint\n\npygame.init()\n\n#6 input nodes\ni_ROW = 1\ni_COL = 6\n\n#3 hidden layer nodes\n#input to hidden layer nodes\t\t\nw1_ROW = 6\nw1_COL = 3\n\n#hiddenb layer to op layer nodes\nw2_ROW = 3\nw2_COL = 1\n\nclass Colors:\n\tdef __init__(self): \n\t\t#https://www.webucator.com/blog/2015/03/python-color-constants-module/\n\t\tself.black = (0,0,0)\n\t\tself.white = (255,255,255)\n\t\tself.cornflowerblue = (100,149,237)\n\t\tself.azure4 = (131,139,139)\n\t\tself.cadetblue1\t= (152,245,255)\n\nclass GameWindow:\n\tdef __init__(self):\n\t\tcolors = Colors()\n\t\t\n\t\tself.surfaceWidth = 800\n\t\tself.surfaceHeight = 400\t\n\t\tself.surface = pygame.display.set_mode((self.surfaceWidth, self.surfaceHeight))\n\t\tself.clock = pygame.time.Clock()\n\t\tself.surface.fill(colors.azure4)\n\t\tpygame.display.set_caption('Flappy Bork')\n\nclass Point:\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\t\nclass Block:\n\tdef __init__(self, gameWindow, x, y_gap, gapHeight):\n\t\t\n\t\tself.gameWindow\t= gameWindow\n\t\tself.x \t\t\t= x\n\t\tself.y_gap\t\t= y_gap\n\t\tself.gapHeight\t= gapHeight\n\t\t\t\t\n\t\tself.blockWidth = 50\n\t\tself.blockSpeed = 5\n\t\t\n\tdef draw(self):\n\t\tcolors = Colors()\n\t\tpygame.draw.rect( self.gameWindow.surface, colors.white, (self.x, 0, self.blockWidth, self.y_gap))\n\t\tpygame.draw.rect( self.gameWindow.surface, colors.white, (self.x, self.y_gap + self.gapHeight, self.blockWidth, self.gameWindow.surfaceHeight - self.y_gap - self.gapHeight))\n\n\tdef reset(self):\n\t\t#Reset block\n\t\tself.x = gameWindow.surfaceWidth - self.blockWidth\n\t\tself.y_gap = random_obj.randint(0, gameWindow.surfaceHeight - 150)\n\t\n#Global Variables (Shitty way of implementing! 
)\t\nrandom_obj = random.SystemRandom()\t\nmutation_rate = 0.3 #between 0,1\ngeneration = 0\ngameWindow = GameWindow()\nblock = Block (gameWindow, gameWindow.surfaceWidth - 50, randint(0, gameWindow.surfaceHeight - 150), 150)\nblock.draw()\t\t\t\t\n\n#Game Over\ndef gg_wp():\n\tuser_input = False\n\twhile not user_input:\n\t\tfor event in pygame.event.get():\n\n\t\t\tif event.type==pygame.KEYDOWN:\n\t\t\t\tuser_input = True\n\n\t\t\t\t#restart game via SPACE key\n\t\t\t\tif event.key == pygame.K_SPACE:\n\t\t\t\t\tmain()\n\n\t\t\telse:\n\t\t\t\tpygame.quit()\n\t\t\t\tquit()\n\t\t\t\t\n\t\t\t\t\ndef getRandMatrix(row, col):\n\tmatrix = np.random.rand(row, col)\n\tfor i in range(matrix.shape[0]):\n\t\tfor j in range(matrix.shape[1]):\n\t\t\tmatrix[i][j] = np.random.uniform(-1, 1)\n\treturn matrix\n\n# ReLU activation Function\ndef ReLU(z):\n\tz[z < 0] = 0\n\treturn z\n\n# Softmax or averaging the value\ndef softmax(z):\n\tsummation = np.sum(z)\n\tif summation == 0.0:\n\t\tsummation = 1.0\n\tfor i in range(len(z)):\n\t\tz[i] = z[i]/summation\n\treturn z\n\n# Sigmoid Activation Function\ndef sigmoid(z):\n\treturn 1.0/(1.0 + np.exp(-z))\n\nclass Brain:\t\n\tdef __init__(self):\t\t\n\t\tself.w1_matrix = getRandMatrix(w1_ROW, w1_COL)\n\t\tself.w2_matrix = getRandMatrix(w2_ROW, w2_COL)\n\t\t\n\tdef feedforward(self, i_matrix):\n\t\top1_matrix = np.dot(i_matrix, \tself.w1_matrix)\n\t\top2_matrix = np.dot(op1_matrix, self.w2_matrix)\n\t\top_final = sigmoid(op2_matrix)\t\n\t\tif( op_final > 0.5):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\nclass Bird:\n\tdef __init__(self, point):\n\t\tself.brain = Brain()\n\t\tself.life = True\n\t\tself.score = 0\n\t\tself.fitness = 0\n\t\t\t\t\n\t\tself.point = point\n\t\tself.size = 50\n\t\tself.fall_speed = 8\n\t\t\t\n\t\t#Increment score everytime bird is alive (increment per frame the bird is alive)\n\t\tself.incr_score = 1;\n\t\t\n\t\t#Input coords\n\t\tself.p1 = Point(0, 0)\n\t\tself.p2 = Point(0, 0)\n\t\tself.p3 = Point(0, 0)\n\t\tself.p4 = Point(0, 0)\n\t\tself.p5 = Point(0, 0)\n\t\tself.p6 = Point(0, 0)\n\t\tself.p7 = Point(0, 0)\n\t\t\n\tdef draw(self):\n\t\tcolors = Colors()\n\t\tpygame.draw.rect( gameWindow.surface, colors.cadetblue1, (self.point.x, self.point.y, self.size, self.size))\n\t\t\n\t\t#get block\n\t\tcurrent_block = block\n\t\t\n\t\t#draw the lines\n\t\tcolors = Colors()\n\t\tcurrent_block = block\n\t\tmidpoint = Point(self.point.x + self.size/2, self.point.y + self.size/2)\n\t\ti1 = midpoint.y - self.size/2\n\t\ti2 = midpoint.y + self.size/2\n\t\t\n\t\tblock_up_low_right\t = Point(current_block.x + current_block.blockWidth\t, current_block.y_gap) \n\t\tblock_up_low_left\t = Point(current_block.x\t\t\t\t\t\t\t, current_block.y_gap)\n\t\tblock_lower_up_left\t = Point(current_block.x\t\t\t\t\t\t\t, current_block.y_gap + current_block.gapHeight)\n\t\tblock_lower_up_right = Point(current_block.x + current_block.blockWidth\t,current_block.y_gap + current_block.gapHeight)\n\t\t\n\t\tp1 = Point( midpoint.x, i1 )\n\t\tp2 = Point( midpoint.x, i2)\n\t\tp3 = Point( midpoint.x + self.size/2, midpoint.y - self.size/2)\n\t\tp4 = Point( midpoint.x - self.size/2, midpoint.y - self.size/2)\n\t\tp5 = Point( midpoint.x - self.size/2, midpoint.y + self.size/2)\n\t\tp6 = Point( midpoint.x + self.size/2, midpoint.y + self.size/2)\n\t\t\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p1.x, p1.y), (p1.x, 0))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p2.x, p2.y), (p2.x, 
gameWindow.surfaceHeight))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p3.x, p3.y), (current_block.x + current_block.blockWidth\t, current_block.y_gap))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p4.x, p4.y), (current_block.x\t\t\t\t\t\t\t\t, current_block.y_gap))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p5.x, p5.y), (current_block.x\t\t\t\t\t\t\t\t, current_block.y_gap + current_block.gapHeight))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p6.x, p6.y), (current_block.x + current_block.blockWidth\t,current_block.y_gap + current_block.gapHeight))\n\t\t\t\t\t\t\n\tdef move(self):\t\t\n\t\t#self.point.y += self.fall_speed\t\t\n\t\tself.score = self.score + 1\n\t\tself.think()\n\n\t\t#self boundary restrict wrt Game Window \n\t\tif ( self.point.x <= 0):\n\t\t\tself.point.x = 0\n\t\tif ( self.point.x >= gameWindow.surfaceWidth - self.size):\n\t\t\tself.point.x = gameWindow.surfaceWidth - self.size\t\t\t\n\t\tif ( self.point.y <= 0):\n\t\t\tself.point.y = 0\n\t\tif ( self.point.y >= gameWindow.surfaceHeight - self.size):\n\t\t\tself.point.y = gameWindow.surfaceHeight - self.size\n\t\t\n\t\t#get_block\n\t\tcurrent_block = block\n\t\t\t\t\n\t\t#check collision\n\t\t#Check if self has collided with block\n\t\tif( (current_block.x < (self.point.x + self.size) < (current_block.x + current_block.blockWidth)) or\n\t\t\t(current_block.x < (self.point.x) \t\t\t < (current_block.x + current_block.blockWidth))):\n\t\t\tif(not( (current_block.y_gap < self.point.y < current_block.y_gap + current_block.gapHeight) and \n\t\t\t\t\t(current_block.y_gap < (self.point.y + self.size) < current_block.y_gap + current_block.gapHeight))):\n\t\t\t\tself.fitness = self.score\n\t\t\t\tself.life = False\n\t\t\t\tself.score = 0.0 \n\t\t\n\t\t#check collision with up and low\n\t\tif (self.point.y == 0) or (self.point.y + 50 == gameWindow.surfaceHeight):\n\t\t\tself.fitness = self.score\n\t\t\tself.life = False\n\t\t\tself.score = 0.0 \n\t\t\n\tdef think(self):\n\t\ti_matrix = self.get_input_matrix()\n\t\tdoFlap = self.brain.feedforward(i_matrix)\n\t\t\n\t\tif doFlap == 1:\n\t\t\tself.fall_speed = 8\n\t\telse:\n\t\t\tself.fall_speed = -3\n\t\t\t\n\t\t#Update player movement value\n\t\tself.point.y += self.fall_speed\n\t\t\t\t\n\tdef reset(self):\n\t\tself.point = Point( 50, gameWindow.surfaceHeight/2)\n\t\tself.life = True\n\n\tdef get_input_matrix(self):\n\t\tcurrent_block = block\n\t\t\n\t\t#TO_DO calculate i_matrix\n\t\tmidpoint = Point(self.point.x + self.size/2, self.point.y + self.size/2)\n\t\t\t\t\n\t\t#i1 = (current_block.x + current_block.blockWidth) - midpoint.x\n\t\t#i2 = current_block.x - midpoint.x\n\t\t\n\t\t#distance of player from up and down\n\t\ti1 = midpoint.y - self.size/2\n\t\ti2 = gameWindow.surfaceHeight - (midpoint.y + self.size/2)\n\t\t\n\t\tblock_up_low_right\t = Point(current_block.x + current_block.blockWidth\t, current_block.y_gap) \n\t\tblock_up_low_left\t = Point(current_block.x\t\t\t\t\t\t\t, current_block.y_gap)\n\t\tblock_lower_up_left\t = Point(current_block.x\t\t\t\t\t\t\t, current_block.y_gap + current_block.gapHeight)\n\t\tblock_lower_up_right = Point(current_block.x + current_block.blockWidth\t,current_block.y_gap + current_block.gapHeight)\n\t\t\n\t\tp1 = Point( midpoint.x, i1 )\n\t\tp2 = Point( midpoint.x, i2)\n\t\tp3 = Point( midpoint.x + self.size/2, midpoint.y - self.size/2)\n\t\tp4 = Point( midpoint.x - self.size/2, midpoint.y - self.size/2)\n\t\tp5 = Point( midpoint.x - self.size/2, midpoint.y + 
self.size/2)\n\t\tp6 = Point( midpoint.x + self.size/2, midpoint.y + self.size/2)\n\n\t\ti3 = np.sqrt(np.square(p3.x - block_up_low_right.x) + np.square(p3.y - block_up_low_right.y))\n\t\ti4 = np.sqrt(np.square(p4.x - block_up_low_left.x) + np.square(p4.y - block_up_low_left.y))\n\t\ti5 = np.sqrt(np.square(p5.x - block_lower_up_left.x) + np.square(p5.y - block_lower_up_left.y))\n\t\ti6 = np.sqrt(np.square(p6.x - block_lower_up_right.x) + np.square(p6.y - block_lower_up_right.y))\n\t\t\n\t\tself.p1 = p1\n\t\tself.p2 = p2\n\t\tself.p3 = p3\n\t\tself.p4 = p4\n\t\tself.p5 = p5\n\t\tself.p6 = p6\t\t\n\t\t\n\t\t'''\n\t\tupblock_lower_left = Point(current_block.x\t\t\t\t\t\t\t\t,current_block.y_gap)\n\t\tupblock_lower_right = Point(current_block.x + current_block.blockWidth\t,current_block.y_gap)\n\t\tlowblock_up_right\t= Point(current_block.x + current_block.blockWidth\t,current_block.y_gap + current_block.gapHeight)\n\t\tlowblock_up_left\t= Point(current_block.x\t\t\t\t\t\t\t\t,current_block.y_gap + current_block.gapHeight)\n\t\t\n\t\ti3 = np.sqrt(np.square(midpoint.x - upblock_lower_left.x) \t+ np.square(midpoint.y - upblock_lower_left.y))\n\t\ti4 = np.sqrt(np.square(midpoint.x - upblock_lower_right.x) \t+ np.square(midpoint.y - upblock_lower_right.y))\n\t\ti5 = np.sqrt(np.square(midpoint.x - lowblock_up_right.x) \t+ np.square(midpoint.y - lowblock_up_right.y))\n\t\ti6 = np.sqrt(np.square(midpoint.x - lowblock_up_left.x) \t+ np.square(midpoint.y - lowblock_up_left.y))\n\t\t'''\n\t\t\n\t\treturn np.array([i1, i2, i3, i4, i5, i6])\n\nclass Population:\n\tdef __init__(self):\n\t\tself.population = []\n\t\tself.eliminated = []\t\n\t\n\tdef createPopulation(self):\n\t\tfor i in range(12):\n\t\t\tbird = Bird(Point( 50, gameWindow.surfaceHeight/2))\n\t\t\tself.population.append(bird)\n\t\t\t\n\tdef move(self):\n\t\tfor bird in self.population:\n\t\t\tbird.move()\n\n\t\tpopCopy = self.population[:]\n\t\tfor bird in popCopy:\n\t\t\tif not bird.life:\n\t\t\t\tself.eliminated.append(bird)\n\t\t\t\tself.population.remove(bird)\n\n\t\tif self.population == []:\n\t\t\tself.evolve()\n\t\n\t#aka def reproduce(self)\n\tdef evolve(self):\n\t\tglobal generation\t\t\n\t\tgeneration = generation + 1\n\t\t\n\t\t#reset block since all birds are dead\n\t\t#evolve is called when entire population is dead\n\t\tblock.reset()\n\t\t\t\n\t\t#Draw block\n\t\t#block.draw()\n\t\t\n\t\tself.crossbreed() #takes top and creates babies\n\t\tself.mutate()\n\n\tdef crossbreed(self):\n\t\tself.eliminated.sort(key=lambda x: x.fitness, reverse=True)\n\t\t\n\t\t#assuming 12 birds in population\n\t\tbaby1, baby2 = self.getBabies(self.eliminated[0], self.eliminated[1]) \n\t\tbaby3, baby4 = self.getBabies(self.eliminated[2], self.eliminated[3])\n\t\tbaby5, baby6 = self.getBabies(self.eliminated[4], self.eliminated[5])\n\t\t\n\t\tfor i in range(6):\n\t\t\tself.population.append(self.eliminated[i])\n\t\t\n\t\tself.population.append(baby1)\n\t\tself.population.append(baby2)\n\t\tself.population.append(baby3)\n\t\tself.population.append(baby4)\n\t\tself.population.append(baby5)\n\t\tself.population.append(baby6)\n\t\t\n\t\t#clear self.eliminated list\n\t\tself.eliminated = []\n\n\tdef getBabies(self, parent_bird1, parent_bird2):\n\t\tbaby1 = Bird (Point( 50, gameWindow.surfaceHeight/2))\n\t\tbaby2 = Bird (Point( 50, gameWindow.surfaceHeight/2))\n\t\t\n\t\t#w1 matrix\n\t\tfor i in range(baby1.brain.w1_matrix.shape[0]):\n\t\t\tfor j in range(baby1.brain.w1_matrix.shape[1]):\n\t\t\t\tbaby1.brain.w1_matrix[i][j] = 
random.choice([parent_bird1.brain.w1_matrix[i][j], parent_bird2.brain.w1_matrix[i][j]])\t\t\n\t\t\t\tbaby2.brain.w1_matrix[i][j] = random.choice([parent_bird1.brain.w1_matrix[i][j], parent_bird2.brain.w1_matrix[i][j]])\n\n\t\t#w2 matrix\n\t\tfor i in range(baby1.brain.w2_matrix.shape[0]):\n\t\t\tfor j in range(baby1.brain.w2_matrix.shape[1]):\n\t\t\t\tbaby1.brain.w2_matrix[i][j] = random.choice([parent_bird1.brain.w2_matrix[i][j], parent_bird2.brain.w2_matrix[i][j]])\n\t\t\t\tbaby2.brain.w2_matrix[i][j] = random.choice([parent_bird1.brain.w2_matrix[i][j], parent_bird2.brain.w2_matrix[i][j]])\t\t\t\t\n\t\t\n\t\treturn baby1, baby2\n\t\t\n\tdef mutate(self):\t\t\n\t\t#Mutate single bird, 6/18 from W1 and 1/3 from W2\t\t\n\t\trandom_bird_index = random_obj.randint(0, len(self.population)-1)\n\t\t\t\t\n\t\tfor x in range(6):\n\t\t\trandom_row = random_obj.randint(0,5)\n\t\t\trandom_col = random_obj.randint(0,2)\n\t\t\tself.population[random_bird_index].brain.w1_matrix[random_row][random_col] = np.random.uniform(-1, 1)\n\n\t\tfor x in range(1):\n\t\t\trandom_row_w2 = random_obj.randint(0,2)\n\t\t\tself.population[random_bird_index].brain.w2_matrix[random_row_w2][0] = np.random.uniform(-1, 1)\n\t\t\n\tdef draw(self):\n\t\tfor bird in self.population:\n\t\t\tbird.draw()\n\t\t\ndef gameLoop():\n\tloop = True\n\tpopulation = Population()\n\tpopulation.createPopulation()\n\tspeed = 60\n\tcolors = Colors()\n\t\n\tglobal generation\n\t\n\twhile loop:\t\n\n\t\t#display info\n\t\ttext = \"Generation: \" + str(generation)\n\t\t#population.population.sort(key=lambda x: x.score, reverse=True)\n\t\t#top_bird = population.population[0]\n\t\t#text = text + \" Top Score: \" + str(top_bird.score) + \" Top Fitness: \" + str(top_bird.fitness)\n\t\tprint(text)\n\t\t\n\t\tfor event in pygame.event.get():\t\t\t\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\tgg_wp()\n\t\t\t\tif event.key == pygame.K_1:\n\t\t\t\t\tspeed = 60*1\n\t\t\t\tif event.key == pygame.K_2:\n\t\t\t\t\tspeed = 60*2\n\t\t\t\tif event.key == pygame.K_3:\n\t\t\t\t\tspeed = 60*3\n\t\t\t\tif event.key == pygame.K_4:\n\t\t\t\t\tspeed = 60*4\n\t\t\t\tif event.key == pygame.K_5:\n\t\t\t\t\tspeed = 60*5\n\t\t\t\tif event.key == pygame.K_6:\n\t\t\t\t\tspeed = 60*6\n\t\t\t\tif event.key == pygame.K_7:\n\t\t\t\t\tspeed = 60*7\n\t\t\t\tif event.key == pygame.K_8:\n\t\t\t\t\tspeed = 60*8\n\t\t\t\tif event.key == pygame.K_9:\n\t\t\t\t\tspeed = 60*9\t\t\n\n\t\t#Reset block\n\t\tif( block.x <= -block.blockWidth ):\n\t\t\tblock.reset()\n\t\t\t\t\n\t\t#Fill Game Window\n\t\tgameWindow.surface.fill(colors.azure4)\n\t\t\n\t\t#Block move\t\t\n\t\tblock.x \t= block.x - block.blockSpeed \t\t\n\t\t\n\t\t#draw block\n\t\tblock.draw()\t\n\t\t\n\t\tpopulation.move() \n\t\tpopulation.draw()\n\t\t\n\t\tpygame.display.update()\n\t\tgameWindow.clock.tick(speed)\ngameLoop()\n" ]
[ [ "numpy.square", "numpy.dot", "numpy.random.rand", "numpy.exp", "numpy.random.uniform", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Tesla2fox/MPDA-DE
[ "0a27d59ceba16d292fade01d95b1c1f336e5f604" ]
[ "MPDA_decode/instance.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 21 15:49:17 2018\n\n@author: robot\n\"\"\"\nfrom readCfg import *\nimport numpy as np\nfrom enum import Enum\n\nclass TaskModelType(Enum):\n ExpModel = 1\n LineModel = 2\n\n\n\n\nclass Instance(object):\n def __init__(self, insFileName = 'wtf'):\n self.insFileName = insFileName\n readCfg = Read_Cfg(insFileName)\n self.robNum = int(readCfg.getSingleVal('robNum'))\n self.taskNum = int(readCfg.getSingleVal('taskNum'))\n self.threhold = readCfg.getSingleVal('comp_threhold')\n self.robAbiLst = []\n self.robVelLst = []\n self.taskStateLst = []\n self.taskRateLst = []\n readCfg.get('rob_abi',self.robAbiLst)\n readCfg.get('rob_vel',self.robVelLst)\n readCfg.get('tsk_rat',self.taskRateLst)\n readCfg.get('tsk_state',self.taskStateLst)\n self.rob2taskDisMat = np.zeros((self.robNum,self.taskNum))\n disLst = []\n readCfg.get('rob2tskDis',disLst)\n# print(self.rob2taskDisMat)\n for i in range(self.robNum):\n for j in range(self.taskNum):\n# print(i,j)\n# print(i*self.robNum+j)\n# print(disLst[i*self.robNum+j])\n self.rob2taskDisMat[i][j] = disLst[i*self.taskNum+j]\n self.taskDisMat = np.zeros((self.taskNum,self.taskNum))\n disLst = []\n readCfg.get('tskDis',disLst)\n for i in range(self.taskNum):\n for j in range(self.taskNum):\n self.taskDisMat[i][j] = disLst[i*self.taskNum+j]\n # self.decode = DecodeSS(self.insFileName)\n self.taskModelType = TaskModelType.ExpModel\n\n def __str__(self):\n return self.insFileName + '\\n robNum = '+ str(self.robNum) +' task =' +str(self.taskNum)\n def __eq__(self,other):\n if self.insFileName == other.insFileName:\n return True\n else:\n return False\n def __ne__(self,other):\n if self.__eq__(other):\n return False\n return True\n def evaluate(self,encode):\n self.decode.encode = encode\n# makespan = self.decode.decode()\n try:\n makespan = self.decode.decode()\n pass\n except InvalidStateException:\n makespan = sys.float_info.max\n except RobotStuckException:\n makespan = sys.float_info.max\n# except Exception as e:\n# print(e)\n return makespan\n def genNoBackTrackEncode(self,encode):\n resEncode = np.zeros((self.robNum,self.taskNum),dtype =int)\n resEncode[:][:] = -1\n for i in range(self.robNum):\n ind = 0\n for j in range(self.taskNum):\n if encode[i][j] != -1:\n resEncode[i][ind] = encode[i][j]\n ind += 1\n return resEncode\n def calRob2TaskPeriod(self,robID,taskID):\n dis = self.rob2taskDisMat[robID][taskID]\n dis_time = dis/self.robVelLst[robID]\n return dis_time\n def calTask2TaskPeriod(self,robID,taskID1,taskID2):\n dis = self.taskDisMat[taskID1][taskID2]\n period = dis/self.robVelLst[robID]\n return period\n\n\nif __name__ =='__main__':\n\n wtf = Read_Cfg(\"wf\")\n\n # insName = 's100_5_10_max100_2.5_2.5_2.5_1.2_thre0.1_MPDAins.dat'\n # ins = Instance(BaseDir + '//data\\\\' + insName)\n # insName = 's100_5_10_max100_2.5_2.5_2.5_1.2_thre0.1_MPDAins.dat'\n # ins2 = Instance(BaseDir + '//data\\\\' + insName)\n # if ins == ins2:\n # print('asd')\n # print(ins)\n \n \n " ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TianjieZhang1993/PINNs
[ "9034ba7f4fef81c24954fa3cbf08a2d4a7fee85a" ]
[ "appendix/discrete_time_identification (Burgers)/Burgers_systematic.py" ]
[ "\"\"\"\n@author: Maziar Raissi\n\"\"\"\n\nimport sys\nsys.path.insert(0, '../../Utilities/')\n\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport scipy.io\n\nnp.random.seed(1234)\ntf.set_random_seed(1234)\n\n\nclass PhysicsInformedNN:\n # Initialize the class\n def __init__(self, x0, u0, x1, u1, layers, dt, lb, ub, q):\n \n self.lb = lb\n self.ub = ub\n \n self.x0 = x0\n self.x1 = x1\n \n self.u0 = u0\n self.u1 = u1\n \n self.layers = layers\n self.dt = dt\n self.q = max(q,1)\n \n # Initialize NN\n self.weights, self.biases = self.initialize_NN(layers)\n \n # Initialize parameters\n self.lambda_1 = tf.Variable([0.0], dtype=tf.float32)\n self.lambda_2 = tf.Variable([-6.0], dtype=tf.float32) \n \n # Load IRK weights\n tmp = np.float32(np.loadtxt('../../Utilities/IRK_weights/Butcher_IRK%d.txt' % (q), ndmin = 2))\n weights = np.reshape(tmp[0:q**2+q], (q+1,q)) \n self.IRK_alpha = weights[0:-1,:]\n self.IRK_beta = weights[-1:,:] \n self.IRK_times = tmp[q**2+q:]\n \n # tf placeholders and graph\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=True))\n \n self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))\n self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))\n self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))\n self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))\n self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients \n self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients \n \n self.U0_pred = self.net_U0(self.x0_tf) # N0 x q\n self.U1_pred = self.net_U1(self.x1_tf) # N1 x q\n \n self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \\\n tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred)) \n \n self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, \n method = 'L-BFGS-B', \n options = {'maxiter': 50000,\n 'maxfun': 50000,\n 'maxcor': 50,\n 'maxls': 50,\n 'ftol' : 1.0 * np.finfo(float).eps}) \n \n self.optimizer_Adam = tf.train.AdamOptimizer()\n self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)\n \n init = tf.global_variables_initializer()\n self.sess.run(init)\n \n def initialize_NN(self, layers): \n weights = []\n biases = []\n num_layers = len(layers) \n for l in range(0,num_layers-1):\n W = self.xavier_init(size=[layers[l], layers[l+1]])\n b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)\n weights.append(W)\n biases.append(b) \n return weights, biases\n \n def xavier_init(self, size):\n in_dim = size[0]\n out_dim = size[1] \n xavier_stddev = np.sqrt(2/(in_dim + out_dim))\n return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)\n \n def neural_net(self, X, weights, biases):\n num_layers = len(weights) + 1\n \n H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0\n for l in range(0,num_layers-2):\n W = weights[l]\n b = biases[l]\n H = tf.tanh(tf.add(tf.matmul(H, W), b))\n W = weights[-1]\n b = biases[-1]\n Y = tf.add(tf.matmul(H, W), b)\n return Y\n \n def fwd_gradients_0(self, U, x): \n g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0]\n return tf.gradients(g, self.dummy_x0_tf)[0]\n \n def fwd_gradients_1(self, U, x): \n g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0]\n return tf.gradients(g, self.dummy_x1_tf)[0] \n \n def net_U0(self, x):\n lambda_1 = self.lambda_1\n lambda_2 = tf.exp(self.lambda_2)\n U = self.neural_net(x, self.weights, 
self.biases) \n U_x = self.fwd_gradients_0(U, x)\n U_xx = self.fwd_gradients_0(U_x, x)\n F = -lambda_1*U*U_x + lambda_2*U_xx\n U0 = U - self.dt*tf.matmul(F, self.IRK_alpha.T)\n return U0\n \n def net_U1(self, x):\n lambda_1 = self.lambda_1\n lambda_2 = tf.exp(self.lambda_2)\n U = self.neural_net(x, self.weights, self.biases) \n U_x = self.fwd_gradients_1(U, x)\n U_xx = self.fwd_gradients_1(U_x, x)\n F = -lambda_1*U*U_x + lambda_2*U_xx\n U1 = U + self.dt*tf.matmul(F, (self.IRK_beta - self.IRK_alpha).T)\n return U1\n\n def callback(self, loss):\n print('Loss:', loss)\n \n def train(self, nIter):\n tf_dict = {self.x0_tf: self.x0, self.u0_tf: self.u0, \n self.x1_tf: self.x1, self.u1_tf: self.u1,\n self.dummy_x0_tf: np.ones((self.x0.shape[0], self.q)),\n self.dummy_x1_tf: np.ones((self.x1.shape[0], self.q))}\n \n start_time = time.time()\n for it in range(nIter):\n self.sess.run(self.train_op_Adam, tf_dict)\n \n # Print\n if it % 10 == 0:\n elapsed = time.time() - start_time\n loss_value = self.sess.run(self.loss, tf_dict)\n lambda_1_value = self.sess.run(self.lambda_1)\n lambda_2_value = np.exp(self.sess.run(self.lambda_2))\n print('It: %d, Loss: %.3e, l1: %.3f, l2: %.5f, Time: %.2f' % \n (it, loss_value, lambda_1_value, lambda_2_value, elapsed))\n start_time = time.time()\n \n self.optimizer.minimize(self.sess,\n feed_dict = tf_dict,\n fetches = [self.loss],\n loss_callback = self.callback)\n \n def predict(self, x_star):\n \n U0_star = self.sess.run(self.U0_pred, {self.x0_tf: x_star, self.dummy_x0_tf: np.ones((x_star.shape[0], self.q))}) \n U1_star = self.sess.run(self.U1_pred, {self.x1_tf: x_star, self.dummy_x1_tf: np.ones((x_star.shape[0], self.q))})\n \n return U0_star, U1_star\n\n \ndef main_loop(skip, noise, num_layers, num_neurons):\n \n N0 = 199\n N1 = 201\n \n data = scipy.io.loadmat('../Data/burgers_shock.mat')\n \n t_star = data['t'].flatten()[:,None]\n x_star = data['x'].flatten()[:,None]\n Exact = np.real(data['usol'])\n \n idx_t = 10\n \n idx_x = np.random.choice(Exact.shape[0], N0, replace=False)\n x0 = x_star[idx_x,:]\n u0 = Exact[idx_x,idx_t][:,None]\n u0 = u0 + noise*np.std(u0)*np.random.randn(u0.shape[0], u0.shape[1])\n \n idx_x = np.random.choice(Exact.shape[0], N1, replace=False)\n x1 = x_star[idx_x,:]\n u1 = Exact[idx_x,idx_t + skip][:,None]\n u1 = u1 + noise*np.std(u1)*np.random.randn(u1.shape[0], u1.shape[1])\n \n dt = np.asscalar(t_star[idx_t+skip] - t_star[idx_t]) \n q = int(np.ceil(0.5*np.log(np.finfo(float).eps)/np.log(dt)))\n \n layers = np.concatenate([[1], num_neurons*np.ones(num_layers), [q]]).astype(int).tolist() \n \n # Doman bounds\n lb = x_star.min(0)\n ub = x_star.max(0)\n\n model = PhysicsInformedNN(x0, u0, x1, u1, layers, dt, lb, ub, q)\n model.train(nIter = 50000)\n \n U0_pred, U1_pred = model.predict(x_star) \n \n lambda_1_value = model.sess.run(model.lambda_1)\n lambda_2_value = np.exp(model.sess.run(model.lambda_2))\n \n nu = 0.01/np.pi \n error_lambda_1 = np.abs(lambda_1_value - 1.0)/1.0 *100\n error_lambda_2 = np.abs(lambda_2_value - nu)/nu * 100\n \n print('Error lambda_1: %f%%' % (error_lambda_1))\n print('Error lambda_2: %f%%' % (error_lambda_2))\n \n return error_lambda_1, error_lambda_2\n \n \nif __name__ == \"__main__\": \n \n skip = [20, 40, 60, 80]\n noise = [0.0, 0.01, 0.05, 0.1]\n \n num_layers = [1,2,3,4]\n num_neurons = [10,25,50]\n \n error_lambda_1_table_1 = np.zeros((len(skip), len(noise)))\n error_lambda_2_table_1 = np.zeros((len(skip), len(noise)))\n \n error_lambda_1_table_2 = np.zeros((len(num_layers), len(num_neurons)))\n 
error_lambda_2_table_2 = np.zeros((len(num_layers), len(num_neurons)))\n \n for i in range(len(skip)):\n for j in range(len(noise)):\n error_lambda_1_table_1[i,j], error_lambda_2_table_1[i,j] = main_loop(skip[i], noise[j], num_layers[-1], num_neurons[-1])\n \n for i in range(len(num_layers)):\n for j in range(len(num_neurons)):\n error_lambda_1_table_2[i,j], error_lambda_2_table_2[i,j] = main_loop(skip[-1], noise[0], num_layers[i], num_neurons[j])\n \n \n np.savetxt('./tables/error_lambda_1_table_1.csv', error_lambda_1_table_1, delimiter=' & ', fmt='$%2.3f$', newline=' \\\\\\\\\\n')\n np.savetxt('./tables/error_lambda_2_table_1.csv', error_lambda_2_table_1, delimiter=' & ', fmt='$%2.3f$', newline=' \\\\\\\\\\n')\n\n np.savetxt('./tables/error_lambda_1_table_2.csv', error_lambda_1_table_2, delimiter=' & ', fmt='$%2.3f$', newline=' \\\\\\\\\\n')\n np.savetxt('./tables/error_lambda_2_table_2.csv', error_lambda_2_table_2, delimiter=' & ', fmt='$%2.3f$', newline=' \\\\\\\\\\n')\n\n" ]
[ [ "numpy.sqrt", "tensorflow.zeros", "numpy.random.randn", "tensorflow.train.AdamOptimizer", "numpy.asscalar", "tensorflow.Variable", "numpy.reshape", "tensorflow.gradients", "numpy.finfo", "tensorflow.ConfigProto", "numpy.real", "numpy.std", "tensorflow.square", "tensorflow.matmul", "numpy.log", "tensorflow.truncated_normal", "numpy.random.choice", "tensorflow.placeholder", "tensorflow.exp", "tensorflow.global_variables_initializer", "tensorflow.set_random_seed", "numpy.savetxt", "numpy.abs", "numpy.random.seed", "numpy.ones", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
justusschock/tensorboardX
[ "1c16e127f9a737c0b45d0447c20499dec666130c" ]
[ "tensorboardX/summary.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"## Generation of summaries.\n### Class for writing Summaries\n@@FileWriter\n@@FileWriterCache\n### Summary Ops\n@@tensor_summary\n@@scalar\n@@histogram\n@@audio\n@@image\n@@merge\n@@merge_all\n## Utilities\n@@get_summary_description\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport bisect\nimport logging\nimport numpy as np\nimport os\nimport re as _re\n\n# pylint: disable=unused-import\nfrom six import StringIO\nfrom six.moves import range\nfrom .proto.summary_pb2 import Summary\nfrom .proto.summary_pb2 import HistogramProto\nfrom .proto.summary_pb2 import SummaryMetadata\nfrom .proto.tensor_pb2 import TensorProto\nfrom .proto.tensor_shape_pb2 import TensorShapeProto\nfrom .proto.plugin_pr_curve_pb2 import PrCurvePluginData\nfrom .proto.plugin_text_pb2 import TextPluginData\nfrom .proto import layout_pb2\nfrom .x2num import make_np\nfrom .utils import _prepare_video, convert_to_HWC\n\n_INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\\w\\.]')\n\n\ndef _calc_scale_factor(tensor):\n converted = tensor.numpy() if not isinstance(tensor, np.ndarray) else tensor\n return 1 if converted.dtype == np.uint8 else 255\n\n\ndef _clean_tag(name):\n # In the past, the first argument to summary ops was a tag, which allowed\n # arbitrary characters. Now we are changing the first argument to be the node\n # name. This has a number of advantages (users of summary ops now can\n # take advantage of the tf name scope system) but risks breaking existing\n # usage, because a much smaller set of characters are allowed in node names.\n # This function replaces all illegal characters with _s, and logs a warning.\n # It also strips leading slashes from the name.\n if name is not None:\n new_name = _INVALID_TAG_CHARACTERS.sub('_', name)\n new_name = new_name.lstrip('/') # Remove leading slashes\n if new_name != name:\n logging.info(\n 'Summary name %s is illegal; using %s instead.' 
% (name, new_name))\n name = new_name\n return name\n\n\ndef _draw_single_box(image, xmin, ymin, xmax, ymax, display_str, color='black', color_text='black', thickness=2):\n from PIL import ImageDraw, ImageFont\n font = ImageFont.load_default()\n draw = ImageDraw.Draw(image)\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n draw.line([(left, top), (left, bottom), (right, bottom),\n (right, top), (left, top)], width=thickness, fill=color)\n if display_str:\n text_bottom = bottom\n # Reverse list and print from bottom to top.\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle(\n [(left, text_bottom - text_height - 2 * margin),\n (left + text_width, text_bottom)], fill=color\n )\n draw.text(\n (left + margin, text_bottom - text_height - margin),\n display_str, fill=color_text, font=font\n )\n return image\n\n\ndef scalar(name, scalar, collections=None):\n \"\"\"Outputs a `Summary` protocol buffer containing a single scalar value.\n The generated Summary has a Tensor.proto containing the input Tensor.\n Args:\n name: A name for the generated node. Will also serve as the series name in\n TensorBoard.\n tensor: A real numeric Tensor containing a single value.\n collections: Optional list of graph collections keys. The new summary op is\n added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.\n Returns:\n A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf.\n Raises:\n ValueError: If tensor has the wrong shape or type.\n \"\"\"\n name = _clean_tag(name)\n scalar = make_np(scalar)\n assert(scalar.squeeze().ndim == 0), 'scalar should be 0D'\n scalar = float(scalar)\n return Summary(value=[Summary.Value(tag=name, simple_value=scalar)])\n\n\ndef histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts):\n # pylint: disable=line-too-long\n \"\"\"Outputs a `Summary` protocol buffer with a histogram.\n The generated\n [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\n has one summary value containing a histogram for `values`.\n Args:\n name: A name for the generated node. Will also serve as a series name in\n TensorBoard.\n min: A float or int min value\n max: A float or int max value\n num: Int number of values\n sum: Float or int sum of all values\n sum_squares: Float or int sum of squares for all values\n bucket_limits: A numeric `Tensor` with upper value per bucket\n bucket_counts: A numeric `Tensor` with number of values per bucket\n Returns:\n A scalar `Tensor` of type `string`. The serialized `Summary` protocol\n buffer.\n \"\"\"\n hist = HistogramProto(min=min,\n max=max,\n num=num,\n sum=sum,\n sum_squares=sum_squares,\n bucket_limit=bucket_limits,\n bucket=bucket_counts)\n return Summary(value=[Summary.Value(tag=name, histo=hist)])\n\n\ndef histogram(name, values, bins, max_bins=None):\n # pylint: disable=line-too-long\n \"\"\"Outputs a `Summary` protocol buffer with a histogram.\n The generated\n [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\n has one summary value containing a histogram for `values`.\n This op reports an `InvalidArgument` error if any value is not finite.\n Args:\n name: A name for the generated node. Will also serve as a series name in\n TensorBoard.\n values: A real numeric `Tensor`. Any shape. Values to use to\n build the histogram.\n Returns:\n A scalar `Tensor` of type `string`. 
The serialized `Summary` protocol\n buffer.\n \"\"\"\n name = _clean_tag(name)\n values = make_np(values)\n hist = make_histogram(values.astype(float), bins, max_bins)\n return Summary(value=[Summary.Value(tag=name, histo=hist)])\n\n\ndef make_histogram(values, bins, max_bins=None):\n \"\"\"Convert values into a histogram proto using logic from histogram.cc.\"\"\"\n if values.size == 0:\n raise ValueError('The input has no element.')\n values = values.reshape(-1)\n counts, limits = np.histogram(values, bins=bins)\n num_bins = len(counts)\n if max_bins is not None and num_bins > max_bins:\n subsampling = num_bins // max_bins\n subsampling_remainder = num_bins % subsampling\n if subsampling_remainder != 0:\n counts = np.pad(counts, pad_width=[[0, subsampling - subsampling_remainder]],\n mode=\"constant\", constant_values=0)\n counts = counts.reshape(-1, subsampling).sum(axis=-1)\n new_limits = np.empty((counts.size + 1,), limits.dtype)\n new_limits[:-1] = limits[:-1:subsampling]\n new_limits[-1] = limits[-1]\n limits = new_limits\n\n # Find the first and the last bin defining the support of the histogram:\n cum_counts = np.cumsum(np.greater(counts, 0, dtype=np.int32))\n start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side=\"right\")\n start = int(start)\n end = int(end) + 1\n del cum_counts\n\n # Tensorboard only includes the right bin limits. To still have the leftmost limit\n # included, we include an empty bin left.\n # If start == 0, we need to add an empty one left, otherwise we can just include the bin left to the\n # first nonzero-count bin:\n counts = counts[start - 1:end] if start > 0 else np.concatenate([[0], counts[:end]])\n limits = limits[start:end + 1]\n\n if counts.size == 0 or limits.size == 0:\n raise ValueError('The histogram is empty, please file a bug report.')\n\n sum_sq = values.dot(values)\n return HistogramProto(min=values.min(),\n max=values.max(),\n num=len(values),\n sum=values.sum(),\n sum_squares=sum_sq,\n bucket_limit=limits.tolist(),\n bucket=counts.tolist())\n\n\ndef image(tag, tensor, rescale=1, dataformats='NCHW'):\n \"\"\"Outputs a `Summary` protocol buffer with images.\n The summary has up to `max_images` summary values containing images. The\n images are built from `tensor` which must be 3-D with shape `[height, width,\n channels]` and where `channels` can be:\n * 1: `tensor` is interpreted as Grayscale.\n * 3: `tensor` is interpreted as RGB.\n * 4: `tensor` is interpreted as RGBA.\n The `name` in the outputted Summary.Value protobufs is generated based on the\n name, with a suffix depending on the max_outputs setting:\n * If `max_outputs` is 1, the summary value tag is '*name*/image'.\n * If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*name*/image/0', '*name*/image/1', etc.\n Args:\n tag: A name for the generated node. Will also serve as a series name in\n TensorBoard.\n tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width,\n channels]` where `channels` is 1, 3, or 4.\n 'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8).\n The image() function will scale the image values to [0, 255] by applying\n a scale factor of either 1 (uint8) or 255 (float32).\n Returns:\n A scalar `Tensor` of type `string`. 
The serialized `Summary` protocol\n buffer.\n \"\"\"\n tag = _clean_tag(tag)\n tensor = make_np(tensor)\n tensor = convert_to_HWC(tensor, dataformats)\n # Do not assume that user passes in values in [0, 255], use data type to detect\n scale_factor = _calc_scale_factor(tensor)\n tensor = tensor.astype(np.float32)\n tensor = (tensor * scale_factor).astype(np.uint8)\n image = make_image(tensor, rescale=rescale)\n return Summary(value=[Summary.Value(tag=tag, image=image)])\n\n\ndef image_boxes(tag, tensor_image, tensor_boxes, rescale=1, dataformats='CHW'):\n '''Outputs a `Summary` protocol buffer with images.'''\n tensor_image = make_np(tensor_image)\n tensor_image = convert_to_HWC(tensor_image, dataformats)\n tensor_boxes = make_np(tensor_boxes)\n tensor_image = tensor_image.astype(\n np.float32) * _calc_scale_factor(tensor_image)\n image = make_image(tensor_image.astype(np.uint8),\n rescale=rescale,\n rois=tensor_boxes)\n return Summary(value=[Summary.Value(tag=tag, image=image)])\n\n\ndef draw_boxes(disp_image, boxes):\n # xyxy format\n num_boxes = boxes.shape[0]\n list_gt = range(num_boxes)\n for i in list_gt:\n disp_image = _draw_single_box(disp_image,\n boxes[i, 0],\n boxes[i, 1],\n boxes[i, 2],\n boxes[i, 3],\n display_str=None,\n color='Red')\n return disp_image\n\n\ndef make_image(tensor, rescale=1, rois=None):\n \"\"\"Convert an numpy representation image to Image protobuf\"\"\"\n from PIL import Image\n height, width, channel = tensor.shape\n scaled_height = int(height * rescale)\n scaled_width = int(width * rescale)\n image = Image.fromarray(tensor)\n if rois is not None:\n image = draw_boxes(image, rois)\n image = image.resize((scaled_width, scaled_height), Image.ANTIALIAS)\n import io\n output = io.BytesIO()\n image.save(output, format='PNG')\n image_string = output.getvalue()\n output.close()\n return Summary.Image(height=height,\n width=width,\n colorspace=channel,\n encoded_image_string=image_string)\n\n\ndef video(tag, tensor, fps=4):\n tag = _clean_tag(tag)\n tensor = make_np(tensor)\n tensor = _prepare_video(tensor)\n # If user passes in uint8, then we don't need to rescale by 255\n scale_factor = _calc_scale_factor(tensor)\n tensor = tensor.astype(np.float32)\n tensor = (tensor * scale_factor).astype(np.uint8)\n video = make_video(tensor, fps)\n return Summary(value=[Summary.Value(tag=tag, image=video)])\n\n\ndef make_video(tensor, fps):\n try:\n import moviepy\n except ImportError:\n print('add_video needs package moviepy')\n return\n try:\n from moviepy import editor as mpy\n except ImportError:\n print(\"moviepy is installed, but can't import moviepy.editor.\",\n \"Some packages could be missing [imageio, requests]\")\n return\n import tempfile\n\n t, h, w, c = tensor.shape\n\n # encode sequence of images into gif string\n clip = mpy.ImageSequenceClip(list(tensor), fps=fps)\n with tempfile.NamedTemporaryFile() as f:\n filename = f.name + '.gif'\n\n try:\n clip.write_gif(filename, verbose=False, progress_bar=False)\n except TypeError:\n clip.write_gif(filename, verbose=False)\n\n with open(filename, 'rb') as f:\n tensor_string = f.read()\n\n try:\n os.remove(filename)\n except OSError:\n pass\n\n return Summary.Image(height=h, width=w, colorspace=c, encoded_image_string=tensor_string)\n\n\ndef audio(tag, tensor, sample_rate=44100):\n tensor = make_np(tensor)\n tensor = tensor.squeeze()\n if abs(tensor).max() > 1:\n print('warning: audio amplitude out of range, auto clipped.')\n tensor = tensor.clip(-1, 1)\n assert(tensor.ndim == 1), 'input tensor should be 1 
dimensional.'\n\n tensor_list = [int(32767.0 * x) for x in tensor]\n import io\n import wave\n import struct\n fio = io.BytesIO()\n Wave_write = wave.open(fio, 'wb')\n Wave_write.setnchannels(1)\n Wave_write.setsampwidth(2)\n Wave_write.setframerate(sample_rate)\n tensor_enc = b''\n for v in tensor_list:\n tensor_enc += struct.pack('<h', v)\n\n Wave_write.writeframes(tensor_enc)\n Wave_write.close()\n audio_string = fio.getvalue()\n fio.close()\n audio = Summary.Audio(sample_rate=sample_rate,\n num_channels=1,\n length_frames=len(tensor_list),\n encoded_audio_string=audio_string,\n content_type='audio/wav')\n return Summary(value=[Summary.Value(tag=tag, audio=audio)])\n\n\ndef custom_scalars(layout):\n categoriesnames = layout.keys()\n categories = []\n layouts = []\n for k, v in layout.items():\n charts = []\n for chart_name, chart_meatadata in v.items():\n tags = chart_meatadata[1]\n if chart_meatadata[0] == 'Margin':\n assert len(tags) == 3\n mgcc = layout_pb2.MarginChartContent(series=[layout_pb2.MarginChartContent.Series(value=tags[0],\n lower=tags[1],\n upper=tags[2])])\n chart = layout_pb2.Chart(title=chart_name, margin=mgcc)\n else:\n mlcc = layout_pb2.MultilineChartContent(tag=tags)\n chart = layout_pb2.Chart(title=chart_name, multiline=mlcc)\n charts.append(chart)\n categories.append(layout_pb2.Category(title=k, chart=charts))\n\n layout = layout_pb2.Layout(category=categories)\n PluginData = [SummaryMetadata.PluginData(plugin_name='custom_scalars')]\n smd = SummaryMetadata(plugin_data=PluginData)\n tensor = TensorProto(dtype='DT_STRING',\n string_val=[layout.SerializeToString()],\n tensor_shape=TensorShapeProto())\n return Summary(value=[Summary.Value(tag='custom_scalars__config__', tensor=tensor, metadata=smd)])\n\n\ndef text(tag, text):\n import json\n PluginData = [SummaryMetadata.PluginData(\n plugin_name='text', content=TextPluginData(version=0).SerializeToString())]\n smd = SummaryMetadata(plugin_data=PluginData)\n tensor = TensorProto(dtype='DT_STRING',\n string_val=[text.encode(encoding='utf_8')],\n tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]))\n return Summary(value=[Summary.Value(tag=tag + '/text_summary', metadata=smd, tensor=tensor)])\n\n\ndef pr_curve_raw(tag, tp, fp, tn, fn, precision, recall, num_thresholds=127, weights=None):\n if num_thresholds > 127: # weird, value > 127 breaks protobuf\n num_thresholds = 127\n data = np.stack((tp, fp, tn, fn, precision, recall))\n pr_curve_plugin_data = PrCurvePluginData(\n version=0, num_thresholds=num_thresholds).SerializeToString()\n PluginData = [SummaryMetadata.PluginData(\n plugin_name='pr_curves', content=pr_curve_plugin_data)]\n smd = SummaryMetadata(plugin_data=PluginData)\n tensor = TensorProto(dtype='DT_FLOAT',\n float_val=data.reshape(-1).tolist(),\n tensor_shape=TensorShapeProto(\n dim=[TensorShapeProto.Dim(size=data.shape[0]), TensorShapeProto.Dim(size=data.shape[1])]))\n return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])\n\n\ndef pr_curve(tag, labels, predictions, num_thresholds=127, weights=None):\n # weird, value > 127 breaks protobuf\n num_thresholds = min(num_thresholds, 127)\n data = compute_curve(labels, predictions,\n num_thresholds=num_thresholds, weights=weights)\n pr_curve_plugin_data = PrCurvePluginData(\n version=0, num_thresholds=num_thresholds).SerializeToString()\n PluginData = [SummaryMetadata.PluginData(\n plugin_name='pr_curves', content=pr_curve_plugin_data)]\n smd = SummaryMetadata(plugin_data=PluginData)\n tensor = TensorProto(dtype='DT_FLOAT',\n 
float_val=data.reshape(-1).tolist(),\n tensor_shape=TensorShapeProto(\n dim=[TensorShapeProto.Dim(size=data.shape[0]), TensorShapeProto.Dim(size=data.shape[1])]))\n return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])\n\n\n# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py\ndef compute_curve(labels, predictions, num_thresholds=None, weights=None):\n _MINIMUM_COUNT = 1e-7\n\n if weights is None:\n weights = 1.0\n\n # Compute bins of true positives and false positives.\n bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))\n float_labels = labels.astype(np.float)\n histogram_range = (0, num_thresholds - 1)\n tp_buckets, _ = np.histogram(\n bucket_indices,\n bins=num_thresholds,\n range=histogram_range,\n weights=float_labels * weights)\n fp_buckets, _ = np.histogram(\n bucket_indices,\n bins=num_thresholds,\n range=histogram_range,\n weights=(1.0 - float_labels) * weights)\n\n # Obtain the reverse cumulative sum.\n tp = np.cumsum(tp_buckets[::-1])[::-1]\n fp = np.cumsum(fp_buckets[::-1])[::-1]\n tn = fp[0] - fp\n fn = tp[0] - tp\n precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)\n recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)\n return np.stack((tp, fp, tn, fn, precision, recall))\n" ]
[ [ "numpy.maximum", "numpy.greater", "numpy.pad", "numpy.cumsum", "numpy.stack", "numpy.concatenate", "numpy.ceil", "numpy.searchsorted", "numpy.floor", "numpy.histogram", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
v-morello/iqrm
[ "9f90e43c003af5d248d08b65d52fc88a73e12bbd", "9f90e43c003af5d248d08b65d52fc88a73e12bbd" ]
[ "iqrm/core.py", "iqrm/tests/test_iqrm.py" ]
[ "import itertools\nimport numpy as np\n\nfrom collections import defaultdict\n\n\ndef lagged_diff(x, k):\n \"\"\"\n Returns the sequence of x[i] - x[i - k], as an array with the same size as x.\n Boundary conditions are handled as follows:\n x[i] = x[0] if i < 0\n x[i] = x[n-1] if i >= n, where n = len(x)\n \"\"\"\n s = np.roll(x, k)\n if k >= 0:\n s[:k] = x[0]\n else:\n s[k:] = x[-1] # NOTE: lag is negative here\n return x - s\n\n\ndef outlier_mask(x, threshold=3.0):\n \"\"\"\n Returns an outlier mask for array x, based on Tukey's rule and assuming that the inlier\n distribution of x (the distribution of 'good' values) is Gaussian. 'threshold' represents a\n number of Gaussian sigmas.\n \"\"\"\n q1, med, q3 = np.percentile(x, [25, 50, 75])\n std = (q3 - q1) / 1.349\n return (x - med) > threshold * std\n\n\ndef genlags(radius, geofactor=1.5):\n lag = 1\n while lag <= radius:\n yield lag\n yield -lag\n lag = max(int(geofactor * lag), lag + 1)\n\n\ndef iqrm_mask(x, radius=5, threshold=3.0):\n \"\"\"\n Compute the IQRM mask for one-dimensional input data x.\n The input 'x' is expected to represent a per-channel statistic that measures RFI contamination\n in a block of time-frequency data. Any statistic can be used, but an important requirement is\n that larger values must indicate higher levels of RFI contamination.\n\n Parameters\n ----------\n x : list or ndarray\n Input data (1-dimensional)\n radius : int, optional\n Radius in number of elements. If a float is passed, it is truncated. A recommended value\n is 10% of the number of frequency channels\n threshold : float, optional\n Flagging threshold in number of Gaussian sigmas\n\n Returns\n -------\n mask : ndarray\n Boolean mask with the same size as the input 'x', where 'True' denotes an outlier\n votes_cast : dict of sets\n Dictionary of sets, where the keys are input array indices i that cast at least one vote,\n and the values are the set of array indices that received a vote from i.\n \"\"\"\n x = np.asarray(x)\n n = len(x)\n radius = int(radius)\n\n if not radius > 0:\n raise ValueError(\"radius must be > 0\")\n\n threshold = float(threshold)\n if not threshold > 0:\n raise ValueError(\"threshold must be > 0\")\n\n # These data structures both represent a directed graph\n # votes_cast[i] contains the recipients of votes cast by i\n # votes_received[i] contains the casters of votes received by i\n votes_cast = defaultdict(set)\n votes_received = defaultdict(set)\n\n for lag in genlags(radius):\n d = lagged_diff(x, lag)\n m = outlier_mask(d, threshold)\n\n # m[i] = True <=> point j = i - lag cast a vote on i\n # <=> point i received a vote from j = i - lag\n I = np.where(m)[0]\n J = np.clip(I - lag, 0, n - 1)\n\n for i, j in zip(I, J):\n votes_cast[j].add(i)\n votes_received[i].add(j)\n\n mask = np.zeros_like(x, dtype=bool)\n \n # i gets masked by j if both the following conditions are True:\n # 1) j has cast a vote on i\n # 2) j has cast strictly less votes in total than i has received in total\n for i, casters in votes_received.items():\n for j in casters:\n if j in votes_cast and len(votes_cast[j]) < len(votes_received[i]):\n mask[i] = True\n break\n\n return mask, dict(votes_cast)\n", "import numpy as np\nfrom pytest import raises\n\nfrom iqrm import iqrm_mask\n\n\ndef generate_noise(nchan=1024, seed=0):\n # IMPORTANT: set the random seed for reproducible results\n np.random.seed(seed)\n return np.random.normal(size=nchan)\n\n\ndef generate_noise_with_outlier_range(start, end, nchan=1024, seed=0):\n s = 
generate_noise(nchan=nchan, seed=seed)\n s[start:end] += 100\n return s\n\n\ndef test_param_checks():\n nchan = 1024\n s = np.zeros(nchan)\n\n with raises(ValueError): # radius must be > 0\n iqrm_mask(s, radius=0)\n\n with raises(ValueError): # threshold must be > 0\n iqrm_mask(s, threshold=0)\n\n\ndef test_masking_noise():\n s = generate_noise()\n\n for radius in range(1, 6):\n mask, __ = iqrm_mask(s, radius=radius, threshold=4.0)\n assert np.alltrue(~mask)\n\n\ndef test_masking_single_outlier():\n nchan = 1024\n indices = [0, 1, 42, 213, 740, 1022, 1023]\n\n for index in indices:\n # NOTE: when using radius = 1, if the either the first or last element are the sole \n # outlier, they won't be detected (the single vote they receive is not valid). \n # We thus start at radius=2.\n for radius in (2, 3, 4, 6, 9):\n s = generate_noise_with_outlier_range(index, index+1, nchan=nchan)\n mask, __ = iqrm_mask(s, radius=radius, threshold=4.0)\n assert mask[index] == True\n\n\ndef test_masking_outlier_range():\n # The idea here is to generate data that looks like a top hat, i.e. noise plus a contiguous\n # range of high ouliers with similar values.\n\n # If the edges of the top-hat lie \"far away\" from the edges of the input array, then we expect\n # all outliers to be masked as long as:\n # max trial lag value > width\n\n # NOTE: for a top-hat that lies at the edge of the input array, the situation is different,\n # and the radius required to mask all outliers is such that:\n # max trial lag value > 2*width\n\n nchan = 1024\n indices = [67, 213, 486, 740, 959]\n trial_lag_sequence = (1, 2, 3, 4, 6, 9, 13)\n\n for index in indices:\n for jj, width in enumerate(trial_lag_sequence[:-1]):\n s = generate_noise_with_outlier_range(index, index+width, nchan=nchan)\n radius = trial_lag_sequence[jj+1]\n mask, __ = iqrm_mask(s, radius=radius, threshold=4.0)\n assert np.alltrue(mask[index:index+width])" ]
[ [ "numpy.clip", "numpy.asarray", "numpy.percentile", "numpy.zeros_like", "numpy.where", "numpy.roll" ], [ "numpy.random.normal", "numpy.alltrue", "numpy.zeros", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
giacomov/pyggop
[ "81f0307281d00e367ab632fa52de41a56877b048" ]
[ "pyggop/Tau.py" ]
[ "import numpy as np\nfrom multiprocessing import Pool\n\nfrom grbod import *\n\nimport os, sys\nimport pickle\n\nimport scipy.interpolate\nfrom math import log10\n\nimport matplotlib.pyplot as plt\n\nfrom pyggop.ParallelPool import ParallelPool\n\n#This is the actual computation\ndef func(DRbar, R_0, b, m, a, xx, yy):\n \n R0_hat = 1.0 / ( 1.0 + 10**xx ) # = R_0/R_t0\n \n DR_hat = DRbar * R0_hat / R_0\n \n x = ( 10**yy )**2\n \n tau = tau_integral(x, R0_hat, DR_hat, \n b, m, a,\n reg={'Re':0,'Rt':1.e-4},\n verbose=True)\n return tau\n\ndef data_stream(DRbar, R_0, b, m, a, XX, YY):\n for idx, xx in np.ndenumerate(XX):\n yield idx, (DRbar, R_0, b, m, a, xx, YY[idx])\n\ndef proxy(args):\n return args[0], func(*args[1])\n\nclass Tau( object ):\n \n def __init__(self, m, b, a, DRbar, R_0=1.0, tau_star=1.0 ):\n \n self.m = float(m)\n self.b = float(b)\n self.a = float(a)\n self.DRbar = float( DRbar )\n self.R_0 = R_0\n self.tau_star = tau_star\n \n self.name = self._getUniqueName()\n \n self.loadLookupTable()\n \n def _getUniqueName(self):\n \n return \"%s-%s-%.2f-%.2g-%s\" % (self.m, self.b, self.a, \n self.DRbar, self.tau_star)\n \n def loadLookupTable(self):\n \n if not os.path.exists( '%s.pkl' % self.name ):\n \n #Lookup table does not exists. Create it\n \n self.computeLookupTable()\n \n #Load the lookup table\n results = pickle.load(open('%s.pkl'% self.name,'r')) \n \n \n #Get the extremes for the interpolation\n \n self.minX,self.maxX = results['X'].min(),results['X'].max()\n self.minY,self.maxY = results['Y'].min(),results['Y'].max()\n \n #print results['comment']\n \n self.tau_interp2 = scipy.interpolate.interp2d( results['X'],\n results['Y'],\n results['Z'].transpose(),\n bounds_error=True)\n \n def compute(self, XX, YY):\n \n result = np.zeros(shape=(XX.shape))\n \n pool = ParallelPool( )\n \n pool_res = pool.map(proxy, data_stream(self.DRbar, self.R_0, \n self.b, self.m, self.a,\n XX, YY))\n \n pool.close()\n \n for k,v in pool_res:\n \n result[k] = v\n \n return result\n \n def computeLookupTable( self, plot=False ):\n \n X = np.linspace(-11, 3, 50)#log10(R_t0/R0-1)\n \n Y = np.concatenate((np.arange(-6, -4,1 / 3.0),\n np.arange(-4, -1,1/ 3.0), \n np.arange(-1, -0.04, 0.1/ 8.0),\n np.arange(-0.04, 0.08, 0.01 / 8.0), \n np.arange(0.08, 0.9, 0.1/ 4.0),\n np.arange(1, 2.2,0.2/ 3.0)))\n \n #Y = np.concatenate((np.arange(-4, -1,1 / 2.0), \n # np.arange(-1, -0.04, 0.1 / 2.0),\n # np.arange(-0.04, 0.08, 0.02 / 2.0 ), \n # np.arange(0.08, 0.9, 0.1 / 2.0),\n # np.arange(1, 2.2,0.2 / 2.0)))\n \n XX, YY = np.meshgrid(X, Y, indexing='ij', sparse=False, copy=True)\n \n Z = self.compute(XX, YY)\n \n idx = np.isfinite(Z)\n \n Z[~idx] = 1e-30\n \n idx = (Z <= 0)\n Z[idx] = 1e-30\n \n print(\"Zmin = %s, Zmax = %s\" %(Z.min(),Z.max()))\n \n if plot:\n \n plt.figure(1)\n plt.contourf(X, Y, np.log10(Z.transpose()), 20)\n plt.colorbar()\n plt.savefig(\"interpolation_data.png\")\n \n final = {'X':X, 'Y':Y, 'Z':np.log10(Z)}\n final['comment'] = \"X = log10(R_t0/R0-1)\\nY = log10(gth_t0) = log10(sqrt(x))\\nZ = log10(tau_integral)\"\n \n pickle.dump(final, open( '%s.pkl' % self.name, 'w' ))\n \n def __call__(self, X, Y):\n \n try:\n \n val = self.tau_interp2( X, Y )\n #val = log10( func(self.DRbar, self.R_0, self.b, self.m, self.a, X, Y) )\n \n except:\n \n msg = (\"Request (X,Y) = (%s, %s) could not be satistfied. 
\" %(X,Y))\n msg += (\"Interpolation range is %s < X < %s , %s < Y < %s\" %(self.minX,self.maxX,self.minY,self.maxY))\n \n sys.stderr.write(msg)\n sys.stderr.write(\"\\n\")\n \n raise ValueError(msg)\n \n return val\n \n" ]
[ [ "numpy.isfinite", "numpy.linspace", "numpy.arange", "matplotlib.pyplot.savefig", "matplotlib.pyplot.colorbar", "numpy.log10", "numpy.ndenumerate", "numpy.meshgrid", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
schlevik/EGG
[ "428d5aed3eb6fb0296f6856fb77b0a1cdceb33f1" ]
[ "egg/zoo/basic_games/play.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nimport egg.core as core\nfrom egg.core import Callback, Interaction, PrintValidationEvents\nfrom egg.zoo.basic_games.architectures import DiscriReceiver, RecoReceiver, Sender\nfrom egg.zoo.basic_games.data_readers import AttValDiscriDataset, AttValRecoDataset\n\n\n# the following section specifies parameters that are specific to our games: we will also inherit the\n# standard EGG parameters from https://github.com/facebookresearch/EGG/blob/master/egg/core/util.py\ndef get_params(params):\n parser = argparse.ArgumentParser()\n # arguments controlling the game type\n parser.add_argument(\n \"--game_type\",\n type=str,\n default=\"reco\",\n help=\"Selects whether to play a reco(nstruction) or discri(mination) game (default: reco)\",\n )\n # arguments concerning the input data and how they are processed\n parser.add_argument(\n \"--train_data\", type=str, default=None, help=\"Path to the train data\"\n )\n parser.add_argument(\n \"--validation_data\", type=str, default=None, help=\"Path to the validation data\"\n )\n # (the following is only used in the reco game)\n parser.add_argument(\n \"--n_attributes\",\n type=int,\n default=None,\n help=\"Number of attributes in Sender input (must match data set, and it is only used in reco game)\",\n )\n parser.add_argument(\n \"--n_values\",\n type=int,\n default=None,\n help=\"Number of values for each attribute (must match data set)\",\n )\n parser.add_argument(\n \"--validation_batch_size\",\n type=int,\n default=0,\n help=\"Batch size when processing validation data, whereas training data batch_size is controlled by batch_size (default: same as training data batch size)\",\n )\n # arguments concerning the training method\n parser.add_argument(\n \"--mode\",\n type=str,\n default=\"rf\",\n help=\"Selects whether Reinforce or Gumbel-Softmax relaxation is used for training {rf, gs} (default: rf)\",\n )\n parser.add_argument(\n \"--temperature\",\n type=float,\n default=1.0,\n help=\"GS temperature for the sender, only relevant in Gumbel-Softmax (gs) mode (default: 1.0)\",\n )\n parser.add_argument(\n \"--sender_entropy_coeff\",\n type=float,\n default=1e-1,\n help=\"Reinforce entropy regularization coefficient for Sender, only relevant in Reinforce (rf) mode (default: 1e-1)\",\n )\n # arguments concerning the agent architectures\n parser.add_argument(\n \"--sender_cell\",\n type=str,\n default=\"rnn\",\n help=\"Type of the cell used for Sender {rnn, gru, lstm} (default: rnn)\",\n )\n parser.add_argument(\n \"--receiver_cell\",\n type=str,\n default=\"rnn\",\n help=\"Type of the cell used for Receiver {rnn, gru, lstm} (default: rnn)\",\n )\n parser.add_argument(\n \"--sender_hidden\",\n type=int,\n default=10,\n help=\"Size of the hidden layer of Sender (default: 10)\",\n )\n parser.add_argument(\n \"--receiver_hidden\",\n type=int,\n default=10,\n help=\"Size of the hidden layer of Receiver (default: 10)\",\n )\n parser.add_argument(\n \"--sender_embedding\",\n type=int,\n default=10,\n help=\"Output dimensionality of the layer that embeds symbols produced at previous step in Sender (default: 10)\",\n )\n parser.add_argument(\n \"--receiver_embedding\",\n type=int,\n default=10,\n help=\"Output dimensionality of the layer that embeds the 
message symbols for Receiver (default: 10)\",\n )\n # arguments controlling the script output\n parser.add_argument(\n \"--print_validation_events\",\n default=False,\n action=\"store_true\",\n help=\"If this flag is passed, at the end of training the script prints the input validation data, the corresponding messages produced by the Sender, and the output probabilities produced by the Receiver (default: do not print)\",\n )\n args = core.init(parser, params)\n return args\n\n\ndef main(params):\n opts = get_params(params)\n if opts.validation_batch_size == 0:\n opts.validation_batch_size = opts.batch_size\n print(opts, flush=True)\n\n # the following if statement controls aspects specific to the two game tasks: loss, input data and architecture of the Receiver\n # (the Sender is identical in both cases, mapping a single input attribute-value vector to a variable-length message)\n if opts.game_type == \"discri\":\n # the game object we will encounter below takes as one of its mandatory arguments a loss: a loss in EGG is expected to take as arguments the sender input,\n # the message, the Receiver input, the Receiver output and the labels (although some of these elements might not actually be used by a particular loss);\n # together with the actual loss computation, the loss function can return a dictionary with other auxiliary statistics: in this case, accuracy\n def loss(\n _sender_input,\n _message,\n _receiver_input,\n receiver_output,\n labels,\n _aux_input,\n ):\n # in the discriminative case, accuracy is computed by comparing the index with highest score in Receiver output (a distribution of unnormalized\n # probabilities over target poisitions) and the corresponding label read from input, indicating the ground-truth position of the target\n acc = (receiver_output.argmax(dim=1) == labels).detach().float()\n # similarly, the loss computes cross-entropy between the Receiver-produced target-position probability distribution and the labels\n loss = F.cross_entropy(receiver_output, labels, reduction=\"none\")\n return loss, {\"acc\": acc}\n\n # the input data are read into DataLodaer objects, which are pytorch constructs implementing standard data processing functionalities, such as shuffling\n # and batching\n # within our games, we implement dataset classes, such as AttValDiscriDataset, to read the input text files and convert the information they contain\n # into the form required by DataLoader\n # look at the definition of the AttValDiscrDataset (the class to read discrimination game data) in data_readers.py for further details\n # note that, for the training dataset, we first instantiate the AttValDiscriDataset object and then feed it to DataLoader, whereas for the\n # validation data (confusingly called \"test\" data due to code heritage inertia) we directly declare the AttValDiscriDataset when instantiating\n # DataLoader: the reason for this difference is that we need the train_ds object to retrieve the number of features of the input vectors\n train_ds = AttValDiscriDataset(path=opts.train_data, n_values=opts.n_values)\n train_loader = DataLoader(\n train_ds, batch_size=opts.batch_size, shuffle=True, num_workers=1\n )\n test_loader = DataLoader(\n AttValDiscriDataset(path=opts.validation_data, n_values=opts.n_values),\n batch_size=opts.validation_batch_size,\n shuffle=False,\n num_workers=1,\n )\n # note that the number of features retrieved here concerns inputs after they are converted to 1-hot vectors\n n_features = train_ds.get_n_features()\n # we define here the core of the 
Receiver for the discriminative game, see the architectures.py file for details:\n # note that this will be embedded in a wrapper below to define the full agent\n receiver = DiscriReceiver(n_features=n_features, n_hidden=opts.receiver_hidden)\n\n else: # reco game\n\n def loss(\n sender_input, _message, _receiver_input, receiver_output, labels, _aux_input\n ):\n # in the case of the recognition game, for each attribute we compute a different cross-entropy score\n # based on comparing the probability distribution produced by the Receiver over the values of each attribute\n # with the corresponding ground truth, and then averaging across attributes\n # accuracy is instead computed by considering as a hit only cases where, for each attribute, the Receiver\n # assigned the largest probability to the correct value\n # most of this function consists of the usual pytorch madness needed to reshape tensors in order to perform these computations\n n_attributes = opts.n_attributes\n n_values = opts.n_values\n batch_size = sender_input.size(0)\n receiver_output = receiver_output.view(batch_size * n_attributes, n_values)\n receiver_guesses = receiver_output.argmax(dim=1)\n correct_samples = (\n (receiver_guesses == labels.view(-1))\n .view(batch_size, n_attributes)\n .detach()\n )\n acc = (torch.sum(correct_samples, dim=-1) == n_attributes).float()\n labels = labels.view(batch_size * n_attributes)\n loss = F.cross_entropy(receiver_output, labels, reduction=\"none\")\n loss = loss.view(batch_size, -1).mean(dim=1)\n return loss, {\"acc\": acc}\n\n # again, see data_readers.py in this directory for the AttValRecoDataset data reading class\n train_loader = DataLoader(\n AttValRecoDataset(\n path=opts.train_data,\n n_attributes=opts.n_attributes,\n n_values=opts.n_values,\n ),\n batch_size=opts.batch_size,\n shuffle=True,\n num_workers=1,\n )\n test_loader = DataLoader(\n AttValRecoDataset(\n path=opts.validation_data,\n n_attributes=opts.n_attributes,\n n_values=opts.n_values,\n ),\n batch_size=opts.validation_batch_size,\n shuffle=False,\n num_workers=1,\n )\n # the number of features for the Receiver (input) and the Sender (output) is given by n_attributes*n_values because\n # they are fed/produce 1-hot representations of the input vectors\n n_features = opts.n_attributes * opts.n_values\n # we define here the core of the receiver for the discriminative game, see the architectures.py file for details\n # this will be embedded in a wrapper below to define the full architecture\n receiver = RecoReceiver(n_features=n_features, n_hidden=opts.receiver_hidden)\n\n # we are now outside the block that defined game-type-specific aspects of the games: note that the core Sender architecture\n # (see architectures.py for details) is shared by the two games (it maps an input vector to a hidden layer that will be use to initialize\n # the message-producing RNN): this will also be embedded in a wrapper below to define the full architecture\n sender = Sender(n_hidden=opts.sender_hidden, n_features=n_features)\n\n # now, we instantiate the full sender and receiver architectures, and connect them and the loss into a game object\n # the implementation differs slightly depending on whether communication is optimized via Gumbel-Softmax ('gs') or Reinforce ('rf', default)\n if opts.mode.lower() == \"gs\":\n # in the following lines, we embed the Sender and Receiver architectures into standard EGG wrappers that are appropriate for Gumbel-Softmax optimization\n # the Sender wrapper takes the hidden layer produced by the core 
agent architecture we defined above when processing input, and uses it to initialize\n # the RNN that generates the message\n sender = core.RnnSenderGS(\n sender,\n vocab_size=opts.vocab_size,\n embed_dim=opts.sender_embedding,\n hidden_size=opts.sender_hidden,\n cell=opts.sender_cell,\n max_len=opts.max_len,\n temperature=opts.temperature,\n )\n # the Receiver wrapper takes the symbol produced by the Sender at each step (more precisely, in Gumbel-Softmax mode, a function of the overall probability\n # of non-eos symbols upt to the step is used), maps it to a hidden layer through a RNN, and feeds this hidden layer to the\n # core Receiver architecture we defined above (possibly with other Receiver input, as determined by the core architecture) to generate the output\n receiver = core.RnnReceiverGS(\n receiver,\n vocab_size=opts.vocab_size,\n embed_dim=opts.receiver_embedding,\n hidden_size=opts.receiver_hidden,\n cell=opts.receiver_cell,\n )\n game = core.SenderReceiverRnnGS(sender, receiver, loss)\n # callback functions can be passed to the trainer object (see below) to operate at certain steps of training and validation\n # for example, the TemperatureUpdater (defined in callbacks.py in the core directory) will update the Gumbel-Softmax temperature hyperparameter\n # after each epoch\n callbacks = [core.TemperatureUpdater(agent=sender, decay=0.9, minimum=0.1)]\n else: # NB: any other string than gs will lead to rf training!\n # here, the interesting thing to note is that we use the same core architectures we defined above, but now we embed them in wrappers that are suited to\n # Reinforce-based optmization\n sender = core.RnnSenderReinforce(\n sender,\n vocab_size=opts.vocab_size,\n embed_dim=opts.sender_embedding,\n hidden_size=opts.sender_hidden,\n cell=opts.sender_cell,\n max_len=opts.max_len,\n )\n receiver = core.RnnReceiverDeterministic(\n receiver,\n vocab_size=opts.vocab_size,\n embed_dim=opts.receiver_embedding,\n hidden_size=opts.receiver_hidden,\n cell=opts.receiver_cell,\n )\n game = core.SenderReceiverRnnReinforce(\n sender,\n receiver,\n loss,\n sender_entropy_coeff=opts.sender_entropy_coeff,\n receiver_entropy_coeff=0,\n )\n callbacks = []\n\n # we are almost ready to train: we define here an optimizer calling standard pytorch functionality\n optimizer = core.build_optimizer(game.parameters())\n # in the following statement, we finally instantiate the trainer object with all the components we defined (the game, the optimizer, the data\n # and the callbacks)\n if opts.print_validation_events == True:\n # we add a callback that will print loss and accuracy after each training and validation pass (see ConsoleLogger in callbacks.py in core directory)\n # if requested by the user, we will also print a detailed log of the validation pass after full training: look at PrintValidationEvents in\n # language_analysis.py (core directory)\n trainer = core.Trainer(\n game=game,\n optimizer=optimizer,\n train_data=train_loader,\n validation_data=test_loader,\n callbacks=callbacks\n + [\n core.ConsoleLogger(print_train_loss=True, as_json=True),\n core.PrintValidationEvents(n_epochs=opts.n_epochs),\n ],\n )\n else:\n trainer = core.Trainer(\n game=game,\n optimizer=optimizer,\n train_data=train_loader,\n validation_data=test_loader,\n callbacks=callbacks\n + [core.ConsoleLogger(print_train_loss=True, as_json=True)],\n )\n\n # and finally we train!\n trainer.train(n_epochs=opts.n_epochs)\n\n\nif __name__ == \"__main__\":\n import sys\n\n main(sys.argv[1:])\n" ]
[ [ "torch.sum", "torch.nn.functional.cross_entropy", "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kasimte/Pytorch-pensieve
[ "e550d9c36ed35b49592955fefbc17d1424d37b3b", "e550d9c36ed35b49592955fefbc17d1424d37b3b" ]
[ "test/model.py", "train/utils.py" ]
[ "from __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils import norm_col_init, weights_init\n\n# class agentNET(torch.nn.Module):\n# def __init__(self, num_inputs = 1, num_outputs = 6):\n# super(agentNET, self).__init__()\n#\n# self.conv1 = nn.Conv1d(num_inputs, 16, 3, stride=1, padding=1)\n# self.conv2 = nn.Conv1d(16, 16, 2, stride=1)\n# self.conv3 = nn.Conv1d(16, 8, 2, stride=1)\n#\n# self.lstm = nn.LSTMCell(32, 20)\n# self.fc1 = nn.Linear(20, 10)\n#\n# self.critic_linear = nn.Linear(10, 1)\n# self.actor_linear = nn.Linear(10, num_outputs)\n#\n# self.apply(weights_init)\n# self.actor_linear.weight.data = norm_col_init(\n# self.actor_linear.weight.data, 0.01)\n# self.actor_linear.bias.data.fill_(0)\n#\n# self.critic_linear.weight.data = norm_col_init(\n# self.critic_linear.weight.data, 1.0)\n# self.critic_linear.bias.data.fill_(0)\n#\n# self.fc1.weight.data = norm_col_init(\n# self.fc1.weight.data, 1.0)\n# self.fc1.bias.data.fill_(0)\n#\n# self.lstm.bias_ih.data.fill_(0)\n# self.lstm.bias_hh.data.fill_(0)\n#\n# self.train()\n#\n# def forward(self, inputs):\n# inputs, (hx, cx) = inputs\n# x = F.elu(self.conv1(inputs))\n# x = F.elu(self.conv2(x))\n# x = F.elu(self.conv3(x))\n#\n# x = x.view(x.size(0), -1)\n#\n# hx, cx = self.lstm(x, (hx, cx))\n#\n# x = F.elu(self.fc1(hx))\n#\n# return self.critic_linear(x), self.actor_linear(x), (hx, cx)\n\nclass agentNET(torch.nn.Module):\n def __init__(self, num_inputs = 1, num_outputs = 6, S_LEN = 8):\n super(agentNET, self).__init__()\n\n self.conv1 = nn.Conv2d(num_inputs, 32, (6, 3), stride=1)\n self.conv2 = nn.Conv2d(32, 64, (1, 3), stride=1)\n self.conv3 = nn.Conv2d(64, 128, (1, 2), stride=1)\n\n self.lstm = nn.LSTMCell(128 * (S_LEN - 2 -2 -1), 96)\n self.fc1 = nn.Linear(96, 48)\n self.fc2 = nn.Linear(48, 24)\n\n self.critic_linear = nn.Linear(24, 1)\n self.actor_linear = nn.Linear(24, num_outputs)\n\n self.apply(weights_init)\n self.actor_linear.weight.data = norm_col_init(\n self.actor_linear.weight.data, 0.01)\n self.actor_linear.bias.data.fill_(0)\n\n self.critic_linear.weight.data = norm_col_init(\n self.critic_linear.weight.data, 1.0)\n self.critic_linear.bias.data.fill_(0)\n\n self.fc1.weight.data = norm_col_init(\n self.fc1.weight.data, 1.0)\n self.fc1.bias.data.fill_(0)\n\n self.fc2.weight.data = norm_col_init(\n self.fc2.weight.data, 1.0)\n self.fc2.bias.data.fill_(0)\n\n self.lstm.bias_ih.data.fill_(0)\n self.lstm.bias_hh.data.fill_(0)\n\n self.train()\n\n def forward(self, inputs):\n inputs, (hx, cx) = inputs\n x = F.relu(self.conv1(inputs))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n\n x = x.view(x.size(0), -1)\n\n hx, cx = self.lstm(x, (hx, cx))\n\n x = F.relu(self.fc1(hx))\n x = F.relu(self.fc2(x))\n\n return self.critic_linear(x), self.actor_linear(x), (hx, cx)", "from __future__ import division\nimport numpy as np\nimport torch\nimport json\nimport logging\n\n\ndef setup_logger(logger_name, log_file, level=logging.INFO):\n l = logging.getLogger(logger_name)\n formatter = logging.Formatter('%(asctime)s : %(message)s')\n fileHandler = logging.FileHandler(log_file, mode='w')\n fileHandler.setFormatter(formatter)\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n\n l.setLevel(level)\n l.addHandler(fileHandler)\n l.addHandler(streamHandler)\n\n return logging.getLogger(logger_name)\n\ndef setup_logger_add(logger_name, log_file, level=logging.INFO):\n l = logging.getLogger(logger_name)\n formatter = logging.Formatter('%(asctime)s : 
%(message)s')\n fileHandler = logging.FileHandler(log_file, mode='a')\n fileHandler.setFormatter(formatter)\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n\n l.setLevel(level)\n l.addHandler(fileHandler)\n l.addHandler(streamHandler)\n\n return logging.getLogger(logger_name)\n\ndef read_config(file_path):\n \"\"\"Read JSON config.\"\"\"\n json_object = json.load(open(file_path, 'r'))\n return json_object\n\n\ndef norm_col_init(weights, std=1.0):\n x = torch.randn(weights.size())\n x *= std / torch.sqrt((x**2).sum(1, keepdim=True))\n return x\n\n\ndef ensure_shared_grads(model, shared_model):\n for param, shared_param in zip(model.parameters(),\n shared_model.parameters()):\n if shared_param.grad is not None:\n return\n shared_param._grad = param.grad\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n weight_shape = list(m.weight.data.size())\n fan_in = np.prod(weight_shape[1:4])\n fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]\n w_bound = np.sqrt(6. / (fan_in + fan_out))\n m.weight.data.uniform_(-w_bound, w_bound)\n m.bias.data.fill_(0)\n elif classname.find('Linear') != -1:\n weight_shape = list(m.weight.data.size())\n fan_in = weight_shape[1]\n fan_out = weight_shape[0]\n w_bound = np.sqrt(6. / (fan_in + fan_out))\n m.weight.data.uniform_(-w_bound, w_bound)\n m.bias.data.fill_(0)\n\ndef reverse(state, WIDTH, HEIGHT):\n tmp = [[[0 for col in range(HEIGHT)] for row in range(WIDTH)] for channel in range(3)]\n for k in range(3):\n for i in range(WIDTH):\n for j in range(HEIGHT):\n tmp[k][i][j] = state[i][j][k]\n return tmp" ]
[ [ "torch.nn.LSTMCell", "torch.nn.Linear", "torch.nn.Conv2d" ], [ "numpy.sqrt", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nla-group/classix
[ "8584162e4c89ba05a62faf1e20104768cf5bb43c" ]
[ "exp/run_scale_lx.py" ]
[ "import sklearn.datasets as data\nfrom classix import CLASSIX\nimport matplotlib.pyplot as plt\n\ndef rn_scale_explore():\n plt.style.use('bmh')\n\n TOL = 0.1 \n random_state = 1\n moons, _ = data.make_moons(n_samples=1000, noise=0.05, random_state=random_state)\n blobs, _ = data.make_blobs(n_samples=1500, centers=[(-0.85,2.75), (1.75,2.25)], cluster_std=0.5, random_state=random_state)\n X = np.vstack([blobs, moons])\n\n for scale in np.arange(1, 3.3, 0.1):\n clx = CLASSIX(sorting='pca', radius=TOL, group_merging='distance', verbose=0)\n clx.fit_transform(X)\n clx.visualize_linkage(scale=scale, figsize=(8,8), labelsize=24, path='img')\n\n\n for tol in np.arange(0.1, 1.3, 0.1):\n clx = CLASSIX(sorting='pca', radius=tol, group_merging='distance', verbose=0)\n clx.fit_transform(X)\n clx.visualize_linkage(scale=1.5, figsize=(8,8), labelsize=24, plot_boundary=True, path='img')\n \n " ]
[ [ "sklearn.datasets.make_moons", "matplotlib.pyplot.style.use", "sklearn.datasets.make_blobs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RSLancs/Extracting_plant_names_and_collocates_from_historical_texts
[ "c8d6746978786ca4b83dc550114700530077c543" ]
[ "merge_geo_with collocates.py" ]
[ "##python27\r\n\r\nfrom pprint import pprint\r\nimport pandas as pd\r\n\r\n\r\n##..............open manually merged geoparsed results\r\ngeo = pd.read_csv('./data/merger_xml_extracted_geoparsed_collocates.csv')\r\ngeo = [tuple(x) for x in geo.values] # df to list\r\nprint(geo[1])\r\n\r\n\r\n##..........open collocate results....\r\ncollocate = pd.read_csv('./data/INDEXED_no_overlaps-abrev-dups_collocate_results_15.01.19.csv')\r\ncollocate = [tuple(x) for x in collocate.values] # df to list\r\nprint(collocate[1])\r\n\r\n\r\n#............merge results........................\r\n\r\nmerged = []\r\nfor ig in geo:\r\n\tfor ic in collocate:\r\n\t\tif ig[0] == ic[0]:\r\n\t\t\tmerged.append([ic[0],ic[2],ic[3],ic[4],ic[5],ic[6],ic[7],ig[0],ig[3],ig[5],ig[6]])\r\n\r\nmy_df = pd.DataFrame(merged) # transform result list to dataframe\r\n\r\nmy_df.columns = ['para_index',\r\n\t\t\t\t\t'text',\r\n\t\t\t\t\t'year',\r\n\t\t\t\t\t'spc_acc',\r\n\t\t\t\t\t'spc_syn',\r\n\t\t\t\t\t'find_index',\r\n\t\t\t\t\t'window', \r\n\t\t\t\t\t'geo_para_index',\r\n\t\t\t\t\t'standoff_loc_word',\r\n\t\t\t\t\t'lat',\r\n\t\t\t\t\t'lon' ] # add column labels\r\n\r\na = my_df.to_csv('./data/geo_locations_collocate_merger.csv')" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
ishine/tf-kaldi-speaker-master
[ "4b93110c4aa54f4764c58d9ffef3aec2efce39db", "4b93110c4aa54f4764c58d9ffef3aec2efce39db", "4b93110c4aa54f4764c58d9ffef3aec2efce39db" ]
[ "egs/voxceleb/sre/backend/tools/s-norm-get-enroll.py", "model/trainer_softmax_ftdnn.py", "egs/voxceleb/v1/nnet/lib/train_multitask2.py" ]
[ "import os\r\nimport numpy as np\r\nimport logging\r\nimport argparse\r\nimport sys\r\n\r\nlogger = logging.getLogger('s-norm score.')\r\nlogger.setLevel(logging.INFO)\r\nhandler = logging.StreamHandler(sys.stdout)\r\nhandler.setLevel(logging.INFO)\r\nformatter = logging.Formatter(\"%(asctime)s [%(pathname)s:%(lineno)s - \"\r\n \"%(funcName)s - %(levelname)s ] %(message)s\")\r\nhandler.setFormatter(formatter)\r\nlogger.addHandler(handler)\r\nlogger.info('Starting s-norm')\r\n\r\ndef get_args():\r\n \"\"\"\r\n get args from stdin.\r\n \"\"\"\r\n\r\n parser = argparse.ArgumentParser(description='snorm score.', formatter_class=argparse.ArgumentDefaultsHelpFormatter,\r\n conflict_handler='resolve')\r\n\r\n parser.add_argument('--score-file', dest='score_file', type=str, help='score file')\r\n parser.add_argument('--enroll-file', dest='enroll_file', type=str, help='score file')\r\n\r\n args = parser.parse_args()\r\n\r\n return args\r\n\r\ndef write_snorm(enroll_file, model_line, means_l, stds_l):\r\n f_snorm = open(enroll_file, 'w')\r\n \r\n len_model = len(model_line)\r\n \r\n \r\n for index in range(len_model):\r\n f_snorm.write(model_line[index] + ' ')\r\n f_snorm.write(str(means_l[index]))\r\n f_snorm.write(' ')\r\n f_snorm.write(str(stds_l[index]))\r\n f_snorm.write('\\n')\r\n \r\n f_snorm.close()\r\n\r\n\r\ndef snorm(args):\r\n score_file = args.score_file\r\n enroll_file = args.enroll_file\r\n\r\n f_score = open(score_file, 'r')\r\n score_lines = f_score.readlines()\r\n\r\n score_lines = [line.strip() for line in score_lines]\r\n score_lines = [line.split('|') for line in score_lines if line != '']\r\n\r\n model_line = score_lines[0].copy()\r\n del model_line[0]\r\n scores = np.delete(score_lines, 0, axis=0)\r\n test_line = [var[0] for var in scores]\r\n scores = np.delete(scores, 0, axis=1)\r\n\r\n leng, wid = scores.shape\r\n\r\n scores = [float(score) for ss in scores for score in ss]\r\n scores = np.array(scores)\r\n scores.shape = leng, wid\r\n\r\n \r\n\r\n snorm_scores = np.zeros((leng, wid))\r\n\r\n means_w = np.zeros(wid)\r\n stds_w = np.zeros(wid)\r\n\r\n\r\n for ww in range(wid):\r\n score_ww = scores[:,ww].copy()\r\n score_ww.sort()\r\n for i in range(leng):\r\n if score_ww[i] != -1000.0:\r\n break\r\n score_ww = score_ww[-int(leng*0.3):]\r\n #print(ww)\r\n means_w[ww] = np.mean(score_ww)\r\n stds_w[ww] = np.std(score_ww, ddof=1)\r\n del score_ww\r\n\r\n write_snorm(enroll_file, model_line, means_w, stds_w)\r\n \r\n f_score.close()\r\n \r\n\r\ndef main():\r\n args = get_args()\r\n snorm(args)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "import tensorflow as tf\nimport os\nimport re\nimport sys\nimport time\nimport numpy as np\nfrom model.common import l2_scaling\n#from model.svd_tdnn import tdnn_svd6\n#from model.tdnn import tdnn\n#from model.svd_tdnn import tdnn_svd6\n#from model.dynamic_tdnn import tdnn_svd\nfrom model.ftdnn import tdnn\nfrom model_src.loss import softmax\nfrom model_src.loss import asoftmax, additive_margin_softmax, additive_angular_margin_softmax\nfrom model_src.loss import semihard_triplet_loss, angular_triplet_loss, e2e_valid_loss, generalized_angular_triplet_loss\nfrom model_src.loss import extract_asoftmax\nfrom dataset.data_loader import KaldiDataRandomQueue, KaldiDataSeqQueue, DataOutOfRange\nfrom misc.utils import substring_in_list, activation_summaries\nfrom six.moves import range\n\n\nclass Trainer(object):\n \"\"\"Handle the training, validation and prediction\n\n Trainer is a simple class that deals with examples having 
feature-label structure.\n TODO: Add different Trainers to deal with feature+aux_feature - label+aux_label structure.\n \"\"\"\n\n def __init__(self, params, model_dir, single_cpu=False):\n \"\"\"\n Args:\n params: Parameters loaded from JSON.\n model_dir: The model directory.\n single_cpu: Run Tensorflow on one cpu. (default = False)\n \"\"\"\n\n # The network configuration is set while the loss is left to the build function.\n # I think we can switch different loss functions during training epochs.\n # Then simple re-build the network can give us a different loss. The main network won't change at that case.\n self.network_type = params.network_type\n if params.network_type == \"tdnn\":\n self.network = tdnn\n # elif params.network_type == \"tdnn_svd6\":\n # self.network = tdnn_svd6\n # elif params.network_type == \"tdnn_svd\":\n # self.network = tdnn_svd\n else:\n raise NotImplementedError(\"Not implement %s network\" % params.network_type)\n self.loss_type = None\n self.loss_network = None\n\n # We have to save all the parameters since the different models may need different parameters\n self.params = params\n\n if single_cpu:\n self.sess_config = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1,\n device_count={'CPU': 1},\n allow_soft_placement=True)\n else:\n self.sess_config = tf.ConfigProto(allow_soft_placement=True)\n self.sess = tf.Session(config=self.sess_config)\n\n # The model is saved in model/nnet and the evaluation result is saved in model/nnet/eval\n self.model = os.path.join(model_dir, \"nnet\")\n\n # The global step. Note that we don't use tf.train.create_global_step because we may extend the code to\n # support adversarial training, in which the global step increases by 1 after `several` updates on the critic\n # and encoder. The internal global_step should be carefully handled in that case. So just a placeholder here,\n # and use a counter to feed in this value is also an option.\n self.global_step = None\n\n # The learning rate is just a placeholder. I use placeholder because it gives me flexibility to dynamically\n # change the learning rate during training.\n self.learning_rate = None\n\n # Summary for the training and validation\n self.train_summary = None\n self.valid_summary = None\n\n # The output predictions. Useful in the prediction mode.\n self.embeddings = None\n self.endpoints = None\n\n # The optimizer used in the training.\n # The total loss is useful if we want to change the gradient or variables to optimize (e.g. in fine-tuning)\n self.optimizer = None\n self.total_loss = None\n\n # Training operation. This is called at each step\n self.train_op = None\n\n # Dicts for training and validation inspection.\n # In the basic condition, the train_ops contains optimization and training loss.\n # And valid loss in the valid_ops. It is possible to add other variables to the dictionaries.\n # Note that the valid loss should be computed from tf.metric.mean, so the valid_ops also has the update ops.\n # In some steps, the train_ops is required to combine with train_summary to get the summary string.\n # These ops are only executed once after several steps (for inspection).\n self.train_ops = {}\n self.valid_ops = {}\n\n # Model saver and summary writers\n # We don't create the saver or writer here, because after reset, they will be unavailable.\n self.saver = None\n self.summary_writer = None\n self.valid_summary_writer = None\n\n # This is an indicator to tell whether the model is built. 
After building the model, we can only use `reuse`\n # to refer to different part of the model.\n self.is_built = False\n self.is_loaded = False\n\n # In train, valid and prediction modes, we need the inputs. If tf.data is used, the input can be a node in\n # the graph. However, we may also use feed_dict mechanism to feed data, in which case the placeholder is placed\n # in the graph.\n # Now we define the placeholder in the build routines.\n self.train_features = None\n self.train_labels = None\n self.valid_features = None\n self.valid_labels = None\n self.pred_features = None\n\n def reset(self):\n \"\"\"Reset the graph so we can create new input pipeline or graph. (Or for other purposes)\"\"\"\n try:\n self.sess.close()\n except tf.errors.OpError:\n # Maybe the session is closed before\n pass\n tf.reset_default_graph()\n # The session should be created again after the graph is reset.\n self.sess = tf.Session(config=self.sess_config)\n # After the graph is reset, the flag should be set\n self.is_built = False\n self.is_loaded = False\n # After reset the graph, it is important to reset the seed.\n tf.set_random_seed(self.params.seed)\n\n # Reset some variables. The previous ones have become invalid due to the graph reset.\n self.saver = None\n self.summary_writer = None\n self.valid_summary_writer = None\n\n def close(self):\n \"\"\"Close the session we opened.\"\"\"\n try:\n self.sess.close()\n except tf.errors.OpError:\n pass\n\n def load(self):\n \"\"\"Load the saved variables.\n\n If the variables have values, the current values will be changed to the saved ones\n :return The step of the saved model.\n \"\"\"\n tf.logging.info(\"Reading checkpoints...\")\n ckpt = tf.train.get_checkpoint_state(self.model)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n step = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.saver.restore(self.sess, os.path.join(self.model, ckpt_name))\n tf.logging.info(\"Succeed to load checkpoint {}\".format(ckpt_name))\n else:\n sys.exit(\"Failed to find a checkpoint in {}\".format(self.model))\n self.is_loaded = True\n return step\n\n def save(self, step):\n \"\"\"Save the model.\n\n Args:\n step: The global step.\n \"\"\"\n self.saver.save(self.sess, os.path.join(self.model, \"model\"), global_step=step)\n\n def entire_network(self, features, params, is_training, reuse_variables):\n \"\"\"The definition of the entire network.\n Sometimes, feature normalization is applied after the main network.\n We combine them together (except for the loss layer).\n\n Args:\n features: The network input.\n params: The parameters.\n is_training: True if the network is for training.\n reuse_variables: Share variables.\n :return: The network output and the endpoints (for other usage).\n \"\"\"\n features, endpoints = self.network(features, params, is_training, reuse_variables)\n endpoints[\"output\"] = features\n # Add more components (post-processing) after the main network.\n if \"feature_norm\" in params.dict and params.feature_norm:\n assert \"feature_scaling_factor\" in params.dict, \"If feature normalization is applied, scaling factor is necessary.\"\n features = l2_scaling(features, params.feature_scaling_factor)\n endpoints[\"output\"] = features\n\n return features, endpoints\n\n def build(self, mode, dim, loss_type=None, num_speakers=None, noupdate_var_list=None):\n \"\"\" Build a network.\n\n Currently, I use placeholder in the graph and feed data during sess.run. 
So no need to parse\n features and labels.\n\n Args:\n mode: `train`, `valid` or `predict`.\n dim: The dimension of the feature.\n loss_type: Which loss function do we use. Could be None when mode == predict\n num_speakers: The total number of speakers. Used in softmax-like network\n noupdate_var_list: In the fine-tuning, some variables are fixed. The list contains their names (or part of their names).\n We use `noupdate` rather than `notrain` because some variables are not trainable, e.g.\n the mean and var in the batchnorm layers.\n \"\"\"\n assert(mode == \"train\" or mode == \"valid\" or mode == \"predict\")\n is_training = (mode == \"train\")\n reuse_variables = True if self.is_built else None\n\n # Create a new path for prediction, since the training may build a tower the support multi-GPUs\n if mode == \"predict\":\n self.pred_features = tf.placeholder(tf.float32, shape=[None, None, dim], name=\"pred_features\")\n with tf.name_scope(\"predict\") as scope:\n tf.logging.info(\"Extract embedding from node %s\" % self.params.embedding_node)\n # There is no need to do L2 normalization in this function, because we can do the normalization outside,\n # or simply a cosine similarity can do it.\n # Note that the output node may be different if we use different loss function. For example, if the\n # softmax is used, the output of 2-last layer is used as the embedding. While if the end2end loss is\n # used, the output of the last layer may be a better choice. So it is impossible to specify the\n # embedding node inside the network structure. The configuration will tell the network to output the\n # correct activations as the embeddings.\n features, endpoints = self.entire_network(self.pred_features, self.params, is_training, reuse_variables)\n # TODO:\n endpoints_loss = extract_asoftmax(features, num_speakers, self.params, is_training, reuse_variables)\n #self.embeddings = endpoints[self.params.embedding_node]\n self.embeddings = endpoints_loss[self.params.embedding_node]\n if self.saver is None:\n self.saver = tf.train.Saver()\n return\n\n # global_step should be defined before loss function since some loss functions use this value to tune\n # some internal parameters.\n if self.global_step is None:\n self.global_step = tf.placeholder(tf.int32, name=\"global_step\")\n self.params.dict[\"global_step\"] = self.global_step\n\n # If new loss function is added, please modify the code.\n self.loss_type = loss_type\n if loss_type == \"softmax\":\n self.loss_network = softmax\n elif loss_type == \"asoftmax\":\n self.loss_network = asoftmax\n elif loss_type == \"additive_margin_softmax\":\n self.loss_network = additive_margin_softmax\n elif loss_type == \"additive_angular_margin_softmax\":\n self.loss_network = additive_angular_margin_softmax\n elif loss_type == \"semihard_triplet_loss\":\n self.loss_network = semihard_triplet_loss\n elif loss_type == \"angular_triplet_loss\":\n self.loss_network = angular_triplet_loss\n elif loss_type == \"generalized_angular_triplet_loss\":\n self.loss_network = generalized_angular_triplet_loss\n else:\n raise NotImplementedError(\"Not implement %s loss\" % self.loss_type)\n\n if mode == \"valid\":\n tf.logging.info(\"Building valid network...\")\n self.valid_features = tf.placeholder(tf.float32, shape=[None, None, dim], name=\"valid_features\")\n self.valid_labels = tf.placeholder(tf.int32, shape=[None,], name=\"valid_labels\")\n with tf.name_scope(\"valid\") as scope:\n # We can adjust some parameters in the config when we do validation\n # TODO: I'm not sure 
whether it is necssary to change the margin for the valid set.\n # TODO: compare the performance!\n # Change the margin for the valid set.\n if loss_type == \"softmax\":\n pass\n elif loss_type == \"asoftmax\":\n train_margin = self.params.asoftmax_m\n self.params.asoftmax_m = 1\n elif loss_type == \"additive_margin_softmax\":\n train_margin = self.params.amsoftmax_m\n self.params.amsoftmax_m = 0\n elif loss_type == \"additive_angular_margin_softmax\":\n train_margin = self.params.arcsoftmax_m\n self.params.arcsoftmax_m = 0\n elif loss_type == \"angular_triplet_loss\":\n # Switch loss to e2e_valid_loss\n train_loss_network = self.loss_network\n self.loss_network = e2e_valid_loss\n else:\n pass\n\n if \"aux_loss_func\" in self.params.dict:\n # No auxiliary losses during validation.\n train_aux_loss_func = self.params.aux_loss_func\n self.params.aux_loss_func = []\n\n features, endpoints = self.entire_network(self.valid_features, self.params, is_training, reuse_variables)\n valid_loss, endpoints_loss = self.loss_network(features, self.valid_labels, num_speakers, self.params, is_training, reuse_variables)\n endpoints.update(endpoints_loss)\n\n if \"aux_loss_func\" in self.params.dict:\n self.params.aux_loss_func = train_aux_loss_func\n\n # Change the margin back!!!\n if loss_type == \"softmax\":\n pass\n elif loss_type == \"asoftmax\":\n self.params.asoftmax_m = train_margin\n elif loss_type == \"additive_margin_softmax\":\n self.params.amsoftmax_m = train_margin\n elif loss_type == \"additive_angular_margin_softmax\":\n self.params.arcsoftmax_m = train_margin\n elif loss_type == \"angular_triplet_loss\":\n self.loss_network = train_loss_network\n else:\n pass\n\n # We can evaluate other stuff in the valid_ops. Just add the new values to the dict.\n # We may also need to check other values expect for the loss. Leave the task to other functions.\n # During validation, I compute the cosine EER for the final output of the network.\n self.embeddings = endpoints[\"output\"]\n self.endpoints = endpoints\n\n self.valid_ops[\"raw_valid_loss\"] = valid_loss\n mean_valid_loss, mean_valid_loss_op = tf.metrics.mean(valid_loss)\n self.valid_ops[\"valid_loss\"] = mean_valid_loss\n self.valid_ops[\"valid_loss_op\"] = mean_valid_loss_op\n valid_loss_summary = tf.summary.scalar(\"loss\", mean_valid_loss)\n self.valid_summary = tf.summary.merge([valid_loss_summary])\n if self.saver is None:\n self.saver = tf.train.Saver(max_to_keep=self.params.keep_checkpoint_max)\n if self.valid_summary_writer is None:\n self.valid_summary_writer = tf.summary.FileWriter(os.path.join(self.model, \"eval\"), self.sess.graph)\n return\n\n tf.logging.info(\"Building training network...\")\n self.train_features = tf.placeholder(tf.float32, shape=[None, None, dim], name=\"train_features\")\n self.train_labels = tf.placeholder(tf.int32, shape=[None, ], name=\"train_labels\")\n self.learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\")\n\n if \"optimizer\" not in self.params.dict:\n # The default optimizer is sgd\n self.params.dict[\"optimizer\"] = \"sgd\"\n\n if self.params.optimizer == \"sgd\":\n if \"momentum\" in self.params.dict:\n sys.exit(\"Using sgd as the optimizer and you should not specify the momentum.\")\n tf.logging.info(\"***** Using SGD as the optimizer.\")\n opt = tf.train.GradientDescentOptimizer(self.learning_rate, name=\"optimizer\")\n elif self.params.optimizer == \"momentum\":\n # SGD with momentum\n # It is also possible to use other optimizers, e.g. 
Adam.\n tf.logging.info(\"***** Using Momentum as the optimizer.\")\n opt = tf.train.MomentumOptimizer(self.learning_rate, self.params.momentum, use_nesterov=self.params.use_nesterov, name=\"optimizer\")\n elif self.params.optimizer == \"adam\":\n tf.logging.info(\"***** Using Adam as the optimizer.\")\n opt = tf.train.AdamOptimizer(self.learning_rate, name=\"optimizer\")\n else:\n sys.exit(\"Optimizer %s is not supported.\" % self.params.optimizer)\n self.optimizer = opt\n\n # Use name_space here. Create multiple name_spaces if multi-gpus\n # There is a copy in `set_trainable_variables`\n with tf.name_scope(\"train\") as scope:\n features, endpoints = self.entire_network(self.train_features, self.params, is_training, reuse_variables)\n print(features.shape)\n loss, endpoints_loss = self.loss_network(features, self.train_labels, num_speakers, self.params, is_training, reuse_variables)\n self.endpoints = endpoints\n\n endpoints.update(endpoints_loss)\n regularization_loss = tf.losses.get_regularization_loss()\n total_loss = loss + regularization_loss\n\n # train_summary contains all the summeries we want to inspect.\n # Get the summaries define in the network and loss function.\n # The summeries in the network and loss function are about the network variables.\n self.train_summary = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)\n self.train_summary.append(tf.summary.scalar(\"loss\", loss))\n self.train_summary.append(tf.summary.scalar(\"regularization_loss\", regularization_loss))\n\n # We may have other losses (i.e. penalty term in attention layer)\n penalty_loss = tf.get_collection(\"PENALTY\")\n if len(penalty_loss) != 0:\n penalty_loss = tf.reduce_sum(penalty_loss)\n total_loss += penalty_loss\n self.train_summary.append(tf.summary.scalar(\"penalty_term\", penalty_loss))\n\n self.total_loss = total_loss\n self.train_summary.append(tf.summary.scalar(\"total_loss\", total_loss))\n self.train_summary.append(tf.summary.scalar(\"learning_rate\", self.learning_rate))\n\n # The gradient ops is inside the scope to support multi-gpus\n if noupdate_var_list is not None:\n old_batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)\n batchnorm_update_ops = []\n for op in old_batchnorm_update_ops:\n if not substring_in_list(op.name, noupdate_var_list):\n batchnorm_update_ops.append(op)\n tf.logging.info(\"[Info] Update %s\" % op.name)\n else:\n tf.logging.info(\"[Info] Op %s will not be executed\" % op.name)\n else:\n batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)\n\n if noupdate_var_list is not None:\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n train_var_list = []\n\n for v in variables:\n if not substring_in_list(v.name, noupdate_var_list):\n train_var_list.append(v)\n tf.logging.info(\"[Info] Train %s\" % v.name)\n else:\n tf.logging.info(\"[Info] Var %s will not be updated\" % v.name)\n grads = opt.compute_gradients(total_loss, var_list=train_var_list)\n else:\n grads = opt.compute_gradients(total_loss)\n\n # Once the model has been built (even for a tower), we set the flag\n self.is_built = True\n\n if self.params.clip_gradient:\n grads, vars = zip(*grads) # compute gradients of variables with respect to loss\n grads_clip, _ = tf.clip_by_global_norm(grads, self.params.clip_gradient_norm) # l2 norm clipping\n\n # we follow the instruction in ge2e paper to scale the learning rate for w and b\n # Actually, I wonder that we can just simply set a large value for w (e.g. 
20) and fix it.\n if self.loss_type == \"ge2e\":\n # The parameters w and b must be the last variables in the gradients\n grads_clip = grads_clip[:-2] + [0.01 * grad for grad in grads_clip[-2:]]\n # Simply check the position of w and b\n for var in vars[-2:]:\n assert(\"w\" in var.name or \"b\" in var.name)\n grads = zip(grads_clip, vars)\n\n # There are some things we can do to the gradients, i.e. learning rate scaling.\n\n # # The values and gradients are added to summeries\n # for grad, var in grads:\n # if grad is not None:\n # self.train_summary.append(tf.summary.histogram(var.op.name + '/gradients', grad))\n # self.train_summary.append(tf.summary.scalar(var.op.name + '/gradients_norm', tf.norm(grad)))\n\n self.train_summary.append(activation_summaries(endpoints))\n for var in tf.trainable_variables():\n self.train_summary.append(tf.summary.histogram(var.op.name, var))\n self.train_summary = tf.summary.merge(self.train_summary)\n\n with tf.control_dependencies(batchnorm_update_ops):\n self.train_op = opt.apply_gradients(grads)\n\n # We want to inspect other values during training?\n self.train_ops[\"loss\"] = total_loss\n self.train_ops[\"raw_loss\"] = loss\n\n # The model saver\n if self.saver is None:\n self.saver = tf.train.Saver(max_to_keep=self.params.keep_checkpoint_max)\n\n # The training summary writer\n if self.summary_writer is None:\n self.summary_writer = tf.summary.FileWriter(self.model, self.sess.graph)\n return\n\n def train(self, data, spklist, learning_rate, aux_data=None):\n \"\"\"Train the model.\n\n Args:\n data: The training data directory.\n spklist: The spklist is a file map speaker name to the index.\n learning_rate: The learning rate is passed by the main program. The main program can easily tune the\n learning rate according to the validation accuracy or anything else.\n aux_data: The auxiliary data (maybe useful in child class.)\n \"\"\"\n\n def get_semi_orthogonal_for_cnn(mat):\n M = tf.reshape(mat, [-1, mat.shape[3]])\n I = tf.Variable(np.identity(M.shape[0]), dtype=tf.float32)\n for _ in range(10):\n P = tf.matmul(M, M, transpose_b=True)\n alpha2 = tf.divide(tf.trace(tf.matmul(P, P, transpose_b=True)), tf.trace(P))\n M = M - (1 / (2.0 * alpha2)) * tf.matmul(tf.subtract(P, alpha2 * I), M)\n P = tf.matmul(M, M, transpose_b=True)\n alpha2 = tf.divide(tf.trace(tf.matmul(P, P, transpose_b=True)), tf.trace(P))\n M = M / alpha2\n ans = tf.reshape(M, mat.shape)\n return ans\n\n graph = tf.get_default_graph()\n constrained_semi_ops = []\n for i in range(2, 10):\n kernel = graph.get_tensor_by_name('tdnn/%d_semio/kernel:0' % i)\n semi = get_semi_orthogonal_for_cnn(kernel)\n constrained_semi_ops.append(tf.assign(kernel, semi))\n\n self.sess.run(tf.global_variables_initializer())\n\n # curr_step is the real step the training at.\n curr_step = 0\n\n # Load the model if we have\n if os.path.isfile(os.path.join(self.model, \"checkpoint\")):\n curr_step = self.load()\n\n # The data loader\n data_loader = KaldiDataRandomQueue(data, spklist,\n num_parallel=self.params.num_parallel_datasets,\n max_qsize=self.params.max_queue_size,\n num_speakers=self.params.num_speakers_per_batch,\n num_segments=self.params.num_segments_per_speaker,\n min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len,\n shuffle=True)\n epoch = int(curr_step / self.params.num_steps_per_epoch)\n data_loader.start()\n for step in range(curr_step % self.params.num_steps_per_epoch, self.params.num_steps_per_epoch):\n try:\n if step % 4 == 0:\n # SEMI ORTHOGONA;\n 
self.sess.run(constrained_semi_ops)\n if step % self.params.save_summary_steps == 0 or step % self.params.show_training_progress == 0:\n train_ops = [self.train_ops, self.train_op]\n if step % self.params.save_summary_steps == 0:\n train_ops.append(self.train_summary)\n start_time = time.time()\n features, labels = data_loader.fetch()\n train_val = self.sess.run(train_ops, feed_dict={self.train_features: features,\n self.train_labels: labels,\n self.global_step: curr_step,\n self.learning_rate: learning_rate})\n end_time = time.time()\n tf.logging.info(\n \"Epoch: [%2d] step: [%2d/%2d] time: %.4f s/step, raw loss: %f, total loss: %f\"\n % (epoch, step, self.params.num_steps_per_epoch, end_time - start_time,\n train_val[0][\"raw_loss\"], train_val[0][\"loss\"]))\n if step % self.params.save_summary_steps == 0:\n self.summary_writer.add_summary(train_val[-1], curr_step)\n else:\n # Only compute optimizer.\n features, labels = data_loader.fetch()\n _ = self.sess.run(self.train_op, feed_dict={self.train_features: features,\n self.train_labels: labels,\n self.global_step: curr_step,\n self.learning_rate: learning_rate})\n\n if step % self.params.save_checkpoints_steps == 0 and curr_step != 0:\n self.save(curr_step)\n curr_step += 1\n except DataOutOfRange:\n tf.logging.info(\"Finished reading features.\")\n break\n\n data_loader.stop()\n self.save(curr_step)\n\n return\n\n def train_tune_lr(self, data, spklist, tune_period=100, aux_data=None):\n \"\"\"Tune the learning rate.\n\n According to: https://www.kdnuggets.com/2017/11/estimating-optimal-learning-rate-deep-neural-network.html\n\n Args:\n data: The training data directory.\n spklist: The spklist is a file map speaker name to the index.\n tune_period: How many steps per learning rate.\n aux_data: The auxiliary data directory.\n \"\"\"\n # initialize all variables\n self.sess.run(tf.global_variables_initializer())\n\n # We need to load the model sometimes, since we may try to find the learning rate for fine-tuning.\n if os.path.isfile(os.path.join(self.model, \"checkpoint\")):\n self.load()\n\n data_loader = KaldiDataRandomQueue(data, spklist,\n num_parallel=self.params.num_parallel_datasets,\n max_qsize=self.params.max_queue_size,\n num_speakers=self.params.num_speakers_per_batch,\n num_segments=self.params.num_segments_per_speaker,\n min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len,\n shuffle=True)\n data_loader.start()\n\n # The learning rate normally varies from 1e-5 to 1\n # Some common values:\n # 1. 
factor = 1.15\n # tune_period = 200\n # tune_times = 100\n init_learning_rate = 1e-5\n factor = 1.15\n tune_times = 100\n\n fp_lr = open(os.path.join(self.model, \"learning_rate_tuning\"), \"w\")\n for step in range(tune_period * tune_times):\n lr = init_learning_rate * (factor ** (step // tune_period))\n try:\n if step % tune_period == 0:\n train_ops = [self.train_ops, self.train_op, self.train_summary]\n # train_ops = [self.train_ops, self.train_op]\n start_time = time.time()\n features, labels = data_loader.fetch()\n train_val = self.sess.run(train_ops, feed_dict={self.train_features: features,\n self.train_labels: labels,\n self.global_step: 0,\n self.learning_rate: lr})\n end_time = time.time()\n tf.logging.info(\n \"Epoch: step: %2d, time: %.4f s/step, lr: %f, raw loss: %f, total loss: %f\" \\\n % (step, end_time - start_time, lr,\n train_val[0][\"raw_loss\"], train_val[0][\"loss\"]))\n fp_lr.write(\"%d %f %f\\n\" % (step, lr, train_val[0][\"loss\"]))\n self.summary_writer.add_summary(train_val[-1], step)\n else:\n features, labels = data_loader.fetch()\n _ = self.sess.run(self.train_op, feed_dict={self.train_features: features,\n self.train_labels: labels,\n self.global_step: 0,\n self.learning_rate: lr})\n except DataOutOfRange:\n tf.logging.info(\"Finished reading features.\")\n break\n data_loader.stop()\n fp_lr.close()\n return\n\n def valid(self, data, spklist, batch_type=\"softmax\", output_embeddings=False, aux_data=None):\n \"\"\"Evaluate on the validation set\n\n Args:\n data: The training data directory.\n spklist: The spklist is a file map speaker name to the index.\n batch_type: `softmax` or `end2end`. The batch is `softmax-like` or `end2end-like`.\n If the batch is `softmax-like`, each sample are from different speakers;\n if the batch is `end2end-like`, the samples are from N speakers with M segments per speaker.\n output_embeddings: Set True to output the corresponding embeddings and labels of the valid set.\n If output_embeddings, an additional valid metric (e.g. EER) should be computed outside\n the function.\n aux_data: The auxiliary data directory.\n\n :return: valid_loss, embeddings and labels (None if output_embeddings is False).\n \"\"\"\n # Initialization will reset all the variables in the graph.\n # The local variables are also need to be initialized for metrics function.\n self.sess.run(tf.global_variables_initializer())\n self.sess.run(tf.local_variables_initializer())\n assert batch_type == \"softmax\" or batch_type == \"end2end\", \"The batch_type can only be softmax or end2end\"\n\n curr_step = 0\n # Load the model. The valid function can only be called after training (of course...)\n if os.path.isfile(os.path.join(self.model, \"checkpoint\")):\n curr_step = self.load()\n else:\n tf.logging.info(\"[Warning] Cannot find model in %s. Random initialization is used in validation.\" % self.model)\n\n embeddings_val = None\n labels_val = None\n num_batches = 0\n\n if output_embeddings:\n # If we want to output embeddings, the features should be loaded in order\n data_loader = KaldiDataSeqQueue(data, spklist,\n num_parallel=2,\n max_qsize=10,\n batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker,\n min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len,\n shuffle=False)\n data_loader.start()\n\n tf.logging.info(\"Generate valid embeddings.\")\n # In this mode, the embeddings and labels will be saved and output. 
It needs more memory and takes longer\n # to process these values.\n while True:\n try:\n if num_batches % 100 == 0:\n tf.logging.info(\"valid step: %d\" % num_batches)\n features, labels = data_loader.fetch()\n valid_emb_val, valid_labels_val = self.sess.run([self.embeddings, self.valid_labels], feed_dict={self.valid_features: features,\n self.valid_labels: labels,\n self.global_step: curr_step})\n # Save the embeddings and labels\n if embeddings_val is None:\n embeddings_val = valid_emb_val\n labels_val = valid_labels_val\n else:\n embeddings_val = np.concatenate((embeddings_val, valid_emb_val), axis=0)\n labels_val = np.concatenate((labels_val, valid_labels_val), axis=0)\n num_batches += 1\n except DataOutOfRange:\n break\n data_loader.stop()\n\n if batch_type == \"softmax\":\n data_loader = KaldiDataSeqQueue(data, spklist,\n num_parallel=2,\n max_qsize=10,\n batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker,\n min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len,\n shuffle=True)\n elif batch_type == \"end2end\":\n # The num_valid_speakers_per_batch and num_valid_segments_per_speaker are only required when\n # End2End loss is used. Since we switch the loss function to softmax generalized e2e loss\n # when the e2e loss is used.\n assert \"num_valid_speakers_per_batch\" in self.params.dict and \"num_valid_segments_per_speaker\" in self.params.dict, \\\n \"Valid parameters should be set if E2E loss is selected\"\n data_loader = KaldiDataRandomQueue(data, spklist,\n num_parallel=2,\n max_qsize=10,\n num_speakers=self.params.num_valid_speakers_per_batch,\n num_segments=self.params.num_valid_segments_per_speaker,\n min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len,\n shuffle=True)\n else:\n raise ValueError\n\n data_loader.start()\n num_batches = 0\n for _ in range(self.params.valid_max_iterations):\n try:\n if num_batches % 100 == 0:\n tf.logging.info(\"valid step: %d\" % num_batches)\n features, labels = data_loader.fetch()\n _ = self.sess.run(self.valid_ops[\"valid_loss_op\"], feed_dict={self.valid_features: features,\n self.valid_labels: labels,\n self.global_step: curr_step})\n num_batches += 1\n except DataOutOfRange:\n break\n data_loader.stop()\n\n loss, summary = self.sess.run([self.valid_ops[\"valid_loss\"], self.valid_summary])\n # We only save the summary for the last batch.\n self.valid_summary_writer.add_summary(summary, curr_step)\n # The valid loss is averaged over all the batches.\n tf.logging.info(\"[Validation %d batches] valid loss: %f\" % (num_batches, loss))\n\n # The output embeddings and labels can be used to compute EER or other metrics\n return loss, embeddings_val, labels_val\n\n def predict(self, features):\n \"\"\"Output the embeddings\n\n :return: A numpy array which is the embeddings\n \"\"\"\n if not self.is_loaded:\n if os.path.isfile(os.path.join(self.model, \"checkpoint\")):\n self.load()\n else:\n sys.exit(\"Cannot find model in %s\" % self.model)\n rank = len(features.shape)\n assert(rank == 2 or rank == 3)\n # Expand the feature if the rank is 2\n if rank == 2:\n features = np.expand_dims(features, axis=0)\n embeddings = self.sess.run(self.embeddings, feed_dict={self.pred_features: features})\n if rank == 2:\n embeddings = np.squeeze(embeddings, axis=0)\n return embeddings\n\n def set_trainable_variables(self, variable_list=None):\n \"\"\"Set the variables which we want to optimize.\n The optimizer will only optimize the variables which contain sub-string in the variable 
list.\n Basically, this is copied from the training path in `build`.\n\n The batchnorm statistics can always be updated?\n\n Args:\n variable_list: The model variable contains sub-string in the list will be optimized.\n If None, all variables will be optimized.\n \"\"\"\n add_train_summary = []\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n trainable_variables = []\n if variable_list is None:\n tf.logging.info(\"[Info] Add all trainable variables to the optimizer.\")\n trainable_variables = None\n else:\n for v in variables:\n if substring_in_list(v.name, variable_list):\n trainable_variables.append(v)\n tf.logging.info(\"[Info] Add %s to trainable list\" % v.name)\n\n with tf.name_scope(\"train\") as scope:\n grads = self.optimizer.compute_gradients(self.total_loss, var_list=trainable_variables)\n\n if self.params.clip_gradient:\n grads, vars = zip(*grads) # compute gradients of variables with respect to loss\n grads_clip, _ = tf.clip_by_global_norm(grads, self.params.clip_gradient_norm) # l2 norm clipping\n grads = zip(grads_clip, vars)\n\n # # The values and gradients are added to summeries\n # for grad, var in grads:\n # if grad is not None:\n # add_train_summary.append(tf.summary.histogram(var.op.name + '/gradients', grad))\n # add_train_summary.append(tf.summary.scalar(var.op.name + '/gradients_norm', tf.norm(grad)))\n\n if variable_list is None:\n trainable_variables = tf.trainable_variables()\n for var in trainable_variables:\n add_train_summary.append(tf.summary.histogram(var.op.name, var))\n self.train_summary = tf.summary.merge([self.train_summary, tf.summary.merge(add_train_summary)])\n\n batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)\n with tf.control_dependencies(batchnorm_update_ops):\n self.train_op = self.optimizer.apply_gradients(grads)\n\n def get_finetune_model(self, excluded_list):\n \"\"\"Start from a pre-trained model and other parameters are initialized using default initializer.\n Actually, this function is only called at the first epoch of the fine-tuning, because in succeeded epochs,\n we need to fully load the model rather than loading part of the graph.\n\n The pre-trained model is saved in the model directory as index 0.\n Backup the pre-trained model and save the new model (with random initialized parameters) as index 0 instead.\n\n Args:\n excluded_list: A list. Do NOT restore the parameters in the exclude_list. This is useful in fine-truning\n an existing model. We load a part of the pre-trained model and leave the other part\n randomly initialized.\n Deprecated:\n data: The training data directory.\n spklist: The spklist is a file map speaker name to the index.\n learning_rate: The learning rate is passed by the main program. 
The main program can easily tune the\n learning rate according to the validation accuracy or anything else.\n \"\"\"\n # initialize all variables\n self.sess.run(tf.global_variables_initializer())\n\n # Load parts of the model\n variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n restore_variables = []\n for v in variables:\n if not substring_in_list(v.name, excluded_list):\n restore_variables.append(v)\n else:\n tf.logging.info(\"[Info] Ignore %s when loading the checkpoint\" % v.name)\n finetune_saver = tf.train.Saver(var_list=restore_variables)\n ckpt = tf.train.get_checkpoint_state(self.model)\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n finetune_saver.restore(self.sess, os.path.join(self.model, ckpt_name))\n\n # Backup the old files\n import glob, shutil\n model_checkpoint_path = ckpt.model_checkpoint_path\n for filename in glob.glob(model_checkpoint_path + \"*\"):\n shutil.copyfile(filename, filename + '.bak')\n\n # Save the new model. The new model is basically the same with the pre-trained one, while parameters\n # NOT in the pre-trained model are random initialized.\n # Set the step to 0.\n self.save(0)\n return\n\n def insight(self, data, spklist, batch_type=\"softmax\", output_embeddings=False, aux_data=None):\n \"\"\"Just use to debug the network\n \"\"\"\n self.sess.run(tf.global_variables_initializer())\n self.sess.run(tf.local_variables_initializer())\n assert batch_type == \"softmax\" or batch_type == \"end2end\", \"The batch_type can only be softmax or end2end\"\n\n embeddings_val = None\n labels_val = None\n\n self.load()\n\n if output_embeddings:\n # If we want to output embeddings, the features should be loaded in order\n data_loader = KaldiDataSeqQueue(data, spklist,\n num_parallel=2,\n max_qsize=10,\n batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker,\n min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len,\n shuffle=False)\n data_loader.start()\n\n tf.logging.info(\"Generate valid embeddings.\")\n # In this mode, the embeddings and labels will be saved and output. It needs more memory and takes longer\n # to process these values.\n while True:\n try:\n features, labels = data_loader.fetch()\n valid_emb_val, valid_labels_val, endpoints_val = self.sess.run([self.embeddings, self.valid_labels, self.endpoints], feed_dict={self.valid_features: features,\n self.valid_labels: labels})\n\n # acc = np.sum(np.equal(np.argmax(endpoints_val['logits'], axis=1), labels, dtype=np.float)) / float(\n # labels.shape[0])\n # print(\"Acc: %f\" % acc)\n\n # Save the embeddings and labels\n if embeddings_val is None:\n embeddings_val = valid_emb_val\n labels_val = valid_labels_val\n else:\n embeddings_val = np.concatenate((embeddings_val, valid_emb_val), axis=0)\n labels_val = np.concatenate((labels_val, valid_labels_val), axis=0)\n except DataOutOfRange:\n break\n data_loader.stop()\n\n if batch_type == \"softmax\":\n data_loader = KaldiDataSeqQueue(data, spklist,\n num_parallel=2,\n max_qsize=10,\n batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker*10,\n min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len,\n shuffle=True)\n elif batch_type == \"end2end\":\n # The num_valid_speakers_per_batch and num_valid_segments_per_speaker are only required when\n # End2End loss is used. 
Since we switch the loss function to softmax generalized e2e loss\n # when the e2e loss is used.\n assert \"num_valid_speakers_per_batch\" in self.params.dict and \"num_valid_segments_per_speaker\" in self.params.dict, \\\n \"Valid parameters should be set if E2E loss is selected\"\n data_loader = KaldiDataRandomQueue(data, spklist,\n num_parallel=2,\n max_qsize=10,\n num_speakers=self.params.num_valid_speakers_per_batch,\n num_segments=self.params.num_valid_segments_per_speaker,\n min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len,\n shuffle=True)\n else:\n raise ValueError\n\n data_loader.start()\n\n while True:\n try:\n features, labels = data_loader.fetch()\n _, endpoints_val = self.sess.run([self.valid_ops[\"valid_loss_op\"], self.endpoints], feed_dict={self.valid_features: features,\n self.valid_labels: labels})\n except DataOutOfRange:\n break\n data_loader.stop()\n loss = self.sess.run(self.valid_ops[\"valid_loss\"])\n tf.logging.info(\"Shorter segments are used to test the valid loss (%d-%d)\" % (self.params.min_segment_len, self.params.max_segment_len))\n tf.logging.info(\"Loss: %f\" % loss)\n\n\n # while True:\n # try:\n # features, labels = data_loader.fetch()\n # valid_ops, endpoints_val = self.sess.run([self.valid_ops, self.endpoints], feed_dict={self.valid_features: features,\n # self.valid_labels: labels})\n # loss = valid_ops[\"valid_loss\"]\n # except DataOutOfRange:\n # break\n # data_loader.stop()\n # tf.logging.info(\"Loss: %f\" % loss)\n\n acc = np.sum(np.equal(np.argmax(endpoints_val['logits'], axis=1), labels, dtype=np.float)) / float(labels.shape[0])\n print(\"Acc: %f\" % acc)\n\n import pdb\n pdb.set_trace()\n # from model.test_utils import softmax\n # with tf.variable_scope(\"softmax\", reuse=True):\n # test = tf.get_variable(\"output/kernel\")\n # test_val = self.sess.run(test)\n return loss, embeddings_val, labels_val\n", "import os\nimport argparse\nimport random\nimport sys\nimport numpy, scipy, sklearn\nimport tensorflow as tf\nimport numpy as np\nfrom misc.utils import ValidLoss, load_lr, load_valid_loss, save_codes_and_config, compute_cos_pairwise_eer\nfrom model_src.trainer_multitask import Trainer\nfrom dataset_src.data_loader import KaldiDataRandomQueue\nfrom dataset_src.kaldi_io import FeatureReader\nfrom six.moves import range\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-c\", \"--cont\", action=\"store_true\", help=\"Continue training from an existing model.\")\nparser.add_argument(\"--config\", type=str, help=\"The configuration file.\")\nparser.add_argument(\"train_dir\", type=str, help=\"The data directory of the training set.\")\nparser.add_argument(\"train_spklist\", type=str, help=\"The spklist file maps the TRAINING speakers to the indices.\")\nparser.add_argument(\"valid_dir\", type=str, help=\"The data directory of the validation set.\")\nparser.add_argument(\"valid_spklist\", type=str, help=\"The spklist maps the VALID speakers to the indices.\")\nparser.add_argument(\"model\", type=str, help=\"The output model directory.\")\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n args = parser.parse_args()\n params = save_codes_and_config(args.cont, args.model, args.config)\n\n # The model directory always has a folder named nnet\n model_dir = os.path.join(args.model, \"nnet\")\n\n # Set the random seed. 
The random operations may appear in data input, batch forming, etc.\n tf.set_random_seed(params.seed)\n random.seed(params.seed)\n np.random.seed(params.seed)\n\n if args.cont:\n # If we continue training, we can figure out how much steps the model has been trained,\n # using the index of the checkpoint\n import re\n ckpt = tf.train.get_checkpoint_state(model_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n step = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n else:\n sys.exit(\"Cannot load checkpoint from %s\" % model_dir)\n start_epoch = int(step / params.num_steps_per_epoch)\n else:\n start_epoch = 0\n\n learning_rate = params.learning_rate\n learning_rate_array = []\n if os.path.isfile(str(learning_rate)):\n with open(str(learning_rate), \"r\") as f:\n for line in f.readlines():\n learning_rate_array.append(float(line.strip()))\n # The size of the file should be large enough\n assert len(learning_rate_array) > params.num_epochs, \"The learning rate file is shorter than the num of epochs.\"\n tf.logging.info(\"Using specified learning rate decay strategy.\")\n else:\n # The learning rate is determined by the training process. However, if we continue training, \n # the code doesn't know the previous learning rate if it is tuned using the validation set. \n # To solve that, just save the learning rate to an individual file.\n if os.path.isfile(os.path.join(model_dir, \"learning_rate\")):\n learning_rate_array = load_lr(os.path.join(model_dir, \"learning_rate\"))\n assert len(learning_rate_array) == start_epoch + 1, \"Not enough learning rates in the learning_rate file.\"\n else:\n learning_rate_array = [float(learning_rate)] * (start_epoch + 1)\n\n dim = FeatureReader(args.train_dir).get_dim()\n with open(os.path.join(model_dir, \"feature_dim\"), \"w\") as f:\n f.write(\"%d\\n\" % dim)\n num_total_train_speakers = KaldiDataRandomQueue(args.train_dir, args.train_spklist).num_total_speakers\n tf.logging.info(\"There are %d speakers in the training set and the dim is %d\" % (num_total_train_speakers, dim))\n\n # Load the history valid loss\n min_valid_loss = ValidLoss()\n if os.path.isfile(os.path.join(model_dir, \"valid_loss\")):\n min_valid_loss = load_valid_loss(os.path.join(model_dir, \"valid_loss\"))\n\n # The trainer is used to control the training process\n trainer = Trainer(params, args.model)\n trainer.build(\"train\",\n dim=dim,\n loss_type=params.loss_func,\n num_speakers=num_total_train_speakers)\n trainer.build(\"valid\",\n dim=dim,\n loss_type=params.loss_func,\n num_speakers=num_total_train_speakers)\n\n if \"early_stop_epochs\" not in params.dict:\n params.dict[\"early_stop_epochs\"] = 10\n if \"min_learning_rate\" not in params.dict:\n params.dict[\"min_learning_rate\"] = 1e-5\n\n for epoch in range(start_epoch, params.num_epochs):\n trainer.train(args.train_dir, args.train_spklist, learning_rate_array[epoch])\n valid_loss, valid_embeddings, valid_labels = trainer.valid(args.valid_dir, args.valid_spklist,\n batch_type=params.batch_type,\n output_embeddings=True)\n\n eer = compute_cos_pairwise_eer(valid_embeddings, valid_labels)\n tf.logging.info(\"[INFO] Valid EER: %f\" % eer)\n\n # Tune the learning rate if necessary.\n if not os.path.isfile(str(learning_rate)):\n new_learning_rate = learning_rate_array[epoch]\n if valid_loss < min_valid_loss.min_loss:\n min_valid_loss.min_loss = valid_loss\n min_valid_loss.min_loss_epoch = epoch\n else:\n if epoch - min_valid_loss.min_loss_epoch >= 
params.reduce_lr_epochs:\n new_learning_rate /= 2\n # If the valid loss in the next epoch still does not reduce, the learning rate will keep reducing.\n tf.logging.info(\"After epoch %d, no improvement. Reduce the learning rate to %.8f\" % (\n min_valid_loss.min_loss_epoch, new_learning_rate))\n min_valid_loss.min_loss_epoch += 2\n learning_rate_array.append(new_learning_rate)\n\n if epoch == 0:\n # If this is the first epoch, the first learning rate should be recorded\n with open(os.path.join(model_dir, \"learning_rate\"), \"a\") as f:\n f.write(\"0 %.8f\\n\" % learning_rate_array[0])\n\n # Save the learning rate and loss for each epoch.\n with open(os.path.join(model_dir, \"learning_rate\"), \"a\") as f:\n f.write(\"%d %.8f\\n\" % (epoch + 1, learning_rate_array[epoch + 1]))\n with open(os.path.join(model_dir, \"valid_loss\"), \"a\") as f:\n f.write(\"%d %f %f\\n\" % (epoch, valid_loss, eer))\n\n if not os.path.isfile(str(learning_rate)):\n # If the learning rate is too small, the training is actually get stuck.\n # Also early stop is applied.\n # This is only applied when the learning rate is not specified.\n if learning_rate_array[epoch + 1] < (params.min_learning_rate - 1e-12) or \\\n epoch - min_valid_loss.min_loss_epoch >= params.early_stop_epochs:\n break\n\n # Close the session before we exit.\n trainer.close()\n" ]
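The train() method in the trainer above re-imposes a semi-orthogonal constraint on the tdnn/*_semio kernels every 4 steps via constrained_semi_ops. A minimal NumPy sketch of that update rule, using a hypothetical 64x256 flattened kernel in place of the reshaped graph tensor (the final rescaling by alpha2 in the source is left out here; it only changes the overall scale, not the semi-orthogonality):

import numpy as np

def semi_orthogonal_step(M):
    # Mirrors one inner-loop iteration of get_semi_orthogonal_for_cnn:
    #   P = M M^T, alpha^2 = tr(P P^T) / tr(P), M <- M - (P - alpha^2 I) M / (2 alpha^2)
    P = M @ M.T
    alpha2 = np.trace(P @ P.T) / np.trace(P)
    update = (P - alpha2 * np.eye(M.shape[0])) @ M
    return M - update / (2.0 * alpha2)

rng = np.random.RandomState(0)
M = rng.randn(64, 256)            # hypothetical flattened kernel (rows x cols)
for _ in range(10):               # the trainer also runs 10 inner iterations
    M = semi_orthogonal_step(M)

P = M @ M.T
alpha2 = np.trace(P @ P.T) / np.trace(P)
print(np.linalg.norm(P - alpha2 * np.eye(M.shape[0])) / np.linalg.norm(P))  # small: M M^T is now close to alpha^2 * I

In the script itself the constraint is applied only when step % 4 == 0, presumably to keep the extra assign ops cheap relative to the ordinary optimizer updates.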
[ [ "numpy.std", "numpy.delete", "numpy.mean", "numpy.array", "numpy.zeros" ], [ "numpy.expand_dims", "tensorflow.control_dependencies", "tensorflow.reduce_sum", "numpy.squeeze", "numpy.concatenate", "tensorflow.train.AdamOptimizer", "tensorflow.get_default_graph", "tensorflow.summary.scalar", "tensorflow.trace", "tensorflow.get_collection", "tensorflow.subtract", "tensorflow.ConfigProto", "tensorflow.reset_default_graph", "tensorflow.train.MomentumOptimizer", "tensorflow.name_scope", "tensorflow.Session", "numpy.argmax", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.logging.info", "numpy.identity", "tensorflow.set_random_seed", "tensorflow.summary.merge", "tensorflow.summary.histogram", "tensorflow.train.get_checkpoint_state", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.losses.get_regularization_loss", "tensorflow.reshape", "tensorflow.assign", "tensorflow.clip_by_global_norm" ], [ "tensorflow.train.get_checkpoint_state", "numpy.random.seed", "tensorflow.logging.info", "tensorflow.logging.set_verbosity", "tensorflow.set_random_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
QB3/sparse-ho-qbe
[ "73358caeff2ff08ca4e88af419e7dae753d43ea9" ]
[ "sparse_ho/implicit_forward.py" ]
[ "import numpy as np\nfrom scipy.sparse import issparse\nfrom sparse_ho.forward import get_beta_jac_iterdiff\n\n\nclass ImplicitForward():\n def __init__(\n self, criterion, tol_jac=1e-3, n_iter=100, n_iter_jac=100,\n use_sk=False, verbose=False):\n self.criterion = criterion\n self.n_iter = n_iter\n self.tol_jac = tol_jac\n self.n_iter_jac = n_iter_jac\n self.use_sk = use_sk\n self.verbose = verbose\n\n def get_beta_jac_v(\n self, X, y, log_alpha, model, get_v, mask0=None, dense0=None,\n quantity_to_warm_start=None, max_iter=1000, tol=1e-3,\n compute_jac=False, backward=False, full_jac_v=False):\n\n mask, dense, jac = get_beta_jac_fast_iterdiff(\n X, y, log_alpha, self.criterion.X_val, self.criterion.y_val,\n get_v, mask0=mask0, dense0=dense0,\n jac0=quantity_to_warm_start,\n # tol_jac=self.tol_jac,\n tol_jac=tol, use_sk=self.use_sk,\n tol=tol, niter_jac=self.n_iter_jac, model=model,\n max_iter=self.criterion.model.max_iter, verbose=self.verbose)\n\n jac_v = model.get_jac_v(mask, dense, jac, get_v)\n if full_jac_v:\n jac_v = model.get_full_jac_v(mask, jac_v, X.shape[1])\n\n return mask, dense, jac_v, jac\n\n def get_val_grad(\n self, log_alpha, mask0=None, dense0=None, beta_star=None,\n jac0=None, max_iter=1000, tol=1e-3, compute_jac=True,\n backward=False):\n return self.criterion.get_val_grad(\n log_alpha, self.get_beta_jac_v, max_iter=max_iter, tol=tol,\n compute_jac=compute_jac, backward=backward)\n\n def get_val(\n self, log_alpha, mask0=None, dense0=None, beta_star=None,\n jac0=None, max_iter=1000, tol=1e-3, compute_jac=True,\n backward=False):\n return self.criterion.get_val(\n log_alpha, self.get_beta_jac_v, max_iter=max_iter, tol=tol,\n compute_jac=compute_jac, backward=backward)\n\n\ndef get_beta_jac_fast_iterdiff(\n X, y, log_alpha, X_val, y_val, get_v, model, mask0=None, dense0=None, jac0=None, tol=1e-3, max_iter=1000, niter_jac=1000, tol_jac=1e-6, use_sk=False, verbose=False):\n n_samples, n_features = X.shape\n\n mask, dense, _ = get_beta_jac_iterdiff(\n X, y, log_alpha, mask0=mask0, dense0=dense0, jac0=jac0, tol=tol,\n max_iter=max_iter, compute_jac=False, model=model, use_sk=use_sk,\n verbose=verbose)\n\n dbeta0_new = model._init_dbeta0(mask, mask0, jac0)\n reduce_alpha = model._reduce_alpha(np.exp(log_alpha), mask)\n\n v = None\n _, r = model._init_beta_r(X, y, mask, dense)\n jac = get_only_jac(\n model.reduce_X(mask), model.reduce_y(mask), r, reduce_alpha, model.sign(dense), v,\n dbeta=dbeta0_new, niter_jac=niter_jac, tol_jac=tol_jac, model=model, mask=mask, dense=dense, verbose=verbose)\n\n return mask, dense, jac\n\n\ndef get_only_jac(\n Xs, y, r, alpha, sign_beta, v, dbeta=None, niter_jac=100, tol_jac=1e-4, model=\"lasso\", mask=None, dense=None, verbose=False):\n n_samples, n_features = Xs.shape\n\n is_sparse = issparse(Xs)\n L = model.get_L(Xs, is_sparse)\n\n objs = []\n\n if dbeta is None:\n model._init_dbeta(n_features)\n # if model == \"lasso\":\n # dbeta = np.zeros(n_features)\n # if model == \"mcp\":\n # dbeta = np.zeros((n_features, 2))\n # elif model == \"wlasso\":\n # dbeta = np.zeros((n_features, n_features))\n else:\n dbeta = dbeta.copy()\n\n dr = model._init_dr(dbeta, Xs, y, mask)\n for i in range(niter_jac):\n if verbose:\n print(\"%i -st iterations over %i\" % (i, niter_jac))\n if is_sparse:\n model._update_only_jac_sparse(\n Xs.data, Xs.indptr, Xs.indices, y, n_samples,\n n_features, dbeta, r, dr, L, alpha, sign_beta)\n else:\n model._update_only_jac(\n Xs, y, r, dbeta, dr, L, alpha, sign_beta, mask)\n\n objs.append(\n model.get_jac_obj(Xs, y, 
sign_beta, dbeta, r, dr, alpha, mask))\n\n # m1 = norm(- v.T @ Xs.T @ dr + sign_beta * n_samples * alpha)\n # m2 = tol_jac * np.sqrt(n_features) * n_samples * alpha * norm(v)\n # crit = m1 <= m2\n # print(\"m1 %.2f\", m1)\n # print(\"m2 %.2f\", m2)\n # print(\"m1 = %f\" % norm(v @ (dbeta - dbeta_old)))\n # print(\"tol_crit %f\" % tol_crit)\n # if norm(v @ (dbeta - dbeta_old)) < tol_crit:\n # if norm((dbeta - dbeta_old)) < tol_jac * norm(dbeta):\n # crit =\n print('jac obj', objs[-1])\n if i > 1 and np.abs(objs[-2] - objs[-1]) < np.abs(objs[-1]) * tol_jac:\n break\n # dbeta_old = dbeta.copy()\n # dr_old = dr.copy()\n\n return dbeta\n" ]
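get_only_jac above iterates model._update_only_jac until the Jacobian objective stops changing (relative tolerance tol_jac). For the plain Lasso, the commented stopping criterion (norm(-v.T @ Xs.T @ dr + sign_beta * n_samples * alpha)) indicates the fixed point it is driving toward: on the active set, Xs.T @ Xs @ dbeta = -n_samples * alpha * sign(beta). Assuming that parameterization of the model (stated here as an assumption, not read from this file), the Jacobian of the solution with respect to log(alpha) can also be obtained directly by solving that small linear system; a NumPy sketch:

import numpy as np

def lasso_jac_log_alpha(X, beta, alpha):
    # Jacobian of the Lasso solution w.r.t. log(alpha), restricted to the support of beta.
    mask = beta != 0
    Xs = X[:, mask]
    n_samples = X.shape[0]
    # Fixed point of the iteration in get_only_jac (Lasso case):
    #   Xs.T @ Xs @ dbeta_s = -n_samples * alpha * sign(beta_s)
    dbeta_s = np.linalg.solve(Xs.T @ Xs, -n_samples * alpha * np.sign(beta[mask]))
    return mask, dbeta_s

The iterative scheme in the module avoids forming and factorizing Xs.T @ Xs, which is the point when the support is large or X is sparse; the closed form above is only a convenient check on small dense problems.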
[ [ "numpy.abs", "numpy.exp", "scipy.sparse.issparse" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
saethlin/yt
[ "992ae71974dca933346e91008c5a50f43a0a350e", "992ae71974dca933346e91008c5a50f43a0a350e" ]
[ "yt/geometry/oct_geometry_handler.py", "yt/data_objects/time_series.py" ]
[ "\"\"\"\nOctree geometry handler\n\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.geometry.geometry_handler import Index\nfrom yt.fields.field_detector import FieldDetector\n\n\nclass OctreeIndex(Index):\n \"\"\"The Index subclass for oct AMR datasets\"\"\"\n def _setup_geometry(self):\n mylog.debug(\"Initializing Octree Geometry Handler.\")\n self._initialize_oct_handler()\n\n def get_smallest_dx(self):\n \"\"\"\n Returns (in code units) the smallest cell size in the simulation.\n \"\"\"\n return (self.dataset.domain_width /\n (self.dataset.domain_dimensions * 2**(self.max_level))).min()\n\n def convert(self, unit):\n return self.dataset.conversion_factors[unit]\n\n def _add_mesh_sampling_particle_field(self, deposit_field, ftype, ptype):\n units = self.ds.field_info[ftype, deposit_field].units\n take_log = self.ds.field_info[ftype, deposit_field].take_log\n field_name = \"cell_%s_%s\" % (ftype, deposit_field)\n\n def _cell_index(field, data):\n # Get the position of the particles\n pos = data[ptype, \"particle_position\"]\n Npart = pos.shape[0]\n ret = np.zeros(Npart)\n tmp = np.zeros(Npart)\n\n if isinstance(data, FieldDetector):\n return ret\n\n remaining = np.ones(Npart, dtype=bool)\n Nremaining = Npart\n\n Nobjs = len(data._current_chunk.objs)\n Nbits = int(np.ceil(np.log2(Nobjs)))\n\n for i, obj in enumerate(data._current_chunk.objs):\n if Nremaining == 0:\n break\n icell = obj['index', 'ones'].T.reshape(-1).astype(np.int64).cumsum().value - 1\n mesh_data = ((icell << Nbits) + i).astype(np.float64)\n # Access the mesh data and attach them to their particles\n tmp[:Nremaining] = obj.mesh_sampling_particle_field(pos[remaining], mesh_data)\n\n ret[remaining] = tmp[:Nremaining]\n\n remaining[remaining] = np.isnan(tmp[:Nremaining])\n Nremaining = remaining.sum()\n\n return data.ds.arr(ret.astype(np.float64), input_units='1')\n\n def _mesh_sampling_particle_field(field, data):\n \"\"\"\n Create a grid field for particle quantities using given method.\n \"\"\"\n ones = data[ptype, 'particle_ones']\n\n # Access \"cell_index\" field\n Npart = ones.shape[0]\n ret = np.zeros(Npart)\n cell_index = np.array(data[ptype, 'cell_index'], np.int64)\n\n if isinstance(data, FieldDetector):\n return ret\n\n # The index of the obj is stored on the first bits\n Nobjs = len(data._current_chunk.objs)\n Nbits = int(np.ceil(np.log2(Nobjs)))\n icell = cell_index >> Nbits\n iobj = cell_index - (icell << Nbits)\n for i, subset in enumerate(data._current_chunk.objs):\n mask = (iobj == i)\n\n subset.field_parameters = data.field_parameters\n\n cell_data = subset[ftype, deposit_field].T.reshape(-1)\n\n ret[mask] = cell_data[icell[mask]]\n\n return data.ds.arr(ret, input_units=cell_data.units)\n\n if (ptype, 'cell_index') not in self.ds.derived_field_list:\n self.ds.add_field(\n (ptype, 'cell_index'),\n function=_cell_index,\n sampling_type=\"particle\",\n units='1')\n\n self.ds.add_field(\n (ptype, field_name),\n function=_mesh_sampling_particle_field,\n sampling_type=\"particle\",\n units=units,\n take_log=take_log)\n\n", "\"\"\"\nTime series analysis 
functions.\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport inspect\nimport functools\nimport glob\nimport numpy as np\nimport os\nimport weakref\n\nfrom functools import wraps\n\nfrom yt.extern.six import add_metaclass, string_types\nfrom yt.convenience import load\nfrom yt.config import ytcfg\nfrom yt.data_objects.data_containers import data_object_registry\nfrom yt.data_objects.derived_quantities import \\\n derived_quantity_registry\nfrom yt.data_objects.analyzer_objects import \\\n create_quantity_proxy, \\\n analysis_task_registry, \\\n AnalysisTask\nfrom yt.data_objects.particle_trajectories import \\\n ParticleTrajectories\nfrom yt.funcs import \\\n iterable, \\\n ensure_list, \\\n mylog\nfrom yt.units.yt_array import YTArray, YTQuantity\nfrom yt.utilities.exceptions import \\\n YTException, \\\n YTOutputNotIdentified\nfrom yt.utilities.parallel_tools.parallel_analysis_interface \\\n import parallel_objects, parallel_root_only, communication_system\nfrom yt.utilities.parameter_file_storage import \\\n simulation_time_series_registry\n \nclass AnalysisTaskProxy(object):\n def __init__(self, time_series):\n self.time_series = time_series\n\n def __getitem__(self, key):\n task_cls = analysis_task_registry[key]\n @wraps(task_cls.__init__)\n def func(*args, **kwargs):\n task = task_cls(*args, **kwargs)\n return self.time_series.eval(task)\n return func\n\n def keys(self):\n return analysis_task_registry.keys()\n\n def __contains__(self, key):\n return key in analysis_task_registry\n\ndef get_ds_prop(propname):\n def _eval(params, ds):\n return getattr(ds, propname)\n cls = type(propname, (AnalysisTask,),\n dict(eval = _eval, _params = tuple()))\n return cls\n\ndef get_filenames_from_glob_pattern(filenames):\n file_list = glob.glob(filenames)\n if len(file_list) == 0:\n data_dir = ytcfg.get(\"yt\", \"test_data_dir\")\n pattern = os.path.join(data_dir, filenames)\n td_filenames = glob.glob(pattern)\n if len(td_filenames) > 0:\n file_list = td_filenames\n else:\n raise YTOutputNotIdentified(filenames, {})\n return sorted(file_list)\n\nattrs = (\"refine_by\", \"dimensionality\", \"current_time\",\n \"domain_dimensions\", \"domain_left_edge\",\n \"domain_right_edge\", \"unique_identifier\",\n \"current_redshift\", \"cosmological_simulation\",\n \"omega_matter\", \"omega_lambda\", \"omega_radiation\",\n \"hubble_constant\")\n\nclass TimeSeriesParametersContainer(object):\n def __init__(self, data_object):\n self.data_object = data_object\n\n def __getattr__(self, attr):\n if attr in attrs:\n return self.data_object.eval(get_ds_prop(attr)())\n raise AttributeError(attr)\n\nclass DatasetSeries(object):\n r\"\"\"The DatasetSeries object is a container of multiple datasets,\n allowing easy iteration and computation on them.\n\n DatasetSeries objects are designed to provide easy ways to access,\n analyze, parallelize and visualize multiple datasets sequentially. 
This is\n primarily expressed through iteration, but can also be constructed via\n analysis tasks (see :ref:`time-series-analysis`).\n\n Parameters\n ----------\n filenames : list or pattern\n This can either be a list of filenames (such as [\"DD0001/DD0001\",\n \"DD0002/DD0002\"]) or a pattern to match, such as\n \"DD*/DD*.index\"). If it's the former, they will be loaded in\n order. The latter will be identified with the glob module and then\n sorted.\n parallel : True, False or int\n This parameter governs the behavior when .piter() is called on the\n resultant DatasetSeries object. If this is set to False, the time\n series will not iterate in parallel when .piter() is called. If\n this is set to either True, one processor will be allocated for\n each iteration of the loop. If this is set to an integer, the loop\n will be parallelized over this many workgroups. It the integer\n value is less than the total number of available processors,\n more than one processor will be allocated to a given loop iteration,\n causing the functionality within the loop to be run in parallel.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n mixed_dataset_types : True or False, default False\n Set to True if the DatasetSeries will load different dataset types, set\n to False if loading dataset of a single type as this will result in a\n considerable speed up from not having to figure out the dataset type.\n\n Examples\n --------\n\n >>> ts = DatasetSeries(\n \"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0\")\n >>> for ds in ts:\n ... SlicePlot(ds, \"x\", \"Density\").save()\n ...\n >>> def print_time(ds):\n ... print ds.current_time\n ...\n >>> ts = DatasetSeries(\n ... \"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0\",\n ... setup_function = print_time)\n ...\n >>> for ds in ts:\n ... 
SlicePlot(ds, \"x\", \"Density\").save()\n\n \"\"\"\n def __new__(cls, outputs, *args, **kwargs):\n if isinstance(outputs, string_types):\n outputs = get_filenames_from_glob_pattern(outputs)\n ret = super(DatasetSeries, cls).__new__(cls)\n try:\n ret._pre_outputs = outputs[:]\n except TypeError:\n raise YTOutputNotIdentified(outputs, {})\n return ret\n\n def __init__(self, outputs, parallel = True, setup_function = None,\n mixed_dataset_types = False, **kwargs):\n # This is needed to properly set _pre_outputs for Simulation subclasses.\n self._mixed_dataset_types = mixed_dataset_types\n if iterable(outputs) and not isinstance(outputs, string_types):\n self._pre_outputs = outputs[:]\n self.tasks = AnalysisTaskProxy(self)\n self.params = TimeSeriesParametersContainer(self)\n if setup_function is None:\n setup_function = lambda a: None\n self._setup_function = setup_function\n for type_name in data_object_registry:\n setattr(self, type_name, functools.partial(\n DatasetSeriesObject, self, type_name))\n self.parallel = parallel\n self.kwargs = kwargs\n\n def __iter__(self):\n # We can make this fancier, but this works\n for o in self._pre_outputs:\n if isinstance(o, string_types):\n ds = self._load(o, **self.kwargs)\n self._setup_function(ds)\n yield ds\n else:\n yield o\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n if isinstance(key.start, float):\n return self.get_range(key.start, key.stop)\n # This will return a sliced up object!\n return DatasetSeries(self._pre_outputs[key], self.parallel)\n o = self._pre_outputs[key]\n if isinstance(o, string_types):\n o = self._load(o, **self.kwargs)\n self._setup_function(o)\n return o\n\n def __len__(self):\n return len(self._pre_outputs)\n\n @property\n def outputs(self):\n return self._pre_outputs\n\n def piter(self, storage = None, dynamic = False):\n r\"\"\"Iterate over time series components in parallel.\n\n This allows you to iterate over a time series while dispatching\n individual components of that time series to different processors or\n processor groups. If the parallelism strategy was set to be\n multi-processor (by \"parallel = N\" where N is an integer when the\n DatasetSeries was created) this will issue each dataset to an\n N-processor group. For instance, this would allow you to start a 1024\n processor job, loading up 100 datasets in a time series and creating 8\n processor groups of 128 processors each, each of which would be\n assigned a different dataset. This could be accomplished as shown in\n the examples below. The *storage* option is as seen in\n :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_objects`\n which is a mechanism for storing results of analysis on an individual\n dataset and then combining the results at the end, so that the entire\n set of processors have access to those results.\n\n Note that supplying a *store* changes the iteration mechanism; see\n below.\n\n Parameters\n ----------\n storage : dict\n This is a dictionary, which will be filled with results during the\n course of the iteration. The keys will be the dataset\n indices and the values will be whatever is assigned to the *result*\n attribute on the storage during iteration.\n dynamic : boolean\n This governs whether or not dynamic load balancing will be\n enabled. 
This requires one dedicated processor; if this\n is enabled with a set of 128 processors available, only\n 127 will be available to iterate over objects as one will\n be load balancing the rest.\n\n\n Examples\n --------\n Here is an example of iteration when the results do not need to be\n stored. One processor will be assigned to each dataset.\n\n >>> ts = DatasetSeries(\"DD*/DD*.index\")\n >>> for ds in ts.piter():\n ... SlicePlot(ds, \"x\", \"Density\").save()\n ...\n \n This demonstrates how one might store results:\n\n >>> def print_time(ds):\n ... print ds.current_time\n ...\n >>> ts = DatasetSeries(\"DD*/DD*.index\",\n ... setup_function = print_time )\n ...\n >>> my_storage = {}\n >>> for sto, ds in ts.piter(storage=my_storage):\n ... v, c = ds.find_max(\"density\")\n ... sto.result = (v, c)\n ...\n >>> for i, (v, c) in sorted(my_storage.items()):\n ... print \"% 4i %0.3e\" % (i, v)\n ...\n\n This shows how to dispatch 4 processors to each dataset:\n\n >>> ts = DatasetSeries(\"DD*/DD*.index\",\n ... parallel = 4)\n >>> for ds in ts.piter():\n ... ProjectionPlot(ds, \"x\", \"Density\").save()\n ...\n\n \"\"\"\n if self.parallel is False:\n njobs = 1\n elif dynamic is False:\n if self.parallel is True:\n njobs = -1\n else:\n njobs = self.parallel\n else:\n my_communicator = communication_system.communicators[-1]\n nsize = my_communicator.size\n if nsize == 1:\n self.parallel = False\n dynamic = False\n njobs = 1\n else:\n njobs = nsize - 1\n\n for output in parallel_objects(self._pre_outputs, njobs=njobs,\n storage=storage, dynamic=dynamic):\n if storage is not None:\n sto, output = output\n\n if isinstance(output, string_types):\n ds = self._load(output, **self.kwargs)\n self._setup_function(ds)\n else:\n ds = output\n\n if storage is not None:\n next_ret = (sto, ds)\n else:\n next_ret = ds\n\n yield next_ret\n\n def eval(self, tasks, obj=None):\n tasks = ensure_list(tasks)\n return_values = {}\n for store, ds in self.piter(return_values):\n store.result = []\n for task in tasks:\n try:\n style = inspect.getargspec(task.eval)[0][1]\n if style == 'ds':\n arg = ds\n elif style == 'data_object':\n if obj is None:\n obj = DatasetSeriesObject(self, \"all_data\")\n arg = obj.get(ds)\n rv = task.eval(arg)\n # We catch and store YT-originating exceptions\n # This fixes the standard problem of having a sphere that's too\n # small.\n except YTException:\n pass\n store.result.append(rv)\n return [v for k, v in sorted(return_values.items())]\n\n @classmethod\n def from_filenames(cls, filenames, parallel = True, setup_function = None,\n **kwargs):\n r\"\"\"Create a time series from either a filename pattern or a list of\n filenames.\n\n This method provides an easy way to create a\n :class:`~yt.data_objects.time_series.DatasetSeries`, given a set of\n filenames or a pattern that matches them. Additionally, it can set the\n parallelism strategy.\n\n Parameters\n ----------\n filenames : list or pattern\n This can either be a list of filenames (such as [\"DD0001/DD0001\",\n \"DD0002/DD0002\"]) or a pattern to match, such as\n \"DD*/DD*.index\"). If it's the former, they will be loaded in\n order. The latter will be identified with the glob module and then\n sorted.\n parallel : True, False or int\n This parameter governs the behavior when .piter() is called on the\n resultant DatasetSeries object. If this is set to False, the time\n series will not iterate in parallel when .piter() is called. 
If\n this is set to either True or an integer, it will be iterated with\n 1 or that integer number of processors assigned to each parameter\n file provided to the loop.\n setup_function : callable, accepts a ds\n This function will be called whenever a dataset is loaded.\n\n Examples\n --------\n\n >>> def print_time(ds):\n ... print ds.current_time\n ...\n >>> ts = DatasetSeries.from_filenames(\n ... \"GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0\",\n ... setup_function = print_time)\n ...\n >>> for ds in ts:\n ... SlicePlot(ds, \"x\", \"Density\").save()\n\n \"\"\"\n \n if isinstance(filenames, string_types):\n filenames = get_filenames_from_glob_pattern(filenames)\n\n # This will crash with a less informative error if filenames is not\n # iterable, but the plural keyword should give users a clue...\n for fn in filenames:\n if not isinstance(fn, string_types):\n raise YTOutputNotIdentified(\"DataSeries accepts a list of \"\n \"strings, but \"\n \"received {0}\".format(fn))\n obj = cls(filenames[:], parallel = parallel,\n setup_function = setup_function, **kwargs)\n return obj\n\n @classmethod\n def from_output_log(cls, output_log,\n line_prefix = \"DATASET WRITTEN\",\n parallel = True):\n filenames = []\n for line in open(output_log):\n if not line.startswith(line_prefix): continue\n cut_line = line[len(line_prefix):].strip()\n fn = cut_line.split()[0]\n filenames.append(fn)\n obj = cls(filenames, parallel = parallel)\n return obj\n\n _dataset_cls = None\n def _load(self, output_fn, **kwargs):\n if self._dataset_cls is not None:\n return self._dataset_cls(output_fn, **kwargs)\n elif self._mixed_dataset_types:\n return load(output_fn, **kwargs)\n ds = load(output_fn, **kwargs)\n self._dataset_cls = ds.__class__\n return ds\n\n def particle_trajectories(self, indices, fields=None, suppress_logging=False, ptype=None):\n r\"\"\"Create a collection of particle trajectories in time over a series of\n datasets.\n\n Parameters\n ----------\n indices : array_like\n An integer array of particle indices whose trajectories we\n want to track. If they are not sorted they will be sorted.\n fields : list of strings, optional\n A set of fields that is retrieved when the trajectory\n collection is instantiated. Default: None (will default\n to the fields 'particle_position_x', 'particle_position_y',\n 'particle_position_z')\n suppress_logging : boolean\n Suppress yt's logging when iterating over the simulation time\n series. Default: False\n ptype : str, optional\n Only use this particle type. 
Default: None, which uses all particle type.\n\n Examples\n --------\n >>> my_fns = glob.glob(\"orbit_hdf5_chk_00[0-9][0-9]\")\n >>> my_fns.sort()\n >>> fields = [\"particle_position_x\", \"particle_position_y\",\n >>> \"particle_position_z\", \"particle_velocity_x\",\n >>> \"particle_velocity_y\", \"particle_velocity_z\"]\n >>> ds = load(my_fns[0])\n >>> init_sphere = ds.sphere(ds.domain_center, (.5, \"unitary\"))\n >>> indices = init_sphere[\"particle_index\"].astype(\"int\")\n >>> ts = DatasetSeries(my_fns)\n >>> trajs = ts.particle_trajectories(indices, fields=fields)\n >>> for t in trajs :\n >>> print t[\"particle_velocity_x\"].max(), t[\"particle_velocity_x\"].min()\n\n Note\n ----\n This function will fail if there are duplicate particle ids or if some of the particle\n disappear.\n \"\"\"\n return ParticleTrajectories(self, indices, fields=fields, suppress_logging=suppress_logging,\n ptype=ptype)\n\nclass TimeSeriesQuantitiesContainer(object):\n def __init__(self, data_object, quantities):\n self.data_object = data_object\n self.quantities = quantities\n\n def __getitem__(self, key):\n if key not in self.quantities: raise KeyError(key)\n q = self.quantities[key]\n def run_quantity_wrapper(quantity, quantity_name):\n @wraps(derived_quantity_registry[quantity_name][1])\n def run_quantity(*args, **kwargs):\n to_run = quantity(*args, **kwargs)\n return self.data_object.eval(to_run)\n return run_quantity\n return run_quantity_wrapper(q, key)\n\nclass DatasetSeriesObject(object):\n def __init__(self, time_series, data_object_name, *args, **kwargs):\n self.time_series = weakref.proxy(time_series)\n self.data_object_name = data_object_name\n self._args = args\n self._kwargs = kwargs\n qs = dict([(qn, create_quantity_proxy(qv)) for qn, qv in derived_quantity_registry.items()])\n self.quantities = TimeSeriesQuantitiesContainer(self, qs)\n\n def eval(self, tasks):\n return self.time_series.eval(tasks, self)\n\n def get(self, ds):\n # We get the type name, which corresponds to an attribute of the\n # index\n cls = getattr(ds, self.data_object_name)\n return cls(*self._args, **self._kwargs)\n\nclass RegisteredSimulationTimeSeries(type):\n def __init__(cls, name, b, d):\n type.__init__(cls, name, b, d)\n code_name = name[:name.find('Simulation')]\n if code_name:\n simulation_time_series_registry[code_name] = cls\n mylog.debug(\"Registering simulation: %s as %s\", code_name, cls)\n\n@add_metaclass(RegisteredSimulationTimeSeries)\nclass SimulationTimeSeries(DatasetSeries):\n def __init__(self, parameter_filename, find_outputs=False):\n \"\"\"\n Base class for generating simulation time series types.\n Principally consists of a *parameter_filename*.\n \"\"\"\n\n if not os.path.exists(parameter_filename):\n raise IOError(parameter_filename)\n self.parameter_filename = parameter_filename\n self.basename = os.path.basename(parameter_filename)\n self.directory = os.path.dirname(parameter_filename)\n self.parameters = {}\n self.key_parameters = []\n\n # Set some parameter defaults.\n self._set_parameter_defaults()\n # Read the simulation dataset.\n self._parse_parameter_file()\n # Set units\n self._set_units()\n # Figure out the starting and stopping times and redshift.\n self._calculate_simulation_bounds()\n # Get all possible datasets.\n self._get_all_outputs(find_outputs=find_outputs)\n \n self.print_key_parameters()\n\n def _set_parameter_defaults(self):\n pass\n\n def _parse_parameter_file(self):\n pass\n\n def _set_units(self):\n pass\n\n def _calculate_simulation_bounds(self):\n pass\n\n def 
_get_all_outputs(**kwargs):\n pass\n \n def __repr__(self):\n return self.parameter_filename\n\n _arr = None\n @property\n def arr(self):\n if self._arr is not None:\n return self._arr\n self._arr = functools.partial(YTArray, registry = self.unit_registry)\n return self._arr\n \n _quan = None\n @property\n def quan(self):\n if self._quan is not None:\n return self._quan\n self._quan = functools.partial(YTQuantity,\n registry = self.unit_registry)\n return self._quan\n \n @parallel_root_only\n def print_key_parameters(self):\n \"\"\"\n Print out some key parameters for the simulation.\n \"\"\"\n if self.simulation_type == \"grid\":\n for a in [\"domain_dimensions\", \"domain_left_edge\",\n \"domain_right_edge\"]:\n self._print_attr(a)\n for a in [\"initial_time\", \"final_time\",\n \"cosmological_simulation\"]:\n self._print_attr(a)\n if getattr(self, \"cosmological_simulation\", False):\n for a in [\"box_size\", \"omega_matter\", \"omega_lambda\",\n \"omega_radiation\", \"hubble_constant\",\n \"initial_redshift\", \"final_redshift\"]:\n self._print_attr(a)\n for a in self.key_parameters:\n self._print_attr(a)\n mylog.info(\"Total datasets: %d.\" % len(self.all_outputs))\n\n def _print_attr(self, a):\n \"\"\"\n Print the attribute or warn about it missing.\n \"\"\"\n if not hasattr(self, a):\n mylog.error(\"Missing %s in dataset definition!\", a)\n return\n v = getattr(self, a)\n mylog.info(\"Parameters: %-25s = %s\", a, v)\n\n def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):\n r\"\"\"\n Get datasets at or near to given values.\n\n Parameters\n ----------\n key: str\n The key by which to retrieve outputs, usually 'time' or\n 'redshift'.\n values: array_like\n A list of values, given as floats.\n tolerance : float\n If not None, do not return a dataset unless the value is\n within the tolerance value. If None, simply return the\n nearest dataset.\n Default: None.\n outputs : list\n The list of outputs from which to choose. If None,\n self.all_outputs is used.\n Default: None.\n\n Examples\n --------\n >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)\n\n \"\"\"\n\n if not isinstance(values, YTArray):\n if isinstance(values, tuple) and len(values) == 2:\n values = self.arr(*values)\n else:\n values = self.arr(values)\n values = values.in_base()\n\n if outputs is None:\n outputs = self.all_outputs\n my_outputs = []\n if not outputs:\n return my_outputs\n for value in values:\n outputs.sort(key=lambda obj:np.abs(value - obj[key]))\n if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \\\n and outputs[0] not in my_outputs:\n my_outputs.append(outputs[0])\n else:\n mylog.error(\"No dataset added for %s = %f.\", key, value)\n\n outputs.sort(key=lambda obj: obj['time'])\n return my_outputs\n" ]
[ [ "numpy.log2", "numpy.isnan", "numpy.ones", "numpy.array", "numpy.zeros" ], [ "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
stiphyMT/plantcv
[ "b51b545f3aef0742a2d250c75cc998ba6c9e57b2", "f38f7de53663522eb770870b70823d5fc46d0c0f" ]
[ "plantcv/plantcv/auto_crop.py", "plantcv/plantcv/fill_holes.py" ]
[ "# Resize image\n\nimport os\nimport cv2\nimport numpy as np\nfrom plantcv.plantcv._debug import _debug\nfrom plantcv.plantcv import params\nfrom plantcv.plantcv import fatal_error\n\n\ndef auto_crop(img, obj, padding_x=0, padding_y=0, color='black'):\n \"\"\"\n Resize image.\n\n Inputs:\n img = RGB or grayscale image data\n obj = contours\n padding_x = integer or tuple to add padding the x direction\n padding_y = integer or tuple to add padding the y direction\n color = either 'black', 'white', or 'image'\n\n Returns:\n cropped = cropped image\n\n :param img: numpy.ndarray\n :param obj: list\n :param padding_x: int\n :param padding_y: int\n :param color: str\n :return cropped: numpy.ndarray\n \"\"\"\n\n params.device += 1\n img_copy = np.copy(img)\n img_copy2 = np.copy(img)\n\n # Get the height and width of the reference image\n height, width = np.shape(img)[:2]\n\n x, y, w, h = cv2.boundingRect(obj)\n cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 5)\n\n crop_img = img[y:y + h, x:x + w]\n\n if type(padding_x) is int and type(padding_y) is int:\n offsetx_left = int(np.rint(padding_x))\n offsetx_right = int(np.rint(padding_x))\n offsety_top = int(np.rint(padding_y))\n offsety_bottom = int(np.rint(padding_y))\n\n elif type(padding_x) is tuple and type(padding_y) is tuple:\n offsetx_left = padding_x[0]\n offsetx_right = padding_x[1]\n offsety_top = padding_y[0]\n offsety_bottom = padding_y[1]\n\n else:\n fatal_error('Both padding_x and padding_x parameters must be either int or tuple.')\n\n if color.upper() == 'BLACK':\n colorval = (0, 0, 0)\n cropped = cv2.copyMakeBorder(crop_img, offsety_top, offsety_bottom, offsetx_left,\n offsetx_right, cv2.BORDER_CONSTANT, value=colorval)\n elif color.upper() == 'WHITE':\n colorval = (255, 255, 255)\n cropped = cv2.copyMakeBorder(crop_img, offsety_top, offsety_bottom, offsetx_left,\n offsetx_right, cv2.BORDER_CONSTANT, value=colorval)\n elif color.upper() == 'IMAGE':\n # Check whether the ROI is correctly bounded inside the image\n if x - offsetx_right < 0 or y - offsety_top < 0 or x + w + offsetx_right > width or y + h + offsety_bottom > height:\n cropped = img_copy2[y:y + h, x:x + w]\n else:\n # If padding is the image, crop the image with a buffer rather than cropping and adding a buffer\n cropped = img_copy2[y - offsety_top:y + h + offsety_bottom, x - offsetx_left:x + w + offsetx_right]\n else:\n fatal_error('Color was provided but ' + str(color) + ' is not \"white\", \"black\", or \"image\"!')\n\n if len(np.shape(img_copy)) == 3:\n cmap = None\n else:\n cmap = 'gray'\n\n _debug(visual=img_copy,\n filename=os.path.join(params.debug_outdir, str(params.device) + \"_crop_area.png\"),\n cmap=cmap)\n _debug(visual=cropped,\n filename=os.path.join(params.debug_outdir, str(params.device) + \"_auto_cropped.png\"),\n cmap=cmap)\n\n return cropped\n", "# Fill in holes, flood fill\n\nimport numpy as np\nimport os\nfrom plantcv.plantcv._debug import _debug\nfrom plantcv.plantcv import fatal_error\nfrom plantcv.plantcv import params\nfrom scipy.ndimage.morphology import binary_fill_holes\n\n\ndef fill_holes(bin_img):\n \"\"\"\n Flood fills holes in a binary mask\n\n Inputs:\n bin_img = Binary image data\n\n Returns:\n filtered_img = image with objects filled\n\n :param bin_img: numpy.ndarray\n :return filtered_img: numpy.ndarray\n \"\"\"\n\n # Make sure the image is binary\n if len(np.shape(bin_img)) != 2 or len(np.unique(bin_img)) != 2:\n fatal_error(\"Image is not binary\")\n\n # Cast binary image to boolean\n bool_img = 
bin_img.astype(bool)\n\n # Flood fill holes\n bool_img = binary_fill_holes(bool_img)\n\n # Cast boolean image to binary and make a copy of the binary image for returning\n filtered_img = np.copy(bool_img.astype(np.uint8) * 255)\n\n _debug(visual=filtered_img,\n filename=os.path.join(params.debug_outdir, str(params.device) + '_fill_holes' + '.png'),\n cmap='gray')\n\n return filtered_img\n" ]
[ [ "numpy.copy", "numpy.rint", "numpy.shape" ], [ "numpy.shape", "scipy.ndimage.morphology.binary_fill_holes", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
Yurlungur/FLRW
[ "15424d2304e44d0e38110b655c5c28a6aeb34147" ]
[ "plot_all_variables_big_a.py" ]
[ "#!/usr/bin/env python2\n\n# Author: Jonah Miller ([email protected])\n# Time-stamp: <2013-12-14 16:50:20 (jonah)>\n\n# This is a companion program to my FLRW simulator. It takes a data\n# file and generates a plot of the scale factor, its derivative, the\n# density, and the pressure of the matter.\n# Call the program with\n# python2 plot_all_variables.py filename.dat\n\n# Imports\n# ----------------------------------------------------------------------\nimport numpy as np\nimport scipy as sp\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport sys, os\n# ----------------------------------------------------------------------\n\n\n# Parameters for plots\n# ----------------------------------------------------------------------\nxlabel = \"Cosmological time. (geometrized units)\"\nmy_linewidth = 5\nfontsize = 20\na_rescaling = 7000\n# ----------------------------------------------------------------------\n\n\ndef load_data(filename):\n \"\"\"\n Takes a file name as a string and extracts the simulation data\n from it. Returns a tuple of arrays:\n (times,a_values,b_values,rho_values,p_values)\n \"\"\"\n with open(filename,'r') as f:\n data = np.loadtxt(filename).transpose()\n times = data[0]\n a_values = a_rescaling * data[1]\n a_offset = a_values[0]\n a_values -= a_offset\n rho_values = data[2]\n p_values = data[3]\n return times,a_values,rho_values,p_values,a_offset\n\ndef plot_data(times,a_values,rho_values,p_values,a_offset,filename):\n \"\"\"\n Takes the times,a_values,b_values,rho_values, and p_values\n and makes a nice plot out of them. Takes labels, etc. into account.\n \"\"\"\n mpl.rcParams.update({'font.size': fontsize})\n lines = [plt.plot(times,y_set,linewidth=my_linewidth)\n for y_set in [a_values,rho_values,p_values]]\n plt.legend([\"{}a - {}\".format(a_rescaling,a_offset),r'$\\rho$',\"p\"],loc=9)\n plt.xlabel(xlabel)\n title_list = filename.split('.')[0].split('_')\n title_list[1] = \"universe:\"\n title = reduce(lambda x,y: \"{} {}\".format(x,y),title_list)\n plt.title(title)\n plt.show()\n return\n\ndef plot_file(filename):\n \"Plots the data in a file.\"\n times,a_values,rho_values,p_values,a_offset = load_data(filename)\n plot_data(times,a_values,rho_values,p_values,a_offset,filename)\n return\n\nif __name__ == \"__main__\":\n for filename in sys.argv[1:]:\n plot_file(filename)\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.rcParams.update", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MaxXSoft/ZexGP
[ "c01d68d134990c0d18f30f12d93855ba5ffcbe29" ]
[ "examples/funcfit/__main__.py" ]
[ "'''\nAn example of fitting a function with ZexGP.\n'''\n\nfrom zexgp.kernel import Kernel\nfrom os import path\nfrom sys import float_info as fi\nfrom matplotlib import pyplot as plt\n\n\n# some necessary global variables\nfunc_val = []\ndomain = []\n\n\ndef init_func_val():\n '''\n Initialize function value.\n '''\n def func(x): return 1 / (1 + 25 * x ** 2)\n for i in range(100):\n x = (i - 50) / 50\n func_val.append(func(x))\n domain.append(x)\n\n\ndef get_int(i):\n '''\n Get a function that returns a specific integer.\n '''\n return lambda: i\n\n\ndef pow(x, y):\n try:\n return float(x ** y)\n except (OverflowError, ZeroDivisionError):\n return fi.max\n except TypeError:\n return float('nan')\n\n\ndef fitness(tree):\n '''\n Fitness function.\n '''\n sum = 0\n for i in range(100):\n x = (i - 50) / 50\n ans = tree.eval(x)\n exp = func_val[i]\n sum += abs(ans - exp)\n return 1 / (sum + 1) if sum == sum else 0\n\n\n# create kernel and load settings from disk\nk = Kernel()\nk.load_conf(path.dirname(__file__) + '/config.json')\nk.conf['maxRuns'] = 3\n\n# add functions\nk.add('+', func=lambda x, y: x + y)\nk.add('-', func=lambda x, y: x - y)\nk.add('*', func=lambda x, y: x * y)\nk.add('/', func=lambda x, y: x / y if y else fi.max)\nk.add('^', func=pow)\n\n# add terminals\nk.add('x', arg_index=0)\nfor i in range(1, 4):\n k.add(str(i), func=get_int(i))\n\n# set fitness function\nk.set_fitness(fitness)\n\n# run & print results\ninit_func_val()\nresults = k.run(jobs=4)\nfor i in results:\n print(i)\n\n# draw plots\nplt.figure()\nplt.subplot(221)\nplt.plot(domain, func_val, c='g')\nif results[0]:\n plt.subplot(222)\n plt.plot(domain, [results[0].eval(x) for x in domain])\nif results[1]:\n plt.subplot(223)\n plt.plot(domain, [results[1].eval(x) for x in domain])\nif results[2]:\n plt.subplot(224)\n plt.plot(domain, [results[2].eval(x) for x in domain])\nplt.suptitle('Results')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
algebra2k/terrier
[ "8b6f4b0b0c30dc94411f197e610f634ce0ab5b0b", "8b6f4b0b0c30dc94411f197e610f634ce0ab5b0b" ]
[ "script/model/model.py", "script/model/data_class/grouped_op_unit_data.py" ]
[ "#!/usr/bin/env python3\n\nimport numpy as np\n\nimport lightgbm as lgb\n\nfrom sklearn import linear_model\nfrom sklearn import kernel_ridge\nfrom sklearn import ensemble\nfrom sklearn import preprocessing\nfrom sklearn import neural_network\nfrom sklearn import multioutput\n\n# import warnings filter\nfrom warnings import simplefilter\n\n# ignore all future warnings\nsimplefilter(action='ignore', category=FutureWarning)\n\n_LOGTRANS_EPS = 1e-4\n\n\ndef _get_base_ml_model(method):\n regressor = None\n if method == 'lr':\n regressor = linear_model.LinearRegression()\n if method == 'huber':\n regressor = linear_model.HuberRegressor(max_iter=100)\n regressor = multioutput.MultiOutputRegressor(regressor)\n if method == 'kr':\n regressor = kernel_ridge.KernelRidge(kernel='rbf')\n if method == 'rf':\n regressor = ensemble.RandomForestRegressor(n_estimators=50, n_jobs=8)\n if method == 'gbm':\n regressor = lgb.LGBMRegressor(max_depth=20, num_leaves=5000, n_estimators=100, min_child_samples=5,\n random_state=42)\n regressor = multioutput.MultiOutputRegressor(regressor)\n if method == 'nn':\n regressor = neural_network.MLPRegressor(hidden_layer_sizes=(25, 25), early_stopping=True,\n max_iter=1000000, alpha=0.01)\n\n return regressor\n\n\nclass Model:\n \"\"\"\n The class that wraps around standard ML libraries.\n With the implementation for different normalization handlings\n \"\"\"\n\n def __init__(self, method, normalize=True, log_transform=True, modeling_transformer=None):\n \"\"\"\n\n :param method: which ML method to use\n :param normalize: whether to perform standard normalization on data (both x and y)\n :param log_transform: whether to perform log transformation on data (both x and y)\n :param modeling_transformer: the customized data transformer (a pair of functions with the first for training\n and second for predict)\n \"\"\"\n self._base_model = _get_base_ml_model(method)\n self._normalize = normalize\n self._log_transform = log_transform\n self._xscaler = preprocessing.StandardScaler()\n self._yscaler = preprocessing.StandardScaler()\n self._modeling_transformer = modeling_transformer\n\n def train(self, x, y):\n if self._modeling_transformer is not None:\n y = self._modeling_transformer[0](x, y)\n\n if self._log_transform:\n x = np.log(x + _LOGTRANS_EPS)\n y = np.log(y + _LOGTRANS_EPS)\n\n if self._normalize:\n x = self._xscaler.fit_transform(x)\n y = self._yscaler.fit_transform(y)\n\n self._base_model.fit(x, y)\n\n def predict(self, x):\n original_x = x\n\n # transform the features\n if self._log_transform:\n x = np.log(x + _LOGTRANS_EPS)\n if self._normalize:\n x = self._xscaler.transform(x)\n\n # make prediction\n y = self._base_model.predict(x)\n\n # transform the y back\n if self._normalize:\n y = self._yscaler.inverse_transform(y)\n if self._log_transform == 1:\n y = np.exp(y) - _LOGTRANS_EPS\n y = np.clip(y, 0, None)\n\n if self._modeling_transformer is not None:\n y = self._modeling_transformer[1](original_x, y)\n\n return y\n", "import csv\nimport numpy as np\nimport copy\nimport tqdm\n\nfrom info import data_info, query_info\nimport global_model_config\n\nfrom type import Target, ConcurrentCountingMode\n\n\ndef get_grouped_op_unit_data(filename):\n \"\"\"Get the training data from the global model\n\n :param filename: the input data file\n :return: the list of global model data\n \"\"\"\n\n if \"txn\" in filename:\n # Cannot handle the transaction manager data yet\n return []\n if \"execution\" in filename:\n # Special handle of the execution data\n return 
_execution_get_grouped_op_unit_data(filename)\n\n return _default_get_global_data(filename)\n\n\ndef _execution_get_grouped_op_unit_data(filename):\n # Get the global running data for the execution engine\n data_list = []\n with open(filename, \"r\") as f:\n reader = csv.reader(f, delimiter=\",\", skipinitialspace=True)\n next(reader)\n for line in tqdm.tqdm(reader):\n # The first element is always the query/pipeline identifier\n identifier = line[0]\n if identifier in query_info.FEATURE_MAP:\n # Need to deep copy since we're going to add the execution mode after it\n opunit_features = copy.deepcopy(query_info.FEATURE_MAP[identifier])\n # Execution mode is the second element for now...\n mode = int(line[1])\n for opunit_feature in opunit_features:\n opunit_feature[1].append(mode)\n line_data = list(map(int, line[2:]))\n data_list.append(GroupedOpUnitData(line[0], opunit_features, np.array(line_data)))\n\n return data_list\n\n\ndef _default_get_global_data(filename):\n # In the default case, the data does not need any pre-processing and the file name indicates the opunit\n return []\n\n\nclass GroupedOpUnitData:\n \"\"\"\n The class that stores the information about a group of operating units measured together\n \"\"\"\n def __init__(self, name, opunit_features, metrics):\n \"\"\"\n :param name: The name of the data point (e.g., could be the pipeline identifier)\n :param opunit_features: The list of opunits and their inputs for this event\n :param metrics: The runtime metrics\n \"\"\"\n self.name = name\n self.opunit_features = opunit_features\n self.y = metrics[-data_info.MINI_MODEL_TARGET_NUM:]\n self.y_pred = None\n index_map = data_info.TARGET_CSV_INDEX\n self.start_time = metrics[index_map[Target.START_TIME]]\n self.end_time = self.start_time + self.y[index_map[Target.ELAPSED_US]] - 1\n self.cpu_id = metrics[index_map[Target.CPU_ID]]\n\n def get_start_time(self, concurrent_counting_mode):\n \"\"\"Get the start time for this group for counting the concurrent operations\n\n :param concurrent_counting_mode: ConcurrentCountingMode type\n :return: the start time\n \"\"\"\n start_time = None\n if concurrent_counting_mode is ConcurrentCountingMode.EXACT:\n start_time = self.start_time\n if concurrent_counting_mode is ConcurrentCountingMode.ESTIMATED:\n start_time = self.start_time\n if concurrent_counting_mode is ConcurrentCountingMode.INTERVAL:\n start_time = self.start_time + global_model_config.INTERVAL_START\n return start_time\n\n def get_end_time(self, concurrent_counting_mode):\n \"\"\"Get the end time for this group for counting the concurrent operations\n\n :param concurrent_counting_mode: ConcurrentCountingMode type\n :return: the end time\n \"\"\"\n end_time = None\n if concurrent_counting_mode is ConcurrentCountingMode.EXACT:\n end_time = self.end_time\n if concurrent_counting_mode is ConcurrentCountingMode.ESTIMATED:\n end_time = self.start_time + self.y_pred[data_info.TARGET_CSV_INDEX[Target.ELAPSED_US]] - 1\n if concurrent_counting_mode is ConcurrentCountingMode.INTERVAL:\n end_time = self.start_time + global_model_config.INTERVAL_START + global_model_config.INTERVAL_SIZE\n return end_time\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "numpy.log", "sklearn.linear_model.HuberRegressor", "numpy.clip", "sklearn.kernel_ridge.KernelRidge", "sklearn.linear_model.LinearRegression", "sklearn.multioutput.MultiOutputRegressor", "sklearn.preprocessing.StandardScaler", "numpy.exp", "sklearn.neural_network.MLPRegressor" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shivammaniharsahu/django_api
[ "6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797", "6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797", "6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797" ]
[ "Lib/site-packages/tensorflow_core/python/ops/gen_functional_ops.py", "Lib/site-packages/tensorflow_core/_api/v1/compat/v1/experimental/__init__.py", "Lib/site-packages/tensorflow_core/_api/v1/ragged/__init__.py" ]
[ "\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\n\"\"\"\n\nimport collections as _collections\nimport six as _six\n\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import errors as _errors\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\nfrom tensorflow.python.util import dispatch as _dispatch\nfrom tensorflow.python.util.tf_export import tf_export\nfrom tensorflow.python.util.tf_export import kwarg_only as _kwarg_only\nfrom tensorflow.tools.docs import doc_controls as _doc_controls\n\n\ndef case(branch_index, input, Tout, branches, output_shapes=[], name=None):\n r\"\"\"An n-way switch statement which calls a single branch function.\n\n An n-way switch statement, implementing the following:\n ```\n switch (branch_index) {\n case 0:\n output = branches[0](input);\n break;\n case 1:\n output = branches[1](input);\n break;\n ...\n case [[nbranches-1]]:\n default:\n output = branches[nbranches-1](input);\n break;\n }\n ```\n\n Args:\n branch_index: A `Tensor` of type `int32`.\n The branch selector, an int32 Tensor.\n input: A list of `Tensor` objects.\n A list of input tensors passed to the branch function.\n Tout: A list of `tf.DTypes`. A list of output types.\n branches: A list of functions decorated with @Defun that has length `>= 1`.\n A list of functions each of which takes 'inputs' and returns a list of\n tensors, whose types are the same as what every other branch returns.\n output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). 
Defaults to `[]`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name, \"Case\",\n name, _ctx._post_execution_callbacks, branch_index, input, \"Tout\",\n Tout, \"branches\", branches, \"output_shapes\", output_shapes)\n return _result\n except _core._FallbackException:\n try:\n return case_eager_fallback(\n branch_index, input, Tout=Tout, branches=branches,\n output_shapes=output_shapes, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'case' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if not isinstance(branches, (list, tuple)):\n raise TypeError(\n \"Expected list for 'branches' argument to \"\n \"'case' Op, not %r.\" % branches)\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'case' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"Case\", branch_index=branch_index, input=input, Tout=Tout,\n branches=branches, output_shapes=output_shapes, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"),\n \"branches\", _op.get_attr(\"branches\"), \"output_shapes\",\n _op.get_attr(\"output_shapes\"))\n _execute.record_gradient(\n \"Case\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef Case(branch_index, input, Tout, branches, output_shapes=[], name=None):\n return case(branch_index=branch_index, input=input, Tout=Tout, branches=branches, output_shapes=output_shapes, name=name)\nCase.__doc__ = case.__doc__\nCase = _doc_controls.do_not_generate_docs(_kwarg_only(Case))\ntf_export(\"raw_ops.Case\")(Case)\n\n\ndef case_eager_fallback(branch_index, input, Tout, branches, output_shapes=[], name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function case\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'case' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if not isinstance(branches, (list, tuple)):\n raise TypeError(\n \"Expected list for 'branches' argument to \"\n \"'case' Op, not %r.\" % branches)\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'case' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n branch_index = _ops.convert_to_tensor(branch_index, 
_dtypes.int32)\n _inputs_flat = [branch_index] + list(input)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"branches\", branches,\n \"output_shapes\", output_shapes)\n _result = _execute.execute(b\"Case\", len(Tout), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"Case\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef fake_param(dtype, shape, name=None):\n r\"\"\" This op is used as a placeholder in If branch functions. It doesn't provide a\n valid output when run, so must either be removed (e.g. replaced with a\n function input) or guaranteed not to be used (e.g. if mirroring an\n intermediate output needed for the gradient computation of the other branch).\n\n Args:\n dtype: A `tf.DType`. The type of the output.\n shape: A `tf.TensorShape` or list of `ints`.\n The purported shape of the output. This is only used for shape inference;\n the output will not necessarily have this shape. Can be a partial shape.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `dtype`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"FakeParam\", name, _ctx._post_execution_callbacks, \"dtype\", dtype,\n \"shape\", shape)\n return _result\n except _core._FallbackException:\n try:\n return fake_param_eager_fallback(\n dtype=dtype, shape=shape, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n dtype = _execute.make_type(dtype, \"dtype\")\n shape = _execute.make_shape(shape, \"shape\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"FakeParam\", dtype=dtype, shape=shape, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"dtype\", _op._get_attr_type(\"dtype\"), \"shape\",\n _op.get_attr(\"shape\"))\n _execute.record_gradient(\n \"FakeParam\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\ndef FakeParam(dtype, shape, name=None):\n return fake_param(dtype=dtype, shape=shape, name=name)\nFakeParam.__doc__ = fake_param.__doc__\nFakeParam = _doc_controls.do_not_generate_docs(_kwarg_only(FakeParam))\ntf_export(\"raw_ops.FakeParam\")(FakeParam)\n\n\ndef fake_param_eager_fallback(dtype, shape, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function fake_param\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n dtype = _execute.make_type(dtype, \"dtype\")\n shape = _execute.make_shape(shape, \"shape\")\n _inputs_flat = []\n _attrs = (\"dtype\", dtype, \"shape\", shape)\n _result = _execute.execute(b\"FakeParam\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"FakeParam\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef _for(start, limit, delta, input, body, name=None):\n r\"\"\" ```python\n output = input;\n for i in range(start, limit, delta)\n output = body(i, output);\n ```\n\n Args:\n start: A `Tensor` of type `int32`. The lower bound. An int32\n limit: A `Tensor` of type `int32`. The upper bound. An int32\n delta: A `Tensor` of type `int32`. The increment. 
An int32\n input: A list of `Tensor` objects.\n A list of input tensors whose types are T.\n body: A function decorated with @Defun.\n A function that takes a list of tensors (int32, T) and returns another\n list of tensors (T).\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects. Has the same type as `input`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name, \"For\",\n name, _ctx._post_execution_callbacks, start, limit, delta, input,\n \"body\", body)\n return _result\n except _core._FallbackException:\n try:\n return _for_eager_fallback(\n start, limit, delta, input, body=body, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n _, _, _op = _op_def_lib._apply_op_helper(\n \"For\", start=start, limit=limit, delta=delta, input=input, body=body,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"body\", _op.get_attr(\"body\"))\n _execute.record_gradient(\n \"For\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef For(start, limit, delta, input, body, name=None):\n return _for(start=start, limit=limit, delta=delta, input=input, body=body, name=name)\nFor.__doc__ = _for.__doc__\nFor = _doc_controls.do_not_generate_docs(_kwarg_only(For))\ntf_export(\"raw_ops.For\")(For)\n\n\ndef _for_eager_fallback(start, limit, delta, input, body, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function _for\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n start = _ops.convert_to_tensor(start, _dtypes.int32)\n limit = _ops.convert_to_tensor(limit, _dtypes.int32)\n delta = _ops.convert_to_tensor(delta, _dtypes.int32)\n _inputs_flat = [start, limit, delta] + list(input)\n _attrs = (\"T\", _attr_T, \"body\", body)\n _result = _execute.execute(b\"For\", len(input), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"For\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef _if(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None):\n r\"\"\"output = cond ? then_branch(input) : else_branch(input)\n\n Args:\n cond: A `Tensor`.\n A Tensor. If the tensor is a scalar of non-boolean type, the\n scalar is converted to a boolean according to the\n following rule: if the scalar is a numerical value, non-zero means\n `True` and zero means False; if the scalar is a string, non-empty\n means `True` and empty means `False`. If the tensor is not a scalar,\n being empty means False and being non-empty means True.\n input: A list of `Tensor` objects. A list of input tensors.\n Tout: A list of `tf.DTypes`. 
A list of output types.\n then_branch: A function decorated with @Defun.\n A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns.\n else_branch: A function decorated with @Defun.\n A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what then_branch returns.\n output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name, \"If\", name,\n _ctx._post_execution_callbacks, cond, input, \"Tout\", Tout,\n \"then_branch\", then_branch, \"else_branch\", else_branch,\n \"output_shapes\", output_shapes)\n return _result\n except _core._FallbackException:\n try:\n return _if_eager_fallback(\n cond, input, Tout=Tout, then_branch=then_branch,\n else_branch=else_branch, output_shapes=output_shapes, name=name,\n ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'if' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'if' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"If\", cond=cond, input=input, Tout=Tout, then_branch=then_branch,\n else_branch=else_branch, output_shapes=output_shapes, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"Tcond\", _op._get_attr_type(\"Tcond\"), \"Tin\", _op.get_attr(\"Tin\"),\n \"Tout\", _op.get_attr(\"Tout\"), \"then_branch\",\n _op.get_attr(\"then_branch\"), \"else_branch\",\n _op.get_attr(\"else_branch\"), \"output_shapes\",\n _op.get_attr(\"output_shapes\"))\n _execute.record_gradient(\n \"If\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef If(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None):\n return _if(cond=cond, input=input, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes, name=name)\nIf.__doc__ = _if.__doc__\nIf = _doc_controls.do_not_generate_docs(_kwarg_only(If))\ntf_export(\"raw_ops.If\")(If)\n\n\ndef _if_eager_fallback(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function _if\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'if' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise 
TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'if' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], _ctx)\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = [cond] + list(input)\n _attrs = (\"Tcond\", _attr_Tcond, \"Tin\", _attr_Tin, \"Tout\", Tout,\n \"then_branch\", then_branch, \"else_branch\", else_branch, \"output_shapes\",\n output_shapes)\n _result = _execute.execute(b\"If\", len(Tout), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"If\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef partitioned_call(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None):\n r\"\"\"returns `f(inputs)`, where `f`'s body is placed and partitioned.\n\n Args:\n args: A list of `Tensor` objects. A list of input tensors.\n Tout: A list of `tf.DTypes`. A list of output types.\n f: A function decorated with @Defun.\n A function that takes 'args', a list of tensors, and returns 'output',\n another list of tensors. Input and output types are specified by 'Tin'\n and 'Tout'. The function body of f will be placed and partitioned across\n devices, setting this op apart from the regular Call op.\n config: An optional `string`. Defaults to `\"\"`.\n config_proto: An optional `string`. Defaults to `\"\"`.\n executor_type: An optional `string`. Defaults to `\"\"`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"PartitionedCall\", name, _ctx._post_execution_callbacks, args, \"Tout\",\n Tout, \"f\", f, \"config\", config, \"config_proto\", config_proto,\n \"executor_type\", executor_type)\n return _result\n except _core._FallbackException:\n try:\n return partitioned_call_eager_fallback(\n args, Tout=Tout, f=f, config=config, config_proto=config_proto,\n executor_type=executor_type, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'partitioned_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if config is None:\n config = \"\"\n config = _execute.make_str(config, \"config\")\n if config_proto is None:\n config_proto = \"\"\n config_proto = _execute.make_str(config_proto, \"config_proto\")\n if executor_type is None:\n executor_type = \"\"\n executor_type = _execute.make_str(executor_type, \"executor_type\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"PartitionedCall\", args=args, Tout=Tout, f=f, config=config,\n config_proto=config_proto,\n executor_type=executor_type, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"), \"f\",\n _op.get_attr(\"f\"), \"config\", _op.get_attr(\"config\"),\n \"config_proto\", 
_op.get_attr(\"config_proto\"), \"executor_type\",\n _op.get_attr(\"executor_type\"))\n _execute.record_gradient(\n \"PartitionedCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef PartitionedCall(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None):\n return partitioned_call(args=args, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type, name=name)\nPartitionedCall.__doc__ = partitioned_call.__doc__\nPartitionedCall = _doc_controls.do_not_generate_docs(_kwarg_only(PartitionedCall))\ntf_export(\"raw_ops.PartitionedCall\")(PartitionedCall)\n\n\ndef partitioned_call_eager_fallback(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function partitioned_call\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'partitioned_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if config is None:\n config = \"\"\n config = _execute.make_str(config, \"config\")\n if config_proto is None:\n config_proto = \"\"\n config_proto = _execute.make_str(config_proto, \"config_proto\")\n if executor_type is None:\n executor_type = \"\"\n executor_type = _execute.make_str(executor_type, \"executor_type\")\n _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)\n _inputs_flat = list(args)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"f\", f, \"config\", config,\n \"config_proto\", config_proto, \"executor_type\", executor_type)\n _result = _execute.execute(b\"PartitionedCall\", len(Tout),\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"PartitionedCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef remote_call(target, args, Tout, f, name=None):\n r\"\"\"Runs function `f` on a remote device indicated by `target`.\n\n Args:\n target: A `Tensor` of type `string`.\n A fully specified device name where we want to run the function.\n args: A list of `Tensor` objects. A list of arguments for the function.\n Tout: A list of `tf.DTypes` that has length `>= 1`.\n The type list for the return values.\n f: A function decorated with @Defun. 
The function to run remotely.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"RemoteCall\", name, _ctx._post_execution_callbacks, target, args,\n \"Tout\", Tout, \"f\", f)\n return _result\n except _core._FallbackException:\n try:\n return remote_call_eager_fallback(\n target, args, Tout=Tout, f=f, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'remote_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"RemoteCall\", target=target, args=args, Tout=Tout, f=f, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"), \"f\",\n _op.get_attr(\"f\"))\n _execute.record_gradient(\n \"RemoteCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef RemoteCall(target, args, Tout, f, name=None):\n return remote_call(target=target, args=args, Tout=Tout, f=f, name=name)\nRemoteCall.__doc__ = remote_call.__doc__\nRemoteCall = _doc_controls.do_not_generate_docs(_kwarg_only(RemoteCall))\ntf_export(\"raw_ops.RemoteCall\")(RemoteCall)\n\n\ndef remote_call_eager_fallback(target, args, Tout, f, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function remote_call\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'remote_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)\n target = _ops.convert_to_tensor(target, _dtypes.string)\n _inputs_flat = [target] + list(args)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"f\", f)\n _result = _execute.execute(b\"RemoteCall\", len(Tout), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"RemoteCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef stateful_partitioned_call(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None):\n r\"\"\"returns `f(inputs)`, where `f`'s body is placed and partitioned.\n\n Args:\n args: A list of `Tensor` objects. A list of input tensors.\n Tout: A list of `tf.DTypes`. A list of output types.\n f: A function decorated with @Defun.\n A function that takes 'args', a list of tensors, and returns 'output',\n another list of tensors. Input and output types are specified by 'Tin'\n and 'Tout'. The function body of f will be placed and partitioned across\n devices, setting this op apart from the regular Call op. This op is\n stateful.\n config: An optional `string`. Defaults to `\"\"`.\n config_proto: An optional `string`. Defaults to `\"\"`.\n executor_type: An optional `string`. 
Defaults to `\"\"`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"StatefulPartitionedCall\", name, _ctx._post_execution_callbacks, args,\n \"Tout\", Tout, \"f\", f, \"config\", config, \"config_proto\", config_proto,\n \"executor_type\", executor_type)\n return _result\n except _core._FallbackException:\n try:\n return stateful_partitioned_call_eager_fallback(\n args, Tout=Tout, f=f, config=config, config_proto=config_proto,\n executor_type=executor_type, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'stateful_partitioned_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if config is None:\n config = \"\"\n config = _execute.make_str(config, \"config\")\n if config_proto is None:\n config_proto = \"\"\n config_proto = _execute.make_str(config_proto, \"config_proto\")\n if executor_type is None:\n executor_type = \"\"\n executor_type = _execute.make_str(executor_type, \"executor_type\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatefulPartitionedCall\", args=args, Tout=Tout, f=f, config=config,\n config_proto=config_proto,\n executor_type=executor_type, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"), \"f\",\n _op.get_attr(\"f\"), \"config\", _op.get_attr(\"config\"),\n \"config_proto\", _op.get_attr(\"config_proto\"), \"executor_type\",\n _op.get_attr(\"executor_type\"))\n _execute.record_gradient(\n \"StatefulPartitionedCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef StatefulPartitionedCall(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None):\n return stateful_partitioned_call(args=args, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type, name=name)\nStatefulPartitionedCall.__doc__ = stateful_partitioned_call.__doc__\nStatefulPartitionedCall = _doc_controls.do_not_generate_docs(_kwarg_only(StatefulPartitionedCall))\ntf_export(\"raw_ops.StatefulPartitionedCall\")(StatefulPartitionedCall)\n\n\ndef stateful_partitioned_call_eager_fallback(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateful_partitioned_call\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'stateful_partitioned_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if config is None:\n config = \"\"\n config = _execute.make_str(config, \"config\")\n if config_proto is None:\n config_proto = \"\"\n config_proto = _execute.make_str(config_proto, \"config_proto\")\n if executor_type is None:\n executor_type = \"\"\n 
executor_type = _execute.make_str(executor_type, \"executor_type\")\n _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)\n _inputs_flat = list(args)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"f\", f, \"config\", config,\n \"config_proto\", config_proto, \"executor_type\", executor_type)\n _result = _execute.execute(b\"StatefulPartitionedCall\", len(Tout),\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"StatefulPartitionedCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef stateless_if(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None):\n r\"\"\"output = cond ? then_branch(input) : else_branch(input)\n\n Args:\n cond: A `Tensor`.\n A Tensor. If the tensor is a scalar of non-boolean type, the\n scalar is converted to a boolean according to the\n following rule: if the scalar is a numerical value, non-zero means\n `True` and zero means False; if the scalar is a string, non-empty\n means `True` and empty means `False`. If the tensor is not a scalar,\n being empty means False and being non-empty means True.\n\n This should only be used when the if then/else body functions do not\n have stateful ops.\n input: A list of `Tensor` objects. A list of input tensors.\n Tout: A list of `tf.DTypes`. A list of output types.\n then_branch: A function decorated with @Defun.\n A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns.\n else_branch: A function decorated with @Defun.\n A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what then_branch returns.\n output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"StatelessIf\", name, _ctx._post_execution_callbacks, cond, input,\n \"Tout\", Tout, \"then_branch\", then_branch, \"else_branch\", else_branch,\n \"output_shapes\", output_shapes)\n return _result\n except _core._FallbackException:\n try:\n return stateless_if_eager_fallback(\n cond, input, Tout=Tout, then_branch=then_branch,\n else_branch=else_branch, output_shapes=output_shapes, name=name,\n ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'stateless_if' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'stateless_if' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatelessIf\", cond=cond, input=input, Tout=Tout,\n then_branch=then_branch, else_branch=else_branch,\n 
output_shapes=output_shapes, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"Tcond\", _op._get_attr_type(\"Tcond\"), \"Tin\", _op.get_attr(\"Tin\"),\n \"Tout\", _op.get_attr(\"Tout\"), \"then_branch\",\n _op.get_attr(\"then_branch\"), \"else_branch\",\n _op.get_attr(\"else_branch\"), \"output_shapes\",\n _op.get_attr(\"output_shapes\"))\n _execute.record_gradient(\n \"StatelessIf\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef StatelessIf(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None):\n return stateless_if(cond=cond, input=input, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes, name=name)\nStatelessIf.__doc__ = stateless_if.__doc__\nStatelessIf = _doc_controls.do_not_generate_docs(_kwarg_only(StatelessIf))\ntf_export(\"raw_ops.StatelessIf\")(StatelessIf)\n\n\ndef stateless_if_eager_fallback(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateless_if\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'stateless_if' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'stateless_if' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], _ctx)\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = [cond] + list(input)\n _attrs = (\"Tcond\", _attr_Tcond, \"Tin\", _attr_Tin, \"Tout\", Tout,\n \"then_branch\", then_branch, \"else_branch\", else_branch, \"output_shapes\",\n output_shapes)\n _result = _execute.execute(b\"StatelessIf\", len(Tout), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"StatelessIf\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef stateless_while(input, cond, body, name=None):\n r\"\"\"output = input; While (Cond(output)) { output = Body(output) }\n\n Args:\n input: A list of `Tensor` objects.\n A list of input tensors whose types are T.\n cond: A function decorated with @Defun.\n A function takes 'input' and returns a tensor. If the tensor is\n a scalar of non-boolean, the scalar is converted to a boolean\n according to the following rule: if the scalar is a numerical\n value, non-zero means True and zero means False; if the scalar is\n a string, non-empty means True and empty means False. If the\n tensor is not a scalar, non-emptiness means True and False\n otherwise.\n\n This should only be used when the while condition and body functions\n do not have stateful ops.\n body: A function decorated with @Defun.\n A function that takes a list of tensors and returns another\n list of tensors. Both lists have the same types as specified\n by T.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects. 
Has the same type as `input`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"StatelessWhile\", name, _ctx._post_execution_callbacks, input, \"cond\",\n cond, \"body\", body)\n return _result\n except _core._FallbackException:\n try:\n return stateless_while_eager_fallback(\n input, cond=cond, body=body, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatelessWhile\", input=input, cond=cond, body=body, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"cond\", _op.get_attr(\"cond\"), \"body\",\n _op.get_attr(\"body\"))\n _execute.record_gradient(\n \"StatelessWhile\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef StatelessWhile(input, cond, body, name=None):\n return stateless_while(input=input, cond=cond, body=body, name=name)\nStatelessWhile.__doc__ = stateless_while.__doc__\nStatelessWhile = _doc_controls.do_not_generate_docs(_kwarg_only(StatelessWhile))\ntf_export(\"raw_ops.StatelessWhile\")(StatelessWhile)\n\n\ndef stateless_while_eager_fallback(input, cond, body, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateless_while\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = list(input)\n _attrs = (\"T\", _attr_T, \"cond\", cond, \"body\", body)\n _result = _execute.execute(b\"StatelessWhile\", len(input),\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"StatelessWhile\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef symbolic_gradient(input, Tout, f, name=None):\n r\"\"\"Computes the gradient function for function f via backpropagation.\n\n Args:\n input: A list of `Tensor` objects. a list of input tensors of size N + M;\n Tout: A list of `tf.DTypes` that has length `>= 1`.\n the type list for the input list.\n f: A function decorated with @Defun.\n The function we want to compute the gradient for.\n\n The function 'f' must be a numerical function which takes N inputs and\n produces M outputs. Its gradient function 'g', which is computed by\n this SymbolicGradient op is a function taking N + M inputs and\n produces N outputs.\n\n I.e. if we have\n (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),\n then, g is\n (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,\n dL/dy1, dL/dy2, ..., dL/dy_M),\n\n where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the\n loss function). 
dL/dx_i is the partial derivative of L with respect\n to x_i.\n\n (Needs some math expert to say the comment above better.)\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"SymbolicGradient\", name, _ctx._post_execution_callbacks, input,\n \"Tout\", Tout, \"f\", f)\n return _result\n except _core._FallbackException:\n try:\n return symbolic_gradient_eager_fallback(\n input, Tout=Tout, f=f, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'symbolic_gradient' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"SymbolicGradient\", input=input, Tout=Tout, f=f, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"), \"f\",\n _op.get_attr(\"f\"))\n _execute.record_gradient(\n \"SymbolicGradient\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef SymbolicGradient(input, Tout, f, name=None):\n return symbolic_gradient(input=input, Tout=Tout, f=f, name=name)\nSymbolicGradient.__doc__ = symbolic_gradient.__doc__\nSymbolicGradient = _doc_controls.do_not_generate_docs(_kwarg_only(SymbolicGradient))\ntf_export(\"raw_ops.SymbolicGradient\")(SymbolicGradient)\n\n\ndef symbolic_gradient_eager_fallback(input, Tout, f, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function symbolic_gradient\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'symbolic_gradient' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = list(input)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"f\", f)\n _result = _execute.execute(b\"SymbolicGradient\", len(Tout),\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"SymbolicGradient\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef _while(input, cond, body, output_shapes=[], parallel_iterations=10, name=None):\n r\"\"\"output = input; While (Cond(output)) { output = Body(output) }\n\n Args:\n input: A list of `Tensor` objects.\n A list of input tensors whose types are T.\n cond: A function decorated with @Defun.\n A function takes 'input' and returns a tensor. If the tensor is\n a scalar of non-boolean, the scalar is converted to a boolean\n according to the following rule: if the scalar is a numerical\n value, non-zero means True and zero means False; if the scalar is\n a string, non-empty means True and empty means False. 
If the\n tensor is not a scalar, non-emptiness means True and False\n otherwise.\n body: A function decorated with @Defun.\n A function that takes a list of tensors and returns another\n list of tensors. Both lists have the same types as specified\n by T.\n output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.\n parallel_iterations: An optional `int`. Defaults to `10`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects. Has the same type as `input`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name, \"While\",\n name, _ctx._post_execution_callbacks, input, \"cond\", cond, \"body\",\n body, \"output_shapes\", output_shapes, \"parallel_iterations\",\n parallel_iterations)\n return _result\n except _core._FallbackException:\n try:\n return _while_eager_fallback(\n input, cond=cond, body=body, output_shapes=output_shapes,\n parallel_iterations=parallel_iterations, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'while' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n if parallel_iterations is None:\n parallel_iterations = 10\n parallel_iterations = _execute.make_int(parallel_iterations, \"parallel_iterations\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"While\", input=input, cond=cond, body=body,\n output_shapes=output_shapes,\n parallel_iterations=parallel_iterations, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"cond\", _op.get_attr(\"cond\"), \"body\",\n _op.get_attr(\"body\"), \"output_shapes\",\n _op.get_attr(\"output_shapes\"), \"parallel_iterations\",\n _op.get_attr(\"parallel_iterations\"))\n _execute.record_gradient(\n \"While\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef While(input, cond, body, output_shapes=[], parallel_iterations=10, name=None):\n return _while(input=input, cond=cond, body=body, output_shapes=output_shapes, parallel_iterations=parallel_iterations, name=name)\nWhile.__doc__ = _while.__doc__\nWhile = _doc_controls.do_not_generate_docs(_kwarg_only(While))\ntf_export(\"raw_ops.While\")(While)\n\n\ndef _while_eager_fallback(input, cond, body, output_shapes=[], parallel_iterations=10, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function _while\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'while' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n if parallel_iterations is None:\n parallel_iterations = 10\n parallel_iterations = 
_execute.make_int(parallel_iterations, \"parallel_iterations\")\n _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = list(input)\n _attrs = (\"T\", _attr_T, \"cond\", cond, \"body\", body, \"output_shapes\",\n output_shapes, \"parallel_iterations\", parallel_iterations)\n _result = _execute.execute(b\"While\", len(input), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"While\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"Case\"\n# input_arg {\n# name: \"branch_index\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"branches\"\n# type: \"list(func)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"output_shapes\"\n# type: \"list(shape)\"\n# default_value {\n# list {\n# }\n# }\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"FakeParam\"\n# output_arg {\n# name: \"output\"\n# type_attr: \"dtype\"\n# }\n# attr {\n# name: \"dtype\"\n# type: \"type\"\n# }\n# attr {\n# name: \"shape\"\n# type: \"shape\"\n# }\n# }\n# op {\n# name: \"For\"\n# input_arg {\n# name: \"start\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"limit\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"delta\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"T\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"body\"\n# type: \"func\"\n# }\n# }\n# op {\n# name: \"If\"\n# input_arg {\n# name: \"cond\"\n# type_attr: \"Tcond\"\n# }\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tcond\"\n# type: \"type\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"then_branch\"\n# type: \"func\"\n# }\n# attr {\n# name: \"else_branch\"\n# type: \"func\"\n# }\n# attr {\n# name: \"output_shapes\"\n# type: \"list(shape)\"\n# default_value {\n# list {\n# }\n# }\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"PartitionedCall\"\n# input_arg {\n# name: \"args\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"f\"\n# type: \"func\"\n# }\n# attr {\n# name: \"config\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# attr {\n# name: \"config_proto\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# attr {\n# name: \"executor_type\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# }\n# op {\n# name: \"RemoteCall\"\n# input_arg {\n# name: \"target\"\n# type: DT_STRING\n# }\n# input_arg {\n# name: \"args\"\n# type_list_attr: \"Tin\"\n# }\n# 
output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"f\"\n# type: \"func\"\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"StatefulPartitionedCall\"\n# input_arg {\n# name: \"args\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"f\"\n# type: \"func\"\n# }\n# attr {\n# name: \"config\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# attr {\n# name: \"config_proto\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# attr {\n# name: \"executor_type\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"StatelessIf\"\n# input_arg {\n# name: \"cond\"\n# type_attr: \"Tcond\"\n# }\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tcond\"\n# type: \"type\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"then_branch\"\n# type: \"func\"\n# }\n# attr {\n# name: \"else_branch\"\n# type: \"func\"\n# }\n# attr {\n# name: \"output_shapes\"\n# type: \"list(shape)\"\n# default_value {\n# list {\n# }\n# }\n# }\n# }\n# op {\n# name: \"StatelessWhile\"\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"T\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"cond\"\n# type: \"func\"\n# }\n# attr {\n# name: \"body\"\n# type: \"func\"\n# }\n# }\n# op {\n# name: \"SymbolicGradient\"\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"f\"\n# type: \"func\"\n# }\n# }\n# op {\n# name: \"While\"\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"T\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"cond\"\n# type: \"func\"\n# }\n# attr {\n# name: \"body\"\n# type: \"func\"\n# }\n# attr {\n# name: \"output_shapes\"\n# type: \"list(shape)\"\n# default_value {\n# list {\n# }\n# }\n# }\n# attr {\n# name: \"parallel_iterations\"\n# type: \"int\"\n# default_value {\n# i: 10\n# }\n# }\n# is_stateful: true\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\242\\001\\n\\004Case\\022\\020\\n\\014branch_index\\030\\003\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\032\\n\\010branches\\022\\nlist(func)(\\0010\\001\\\" 
\\n\\routput_shapes\\022\\013list(shape)\\032\\002\\n\\000\\210\\001\\001\\n;\\n\\tFakeParam\\032\\017\\n\\006output\\\"\\005dtype\\\"\\r\\n\\005dtype\\022\\004type\\\"\\016\\n\\005shape\\022\\005shape\\n`\\n\\003For\\022\\t\\n\\005start\\030\\003\\022\\t\\n\\005limit\\030\\003\\022\\t\\n\\005delta\\030\\003\\022\\n\\n\\005input2\\001T\\032\\013\\n\\006output2\\001T\\\"\\021\\n\\001T\\022\\nlist(type)(\\001\\\"\\014\\n\\004body\\022\\004func\\n\\272\\001\\n\\002If\\022\\r\\n\\004cond\\\"\\005Tcond\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\r\\n\\005Tcond\\022\\004type\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\023\\n\\013then_branch\\022\\004func\\\"\\023\\n\\013else_branch\\022\\004func\\\" \\n\\routput_shapes\\022\\013list(shape)\\032\\002\\n\\000\\210\\001\\001\\n\\263\\001\\n\\017PartitionedCall\\022\\013\\n\\004args2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\t\\n\\001f\\022\\004func\\\"\\024\\n\\006config\\022\\006string\\032\\002\\022\\000\\\"\\032\\n\\014config_proto\\022\\006string\\032\\002\\022\\000\\\"\\033\\n\\rexecutor_type\\022\\006string\\032\\002\\022\\000\\nr\\n\\nRemoteCall\\022\\n\\n\\006target\\030\\007\\022\\013\\n\\004args2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\025\\n\\003Tin\\022\\nlist(type)(\\0010\\001\\\"\\026\\n\\004Tout\\022\\nlist(type)(\\0010\\001\\\"\\t\\n\\001f\\022\\004func\\210\\001\\001\\n\\276\\001\\n\\027StatefulPartitionedCall\\022\\013\\n\\004args2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\t\\n\\001f\\022\\004func\\\"\\024\\n\\006config\\022\\006string\\032\\002\\022\\000\\\"\\032\\n\\014config_proto\\022\\006string\\032\\002\\022\\000\\\"\\033\\n\\rexecutor_type\\022\\006string\\032\\002\\022\\000\\210\\001\\001\\n\\300\\001\\n\\013StatelessIf\\022\\r\\n\\004cond\\\"\\005Tcond\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\r\\n\\005Tcond\\022\\004type\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\023\\n\\013then_branch\\022\\004func\\\"\\023\\n\\013else_branch\\022\\004func\\\" \\n\\routput_shapes\\022\\013list(shape)\\032\\002\\n\\000\\nX\\n\\016StatelessWhile\\022\\n\\n\\005input2\\001T\\032\\013\\n\\006output2\\001T\\\"\\021\\n\\001T\\022\\nlist(type)(\\001\\\"\\014\\n\\004cond\\022\\004func\\\"\\014\\n\\004body\\022\\004func\\nj\\n\\020SymbolicGradient\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\025\\n\\003Tin\\022\\nlist(type)(\\0010\\001\\\"\\026\\n\\004Tout\\022\\nlist(type)(\\0010\\001\\\"\\t\\n\\001f\\022\\004func\\n\\224\\001\\n\\005While\\022\\n\\n\\005input2\\001T\\032\\013\\n\\006output2\\001T\\\"\\021\\n\\001T\\022\\nlist(type)(\\001\\\"\\014\\n\\004cond\\022\\004func\\\"\\014\\n\\004body\\022\\004func\\\" \\n\\routput_shapes\\022\\013list(shape)\\032\\002\\n\\000\\\"\\036\\n\\023parallel_iterations\\022\\003int\\032\\002\\030\\n\\210\\001\\001\")\n", "# This file is MACHINE GENERATED! 
Do not edit.\n# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.\n\"\"\"Public API for tf.experimental namespace.\n\"\"\"\n\nfrom __future__ import print_function as _print_function\n\nimport sys as _sys\n\nfrom tensorflow.python.eager.context import function_executor_type\n\ndel _print_function\n\nfrom tensorflow.python.util import module_wrapper as _module_wrapper\n\nif not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):\n _sys.modules[__name__] = _module_wrapper.TFModuleWrapper(\n _sys.modules[__name__], \"compat.v1.experimental\", public_apis=None, deprecation=False,\n has_lite=False)\n", "# This file is MACHINE GENERATED! Do not edit.\n# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.\n\"\"\"Ragged Tensors.\n\nThis package defines ops for manipulating ragged tensors (`tf.RaggedTensor`),\nwhich are tensors with non-uniform shapes. In particular, each `RaggedTensor`\nhas one or more *ragged dimensions*, which are dimensions whose slices may have\ndifferent lengths. For example, the inner (column) dimension of\n`rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices\n(`rt[0, :]`, ..., `rt[4, :]`) have different lengths. For a more detailed\ndescription of ragged tensors, see the `tf.RaggedTensor` class documentation\nand the [Ragged Tensor Guide](/guide/ragged_tensors).\n\n\n### Additional ops that support `RaggedTensor`\n\nArguments that accept `RaggedTensor`s are marked in **bold**.\n\n* `tf.batch_gather`(**params**, **indices**, name=`None`)\n* `tf.bitwise.bitwise_and`(**x**, **y**, name=`None`)\n* `tf.bitwise.bitwise_or`(**x**, **y**, name=`None`)\n* `tf.bitwise.bitwise_xor`(**x**, **y**, name=`None`)\n* `tf.bitwise.invert`(**x**, name=`None`)\n* `tf.bitwise.left_shift`(**x**, **y**, name=`None`)\n* `tf.bitwise.right_shift`(**x**, **y**, name=`None`)\n* `tf.clip_by_value`(**t**, clip_value_min, clip_value_max, name=`None`)\n* `tf.concat`(**values**, axis, name=`'concat'`)\n* `tf.debugging.check_numerics`(**tensor**, message, name=`None`)\n* `tf.dtypes.cast`(**x**, dtype, name=`None`)\n* `tf.dtypes.complex`(**real**, **imag**, name=`None`)\n* `tf.dtypes.saturate_cast`(**value**, dtype, name=`None`)\n* `tf.dynamic_partition`(**data**, **partitions**, num_partitions, name=`None`)\n* `tf.expand_dims`(**input**, axis=`None`, name=`None`, dim=`None`)\n* `tf.gather_nd`(**params**, **indices**, name=`None`, batch_dims=`0`)\n* `tf.gather`(**params**, **indices**, validate_indices=`None`, name=`None`, axis=`None`, batch_dims=`0`)\n* `tf.identity`(**input**, name=`None`)\n* `tf.io.decode_base64`(**input**, name=`None`)\n* `tf.io.decode_compressed`(**bytes**, compression_type=`''`, name=`None`)\n* `tf.io.encode_base64`(**input**, pad=`False`, name=`None`)\n* `tf.math.abs`(**x**, name=`None`)\n* `tf.math.acos`(**x**, name=`None`)\n* `tf.math.acosh`(**x**, name=`None`)\n* `tf.math.add_n`(**inputs**, name=`None`)\n* `tf.math.add`(**x**, **y**, name=`None`)\n* `tf.math.angle`(**input**, name=`None`)\n* `tf.math.asin`(**x**, name=`None`)\n* `tf.math.asinh`(**x**, name=`None`)\n* `tf.math.atan2`(**y**, **x**, name=`None`)\n* `tf.math.atan`(**x**, name=`None`)\n* `tf.math.atanh`(**x**, name=`None`)\n* `tf.math.ceil`(**x**, name=`None`)\n* `tf.math.conj`(**x**, name=`None`)\n* `tf.math.cos`(**x**, name=`None`)\n* `tf.math.cosh`(**x**, name=`None`)\n* `tf.math.digamma`(**x**, name=`None`)\n* `tf.math.divide_no_nan`(**x**, **y**, name=`None`)\n* `tf.math.divide`(**x**, **y**, name=`None`)\n* 
`tf.math.equal`(**x**, **y**, name=`None`)\n* `tf.math.erf`(**x**, name=`None`)\n* `tf.math.erfc`(**x**, name=`None`)\n* `tf.math.exp`(**x**, name=`None`)\n* `tf.math.expm1`(**x**, name=`None`)\n* `tf.math.floor`(**x**, name=`None`)\n* `tf.math.floordiv`(**x**, **y**, name=`None`)\n* `tf.math.floormod`(**x**, **y**, name=`None`)\n* `tf.math.greater_equal`(**x**, **y**, name=`None`)\n* `tf.math.greater`(**x**, **y**, name=`None`)\n* `tf.math.imag`(**input**, name=`None`)\n* `tf.math.is_finite`(**x**, name=`None`)\n* `tf.math.is_inf`(**x**, name=`None`)\n* `tf.math.is_nan`(**x**, name=`None`)\n* `tf.math.less_equal`(**x**, **y**, name=`None`)\n* `tf.math.less`(**x**, **y**, name=`None`)\n* `tf.math.lgamma`(**x**, name=`None`)\n* `tf.math.log1p`(**x**, name=`None`)\n* `tf.math.log_sigmoid`(**x**, name=`None`)\n* `tf.math.log`(**x**, name=`None`)\n* `tf.math.logical_and`(**x**, **y**, name=`None`)\n* `tf.math.logical_not`(**x**, name=`None`)\n* `tf.math.logical_or`(**x**, **y**, name=`None`)\n* `tf.math.logical_xor`(**x**, **y**, name=`'LogicalXor'`)\n* `tf.math.maximum`(**x**, **y**, name=`None`)\n* `tf.math.minimum`(**x**, **y**, name=`None`)\n* `tf.math.multiply`(**x**, **y**, name=`None`)\n* `tf.math.negative`(**x**, name=`None`)\n* `tf.math.not_equal`(**x**, **y**, name=`None`)\n* `tf.math.pow`(**x**, **y**, name=`None`)\n* `tf.math.real`(**input**, name=`None`)\n* `tf.math.reciprocal`(**x**, name=`None`)\n* `tf.math.reduce_any`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_max`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_mean`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_min`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_prod`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.reduce_sum`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.math.rint`(**x**, name=`None`)\n* `tf.math.round`(**x**, name=`None`)\n* `tf.math.rsqrt`(**x**, name=`None`)\n* `tf.math.sign`(**x**, name=`None`)\n* `tf.math.sin`(**x**, name=`None`)\n* `tf.math.sinh`(**x**, name=`None`)\n* `tf.math.sqrt`(**x**, name=`None`)\n* `tf.math.square`(**x**, name=`None`)\n* `tf.math.squared_difference`(**x**, **y**, name=`None`)\n* `tf.math.subtract`(**x**, **y**, name=`None`)\n* `tf.math.tan`(**x**, name=`None`)\n* `tf.math.truediv`(**x**, **y**, name=`None`)\n* `tf.math.unsorted_segment_max`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_mean`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_min`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_prod`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_sqrt_n`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.math.unsorted_segment_sum`(**data**, **segment_ids**, num_segments, name=`None`)\n* `tf.ones_like`(**tensor**, dtype=`None`, name=`None`, optimize=`True`)\n* `tf.rank`(**input**, name=`None`)\n* `tf.realdiv`(**x**, **y**, name=`None`)\n* `tf.reduce_all`(**input_tensor**, axis=`None`, keepdims=`False`, name=`None`)\n* `tf.size`(**input**, name=`None`, out_type=`tf.int32`)\n* `tf.squeeze`(**input**, axis=`None`, name=`None`, squeeze_dims=`None`)\n* `tf.stack`(**values**, axis=`0`, name=`'stack'`)\n* `tf.strings.as_string`(**input**, precision=`-1`, scientific=`False`, shortest=`False`, width=`-1`, fill=`''`, name=`None`)\n* 
`tf.strings.join`(**inputs**, separator=`''`, name=`None`)\n* `tf.strings.length`(**input**, name=`None`, unit=`'BYTE'`)\n* `tf.strings.reduce_join`(**inputs**, axis=`None`, keepdims=`False`, separator=`''`, name=`None`)\n* `tf.strings.regex_full_match`(**input**, pattern, name=`None`)\n* `tf.strings.regex_replace`(**input**, pattern, rewrite, replace_global=`True`, name=`None`)\n* `tf.strings.strip`(**input**, name=`None`)\n* `tf.strings.substr`(**input**, pos, len, name=`None`, unit=`'BYTE'`)\n* `tf.strings.to_hash_bucket_fast`(**input**, num_buckets, name=`None`)\n* `tf.strings.to_hash_bucket_strong`(**input**, num_buckets, key, name=`None`)\n* `tf.strings.to_hash_bucket`(**input**, num_buckets, name=`None`)\n* `tf.strings.to_hash_bucket`(**input**, num_buckets, name=`None`)\n* `tf.strings.to_number`(**input**, out_type=`tf.float32`, name=`None`)\n* `tf.strings.unicode_script`(**input**, name=`None`)\n* `tf.tile`(**input**, multiples, name=`None`)\n* `tf.truncatediv`(**x**, **y**, name=`None`)\n* `tf.truncatemod`(**x**, **y**, name=`None`)\n* `tf.where`(**condition**, **x**=`None`, **y**=`None`, name=`None`)\n* `tf.zeros_like`(**tensor**, dtype=`None`, name=`None`, optimize=`True`)n\n\"\"\"\n\nfrom __future__ import print_function as _print_function\n\nimport sys as _sys\n\nfrom tensorflow.python.ops.ragged.ragged_array_ops import boolean_mask\nfrom tensorflow.python.ops.ragged.ragged_array_ops import stack_dynamic_partitions\nfrom tensorflow.python.ops.ragged.ragged_concat_ops import stack\nfrom tensorflow.python.ops.ragged.ragged_factory_ops import constant\nfrom tensorflow.python.ops.ragged.ragged_factory_ops import constant_value\nfrom tensorflow.python.ops.ragged.ragged_factory_ops import placeholder\nfrom tensorflow.python.ops.ragged.ragged_functional_ops import map_flat_values\nfrom tensorflow.python.ops.ragged.ragged_math_ops import range\nfrom tensorflow.python.ops.ragged.ragged_tensor_value import RaggedTensorValue\nfrom tensorflow.python.ops.ragged.segment_id_ops import row_splits_to_segment_ids\nfrom tensorflow.python.ops.ragged.segment_id_ops import segment_ids_to_row_splits\n\ndel _print_function\n\nfrom tensorflow.python.util import module_wrapper as _module_wrapper\n\nif not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):\n _sys.modules[__name__] = _module_wrapper.TFModuleWrapper(\n _sys.modules[__name__], \"ragged\", public_apis=None, deprecation=True,\n has_lite=False)\n" ]
[ [ "tensorflow.python.eager.execute.convert_to_mixed_eager_tensors", "tensorflow.python.eager.execute.args_to_matching_eager", "tensorflow.python.eager.execute.make_type", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.eager.execute.record_gradient", "tensorflow.python.eager.execute.make_str", "tensorflow.python.eager.core._status_to_exception", "tensorflow.python.eager.execute.make_int", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.eager.execute.make_shape", "tensorflow.python.framework.op_def_registry.register_op_list", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.eager.context.context", "tensorflow.python.util.tf_export.kwarg_only", "tensorflow.python.eager.execute.execute" ], [ "tensorflow.python.util.module_wrapper.TFModuleWrapper" ], [ "tensorflow.python.util.module_wrapper.TFModuleWrapper" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
linusidom/rl-workshop
[ "dbb2ca8b9a5330042a30655ee64c3a4be241d692" ]
[ "solutions/networks.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.autograd.variable import Variable\n\nclass QNetwork(nn.Module):\n def __init__(self, state_size, action_size, nb_hidden, seed=1412):\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.model = nn.Sequential(\n nn.Linear(state_size, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5)\n nn.Linear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n nn.Linear(nb_hidden, action_size)\n )\n def forward(self, state):\n x = self.model(state)\n return(x)\n\nclass DuelingNetwork(nn.Module):\n def __init__(self, state_size, action_size, nb_hidden, seed=1412):\n super(DuelingNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n \n self.feature = nn.Sequential(\n nn.Linear(state_size, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5)\n )\n \n self.advantage = nn.Sequential(\n nn.Linear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n nn.Linear(nb_hidden, action_size)\n )\n \n self.value = nn.Sequential(\n nn.Linear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n nn.Linear(nb_hidden, 1)\n )\n\n def forward(self, state):\n x = self.feature(state)\n adv = self.advantage(x)\n val = self.value(x)\n result = adv + val - adv.mean() \n return(result)\n\n\nclass NoisyLinear(nn.Module):\n def __init__(self, in_features, out_features, std_init=0.1):\n super(NoisyLinear, self).__init__()\n \n self.in_features = in_features\n self.out_features = out_features\n self.std_init = std_init\n \n self.W_mu = nn.Parameter(torch.FloatTensor(out_features, in_features))\n self.W_sigma = nn.Parameter(torch.FloatTensor(out_features, in_features))\n self.register_buffer('W_epsilon', torch.FloatTensor(out_features, in_features))\n \n self.b_mu = nn.Parameter(torch.FloatTensor(out_features))\n self.b_sigma = nn.Parameter(torch.FloatTensor(out_features))\n self.register_buffer('b_epsilon', torch.FloatTensor(out_features))\n \n self.init_parameters()\n self.reset_noise()\n \n def forward(self, x):\n if self.training: \n W = self.W_mu + self.W_sigma * Variable(self.W_epsilon)\n b = self.b_mu + self.b_sigma * Variable(self.b_epsilon)\n else:\n W = self.W_mu\n b = self.b_mu\n result = F.linear(x, W, b)\n return(result)\n \n def init_parameters(self):\n mu_range = 1 / self.in_features**(1/2)\n \n self.W_mu.data.uniform_(-mu_range, mu_range)\n self.W_sigma.data.fill_(self.std_init / (self.in_features)**(1/2))\n \n self.b_mu.data.uniform_(-mu_range, mu_range)\n self.b_sigma.data.fill_(self.std_init / (self.in_features)**(1/2))\n \n def reset_noise(self):\n epsilon_in = self.f_noise(self.in_features)\n epsilon_out = self.f_noise(self.out_features)\n \n self.W_epsilon.copy_(epsilon_out.ger(epsilon_in))\n self.b_epsilon.copy_(epsilon_out)\n \n def f_noise(self, size):\n x = torch.randn(size)\n x = x.sign() * (x.abs().sqrt())\n return(x)\n \nclass NoisyDuelingNetwork(nn.Module):\n def __init__(self, state_size, action_size, nb_hidden, seed=1412):\n super(NoisyDuelingNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.feature = nn.Sequential(\n nn.Linear(state_size, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5)\n )\n \n self.advantage = nn.Sequential(\n NoisyLinear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n NoisyLinear(nb_hidden, action_size)\n )\n 
\n self.value = nn.Sequential(\n NoisyLinear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n NoisyLinear(nb_hidden, 1)\n )\n\n def forward(self, state):\n x = self.feature(state)\n adv = self.advantage(x)\n val = self.value(x)\n result = adv + val - adv.mean() \n return(result)\n \n def reset_noise(self):\n# self._modules['feature'][0].reset_noise()\n self._modules['advantage'][0].reset_noise()\n self._modules['advantage'][2].reset_noise()\n self._modules['value'][0].reset_noise()\n self._modules['value'][2].reset_noise()" ]
[ [ "torch.manual_seed", "torch.randn", "torch.autograd.variable.Variable", "torch.nn.Linear", "torch.FloatTensor", "torch.nn.ReLU", "torch.nn.functional.linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ky-zhang/utils
[ "f1c9d2580db5ef0f0291ae77312b3d538f292a12" ]
[ "plot/plot_folder.py" ]
[ "import os\nimport numpy\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport argparse\nfrom PIL import Image\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-input', default = '', help = 'input file folder')\nparser.add_argument('-label', default = '', help = 'label file')\nparser.add_argument('-output', default = '', help = 'output file folder')\nparser.add_argument('-range', default = '', help = 'axis range')\n\nargs = parser.parse_args()\n\nlabel = []\nif args.label != '':\n for line in open(args.label):\n label.append(line.strip())\n\nfiles = os.listdir(args.input)\nfor file in files:\n filepath = os.path.join(\"%s%s\" %(args.input, file))\n out_png_path = os.path.join(\"%s%s\" %(args.output, file))\n out_png_path = out_png_path.replace(\"txt\", \"png\")\n N = M = 0\n all_data = {}\n for i, line in enumerate(open(filepath)):\n vec = line.strip().split(' ')\n if i == 0:\n N = int(vec[0])\n M = int(vec[1])\n elif i <= N:\n if args.label == '':\n label.append(0)\n all_data.setdefault(label[i-1], []).append((float(vec[-2]), float(vec[-1])))\n\n colors = plt.cm.rainbow(numpy.linspace(0, 1, len(all_data)))\n\n for color, ll in zip(colors, sorted(all_data.keys())):\n x = [t[0] for t in all_data[ll]]\n y = [t[1] for t in all_data[ll]]\n plt.plot(x, y, '.', color = color, markersize = 1)\n if args.range != '':\n l = abs(float(args.range))\n plt.xlim(-l, l)\n plt.ylim(-l, l)\n # 坐标轴\n plt.axis('off')\n plt.savefig(out_png_path, dpi = 300)\n plt.close(\"all\")\n\n # 背景透明\n # img = Image.open(out_png_path)\n # img = img.convert(\"RGBA\")\n # datas = img.getdata()\n # newData = list()\n # for item in datas:\n # if item[0] >220 and item[1] > 220 and item[2] > 220:\n # newData.append(( 255, 255, 255, 0))\n # else:\n # newData.append(item)\n \n # img.putdata(newData)\n # img.save(out_png_path,\"PNG\")\n\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.close", "matplotlib.pyplot.axis" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
algteam/spacy_zh_model
[ "0b0cba1a3964aa426e5f96087849c90e69e2a89d", "0b0cba1a3964aa426e5f96087849c90e69e2a89d" ]
[ "examples/keras_parikh_entailment/spacy_hook.py", "examples/vectors_tensorboard_standalone.py" ]
[ "import numpy as np\r\nfrom keras.models import model_from_json\r\n\r\ntry:\r\n import cPickle as pickle\r\nexcept ImportError:\r\n import pickle\r\n\r\n\r\nclass KerasSimilarityShim(object):\r\n entailment_types = [\"entailment\", \"contradiction\", \"neutral\"]\r\n\r\n @classmethod\r\n def load(cls, path, nlp, max_length=100, get_features=None):\r\n \r\n if get_features is None:\r\n get_features = get_word_ids\r\n \r\n with (path / 'config.json').open() as file_:\r\n model = model_from_json(file_.read())\r\n with (path / 'model').open('rb') as file_:\r\n weights = pickle.load(file_)\r\n \r\n embeddings = get_embeddings(nlp.vocab)\r\n weights.insert(1, embeddings)\r\n model.set_weights(weights)\r\n\r\n return cls(model, get_features=get_features, max_length=max_length)\r\n\r\n def __init__(self, model, get_features=None, max_length=100):\r\n self.model = model\r\n self.get_features = get_features\r\n self.max_length = max_length\r\n\r\n def __call__(self, doc):\r\n doc.user_hooks['similarity'] = self.predict\r\n doc.user_span_hooks['similarity'] = self.predict\r\n\r\n return doc\r\n\r\n def predict(self, doc1, doc2):\r\n x1 = self.get_features([doc1], max_length=self.max_length)\r\n x2 = self.get_features([doc2], max_length=self.max_length)\r\n scores = self.model.predict([x1, x2])\r\n\r\n return self.entailment_types[scores.argmax()], scores.max()\r\n\r\n\r\ndef get_embeddings(vocab, nr_unk=100):\r\n # the extra +1 is for a zero vector representing sentence-final padding\r\n num_vectors = max(lex.rank for lex in vocab) + 2 \r\n \r\n # create random vectors for OOV tokens\r\n oov = np.random.normal(size=(nr_unk, vocab.vectors_length))\r\n oov = oov / oov.sum(axis=1, keepdims=True)\r\n \r\n vectors = np.zeros((num_vectors + nr_unk, vocab.vectors_length), dtype='float32')\r\n vectors[1:(nr_unk + 1), ] = oov\r\n for lex in vocab:\r\n if lex.has_vector and lex.vector_norm > 0:\r\n vectors[nr_unk + lex.rank + 1] = lex.vector / lex.vector_norm \r\n\r\n return vectors\r\n\r\n\r\ndef get_word_ids(docs, max_length=100, nr_unk=100):\r\n Xs = np.zeros((len(docs), max_length), dtype='int32')\r\n \r\n for i, doc in enumerate(docs):\r\n for j, token in enumerate(doc):\r\n if j == max_length:\r\n break\r\n if token.has_vector:\r\n Xs[i, j] = token.rank + nr_unk + 1\r\n else:\r\n Xs[i, j] = token.rank % nr_unk + 1\r\n return Xs\r\n", "#!/usr/bin/env python\r\n# coding: utf8\r\n\"\"\"Export spaCy model vectors for use in TensorBoard's standalone embedding projector.\r\nhttps://github.com/tensorflow/embedding-projector-standalone\r\n\r\nUsage:\r\n\r\n python vectors_tensorboard_standalone.py ./myVectorModel ./output [name]\r\n\r\nThis outputs two files that have to be copied into the \"oss_data\" of the standalone projector:\r\n\r\n [name]_labels.tsv - metadata such as human readable labels for vectors\r\n [name]_tensors.bytes - numpy.ndarray of numpy.float32 precision vectors\r\n\r\n\"\"\"\r\nfrom __future__ import unicode_literals\r\n\r\nimport json\r\nimport math\r\nfrom os import path\r\n\r\nimport numpy\r\nimport plac\r\nimport spacy\r\nimport tqdm\r\n\r\n\r\[email protected](\r\n vectors_loc=(\"Path to spaCy model that contains vectors\", \"positional\", None, str),\r\n out_loc=(\"Path to output folder writing tensors and labels data\", \"positional\", None, str),\r\n name=(\"Human readable name for tsv file and vectors tensor\", \"positional\", None, str),\r\n)\r\ndef main(vectors_loc, out_loc, name=\"spaCy_vectors\"):\r\n # A tab-separated file that contains information about the vectors for 
visualization\r\n #\r\n # Learn more: https://www.tensorflow.org/programmers_guide/embedding#metadata\r\n meta_file = \"{}_labels.tsv\".format(name)\r\n out_meta_file = path.join(out_loc, meta_file)\r\n\r\n print('Loading spaCy vectors model: {}'.format(vectors_loc))\r\n model = spacy.load(vectors_loc)\r\n\r\n print('Finding lexemes with vectors attached: {}'.format(vectors_loc))\r\n voacb_strings = [\r\n w for w in tqdm.tqdm(model.vocab.strings, total=len(model.vocab.strings), leave=False)\r\n if model.vocab.has_vector(w)\r\n ]\r\n vector_count = len(voacb_strings)\r\n\r\n print('Building Projector labels for {} vectors: {}'.format(vector_count, out_meta_file))\r\n vector_dimensions = model.vocab.vectors.shape[1]\r\n tf_vectors_variable = numpy.zeros((vector_count, vector_dimensions), dtype=numpy.float32)\r\n\r\n # Write a tab-separated file that contains information about the vectors for visualization\r\n #\r\n # Reference: https://www.tensorflow.org/programmers_guide/embedding#metadata\r\n with open(out_meta_file, 'wb') as file_metadata:\r\n # Define columns in the first row\r\n file_metadata.write(\"Text\\tFrequency\\n\".encode('utf-8'))\r\n # Write out a row for each vector that we add to the tensorflow variable we created\r\n vec_index = 0\r\n\r\n for text in tqdm.tqdm(voacb_strings, total=len(voacb_strings), leave=False):\r\n # https://github.com/tensorflow/tensorflow/issues/9094\r\n text = '<Space>' if text.lstrip() == '' else text\r\n lex = model.vocab[text]\r\n\r\n # Store vector data and metadata\r\n tf_vectors_variable[vec_index] = numpy.float64(model.vocab.get_vector(text))\r\n file_metadata.write(\"{}\\t{}\\n\".format(text, math.exp(lex.prob) * len(voacb_strings)).encode('utf-8'))\r\n vec_index += 1\r\n\r\n # Write out \"[name]_tensors.bytes\" file for standalone embeddings projector to load\r\n tensor_path = '{}_tensors.bytes'.format(name)\r\n tf_vectors_variable.tofile(path.join(out_loc, tensor_path))\r\n\r\n print('Done.')\r\n print('Add the following entry to \"oss_data/oss_demo_projector_config.json\"')\r\n print(json.dumps({\r\n \"tensorName\": name,\r\n \"tensorShape\": [vector_count, vector_dimensions],\r\n \"tensorPath\": 'oss_data/{}'.format(tensor_path),\r\n \"metadataPath\": 'oss_data/{}'.format(meta_file)\r\n }, indent=2))\r\n\r\n\r\nif __name__ == '__main__':\r\n plac.call(main)\r\n" ]
[ [ "numpy.random.normal", "numpy.zeros" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nabeelyousfi/MyoEmg
[ "c819712d93bfb58828b36669e55cd4d77453c1cf" ]
[ "train.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 29 13:39:13 2018\n\n@author: Hassan Yousuf & Nabeel Hussain\n\"\"\"\nfrom __future__ import print_function\nimport sklearn.ensemble\nfrom sklearn import metrics\nfrom myo import init, Hub, DeviceListener, StreamEmg\nfrom time import sleep\nimport numpy as np\nimport threading\nimport collections\nimport _pickle as cPickle\n\n# Complete code for training and predicting EMG data in Python using RandomForestClassifier via Myo Armband 2\n\ndef unison_shuffled_copies(a, b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]\n\ndef rms(array):\n n = len(array)\n sum = 0\n for a in array:\n sum =+ a*a\n return np.sqrt((1/float(n))*sum)\n\ndef iav(array):\n sum = 0\n for a in array:\n sum += np.abs(a)\n return sum\n\ndef ssi(array):\n sum = 0\n for a in array:\n sum += a*a\n return sum\n\ndef var(array):\n n = len(array)\n sum = 0\n for a in array:\n sum += a*a\n return ((1/float(n-1))*sum)\n\ndef tm3(array):\n n = len(array)\n print('n : ', n)\n sum = 0\n for a in array:\n sum =+ a*a*a\n return np.power((1/float(n))*sum,1/float(3))\n\ndef wl(array):\n sum = 0\n for a in range(0,len(array)-1):\n sum =+ array[a+1] - array[a]\n return sum\n\ndef aac(array):\n n = len(array)\n sum = 0\n for a in range(0,n-1):\n sum =+ array[0+1] - array[0]\n return sum/float(n)\n\n\ndef featurize(array):\n n = []\n for a in array:\n n.append(rms(a))\n return n\n\nstatus = 0\nX = []\n\nclass Listener(DeviceListener):\n def __init__(self, queue_size=1):\n self.lock = threading.Lock()\n self.emg_data_queue = collections.deque(maxlen=queue_size)\n self.ori_data_queue = collections.deque(maxlen=queue_size)\n\n def on_connect(self, myo, timestamp, firmware_version):\n myo.set_stream_emg(StreamEmg.enabled)\n\n def on_emg_data(self, myo, timestamp, emg):\n if(status):\n X.append(np.asarray(emg))\n\n def on_orientation_data(self, myo, timestamp, quat):\n # print(\"Orientation:\", quat.x, quat.y, quat.z, quat.w)\n with self.lock:\n self.ori_data_queue.append(quat)\n\n def get_ori_data(self):\n with self.lock:\n return list(self.ori_data_queue)\n\ninit()\nhub = Hub()\nlistener = Listener()\nhub.run(1000, listener)\n\nstatus = 9999\n\nsleep(1)\n\nmyX = []\n\nreq_iter = 20\ntrain_1 = []\ntrain_2 = []\ntrain_3 = []\ntrain_4 = []\ntrain_5 = []\n\nges1 = ['Rock', 'Paper', 'Scissors', 'Lizard', 'Spock']\nges2 = ['Number 1', 'Number 2', 'Number 3', 'Number 4', 'Number 5']\nges = [\"Spread Fingers\", \"Wave Out\", \"Wave In\", \"Fist\", \"Rest\"]\n\nfor a in range(1,4):\n print(\"\\nGesture -- \", ges[0],\" : Ready?\")\n input(\"Press Enter to continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_1.append(np.asarray(X))\n X = []\n if len(train_1) > a*req_iter:\n break\n\n print(\"\\nGesture -- \", ges[1],\" : Ready?\")\n input(\"Press Enter to continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_2.append(np.asarray(X))\n X = []\n if len(train_2) > a*req_iter:\n break\n\n print(\"\\nGesture -- \", ges[2],\" : Ready?\")\n input(\"Press Enter to continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_3.append(np.asarray(X))\n X = []\n if len(train_3) > a*req_iter:\n break\n\n print(\"\\nGesture -- \", ges[3],\" : Ready?\")\n input(\"Press Enter to continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_4.append(np.asarray(X))\n X = []\n if len(train_4) > a*req_iter:\n break\n\n print(\"\\nGesture -- \", ges[4],\" : Ready?\")\n input(\"Press Enter to 
continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_5.append(np.asarray(X))\n X = []\n if len(train_5) > a*req_iter:\n break\n\ntrain_x = []\ntrain_y = []\n\nfor a in train_1:\n train_x.append(np.asarray(a))\n train_y.append(1)\n\nfor a in train_2:\n train_x.append(np.asarray(a))\n train_y.append(2)\n\nfor a in train_3:\n train_x.append(np.asarray(a))\n train_y.append(3)\n\nfor a in train_4:\n train_x.append(np.asarray(a))\n train_y.append(4)\n\nfor a in train_5:\n train_x.append(np.asarray(a))\n train_y.append(5)\n\ntrain_x_f = []\n\nfor a in train_x:\n x_f_h = []\n for b in range(0,8):\n x_f_h.append(rms(a[:, b]))\n x_f_h.append(iav(a[:, b]))\n x_f_h.append(ssi(a[:, b]))\n x_f_h.append(var(a[:, b]))\n # x_f_h.append(tm3(a[:, b]))\n x_f_h.append(wl(a[:, b]))\n x_f_h.append(aac(a[:, b]))\n train_x_f.append(x_f_h)\n\n# print(len(train_x_f), len(train_x))\nclf = sklearn.ensemble.AdaBoostClassifier(n_estimators=7, learning_rate=1) #, random_state=np.random.randint(0,9))\nclf2 = sklearn.ensemble.RandomForestClassifier()\nclf3 = sklearn.ensemble.RandomForestClassifier(n_estimators=25)\n\nclf.fit(train_x_f, train_y)\nclf2.fit(train_x_f, train_y)\nclf3.fit(train_x_f, train_y)\n\ny_i = clf.predict(train_x_f)\nprint('SkLearn : ', metrics.accuracy_score(train_y, y_i))\n\nprint(train_x_f[0])\n\nprint(\"Training Complete!\")\n\nwith open('META001.pkl', 'wb') as fid:\n cPickle.dump(clf, fid)\n\nwith open('META002.pkl', 'wb') as fid:\n cPickle.dump(clf2, fid)\n\nwith open('META003.pkl', 'wb') as fid:\n cPickle.dump(clf3, fid)\nsleep(1)\nhub.shutdown()" ]
[ [ "numpy.asarray", "numpy.abs", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mcstro/natural-neighbor-interpolation
[ "76ba7bb50c84aef35e993902c46824e5991df45d" ]
[ "tests/test_api.py" ]
[ "import scipy.interpolate\nimport numpy as np\nimport pytest\n\nimport naturalneighbor\n\n\[email protected](\"grid_ranges\", [\n [[0, 4, 0.6], [-3, 3, 1.0], [0, 1, 3]],\n [[0, 2, 1], [0, 2, 1j], [0, 2, 2j]],\n [[0, 2, 1 + 1j], [0, 2, -10j], [0, 2, 2j]],\n [[0, 2, 1 + 1j], [0, 2, -0.9j], [0, 2, 2.1j]],\n])\ndef test_output_size_matches_scipy(grid_ranges):\n points = np.random.rand(10, 3)\n values = np.random.rand(10)\n\n mesh_grids = tuple(np.mgrid[\n grid_ranges[0][0]:grid_ranges[0][1]:grid_ranges[0][2],\n grid_ranges[1][0]:grid_ranges[1][1]:grid_ranges[1][2],\n grid_ranges[2][0]:grid_ranges[2][1]:grid_ranges[2][2],\n ])\n\n scipy_result = scipy.interpolate.griddata(points, values, mesh_grids)\n nn_result = naturalneighbor.griddata(points, values, grid_ranges)\n\n assert scipy_result.shape == nn_result.shape\n" ]
[ [ "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ahtkom/hello-world
[ "3a81bd25713513836c9242fe943b171ff731cfce" ]
[ "2021_1/sql_principle/prac_1/src/upload_geodata.py" ]
[ "import numpy as np\nimport pandas as pd\nimport psycopg2, sys\n\nhostname, user, dbname, passward = sys.argv[1:5]\n\n\ndef load_data():\n lon0, lat0 = 115.8, 29.4\n x = pd.read_excel('../data/x.xlsx', header=0, index_col=0).values\n y = pd.read_excel('../data/y.xlsx', header=0, index_col=0).values\n \n lon = lon0 + 360*x/np.pi/6371\n lat = lat0 + 180*y/np.pi/6371\n lon_list, lat_list = [], []\n\n for i in range(lon.shape[0]):\n lon_list.append([x for x in lon[i,:] if not np.isnan(x)])\n lat_list.append([x for x in lat[i,:] if not np.isnan(x)])\n\n return lon_list, lat_list\n\n \nif __name__ == '__main__':\n conn = psycopg2.connect(host=hostname, port=5432, user=user, \n dbname=dbname, password=passward)\n cursor = conn.cursor()\n \n lon_list, lat_list = load_data()\n flag, sep_list = 1, [0]\n for i in range(len(lon_list)):\n for j in range(len(lon_list[i])):\n cursor.execute(\"insert into my_point (point_id,lon,lat) values (%s,%s,%s)\",\n (flag, lon_list[i][j], lat_list[i][j]))\n flag += 1\n sep_list.append(flag-1)\n print(i+1, '/', len(lon_list), 'num:', len(lon_list[i]))\n # if i == 10:\n # break\n for i in range(len(sep_list)-1):\n cursor.execute(\"insert into my_line (line_id,point_list) values (%s,%s)\",\n (i+1, list(range(sep_list[i]+1,sep_list[i+1]+1))))\n\n # conn.commit()\n conn.close()\n\n" ]
[ [ "numpy.isnan", "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
rli596/manim
[ "e147a9fc6c117332221e42437481f3efba76499a" ]
[ "manim/scene/scene.py" ]
[ "\"\"\"Basic canvas for animations.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\"Scene\"]\n\nimport copy\nimport datetime\nimport inspect\nimport platform\nimport random\nimport threading\nimport time\nimport types\nfrom queue import Queue\nfrom typing import Callable\n\nimport srt\n\nfrom manim.scene.section import DefaultSectionType\n\ntry:\n import dearpygui.dearpygui as dpg\n\n dearpygui_imported = True\nexcept ImportError:\n dearpygui_imported = False\nimport numpy as np\nfrom tqdm import tqdm\nfrom watchdog.events import FileSystemEventHandler\nfrom watchdog.observers import Observer\n\nfrom manim.mobject.opengl.opengl_mobject import OpenGLPoint\n\nfrom .. import config, logger\nfrom ..animation.animation import Animation, Wait, prepare_animation\nfrom ..camera.camera import Camera\nfrom ..constants import *\nfrom ..gui.gui import configure_pygui\nfrom ..renderer.cairo_renderer import CairoRenderer\nfrom ..renderer.opengl_renderer import OpenGLRenderer\nfrom ..renderer.shader import Object3D\nfrom ..utils import opengl, space_ops\nfrom ..utils.exceptions import EndSceneEarlyException, RerunSceneException\nfrom ..utils.family import extract_mobject_family_members\nfrom ..utils.family_ops import restructure_list_to_exclude_certain_family_members\nfrom ..utils.file_ops import open_media_file\nfrom ..utils.iterables import list_difference_update, list_update\n\n\nclass RerunSceneHandler(FileSystemEventHandler):\n \"\"\"A class to handle rerunning a Scene after the input file is modified.\"\"\"\n\n def __init__(self, queue):\n super().__init__()\n self.queue = queue\n\n def on_modified(self, event):\n self.queue.put((\"rerun_file\", [], {}))\n\n\nclass Scene:\n \"\"\"A Scene is the canvas of your animation.\n\n The primary role of :class:`Scene` is to provide the user with tools to manage\n mobjects and animations. Generally speaking, a manim script consists of a class\n that derives from :class:`Scene` whose :meth:`Scene.construct` method is overridden\n by the user's code.\n\n Mobjects are displayed on screen by calling :meth:`Scene.add` and removed from\n screen by calling :meth:`Scene.remove`. All mobjects currently on screen are kept\n in :attr:`Scene.mobjects`. Animations are played by calling :meth:`Scene.play`.\n\n A :class:`Scene` is rendered internally by calling :meth:`Scene.render`. This in\n turn calls :meth:`Scene.setup`, :meth:`Scene.construct`, and\n :meth:`Scene.tear_down`, in that order.\n\n It is not recommended to override the ``__init__`` method in user Scenes. For code\n that should be ran before a Scene is rendered, use :meth:`Scene.setup` instead.\n\n Examples\n --------\n Override the :meth:`Scene.construct` method with your code.\n\n .. 
code-block:: python\n\n class MyScene(Scene):\n def construct(self):\n self.play(Write(Text(\"Hello World!\")))\n\n \"\"\"\n\n def __init__(\n self,\n renderer=None,\n camera_class=Camera,\n always_update_mobjects=False,\n random_seed=None,\n skip_animations=False,\n ):\n self.camera_class = camera_class\n self.always_update_mobjects = always_update_mobjects\n self.random_seed = random_seed\n self.skip_animations = skip_animations\n\n self.animations = None\n self.stop_condition = None\n self.moving_mobjects = []\n self.static_mobjects = []\n self.time_progression = None\n self.duration = None\n self.last_t = None\n self.queue = Queue()\n self.skip_animation_preview = False\n self.meshes = []\n self.camera_target = ORIGIN\n self.widgets = []\n self.dearpygui_imported = dearpygui_imported\n self.updaters = []\n self.point_lights = []\n self.ambient_light = None\n self.key_to_function_map = {}\n self.mouse_press_callbacks = []\n self.interactive_mode = False\n\n if config.renderer == \"opengl\":\n # Items associated with interaction\n self.mouse_point = OpenGLPoint()\n self.mouse_drag_point = OpenGLPoint()\n if renderer is None:\n renderer = OpenGLRenderer()\n\n if renderer is None:\n self.renderer = CairoRenderer(\n camera_class=self.camera_class,\n skip_animations=self.skip_animations,\n )\n else:\n self.renderer = renderer\n self.renderer.init_scene(self)\n\n self.mobjects = []\n # TODO, remove need for foreground mobjects\n self.foreground_mobjects = []\n if self.random_seed is not None:\n random.seed(self.random_seed)\n np.random.seed(self.random_seed)\n\n @property\n def camera(self):\n return self.renderer.camera\n\n def __deepcopy__(self, clone_from_id):\n cls = self.__class__\n result = cls.__new__(cls)\n clone_from_id[id(self)] = result\n for k, v in self.__dict__.items():\n if k in [\"renderer\", \"time_progression\"]:\n continue\n if k == \"camera_class\":\n setattr(result, k, v)\n setattr(result, k, copy.deepcopy(v, clone_from_id))\n result.mobject_updater_lists = []\n\n # Update updaters\n for mobject in self.mobjects:\n cloned_updaters = []\n for updater in mobject.updaters:\n # Make the cloned updater use the cloned Mobjects as free variables\n # rather than the original ones. 
Analyzing function bytecode with the\n # dis module will help in understanding this.\n # https://docs.python.org/3/library/dis.html\n # TODO: Do the same for function calls recursively.\n free_variable_map = inspect.getclosurevars(updater).nonlocals\n cloned_co_freevars = []\n cloned_closure = []\n for free_variable_name in updater.__code__.co_freevars:\n free_variable_value = free_variable_map[free_variable_name]\n\n # If the referenced variable has not been cloned, raise.\n if id(free_variable_value) not in clone_from_id:\n raise Exception(\n f\"{free_variable_name} is referenced from an updater \"\n \"but is not an attribute of the Scene, which isn't \"\n \"allowed.\",\n )\n\n # Add the cloned object's name to the free variable list.\n cloned_co_freevars.append(free_variable_name)\n\n # Add a cell containing the cloned object's reference to the\n # closure list.\n cloned_closure.append(\n types.CellType(clone_from_id[id(free_variable_value)]),\n )\n\n cloned_updater = types.FunctionType(\n updater.__code__.replace(co_freevars=tuple(cloned_co_freevars)),\n updater.__globals__,\n updater.__name__,\n updater.__defaults__,\n tuple(cloned_closure),\n )\n cloned_updaters.append(cloned_updater)\n mobject_clone = clone_from_id[id(mobject)]\n mobject_clone.updaters = cloned_updaters\n if len(cloned_updaters) > 0:\n result.mobject_updater_lists.append((mobject_clone, cloned_updaters))\n return result\n\n def render(self, preview=False):\n \"\"\"\n Renders this Scene.\n\n Parameters\n ---------\n preview : bool\n If true, opens scene in a file viewer.\n \"\"\"\n self.setup()\n try:\n self.construct()\n except EndSceneEarlyException:\n pass\n except RerunSceneException as e:\n self.remove(*self.mobjects)\n self.renderer.clear_screen()\n self.renderer.num_plays = 0\n return True\n self.tear_down()\n # We have to reset these settings in case of multiple renders.\n self.renderer.scene_finished(self)\n\n # Show info only if animations are rendered or to get image\n if (\n self.renderer.num_plays\n or config[\"format\"] == \"png\"\n or config[\"save_last_frame\"]\n ):\n logger.info(\n f\"Rendered {str(self)}\\nPlayed {self.renderer.num_plays} animations\",\n )\n\n # If preview open up the render after rendering.\n if preview:\n config[\"preview\"] = True\n\n if config[\"preview\"] or config[\"show_in_file_browser\"]:\n open_media_file(self.renderer.file_writer)\n\n def setup(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are commonly subclassed, and have some common setup\n involved before the construct method is called.\n \"\"\"\n pass\n\n def tear_down(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are commonly subclassed, and have some common method\n to be invoked before the scene ends.\n \"\"\"\n pass\n\n def construct(self):\n \"\"\"Add content to the Scene.\n\n From within :meth:`Scene.construct`, display mobjects on screen by calling\n :meth:`Scene.add` and remove them from screen by calling :meth:`Scene.remove`.\n All mobjects currently on screen are kept in :attr:`Scene.mobjects`. Play\n animations by calling :meth:`Scene.play`.\n\n Notes\n -----\n Initialization code should go in :meth:`Scene.setup`. Termination code should\n go in :meth:`Scene.tear_down`.\n\n Examples\n --------\n A typical manim script includes a class derived from :class:`Scene` with an\n overridden :meth:`Scene.contruct` method:\n\n .. 
code-block:: python\n\n class MyScene(Scene):\n def construct(self):\n self.play(Write(Text(\"Hello World!\")))\n\n See Also\n --------\n :meth:`Scene.setup`\n :meth:`Scene.render`\n :meth:`Scene.tear_down`\n\n \"\"\"\n pass # To be implemented in subclasses\n\n def next_section(\n self,\n name: str = \"unnamed\",\n type: str = DefaultSectionType.NORMAL,\n skip_animations: bool = False,\n ) -> None:\n \"\"\"Create separation here; the last section gets finished and a new one gets created.\n ``skip_animations`` skips the rendering of all animations in this section.\n Refer to :doc:`the documentation</tutorials/a_deeper_look>` on how to use sections.\n \"\"\"\n self.renderer.file_writer.next_section(name, type, skip_animations)\n\n def __str__(self):\n return self.__class__.__name__\n\n def get_attrs(self, *keys):\n \"\"\"\n Gets attributes of a scene given the attribute's identifier/name.\n\n Parameters\n ----------\n *keys : str\n Name(s) of the argument(s) to return the attribute of.\n\n Returns\n -------\n list\n List of attributes of the passed identifiers.\n \"\"\"\n return [getattr(self, key) for key in keys]\n\n def update_mobjects(self, dt):\n \"\"\"\n Begins updating all mobjects in the Scene.\n\n Parameters\n ----------\n dt: int or float\n Change in time between updates. Defaults (mostly) to 1/frames_per_second\n \"\"\"\n for mobject in self.mobjects:\n mobject.update(dt)\n\n def update_meshes(self, dt):\n for obj in self.meshes:\n for mesh in obj.get_family():\n mesh.update(dt)\n\n def update_self(self, dt):\n for func in self.updaters:\n func(dt)\n\n def should_update_mobjects(self) -> bool:\n \"\"\"\n Returns True if the mobjects of this scene should be updated.\n\n In particular, this checks whether\n\n - the :attr:`always_update_mobjects` attribute of :class:`.Scene`\n is set to ``True``,\n - the :class:`.Scene` itself has time-based updaters attached,\n - any mobject in this :class:`.Scene` has time-based updaters attached.\n\n This is only called when a single Wait animation is played.\n \"\"\"\n wait_animation = self.animations[0]\n if wait_animation.is_static_wait is None:\n should_update = (\n self.always_update_mobjects\n or self.updaters\n or any(\n [\n mob.has_time_based_updater()\n for mob in self.get_mobject_family_members()\n ],\n )\n )\n wait_animation.is_static_wait = not should_update\n return not wait_animation.is_static_wait\n\n def get_top_level_mobjects(self):\n \"\"\"\n Returns all mobjects which are not submobjects.\n\n Returns\n -------\n list\n List of top level mobjects.\n \"\"\"\n # Return only those which are not in the family\n # of another mobject from the scene\n families = [m.get_family() for m in self.mobjects]\n\n def is_top_level(mobject):\n num_families = sum((mobject in family) for family in families)\n return num_families == 1\n\n return list(filter(is_top_level, self.mobjects))\n\n def get_mobject_family_members(self):\n \"\"\"\n Returns list of family-members of all mobjects in scene.\n If a Circle() and a VGroup(Rectangle(),Triangle()) were added,\n it returns not only the Circle(), Rectangle() and Triangle(), but\n also the VGroup() object.\n\n Returns\n -------\n list\n List of mobject family members.\n \"\"\"\n if config.renderer == \"opengl\":\n family_members = []\n for mob in self.mobjects:\n family_members.extend(mob.get_family())\n return family_members\n else:\n return extract_mobject_family_members(\n self.mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n )\n\n def add(self, *mobjects):\n \"\"\"\n Mobjects will be 
displayed, from background to\n foreground in the order with which they are added.\n\n Parameters\n ---------\n *mobjects : Mobject\n Mobjects to add.\n\n Returns\n -------\n Scene\n The same scene after adding the Mobjects in.\n\n \"\"\"\n if config.renderer == \"opengl\":\n new_mobjects = []\n new_meshes = []\n for mobject_or_mesh in mobjects:\n if isinstance(mobject_or_mesh, Object3D):\n new_meshes.append(mobject_or_mesh)\n else:\n new_mobjects.append(mobject_or_mesh)\n self.remove(*new_mobjects)\n self.mobjects += new_mobjects\n self.remove(*new_meshes)\n self.meshes += new_meshes\n else:\n mobjects = [*mobjects, *self.foreground_mobjects]\n self.restructure_mobjects(to_remove=mobjects)\n self.mobjects += mobjects\n if self.moving_mobjects is not None:\n self.restructure_mobjects(\n to_remove=mobjects,\n mobject_list_name=\"moving_mobjects\",\n )\n self.moving_mobjects += mobjects\n return self\n\n def add_mobjects_from_animations(self, animations):\n curr_mobjects = self.get_mobject_family_members()\n for animation in animations:\n if animation.is_introducer():\n continue\n # Anything animated that's not already in the\n # scene gets added to the scene\n mob = animation.mobject\n if mob is not None and mob not in curr_mobjects:\n self.add(mob)\n curr_mobjects += mob.get_family()\n\n def remove(self, *mobjects):\n \"\"\"\n Removes mobjects in the passed list of mobjects\n from the scene and the foreground, by removing them\n from \"mobjects\" and \"foreground_mobjects\"\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobjects to remove.\n \"\"\"\n if config.renderer == \"opengl\":\n mobjects_to_remove = []\n meshes_to_remove = set()\n for mobject_or_mesh in mobjects:\n if isinstance(mobject_or_mesh, Object3D):\n meshes_to_remove.add(mobject_or_mesh)\n else:\n mobjects_to_remove.append(mobject_or_mesh)\n self.mobjects = restructure_list_to_exclude_certain_family_members(\n self.mobjects,\n mobjects_to_remove,\n )\n self.meshes = list(\n filter(lambda mesh: mesh not in set(meshes_to_remove), self.meshes),\n )\n return self\n else:\n for list_name in \"mobjects\", \"foreground_mobjects\":\n self.restructure_mobjects(mobjects, list_name, False)\n return self\n\n def add_updater(self, func):\n self.updaters.append(func)\n\n def remove_updater(self, func):\n self.updaters = [f for f in self.updaters if f is not func]\n\n def restructure_mobjects(\n self,\n to_remove,\n mobject_list_name=\"mobjects\",\n extract_families=True,\n ):\n \"\"\"\n tl:wr\n If your scene has a Group(), and you removed a mobject from the Group,\n this dissolves the group and puts the rest of the mobjects directly\n in self.mobjects or self.foreground_mobjects.\n\n In cases where the scene contains a group, e.g. Group(m1, m2, m3), but one\n of its submobjects is removed, e.g. scene.remove(m1), the list of mobjects\n will be edited to contain other submobjects, but not m1, e.g. 
it will now\n insert m2 and m3 to where the group once was.\n\n Parameters\n ----------\n to_remove : Mobject\n The Mobject to remove.\n\n mobject_list_name : str, optional\n The list of mobjects (\"mobjects\", \"foreground_mobjects\" etc) to remove from.\n\n extract_families : bool, optional\n Whether the mobject's families should be recursively extracted.\n\n Returns\n -------\n Scene\n The Scene mobject with restructured Mobjects.\n \"\"\"\n if extract_families:\n to_remove = extract_mobject_family_members(\n to_remove,\n use_z_index=self.renderer.camera.use_z_index,\n )\n _list = getattr(self, mobject_list_name)\n new_list = self.get_restructured_mobject_list(_list, to_remove)\n setattr(self, mobject_list_name, new_list)\n return self\n\n def get_restructured_mobject_list(self, mobjects, to_remove):\n \"\"\"\n Given a list of mobjects and a list of mobjects to be removed, this\n filters out the removable mobjects from the list of mobjects.\n\n Parameters\n ----------\n\n mobjects : list\n The Mobjects to check.\n\n to_remove : list\n The list of mobjects to remove.\n\n Returns\n -------\n list\n The list of mobjects with the mobjects to remove removed.\n \"\"\"\n\n new_mobjects = []\n\n def add_safe_mobjects_from_list(list_to_examine, set_to_remove):\n for mob in list_to_examine:\n if mob in set_to_remove:\n continue\n intersect = set_to_remove.intersection(mob.get_family())\n if intersect:\n add_safe_mobjects_from_list(mob.submobjects, intersect)\n else:\n new_mobjects.append(mob)\n\n add_safe_mobjects_from_list(mobjects, set(to_remove))\n return new_mobjects\n\n # TODO, remove this, and calls to this\n def add_foreground_mobjects(self, *mobjects):\n \"\"\"\n Adds mobjects to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n *mobjects : Mobject\n The Mobjects to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects added.\n \"\"\"\n self.foreground_mobjects = list_update(self.foreground_mobjects, mobjects)\n self.add(*mobjects)\n return self\n\n def add_foreground_mobject(self, mobject):\n \"\"\"\n Adds a single mobject to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The Mobject to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject added.\n \"\"\"\n return self.add_foreground_mobjects(mobject)\n\n def remove_foreground_mobjects(self, *to_remove):\n \"\"\"\n Removes mobjects from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n *to_remove : Mobject\n The mobject(s) to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects removed.\n \"\"\"\n self.restructure_mobjects(to_remove, \"foreground_mobjects\")\n return self\n\n def remove_foreground_mobject(self, mobject):\n \"\"\"\n Removes a single mobject from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The mobject to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject removed.\n \"\"\"\n return self.remove_foreground_mobjects(mobject)\n\n def bring_to_front(self, *mobjects):\n \"\"\"\n Adds the passed mobjects to the scene again,\n pushing them to he front of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to bring to the front of the scene.\n\n Returns\n ------\n Scene\n The 
Scene, with the mobjects brought to the front\n of the scene.\n \"\"\"\n self.add(*mobjects)\n return self\n\n def bring_to_back(self, *mobjects):\n \"\"\"\n Removes the mobject from the scene and\n adds them to the back of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to push to the back of the scene.\n\n Returns\n ------\n Scene\n The Scene, with the mobjects pushed to the back\n of the scene.\n \"\"\"\n self.remove(*mobjects)\n self.mobjects = list(mobjects) + self.mobjects\n return self\n\n def clear(self):\n \"\"\"\n Removes all mobjects present in self.mobjects\n and self.foreground_mobjects from the scene.\n\n Returns\n ------\n Scene\n The Scene, with all of its mobjects in\n self.mobjects and self.foreground_mobjects\n removed.\n \"\"\"\n self.mobjects = []\n self.foreground_mobjects = []\n return self\n\n def get_moving_mobjects(self, *animations):\n \"\"\"\n Gets all moving mobjects in the passed animation(s).\n\n Parameters\n ----------\n *animations : Animation\n The animations to check for moving mobjects.\n\n Returns\n ------\n list\n The list of mobjects that could be moving in\n the Animation(s)\n \"\"\"\n # Go through mobjects from start to end, and\n # as soon as there's one that needs updating of\n # some kind per frame, return the list from that\n # point forward.\n animation_mobjects = [anim.mobject for anim in animations]\n mobjects = self.get_mobject_family_members()\n for i, mob in enumerate(mobjects):\n update_possibilities = [\n mob in animation_mobjects,\n len(mob.get_family_updaters()) > 0,\n mob in self.foreground_mobjects,\n ]\n if any(update_possibilities):\n return mobjects[i:]\n return []\n\n def get_moving_and_static_mobjects(self, animations):\n all_mobjects = list_update(self.mobjects, self.foreground_mobjects)\n all_mobject_families = extract_mobject_family_members(\n all_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n only_those_with_points=True,\n )\n moving_mobjects = self.get_moving_mobjects(*animations)\n all_moving_mobject_families = extract_mobject_family_members(\n moving_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n )\n static_mobjects = list_difference_update(\n all_mobject_families,\n all_moving_mobject_families,\n )\n return all_moving_mobject_families, static_mobjects\n\n def compile_animations(self, *args, **kwargs):\n \"\"\"\n Creates _MethodAnimations from any _AnimationBuilders and updates animation\n kwargs with kwargs passed to play().\n\n Parameters\n ----------\n *args : Tuple[:class:`Animation`]\n Animations to be played.\n **kwargs\n Configuration for the call to play().\n\n Returns\n -------\n Tuple[:class:`Animation`]\n Animations to be played.\n \"\"\"\n animations = []\n for arg in args:\n try:\n animations.append(prepare_animation(arg))\n except TypeError:\n if inspect.ismethod(arg):\n raise TypeError(\n \"Passing Mobject methods to Scene.play is no longer\"\n \" supported. 
Use Mobject.animate instead.\",\n )\n else:\n raise TypeError(\n f\"Unexpected argument {arg} passed to Scene.play().\",\n )\n\n for animation in animations:\n for k, v in kwargs.items():\n setattr(animation, k, v)\n\n return animations\n\n def _get_animation_time_progression(self, animations, duration):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Uses :func:`~.get_time_progression` to obtain a\n CommandLine ProgressBar whose ``fill_time`` is\n dependent on the qualities of the passed Animation,\n\n Parameters\n ----------\n animations : List[:class:`~.Animation`, ...]\n The list of animations to get\n the time progression for.\n\n duration : int or float\n duration of wait time\n\n Returns\n -------\n time_progression\n The CommandLine Progress Bar.\n \"\"\"\n if len(animations) == 1 and isinstance(animations[0], Wait):\n stop_condition = animations[0].stop_condition\n if stop_condition is not None:\n time_progression = self.get_time_progression(\n duration,\n f\"Waiting for {stop_condition.__name__}\",\n n_iterations=-1, # So it doesn't show % progress\n override_skip_animations=True,\n )\n else:\n time_progression = self.get_time_progression(\n duration,\n f\"Waiting {self.renderer.num_plays}\",\n )\n else:\n time_progression = self.get_time_progression(\n duration,\n \"\".join(\n [\n f\"Animation {self.renderer.num_plays}: \",\n str(animations[0]),\n (\", etc.\" if len(animations) > 1 else \"\"),\n ],\n ),\n )\n return time_progression\n\n def get_time_progression(\n self,\n run_time,\n description,\n n_iterations=None,\n override_skip_animations=False,\n ):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Returns a CommandLine ProgressBar whose ``fill_time``\n is dependent on the ``run_time`` of an animation,\n the iterations to perform in that animation\n and a bool saying whether or not to consider\n the skipped animations.\n\n Parameters\n ----------\n run_time : float\n The ``run_time`` of the animation.\n\n n_iterations : int, optional\n The number of iterations in the animation.\n\n override_skip_animations : bool, optional\n Whether or not to show skipped animations in the progress bar.\n\n Returns\n -------\n time_progression\n The CommandLine Progress Bar.\n \"\"\"\n if self.renderer.skip_animations and not override_skip_animations:\n times = [run_time]\n else:\n step = 1 / config[\"frame_rate\"]\n times = np.arange(0, run_time, step)\n time_progression = tqdm(\n times,\n desc=description,\n total=n_iterations,\n leave=config[\"progress_bar\"] == \"leave\",\n ascii=True if platform.system() == \"Windows\" else None,\n disable=config[\"progress_bar\"] == \"none\",\n )\n return time_progression\n\n def get_run_time(self, animations):\n \"\"\"\n Gets the total run time for a list of animations.\n\n Parameters\n ----------\n animations : List[:class:`Animation`, ...]\n A list of the animations whose total\n ``run_time`` is to be calculated.\n\n Returns\n -------\n float\n The total ``run_time`` of all of the animations in the list.\n \"\"\"\n\n if len(animations) == 1 and isinstance(animations[0], Wait):\n if animations[0].stop_condition is not None:\n return 0\n else:\n return animations[0].duration\n\n else:\n return np.max([animation.run_time for animation in animations])\n\n def play(\n self,\n *args,\n subcaption=None,\n subcaption_duration=None,\n subcaption_offset=0,\n **kwargs,\n ):\n r\"\"\"Plays an animation in this scene.\n\n 
Parameters\n ----------\n\n args\n Animations to be played.\n subcaption\n The content of the external subcaption that should\n be added during the animation.\n subcaption_duration\n The duration for which the specified subcaption is\n added. If ``None`` (the default), the run time of the\n animation is taken.\n subcaption_offset\n An offset (in seconds) for the start time of the\n added subcaption.\n kwargs\n All other keywords are passed to the renderer.\n\n \"\"\"\n start_time = self.renderer.time\n self.renderer.play(self, *args, **kwargs)\n run_time = self.renderer.time - start_time\n if subcaption:\n if subcaption_duration is None:\n subcaption_duration = run_time\n # The start of the subcaption needs to be offset by the\n # run_time of the animation because it is added after\n # the animation has already been played (and Scene.renderer.time\n # has already been updated).\n self.add_subcaption(\n content=subcaption,\n duration=subcaption_duration,\n offset=-run_time + subcaption_offset,\n )\n\n def wait(\n self,\n duration: float = DEFAULT_WAIT_TIME,\n stop_condition: Callable[[], bool] | None = None,\n frozen_frame: bool | None = None,\n ):\n \"\"\"Plays a \"no operation\" animation.\n\n Parameters\n ----------\n duration\n The run time of the animation.\n stop_condition\n A function without positional arguments that is evaluated every time\n a frame is rendered. The animation only stops when the return value\n of the function is truthy. Overrides any value passed to ``duration``.\n frozen_frame\n If True, updater functions are not evaluated, and the animation outputs\n a frozen frame. If False, updater functions are called and frames\n are rendered as usual. If None (the default), the scene tries to\n determine whether or not the frame is frozen on its own.\n\n See also\n --------\n :class:`.Wait`, :meth:`.should_mobjects_update`\n \"\"\"\n self.play(\n Wait(\n run_time=duration,\n stop_condition=stop_condition,\n frozen_frame=frozen_frame,\n )\n )\n\n def pause(self, duration: float = DEFAULT_WAIT_TIME):\n \"\"\"Pauses the scene (i.e., displays a frozen frame).\n\n This is an alias for :meth:`.wait` with ``frozen_frame``\n set to ``True``.\n\n Parameters\n ----------\n duration\n The duration of the pause.\n\n See also\n --------\n :meth:`.wait`, :class:`.Wait`\n \"\"\"\n self.wait(duration=duration, frozen_frame=True)\n\n def wait_until(self, stop_condition, max_time=60):\n \"\"\"\n Like a wrapper for wait().\n You pass a function that determines whether to continue waiting,\n and a max wait time if that is never fulfilled.\n\n Parameters\n ----------\n stop_condition : function\n The function whose boolean return value determines whether to continue waiting\n\n max_time : int or float, optional\n The maximum wait time in seconds, if the stop_condition is never fulfilled.\n \"\"\"\n self.wait(max_time, stop_condition=stop_condition)\n\n def compile_animation_data(self, *animations: Animation, **play_kwargs):\n \"\"\"Given a list of animations, compile the corresponding\n static and moving mobjects, and gather the animation durations.\n\n This also begins the animations.\n\n Parameters\n ----------\n skip_rendering : bool, optional\n Whether the rendering should be skipped, by default False\n\n Returns\n -------\n self, None\n None if there is nothing to play, or self otherwise.\n \"\"\"\n # NOTE TODO : returns statement of this method are wrong. 
It should return nothing, as it makes a little sense to get any information from this method.\n # The return are kept to keep webgl renderer from breaking.\n if len(animations) == 0:\n raise ValueError(\"Called Scene.play with no animations\")\n\n self.animations = self.compile_animations(*animations, **play_kwargs)\n self.add_mobjects_from_animations(self.animations)\n\n self.last_t = 0\n self.stop_condition = None\n self.moving_mobjects = []\n self.static_mobjects = []\n\n if len(self.animations) == 1 and isinstance(self.animations[0], Wait):\n if self.should_update_mobjects():\n self.update_mobjects(dt=0) # Any problems with this?\n self.stop_condition = self.animations[0].stop_condition\n else:\n self.duration = self.animations[0].duration\n # Static image logic when the wait is static is done by the renderer, not here.\n self.animations[0].is_static_wait = True\n return None\n elif config.renderer != \"opengl\":\n # Paint all non-moving objects onto the screen, so they don't\n # have to be rendered every frame\n (\n self.moving_mobjects,\n self.static_mobjects,\n ) = self.get_moving_and_static_mobjects(self.animations)\n self.duration = self.get_run_time(self.animations)\n return self\n\n def begin_animations(self) -> None:\n \"\"\"Start the animations of the scene.\"\"\"\n for animation in self.animations:\n animation._setup_scene(self)\n animation.begin()\n\n def is_current_animation_frozen_frame(self) -> bool:\n \"\"\"Returns whether the current animation produces a static frame (generally a Wait).\"\"\"\n return (\n isinstance(self.animations[0], Wait)\n and len(self.animations) == 1\n and self.animations[0].is_static_wait\n )\n\n def play_internal(self, skip_rendering=False):\n \"\"\"\n This method is used to prep the animations for rendering,\n apply the arguments and parameters required to them,\n render them, and write them to the video file.\n\n Parameters\n ----------\n args\n Animation or mobject with mobject method and params\n kwargs\n named parameters affecting what was passed in ``args``,\n e.g. 
``run_time``, ``lag_ratio`` and so on.\n \"\"\"\n self.duration = self.get_run_time(self.animations)\n self.time_progression = self._get_animation_time_progression(\n self.animations,\n self.duration,\n )\n for t in self.time_progression:\n self.update_to_time(t)\n if not skip_rendering and not self.skip_animation_preview:\n self.renderer.render(self, t, self.moving_mobjects)\n if self.stop_condition is not None and self.stop_condition():\n self.time_progression.close()\n break\n\n for animation in self.animations:\n animation.finish()\n animation.clean_up_from_scene(self)\n if not self.renderer.skip_animations:\n self.update_mobjects(0)\n self.renderer.static_image = None\n # Closing the progress bar at the end of the play.\n self.time_progression.close()\n\n def check_interactive_embed_is_valid(self):\n if config[\"force_window\"]:\n return True\n if self.skip_animation_preview:\n logger.warning(\n \"Disabling interactive embed as 'skip_animation_preview' is enabled\",\n )\n return False\n elif config[\"write_to_movie\"]:\n logger.warning(\"Disabling interactive embed as 'write_to_movie' is enabled\")\n return False\n elif config[\"format\"]:\n logger.warning(\n \"Disabling interactive embed as '--format' is set as \"\n + config[\"format\"],\n )\n return False\n elif not self.renderer.window:\n logger.warning(\"Disabling interactive embed as no window was created\")\n return False\n elif config.dry_run:\n logger.warning(\"Disabling interactive embed as dry_run is enabled\")\n return False\n return True\n\n def interactive_embed(self):\n \"\"\"\n Like embed(), but allows for screen interaction.\n \"\"\"\n if not self.check_interactive_embed_is_valid():\n return\n self.interactive_mode = True\n\n def ipython(shell, namespace):\n import manim.opengl\n\n def load_module_into_namespace(module, namespace):\n for name in dir(module):\n namespace[name] = getattr(module, name)\n\n load_module_into_namespace(manim, namespace)\n load_module_into_namespace(manim.opengl, namespace)\n\n def embedded_rerun(*args, **kwargs):\n self.queue.put((\"rerun_keyboard\", args, kwargs))\n shell.exiter()\n\n namespace[\"rerun\"] = embedded_rerun\n\n shell(local_ns=namespace)\n self.queue.put((\"exit_keyboard\", [], {}))\n\n def get_embedded_method(method_name):\n return lambda *args, **kwargs: self.queue.put((method_name, args, kwargs))\n\n local_namespace = inspect.currentframe().f_back.f_locals\n for method in (\"play\", \"wait\", \"add\", \"remove\"):\n embedded_method = get_embedded_method(method)\n # Allow for calling scene methods without prepending 'self.'.\n local_namespace[method] = embedded_method\n\n from IPython.terminal.embed import InteractiveShellEmbed\n from traitlets.config import Config\n\n cfg = Config()\n cfg.TerminalInteractiveShell.confirm_exit = False\n shell = InteractiveShellEmbed(config=cfg)\n\n keyboard_thread = threading.Thread(\n target=ipython,\n args=(shell, local_namespace),\n )\n # run as daemon to kill thread when main thread exits\n if not shell.pt_app:\n keyboard_thread.daemon = True\n keyboard_thread.start()\n\n if self.dearpygui_imported and config[\"enable_gui\"]:\n if not dpg.is_dearpygui_running():\n gui_thread = threading.Thread(\n target=configure_pygui,\n args=(self.renderer, self.widgets),\n kwargs={\"update\": False},\n )\n gui_thread.start()\n else:\n configure_pygui(self.renderer, self.widgets, update=True)\n\n self.camera.model_matrix = self.camera.default_model_matrix\n\n self.interact(shell, keyboard_thread)\n\n def interact(self, shell, keyboard_thread):\n 
event_handler = RerunSceneHandler(self.queue)\n file_observer = Observer()\n file_observer.schedule(event_handler, config[\"input_file\"], recursive=True)\n file_observer.start()\n\n self.quit_interaction = False\n keyboard_thread_needs_join = shell.pt_app is not None\n assert self.queue.qsize() == 0\n\n last_time = time.time()\n while not (self.renderer.window.is_closing or self.quit_interaction):\n if not self.queue.empty():\n tup = self.queue.get_nowait()\n if tup[0].startswith(\"rerun\"):\n # Intentionally skip calling join() on the file thread to save time.\n if not tup[0].endswith(\"keyboard\"):\n if shell.pt_app:\n shell.pt_app.app.exit(exception=EOFError)\n file_observer.unschedule_all()\n raise RerunSceneException\n keyboard_thread.join()\n\n kwargs = tup[2]\n if \"from_animation_number\" in kwargs:\n config[\"from_animation_number\"] = kwargs[\n \"from_animation_number\"\n ]\n # # TODO: This option only makes sense if interactive_embed() is run at the\n # # end of a scene by default.\n # if \"upto_animation_number\" in kwargs:\n # config[\"upto_animation_number\"] = kwargs[\n # \"upto_animation_number\"\n # ]\n\n keyboard_thread.join()\n file_observer.unschedule_all()\n raise RerunSceneException\n elif tup[0].startswith(\"exit\"):\n # Intentionally skip calling join() on the file thread to save time.\n if not tup[0].endswith(\"keyboard\") and shell.pt_app:\n shell.pt_app.app.exit(exception=EOFError)\n keyboard_thread.join()\n # Remove exit_keyboard from the queue if necessary.\n while self.queue.qsize() > 0:\n self.queue.get()\n keyboard_thread_needs_join = False\n break\n else:\n method, args, kwargs = tup\n getattr(self, method)(*args, **kwargs)\n else:\n self.renderer.animation_start_time = 0\n dt = time.time() - last_time\n last_time = time.time()\n self.renderer.render(self, dt, self.moving_mobjects)\n self.update_mobjects(dt)\n self.update_meshes(dt)\n self.update_self(dt)\n\n # Join the keyboard thread if necessary.\n if shell is not None and keyboard_thread_needs_join:\n shell.pt_app.app.exit(exception=EOFError)\n keyboard_thread.join()\n # Remove exit_keyboard from the queue if necessary.\n while self.queue.qsize() > 0:\n self.queue.get()\n\n file_observer.stop()\n file_observer.join()\n\n if self.dearpygui_imported and config[\"enable_gui\"]:\n dpg.stop_dearpygui()\n\n if self.renderer.window.is_closing:\n self.renderer.window.destroy()\n\n def embed(self):\n if not config[\"preview\"]:\n logger.warning(\"Called embed() while no preview window is available.\")\n return\n if config[\"write_to_movie\"]:\n logger.warning(\"embed() is skipped while writing to a file.\")\n return\n\n self.renderer.animation_start_time = 0\n self.renderer.render(self, -1, self.moving_mobjects)\n\n # Configure IPython shell.\n from IPython.terminal.embed import InteractiveShellEmbed\n\n shell = InteractiveShellEmbed()\n\n # Have the frame update after each command\n shell.events.register(\n \"post_run_cell\",\n lambda *a, **kw: self.renderer.render(self, -1, self.moving_mobjects),\n )\n\n # Use the locals of the caller as the local namespace\n # once embedded, and add a few custom shortcuts.\n local_ns = inspect.currentframe().f_back.f_locals\n # local_ns[\"touch\"] = self.interact\n for method in (\n \"play\",\n \"wait\",\n \"add\",\n \"remove\",\n \"interact\",\n # \"clear\",\n # \"save_state\",\n # \"restore\",\n ):\n local_ns[method] = getattr(self, method)\n shell(local_ns=local_ns, stack_depth=2)\n\n # End scene when exiting an embed.\n raise Exception(\"Exiting scene.\")\n\n def 
update_to_time(self, t):\n dt = t - self.last_t\n self.last_t = t\n for animation in self.animations:\n animation.update_mobjects(dt)\n alpha = t / animation.run_time\n animation.interpolate(alpha)\n self.update_mobjects(dt)\n self.update_meshes(dt)\n self.update_self(dt)\n\n def add_subcaption(\n self, content: str, duration: float = 1, offset: float = 0\n ) -> None:\n r\"\"\"Adds an entry in the corresponding subcaption file\n at the current time stamp.\n\n The current time stamp is obtained from ``Scene.renderer.time``.\n\n Parameters\n ----------\n\n content\n The subcaption content.\n duration\n The duration (in seconds) for which the subcaption is shown.\n offset\n This offset (in seconds) is added to the starting time stamp\n of the subcaption.\n\n Examples\n --------\n\n This example illustrates both possibilities for adding\n subcaptions to Manimations::\n\n class SubcaptionExample(Scene):\n def construct(self):\n square = Square()\n circle = Circle()\n\n # first option: via the add_subcaption method\n self.add_subcaption(\"Hello square!\", duration=1)\n self.play(Create(square))\n\n # second option: within the call to Scene.play\n self.play(\n Transform(square, circle),\n subcaption=\"The square transforms.\"\n )\n\n \"\"\"\n subtitle = srt.Subtitle(\n index=len(self.renderer.file_writer.subcaptions),\n content=content,\n start=datetime.timedelta(seconds=self.renderer.time + offset),\n end=datetime.timedelta(seconds=self.renderer.time + offset + duration),\n )\n self.renderer.file_writer.subcaptions.append(subtitle)\n\n def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):\n \"\"\"\n This method is used to add a sound to the animation.\n\n Parameters\n ----------\n\n sound_file : str\n The path to the sound file.\n time_offset : int,float, optional\n The offset in the sound file after which\n the sound can be played.\n gain : float\n Amplification of the sound.\n\n Examples\n --------\n .. manim:: SoundExample\n\n class SoundExample(Scene):\n # Source of sound under Creative Commons 0 License. 
https://freesound.org/people/Druminfected/sounds/250551/\n def construct(self):\n dot = Dot().set_color(GREEN)\n self.add_sound(\"click.wav\")\n self.add(dot)\n self.wait()\n self.add_sound(\"click.wav\")\n dot.set_color(BLUE)\n self.wait()\n self.add_sound(\"click.wav\")\n dot.set_color(RED)\n self.wait()\n\n Download the resource for the previous example `here <https://github.com/ManimCommunity/manim/blob/main/docs/source/_static/click.wav>`_ .\n \"\"\"\n if self.renderer.skip_animations:\n return\n time = self.renderer.time + time_offset\n self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs)\n\n def on_mouse_motion(self, point, d_point):\n self.mouse_point.move_to(point)\n if SHIFT_VALUE in self.renderer.pressed_keys:\n shift = -d_point\n shift[0] *= self.camera.get_width() / 2\n shift[1] *= self.camera.get_height() / 2\n transform = self.camera.inverse_rotation_matrix\n shift = np.dot(np.transpose(transform), shift)\n self.camera.shift(shift)\n\n def on_mouse_scroll(self, point, offset):\n if not config.use_projection_stroke_shaders:\n factor = 1 + np.arctan(-2.1 * offset[1])\n self.camera.scale(factor, about_point=self.camera_target)\n self.mouse_scroll_orbit_controls(point, offset)\n\n def on_key_press(self, symbol, modifiers):\n try:\n char = chr(symbol)\n except OverflowError:\n logger.warning(\"The value of the pressed key is too large.\")\n return\n\n if char == \"r\":\n self.camera.to_default_state()\n self.camera_target = np.array([0, 0, 0], dtype=np.float32)\n elif char == \"q\":\n self.quit_interaction = True\n else:\n if char in self.key_to_function_map:\n self.key_to_function_map[char]()\n\n def on_key_release(self, symbol, modifiers):\n pass\n\n def on_mouse_drag(self, point, d_point, buttons, modifiers):\n self.mouse_drag_point.move_to(point)\n if buttons == 1:\n self.camera.increment_theta(-d_point[0])\n self.camera.increment_phi(d_point[1])\n elif buttons == 4:\n camera_x_axis = self.camera.model_matrix[:3, 0]\n horizontal_shift_vector = -d_point[0] * camera_x_axis\n vertical_shift_vector = -d_point[1] * np.cross(OUT, camera_x_axis)\n total_shift_vector = horizontal_shift_vector + vertical_shift_vector\n self.camera.shift(1.1 * total_shift_vector)\n\n self.mouse_drag_orbit_controls(point, d_point, buttons, modifiers)\n\n def mouse_scroll_orbit_controls(self, point, offset):\n camera_to_target = self.camera_target - self.camera.get_position()\n camera_to_target *= np.sign(offset[1])\n shift_vector = 0.01 * camera_to_target\n self.camera.model_matrix = (\n opengl.translation_matrix(*shift_vector) @ self.camera.model_matrix\n )\n\n def mouse_drag_orbit_controls(self, point, d_point, buttons, modifiers):\n # Left click drag.\n if buttons == 1:\n # Translate to target the origin and rotate around the z axis.\n self.camera.model_matrix = (\n opengl.rotation_matrix(z=-d_point[0])\n @ opengl.translation_matrix(*-self.camera_target)\n @ self.camera.model_matrix\n )\n\n # Rotation off of the z axis.\n camera_position = self.camera.get_position()\n camera_y_axis = self.camera.model_matrix[:3, 1]\n axis_of_rotation = space_ops.normalize(\n np.cross(camera_y_axis, camera_position),\n )\n rotation_matrix = space_ops.rotation_matrix(\n d_point[1],\n axis_of_rotation,\n homogeneous=True,\n )\n\n maximum_polar_angle = self.camera.maximum_polar_angle\n minimum_polar_angle = self.camera.minimum_polar_angle\n\n potential_camera_model_matrix = rotation_matrix @ self.camera.model_matrix\n potential_camera_location = potential_camera_model_matrix[:3, 3]\n 
potential_camera_y_axis = potential_camera_model_matrix[:3, 1]\n sign = (\n np.sign(potential_camera_y_axis[2])\n if potential_camera_y_axis[2] != 0\n else 1\n )\n potential_polar_angle = sign * np.arccos(\n potential_camera_location[2]\n / np.linalg.norm(potential_camera_location),\n )\n if minimum_polar_angle <= potential_polar_angle <= maximum_polar_angle:\n self.camera.model_matrix = potential_camera_model_matrix\n else:\n sign = np.sign(camera_y_axis[2]) if camera_y_axis[2] != 0 else 1\n current_polar_angle = sign * np.arccos(\n camera_position[2] / np.linalg.norm(camera_position),\n )\n if potential_polar_angle > maximum_polar_angle:\n polar_angle_delta = maximum_polar_angle - current_polar_angle\n else:\n polar_angle_delta = minimum_polar_angle - current_polar_angle\n rotation_matrix = space_ops.rotation_matrix(\n polar_angle_delta,\n axis_of_rotation,\n homogeneous=True,\n )\n self.camera.model_matrix = rotation_matrix @ self.camera.model_matrix\n\n # Translate to target the original target.\n self.camera.model_matrix = (\n opengl.translation_matrix(*self.camera_target)\n @ self.camera.model_matrix\n )\n # Right click drag.\n elif buttons == 4:\n camera_x_axis = self.camera.model_matrix[:3, 0]\n horizontal_shift_vector = -d_point[0] * camera_x_axis\n vertical_shift_vector = -d_point[1] * np.cross(OUT, camera_x_axis)\n total_shift_vector = horizontal_shift_vector + vertical_shift_vector\n\n self.camera.model_matrix = (\n opengl.translation_matrix(*total_shift_vector)\n @ self.camera.model_matrix\n )\n self.camera_target += total_shift_vector\n\n def set_key_function(self, char, func):\n self.key_to_function_map[char] = func\n\n def on_mouse_press(self, point, button, modifiers):\n for func in self.mouse_press_callbacks:\n func()\n" ]
[ [ "numpy.cross", "numpy.random.seed", "numpy.arctan", "numpy.arange", "numpy.linalg.norm", "numpy.sign", "numpy.max", "numpy.transpose", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MichaelAquilina/numpy
[ "6e8b869d52ec5a1242df69bcd9323a4b0947933b" ]
[ "numpy/distutils/exec_command.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nexec_command\n\nImplements exec_command function that is (almost) equivalent to\ncommands.getstatusoutput function but on NT, DOS systems the\nreturned status is actually correct (though, the returned status\nvalues may be different by a factor). In addition, exec_command\ntakes keyword arguments for (re-)defining environment variables.\n\nProvides functions:\n\n exec_command --- execute command in a specified directory and\n in the modified environment.\n find_executable --- locate a command using info from environment\n variable PATH. Equivalent to posix `which`\n command.\n\nAuthor: Pearu Peterson <[email protected]>\nCreated: 11 January 2003\n\nRequires: Python 2.x\n\nSuccesfully tested on:\n\n======== ============ =================================================\nos.name sys.platform comments\n======== ============ =================================================\nposix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3\n PyCrust 0.9.3, Idle 1.0.2\nposix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2\nposix sunos5 SunOS 5.9, Python 2.2, 2.3.2\nposix darwin Darwin 7.2.0, Python 2.3\nnt win32 Windows Me\n Python 2.3(EE), Idle 1.0, PyCrust 0.7.2\n Python 2.1.1 Idle 0.8\nnt win32 Windows 98, Python 2.1.1. Idle 0.8\nnt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests\n fail i.e. redefining environment variables may\n not work. FIXED: don't use cygwin echo!\n Comment: also `cmd /c echo` will not work\n but redefining environment variables do work.\nposix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)\nnt win32 Windows XP, Python 2.3.3\n======== ============ =================================================\n\nKnown bugs:\n\n* Tests, that send messages to stderr, fail when executed from MSYS prompt\n because the messages are lost at some point.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n__all__ = ['exec_command', 'find_executable']\n\nimport os\nimport sys\nimport shlex\n\nfrom numpy.distutils.misc_util import is_sequence, make_temp_file\nfrom numpy.distutils import log\nfrom numpy.distutils.compat import get_exception\n\nfrom numpy.compat import open_latin1\n\ndef temp_file_name():\n fo, name = make_temp_file()\n fo.close()\n return name\n\ndef get_pythonexe():\n pythonexe = sys.executable\n if os.name in ['nt', 'dos']:\n fdir, fn = os.path.split(pythonexe)\n fn = fn.upper().replace('PYTHONW', 'PYTHON')\n pythonexe = os.path.join(fdir, fn)\n assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)\n return pythonexe\n\ndef find_executable(exe, path=None, _cache={}):\n \"\"\"Return full path of a executable or None.\n\n Symbolic links are not followed.\n \"\"\"\n key = exe, path\n try:\n return _cache[key]\n except KeyError:\n pass\n log.debug('find_executable(%r)' % exe)\n orig_exe = exe\n\n if path is None:\n path = os.environ.get('PATH', os.defpath)\n if os.name=='posix':\n realpath = os.path.realpath\n else:\n realpath = lambda a:a\n\n if exe.startswith('\"'):\n exe = exe[1:-1]\n\n suffixes = ['']\n if os.name in ['nt', 'dos', 'os2']:\n fn, ext = os.path.splitext(exe)\n extra_suffixes = ['.exe', '.com', '.bat']\n if ext.lower() not in extra_suffixes:\n suffixes = extra_suffixes\n\n if os.path.isabs(exe):\n paths = ['']\n else:\n paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]\n\n for path in paths:\n fn = os.path.join(path, exe)\n for s in suffixes:\n f_ext = fn+s\n if not os.path.islink(f_ext):\n f_ext = realpath(f_ext)\n if os.path.isfile(f_ext) and os.access(f_ext, 
os.X_OK):\n log.info('Found executable %s' % f_ext)\n _cache[key] = f_ext\n return f_ext\n\n log.warn('Could not locate executable %s' % orig_exe)\n return None\n\n############################################################\n\ndef _preserve_environment( names ):\n log.debug('_preserve_environment(%r)' % (names))\n env = {}\n for name in names:\n env[name] = os.environ.get(name)\n return env\n\ndef _update_environment( **env ):\n log.debug('_update_environment(...)')\n for name, value in env.items():\n os.environ[name] = value or ''\n\ndef _supports_fileno(stream):\n \"\"\"\n Returns True if 'stream' supports the file descriptor and allows fileno().\n \"\"\"\n if hasattr(stream, 'fileno'):\n try:\n r = stream.fileno()\n return True\n except IOError:\n return False\n else:\n return False\n\ndef exec_command(command, execute_in='', use_shell=None, use_tee=None,\n _with_python = 1, **env ):\n \"\"\"\n Return (status,output) of executed command.\n\n Parameters\n ----------\n command : str\n A concatenated string of executable and arguments.\n execute_in : str\n Before running command ``cd execute_in`` and after ``cd -``.\n use_shell : {bool, None}, optional\n If True, execute ``sh -c command``. Default None (True)\n use_tee : {bool, None}, optional\n If True use tee. Default None (True)\n\n\n Returns\n -------\n res : str\n Both stdout and stderr messages.\n\n Notes\n -----\n On NT, DOS systems the returned status is correct for external commands.\n Wild cards will not work for non-posix systems or when use_shell=0.\n\n \"\"\"\n log.debug('exec_command(%r,%s)' % (command,\\\n ','.join(['%s=%r'%kv for kv in env.items()])))\n\n if use_tee is None:\n use_tee = os.name=='posix'\n if use_shell is None:\n use_shell = os.name=='posix'\n execute_in = os.path.abspath(execute_in)\n oldcwd = os.path.abspath(os.getcwd())\n\n if __name__[-12:] == 'exec_command':\n exec_dir = os.path.dirname(os.path.abspath(__file__))\n elif os.path.isfile('exec_command.py'):\n exec_dir = os.path.abspath('.')\n else:\n exec_dir = os.path.abspath(sys.argv[0])\n if os.path.isfile(exec_dir):\n exec_dir = os.path.dirname(exec_dir)\n\n if oldcwd!=execute_in:\n os.chdir(execute_in)\n log.debug('New cwd: %s' % execute_in)\n else:\n log.debug('Retaining cwd: %s' % oldcwd)\n\n oldenv = _preserve_environment( list(env.keys()) )\n _update_environment( **env )\n\n try:\n # _exec_command is robust but slow, it relies on\n # usable sys.std*.fileno() descriptors. 
If they\n # are bad (like in win32 Idle, PyCrust environments)\n # then _exec_command_python (even slower)\n # will be used as a last resort.\n #\n # _exec_command_posix uses os.system and is faster\n # but not on all platforms os.system will return\n # a correct status.\n if (_with_python and _supports_fileno(sys.stdout) and\n sys.stdout.fileno() == -1):\n st = _exec_command_python(command,\n exec_command_dir = exec_dir,\n **env)\n elif os.name=='posix':\n st = _exec_command_posix(command,\n use_shell=use_shell,\n use_tee=use_tee,\n **env)\n else:\n st = _exec_command(command, use_shell=use_shell,\n use_tee=use_tee,**env)\n finally:\n if oldcwd!=execute_in:\n os.chdir(oldcwd)\n log.debug('Restored cwd to %s' % oldcwd)\n _update_environment(**oldenv)\n\n return st\n\ndef _exec_command_posix( command,\n use_shell = None,\n use_tee = None,\n **env ):\n log.debug('_exec_command_posix(...)')\n\n if is_sequence(command):\n command_str = ' '.join(list(command))\n else:\n command_str = command\n\n tmpfile = temp_file_name()\n stsfile = None\n if use_tee:\n stsfile = temp_file_name()\n filter = ''\n if use_tee == 2:\n filter = r'| tr -cd \"\\n\" | tr \"\\n\" \".\"; echo'\n command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\\\n % (command_str, stsfile, tmpfile, filter)\n else:\n stsfile = temp_file_name()\n command_posix = '( %s ; echo $? > %s ) > %s 2>&1'\\\n % (command_str, stsfile, tmpfile)\n #command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile)\n\n log.debug('Running os.system(%r)' % (command_posix))\n status = os.system(command_posix)\n\n if use_tee:\n if status:\n # if command_tee fails then fall back to robust exec_command\n log.warn('_exec_command_posix failed (status=%s)' % status)\n return _exec_command(command, use_shell=use_shell, **env)\n\n if stsfile is not None:\n f = open_latin1(stsfile, 'r')\n status_text = f.read()\n status = int(status_text)\n f.close()\n os.remove(stsfile)\n\n f = open_latin1(tmpfile, 'r')\n text = f.read()\n f.close()\n os.remove(tmpfile)\n\n if text[-1:]=='\\n':\n text = text[:-1]\n\n return status, text\n\n\ndef _exec_command_python(command,\n exec_command_dir='', **env):\n log.debug('_exec_command_python(...)')\n\n python_exe = get_pythonexe()\n cmdfile = temp_file_name()\n stsfile = temp_file_name()\n outfile = temp_file_name()\n\n f = open(cmdfile, 'w')\n f.write('import os\\n')\n f.write('import sys\\n')\n f.write('sys.path.insert(0,%r)\\n' % (exec_command_dir))\n f.write('from exec_command import exec_command\\n')\n f.write('del sys.path[0]\\n')\n f.write('cmd = %r\\n' % command)\n f.write('os.environ = %r\\n' % (os.environ))\n f.write('s,o = exec_command(cmd, _with_python=0, **%r)\\n' % (env))\n f.write('f=open(%r,\"w\")\\nf.write(str(s))\\nf.close()\\n' % (stsfile))\n f.write('f=open(%r,\"w\")\\nf.write(o)\\nf.close()\\n' % (outfile))\n f.close()\n\n cmd = '%s %s' % (python_exe, cmdfile)\n status = os.system(cmd)\n if status:\n raise RuntimeError(\"%r failed\" % (cmd,))\n os.remove(cmdfile)\n\n f = open_latin1(stsfile, 'r')\n status = int(f.read())\n f.close()\n os.remove(stsfile)\n\n f = open_latin1(outfile, 'r')\n text = f.read()\n f.close()\n os.remove(outfile)\n\n return status, text\n\ndef quote_arg(arg):\n if arg[0]!='\"' and ' ' in arg:\n return '\"%s\"' % arg\n return arg\n\ndef _exec_command( command, use_shell=None, use_tee = None, **env ):\n log.debug('_exec_command(...)')\n\n if use_shell is None:\n use_shell = os.name=='posix'\n if use_tee is None:\n use_tee = os.name=='posix'\n using_command = 0\n if use_shell:\n # We use 
shell (unless use_shell==0) so that wildcards can be\n # used.\n sh = os.environ.get('SHELL', '/bin/sh')\n if is_sequence(command):\n argv = [sh, '-c', ' '.join(list(command))]\n else:\n argv = [sh, '-c', command]\n else:\n # On NT, DOS we avoid using command.com as it's exit status is\n # not related to the exit status of a command.\n if is_sequence(command):\n argv = command[:]\n else:\n argv = shlex.split(command)\n\n if hasattr(os, 'spawnvpe'):\n spawn_command = os.spawnvpe\n else:\n spawn_command = os.spawnve\n argv[0] = find_executable(argv[0]) or argv[0]\n if not os.path.isfile(argv[0]):\n log.warn('Executable %s does not exist' % (argv[0]))\n if os.name in ['nt', 'dos']:\n # argv[0] might be internal command\n argv = [os.environ['COMSPEC'], '/C'] + argv\n using_command = 1\n\n _so_has_fileno = _supports_fileno(sys.stdout)\n _se_has_fileno = _supports_fileno(sys.stderr)\n so_flush = sys.stdout.flush\n se_flush = sys.stderr.flush\n if _so_has_fileno:\n so_fileno = sys.stdout.fileno()\n so_dup = os.dup(so_fileno)\n if _se_has_fileno:\n se_fileno = sys.stderr.fileno()\n se_dup = os.dup(se_fileno)\n\n outfile = temp_file_name()\n fout = open(outfile, 'w')\n if using_command:\n errfile = temp_file_name()\n ferr = open(errfile, 'w')\n\n log.debug('Running %s(%s,%r,%r,os.environ)' \\\n % (spawn_command.__name__, os.P_WAIT, argv[0], argv))\n\n if sys.version_info[0] >= 3 and os.name == 'nt':\n # Pre-encode os.environ, discarding un-encodable entries,\n # to avoid it failing during encoding as part of spawn. Failure\n # is possible if the environment contains entries that are not\n # encoded using the system codepage as windows expects.\n #\n # This is not necessary on unix, where os.environ is encoded\n # using the surrogateescape error handler and decoded using\n # it as part of spawn.\n encoded_environ = {}\n for k, v in os.environ.items():\n try:\n encoded_environ[k.encode(sys.getfilesystemencoding())] = v.encode(\n sys.getfilesystemencoding())\n except UnicodeEncodeError:\n log.debug(\"ignoring un-encodable env entry %s\", k)\n else:\n encoded_environ = os.environ\n\n argv0 = argv[0]\n if not using_command:\n argv[0] = quote_arg(argv0)\n\n so_flush()\n se_flush()\n if _so_has_fileno:\n os.dup2(fout.fileno(), so_fileno)\n\n if _se_has_fileno:\n if using_command:\n #XXX: disabled for now as it does not work from cmd under win32.\n # Tests fail on msys\n os.dup2(ferr.fileno(), se_fileno)\n else:\n os.dup2(fout.fileno(), se_fileno)\n try:\n status = spawn_command(os.P_WAIT, argv0, argv, encoded_environ)\n except Exception:\n errmess = str(get_exception())\n status = 999\n sys.stderr.write('%s: %s'%(errmess, argv[0]))\n\n so_flush()\n se_flush()\n if _so_has_fileno:\n os.dup2(so_dup, so_fileno)\n if _se_has_fileno:\n os.dup2(se_dup, se_fileno)\n\n fout.close()\n fout = open_latin1(outfile, 'r')\n text = fout.read()\n fout.close()\n os.remove(outfile)\n\n if using_command:\n ferr.close()\n ferr = open_latin1(errfile, 'r')\n errmess = ferr.read()\n ferr.close()\n os.remove(errfile)\n if errmess and not status:\n # Not sure how to handle the case where errmess\n # contains only warning messages and that should\n # not be treated as errors.\n #status = 998\n if text:\n text = text + '\\n'\n #text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess)\n text = text + errmess\n print (errmess)\n if text[-1:]=='\\n':\n text = text[:-1]\n if status is None:\n status = 0\n\n if use_tee:\n print (text)\n\n return status, text\n\n\ndef test_nt(**kws):\n pythonexe = get_pythonexe()\n echo = 
find_executable('echo')\n using_cygwin_echo = echo != 'echo'\n if using_cygwin_echo:\n log.warn('Using cygwin echo in win32 environment is not supported')\n\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'AAA\\',\\'\\')\"')\n assert s==0 and o=='', (s, o)\n\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'AAA\\')\"',\n AAA='Tere')\n assert s==0 and o=='Tere', (s, o)\n\n os.environ['BBB'] = 'Hi'\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'BBB\\',\\'\\')\"')\n assert s==0 and o=='Hi', (s, o)\n\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'BBB\\',\\'\\')\"',\n BBB='Hey')\n assert s==0 and o=='Hey', (s, o)\n\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'BBB\\',\\'\\')\"')\n assert s==0 and o=='Hi', (s, o)\n elif 0:\n s, o=exec_command('echo Hello')\n assert s==0 and o=='Hello', (s, o)\n\n s, o=exec_command('echo a%AAA%')\n assert s==0 and o=='a', (s, o)\n\n s, o=exec_command('echo a%AAA%', AAA='Tere')\n assert s==0 and o=='aTere', (s, o)\n\n os.environ['BBB'] = 'Hi'\n s, o=exec_command('echo a%BBB%')\n assert s==0 and o=='aHi', (s, o)\n\n s, o=exec_command('echo a%BBB%', BBB='Hey')\n assert s==0 and o=='aHey', (s, o)\n s, o=exec_command('echo a%BBB%')\n assert s==0 and o=='aHi', (s, o)\n\n s, o=exec_command('this_is_not_a_command')\n assert s and o!='', (s, o)\n\n s, o=exec_command('type not_existing_file')\n assert s and o!='', (s, o)\n\n s, o=exec_command('echo path=%path%')\n assert s==0 and o!='', (s, o)\n\n s, o=exec_command('%s -c \"import sys;sys.stderr.write(sys.platform)\"' \\\n % pythonexe)\n assert s==0 and o=='win32', (s, o)\n\n s, o=exec_command('%s -c \"raise \\'Ignore me.\\'\"' % pythonexe)\n assert s==1 and o, (s, o)\n\n s, o=exec_command('%s -c \"import sys;sys.stderr.write(\\'0\\');sys.stderr.write(\\'1\\');sys.stderr.write(\\'2\\')\"'\\\n % pythonexe)\n assert s==0 and o=='012', (s, o)\n\n s, o=exec_command('%s -c \"import sys;sys.exit(15)\"' % pythonexe)\n assert s==15 and o=='', (s, o)\n\n s, o=exec_command('%s -c \"print \\'Heipa\\'\"' % pythonexe)\n assert s==0 and o=='Heipa', (s, o)\n\n print ('ok')\n\ndef test_posix(**kws):\n s, o=exec_command(\"echo Hello\",**kws)\n assert s==0 and o=='Hello', (s, o)\n\n s, o=exec_command('echo $AAA',**kws)\n assert s==0 and o=='', (s, o)\n\n s, o=exec_command('echo \"$AAA\"',AAA='Tere',**kws)\n assert s==0 and o=='Tere', (s, o)\n\n\n s, o=exec_command('echo \"$AAA\"',**kws)\n assert s==0 and o=='', (s, o)\n\n os.environ['BBB'] = 'Hi'\n s, o=exec_command('echo \"$BBB\"',**kws)\n assert s==0 and o=='Hi', (s, o)\n\n s, o=exec_command('echo \"$BBB\"',BBB='Hey',**kws)\n assert s==0 and o=='Hey', (s, o)\n\n s, o=exec_command('echo \"$BBB\"',**kws)\n assert s==0 and o=='Hi', (s, o)\n\n\n s, o=exec_command('this_is_not_a_command',**kws)\n assert s!=0 and o!='', (s, o)\n\n s, o=exec_command('echo path=$PATH',**kws)\n assert s==0 and o!='', (s, o)\n\n s, o=exec_command('python -c \"import sys,os;sys.stderr.write(os.name)\"',**kws)\n assert s==0 and o=='posix', (s, o)\n\n s, o=exec_command('python -c \"raise \\'Ignore me.\\'\"',**kws)\n assert s==1 and o, (s, o)\n\n s, o=exec_command('python -c \"import sys;sys.stderr.write(\\'0\\');sys.stderr.write(\\'1\\');sys.stderr.write(\\'2\\')\"',**kws)\n assert s==0 and o=='012', (s, o)\n\n s, o=exec_command('python -c \"import sys;sys.exit(15)\"',**kws)\n assert s==15 and o=='', (s, o)\n\n s, o=exec_command('python -c \"print \\'Heipa\\'\"',**kws)\n assert 
s==0 and o=='Heipa', (s, o)\n\n print ('ok')\n\ndef test_execute_in(**kws):\n pythonexe = get_pythonexe()\n tmpfile = temp_file_name()\n fn = os.path.basename(tmpfile)\n tmpdir = os.path.dirname(tmpfile)\n f = open(tmpfile, 'w')\n f.write('Hello')\n f.close()\n\n s, o = exec_command('%s -c \"print \\'Ignore the following IOError:\\','\\\n 'open(%r,\\'r\\')\"' % (pythonexe, fn),**kws)\n assert s and o!='', (s, o)\n s, o = exec_command('%s -c \"print open(%r,\\'r\\').read()\"' % (pythonexe, fn),\n execute_in = tmpdir,**kws)\n assert s==0 and o=='Hello', (s, o)\n os.remove(tmpfile)\n print ('ok')\n\ndef test_svn(**kws):\n s, o = exec_command(['svn', 'status'],**kws)\n assert s, (s, o)\n print ('svn ok')\n\ndef test_cl(**kws):\n if os.name=='nt':\n s, o = exec_command(['cl', '/V'],**kws)\n assert s, (s, o)\n print ('cl ok')\n\nif os.name=='posix':\n test = test_posix\nelif os.name in ['nt', 'dos']:\n test = test_nt\nelse:\n raise NotImplementedError('exec_command tests for ', os.name)\n\n############################################################\n\nif __name__ == \"__main__\":\n\n test(use_tee=0)\n test(use_tee=1)\n test_execute_in(use_tee=0)\n test_execute_in(use_tee=1)\n test_svn(use_tee=1)\n test_cl(use_tee=1)\n" ]
[ [ "numpy.distutils.misc_util.make_temp_file", "numpy.distutils.log.debug", "numpy.distutils.compat.get_exception", "numpy.distutils.misc_util.is_sequence", "numpy.distutils.log.warn", "numpy.distutils.log.info", "numpy.compat.open_latin1" ] ]
[ { "matplotlib": [], "numpy": [ "1.24", "1.22", "1.23" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
yihui8776/TensorRT-DETR
[ "1f32e9a2f98e26ec5b2376f9a2695193887430fb", "1f32e9a2f98e26ec5b2376f9a2695193887430fb" ]
[ "trt_int8_quant.py", "model/matcher.py" ]
[ "\n#\n# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ~~~Medcare AI Lab~~~\n\nimport os\nimport glob\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport argparse\n\nimport torchvision.transforms as T\nfrom trt_util.common import build_engine_onnx_v2\nfrom trt_util.calibrator import Calibrator\n\n\ntransform = T.Compose([\n T.Resize((800,800)), # PIL.Image.BILINEAR\n T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\ndef preprocess(img_pil):\n img = transform(img_pil).cpu().numpy()\n return img\n\n# def preprocess(img_pil):\n# img = img_pil.resize((800, 800),Image.BILINEAR)\n# img = np.array(img).astype(np.float32) / 255.0\n# img = img.transpose(2,0,1)\n# # print(img.shape)\n# img = (img - np.array([ [[0.485]], [[0.456]], [[0.406]] ]))/np.array([ [[0.229]], [[0.224]], [[0.225]] ])\n\n# # img = img.transpose(1,2,0)\n# # img = np.expand_dims(img, axis=0)\n# img = np.ascontiguousarray(img)\n# img = np.array(img).astype(np.float32)\n# print(img.shape)\n# return img\n\nclass DataLoader:\n def __init__(self,calib_img_dir=\"./calib_train_image\",batch=1,batch_size=32):\n self.index = 0\n self.length = batch\n self.batch_size = batch_size\n self.calib_img_dir = calib_img_dir\n # self.img_list = [i.strip() for i in open('calib.txt').readlines()]\n self.img_list = glob.glob(os.path.join(self.calib_img_dir, \"*.jpg\"))\n print(f'[INFO] found all {len(self.img_list)} images to calib.')\n assert len(self.img_list) > self.batch_size * self.length, '[Error] {} must contains more than {} images to calib'.format(self.calib_img_dir,self.batch_size * self.length)\n self.calibration_data = np.zeros((self.batch_size,3,800,800), dtype=np.float32)\n\n def reset(self):\n self.index = 0\n\n def next_batch(self):\n if self.index < self.length:\n for i in range(self.batch_size):\n assert os.path.exists(self.img_list[i + self.index * self.batch_size]), '[Error] Batch not found!!'\n # data preprocess\n img = Image.open(self.img_list[i + self.index * self.batch_size])\n # img = cv2.imread(self.img_list[i + self.index * self.batch_size])\n img = preprocess(img)\n # self.calibration_data[i] = np.ones((3,800,800), dtype=np.float32)\n self.calibration_data[i] = img\n\n self.index += 1\n return np.ascontiguousarray(self.calibration_data, dtype=np.float32)\n else:\n return np.array([])\n\n def __len__(self):\n return self.length\n\ndef main(onnx_model_path,engine_model_path,calib_img_dir,calibration_table,fp16,int8,batch,batch_size):\n\n fp16_mode = fp16 \n int8_mode = int8 \n\n # calibration\n calibration_stream = DataLoader(calib_img_dir=calib_img_dir,batch=batch,batch_size=batch_size)\n engine_model_path = engine_model_path\n\n # 校准产生校准表,但是我们并没有生成校准表!\n engine_fixed = build_engine_onnx_v2(onnx_model_path, engine_model_path, fp16_mode=fp16_mode, \n int8_mode=int8_mode,max_batch_size=batch_size, calibration_stream=calibration_stream, \n calibration_table_path=calibration_table, save_engine=True)\n assert 
engine_fixed, '[Error] Broken engine_fixed'\n print('[INFO] ====> onnx to tensorrt completed !\\n')\n \nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='TensorRT INT8 Quant.')\n parser.add_argument('--onnx_model_path', type= str , default='./detr_sim.onnx', help='ONNX Model Path') \n parser.add_argument('--engine_model_path', type= str , default='./detr_int8.plan', help='TensorRT Engine File')\n parser.add_argument('--calib_img_dir', type= str , default='./calib_train_image', help='Calib Image Dir') \n parser.add_argument('--calibration_table', type=str,default=\"./detr_calibration.cache\", help='Calibration Table')\n parser.add_argument('--batch', type=int,default=958, help='Number of Batch: [total_image/batch_size]') # 30660/batch_size\n parser.add_argument('--batch_size', type=int,default=32, help='Batch Size')\n\n parser.add_argument('--fp16', action=\"store_true\", help='Open FP16 Mode')\n parser.add_argument('--int8', action=\"store_true\", help='Open INT8 Mode')\n\n args = parser.parse_args()\n main(args.onnx_model_path,args.engine_model_path,args.calib_img_dir,args.calibration_table,\n args.fp16,args.int8,args.batch,args.batch_size)\n\n # python3 trt_int8_quant.py --onnx_model_path ./detr_sim.onnx --engine_model_path ./detr_int8.plan --calib_img_dir ./calib_train_image --calibration_table ./detr_calibration.cache --batch 1 --int8\n\n", "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nModules to compute the matching cost and solve the corresponding LSAP.\n\"\"\"\nimport torch\nfrom scipy.optimize import linear_sum_assignment\nfrom torch import nn\n\nfrom .box_ops import box_cxcywh_to_xyxy, generalized_box_iou\n\n\nclass HungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. 
In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\" Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n out_prob = outputs[\"pred_logits\"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]\n\n\ndef build_matcher(args):\n return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)\n" ]
[ [ "numpy.ascontiguousarray", "numpy.array", "numpy.zeros" ], [ "torch.cat", "torch.cdist", "torch.no_grad", "scipy.optimize.linear_sum_assignment", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.4", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
robrkerr/tensorflow-models
[ "3656a07e89be134c2bc333c60a6c709e475024a6", "3656a07e89be134c2bc333c60a6c709e475024a6", "3656a07e89be134c2bc333c60a6c709e475024a6", "3656a07e89be134c2bc333c60a6c709e475024a6" ]
[ "differential_privacy/dp_sgd/dp_optimizer/utils.py", "slim/nets/resnet_v1.py", "inception/inception/slim/variables.py", "inception/inception/imagenet_distributed_train.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utils for building and training NN models.\n\"\"\"\nfrom __future__ import division\n\nimport math\n\nimport numpy\nimport tensorflow as tf\n\n\nclass LayerParameters(object):\n \"\"\"class that defines a non-conv layer.\"\"\"\n def __init__(self):\n self.name = \"\"\n self.num_units = 0\n self._with_bias = False\n self.relu = False\n self.gradient_l2norm_bound = 0.0\n self.bias_gradient_l2norm_bound = 0.0\n self.trainable = True\n self.weight_decay = 0.0\n\n\nclass ConvParameters(object):\n \"\"\"class that defines a conv layer.\"\"\"\n def __init__(self):\n self.patch_size = 5\n self.stride = 1\n self.in_channels = 1\n self.out_channels = 0\n self.with_bias = True\n self.relu = True\n self.max_pool = True\n self.max_pool_size = 2\n self.max_pool_stride = 2\n self.trainable = False\n self.in_size = 28\n self.name = \"\"\n self.num_outputs = 0\n self.bias_stddev = 0.1\n\n\n# Parameters for a layered neural network.\nclass NetworkParameters(object):\n \"\"\"class that define the overall model structure.\"\"\"\n def __init__(self):\n self.input_size = 0\n self.projection_type = 'NONE' # NONE, RANDOM, PCA\n self.projection_dimensions = 0\n self.default_gradient_l2norm_bound = 0.0\n self.layer_parameters = [] # List of LayerParameters\n self.conv_parameters = [] # List of ConvParameters\n\n\ndef GetTensorOpName(x):\n \"\"\"Get the name of the op that created a tensor.\n\n Useful for naming related tensors, as ':' in name field of op is not permitted\n\n Args:\n x: the input tensor.\n Returns:\n the name of the op.\n \"\"\"\n\n t = x.name.rsplit(\":\", 1)\n if len(t) == 1:\n return x.name\n else:\n return t[0]\n\n\ndef BuildNetwork(inputs, network_parameters):\n \"\"\"Build a network using the given parameters.\n\n Args:\n inputs: a Tensor of floats containing the input data.\n network_parameters: NetworkParameters object\n that describes the parameters for the network.\n Returns:\n output, training_parameters: where the outputs (a tensor) is the output\n of the network, and training_parameters (a dictionary that maps the\n name of each variable to a dictionary of parameters) is the parameters\n used during training.\n \"\"\"\n\n training_parameters = {}\n num_inputs = network_parameters.input_size\n outputs = inputs\n projection = None\n\n # First apply convolutions, if needed\n for conv_param in network_parameters.conv_parameters:\n outputs = tf.reshape(\n outputs,\n [-1, conv_param.in_size, conv_param.in_size,\n conv_param.in_channels])\n conv_weights_name = \"%s_conv_weight\" % (conv_param.name)\n conv_bias_name = \"%s_conv_bias\" % (conv_param.name)\n conv_std_dev = 1.0 / (conv_param.patch_size\n * math.sqrt(conv_param.in_channels))\n conv_weights = tf.Variable(\n tf.truncated_normal([conv_param.patch_size,\n conv_param.patch_size,\n conv_param.in_channels,\n conv_param.out_channels],\n 
stddev=conv_std_dev),\n trainable=conv_param.trainable,\n name=conv_weights_name)\n conv_bias = tf.Variable(\n tf.truncated_normal([conv_param.out_channels],\n stddev=conv_param.bias_stddev),\n trainable=conv_param.trainable,\n name=conv_bias_name)\n training_parameters[conv_weights_name] = {}\n training_parameters[conv_bias_name] = {}\n conv = tf.nn.conv2d(outputs, conv_weights,\n strides=[1, conv_param.stride,\n conv_param.stride, 1],\n padding=\"SAME\")\n relud = tf.nn.relu(conv + conv_bias)\n mpd = tf.nn.max_pool(relud, ksize=[1,\n conv_param.max_pool_size,\n conv_param.max_pool_size, 1],\n strides=[1, conv_param.max_pool_stride,\n conv_param.max_pool_stride, 1],\n padding=\"SAME\")\n outputs = mpd\n num_inputs = conv_param.num_outputs\n # this should equal\n # in_size * in_size * out_channels / (stride * max_pool_stride)\n\n # once all the convs are done, reshape to make it flat\n outputs = tf.reshape(outputs, [-1, num_inputs])\n\n # Now project, if needed\n if network_parameters.projection_type is not \"NONE\":\n projection = tf.Variable(tf.truncated_normal(\n [num_inputs, network_parameters.projection_dimensions],\n stddev=1.0 / math.sqrt(num_inputs)), trainable=False, name=\"projection\")\n num_inputs = network_parameters.projection_dimensions\n outputs = tf.matmul(outputs, projection)\n\n # Now apply any other layers\n\n for layer_parameters in network_parameters.layer_parameters:\n num_units = layer_parameters.num_units\n hidden_weights_name = \"%s_weight\" % (layer_parameters.name)\n hidden_weights = tf.Variable(\n tf.truncated_normal([num_inputs, num_units],\n stddev=1.0 / math.sqrt(num_inputs)),\n name=hidden_weights_name, trainable=layer_parameters.trainable)\n training_parameters[hidden_weights_name] = {}\n if layer_parameters.gradient_l2norm_bound:\n training_parameters[hidden_weights_name][\"gradient_l2norm_bound\"] = (\n layer_parameters.gradient_l2norm_bound)\n if layer_parameters.weight_decay:\n training_parameters[hidden_weights_name][\"weight_decay\"] = (\n layer_parameters.weight_decay)\n\n outputs = tf.matmul(outputs, hidden_weights)\n if layer_parameters.with_bias:\n hidden_biases_name = \"%s_bias\" % (layer_parameters.name)\n hidden_biases = tf.Variable(tf.zeros([num_units]),\n name=hidden_biases_name)\n training_parameters[hidden_biases_name] = {}\n if layer_parameters.bias_gradient_l2norm_bound:\n training_parameters[hidden_biases_name][\n \"bias_gradient_l2norm_bound\"] = (\n layer_parameters.bias_gradient_l2norm_bound)\n\n outputs += hidden_biases\n if layer_parameters.relu:\n outputs = tf.nn.relu(outputs)\n # num_inputs for the next layer is num_units in the current layer.\n num_inputs = num_units\n\n return outputs, projection, training_parameters\n\n\ndef VaryRate(start, end, saturate_epochs, epoch):\n \"\"\"Compute a linearly varying number.\n\n Decrease linearly from start to end until epoch saturate_epochs.\n\n Args:\n start: the initial number.\n end: the end number.\n saturate_epochs: after this we do not reduce the number; if less than\n or equal to zero, just return start.\n epoch: the current learning epoch.\n Returns:\n the caculated number.\n \"\"\"\n if saturate_epochs <= 0:\n return start\n\n step = (start - end) / (saturate_epochs - 1)\n if epoch < saturate_epochs:\n return start - step * epoch\n else:\n return end\n\n\ndef BatchClipByL2norm(t, upper_bound, name=None):\n \"\"\"Clip an array of tensors by L2 norm.\n\n Shrink each dimension-0 slice of tensor (for matrix it is each row) such\n that the l2 norm is at most upper_bound. 
Here we clip each row as it\n corresponds to each example in the batch.\n\n Args:\n t: the input tensor.\n upper_bound: the upperbound of the L2 norm.\n name: optional name.\n Returns:\n the clipped tensor.\n \"\"\"\n\n assert upper_bound > 0\n with tf.op_scope([t, upper_bound], name, \"batch_clip_by_l2norm\") as name:\n saved_shape = tf.shape(t)\n batch_size = tf.slice(saved_shape, [0], [1])\n t2 = tf.reshape(t, tf.concat(0, [batch_size, [-1]]))\n upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),\n tf.constant(1.0/upper_bound))\n # Add a small number to avoid divide by 0\n l2norm_inv = tf.rsqrt(tf.reduce_sum(t2 * t2, [1]) + 0.000001)\n scale = tf.minimum(l2norm_inv, upper_bound_inv) * upper_bound\n clipped_t = tf.matmul(tf.diag(scale), t2)\n clipped_t = tf.reshape(clipped_t, saved_shape, name=name)\n return clipped_t\n\n\ndef SoftThreshold(t, threshold_ratio, name=None):\n \"\"\"Soft-threshold a tensor by the mean value.\n\n Softthreshold each dimension-0 vector (for matrix it is each column) by\n the mean of absolute value multiplied by the threshold_ratio factor. Here\n we soft threshold each column as it corresponds to each unit in a layer.\n\n Args:\n t: the input tensor.\n threshold_ratio: the threshold ratio.\n name: the optional name for the returned tensor.\n Returns:\n the thresholded tensor, where each entry is soft-thresholded by\n threshold_ratio times the mean of the aboslute value of each column.\n \"\"\"\n\n assert threshold_ratio >= 0\n with tf.op_scope([t, threshold_ratio], name, \"soft_thresholding\") as name:\n saved_shape = tf.shape(t)\n t2 = tf.reshape(t, tf.concat(0, [tf.slice(saved_shape, [0], [1]), -1]))\n t_abs = tf.abs(t2)\n t_x = tf.sign(t2) * tf.nn.relu(t_abs -\n (tf.reduce_mean(t_abs, [0],\n keep_dims=True) *\n threshold_ratio))\n return tf.reshape(t_x, saved_shape, name=name)\n\n\ndef AddGaussianNoise(t, sigma, name=None):\n \"\"\"Add i.i.d. Gaussian noise (0, sigma^2) to every entry of t.\n\n Args:\n t: the input tensor.\n sigma: the stddev of the Gaussian noise.\n name: optional name.\n Returns:\n the noisy tensor.\n \"\"\"\n\n with tf.op_scope([t, sigma], name, \"add_gaussian_noise\") as name:\n noisy_t = t + tf.random_normal(tf.shape(t), stddev=sigma)\n return noisy_t\n\n\ndef GenerateBinomialTable(m):\n \"\"\"Generate binomial table.\n\n Args:\n m: the size of the table.\n Returns:\n A two dimensional array T where T[i][j] = (i choose j),\n for 0<= i, j <=m.\n \"\"\"\n\n table = numpy.zeros((m + 1, m + 1), dtype=numpy.float64)\n for i in range(m + 1):\n table[i, 0] = 1\n for i in range(1, m + 1):\n for j in range(1, m + 1):\n v = table[i - 1, j] + table[i - 1, j -1]\n assert not math.isnan(v) and not math.isinf(v)\n table[i, j] = v\n return tf.convert_to_tensor(table)\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for the original form of Residual Networks.\n\nThe 'v1' residual networks (ResNets) implemented in this module were proposed\nby:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\nOther variants were introduced in:\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027\n\nThe networks defined in this module utilize the bottleneck building block of\n[1] with projection shortcuts only for increasing depths. They employ batch\nnormalization *after* every weight layer. This is the architecture used by\nMSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and\nResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'\narchitecture and the alternative 'v2' architecture of [2] which uses batch\nnormalization *before* every weight layer in the so-called full pre-activation\nunits.\n\nTypical use:\n\n from tensorflow.contrib.slim.nets import resnet_v1\n\nResNet-101 for image classification into 1000 classes:\n\n # inputs has shape [batch, 224, 224, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope()):\n net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)\n\nResNet-101 for semantic segmentation into 21 classes:\n\n # inputs has shape [batch, 513, 513, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope()):\n net, end_points = resnet_v1.resnet_v1_101(inputs,\n 21,\n is_training=False,\n global_pool=False,\n output_stride=16)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nets import resnet_utils\n\n\nresnet_arg_scope = resnet_utils.resnet_arg_scope\nslim = tf.contrib.slim\n\n\[email protected]_arg_scope\ndef bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,\n outputs_collections=None, scope=None):\n \"\"\"Bottleneck residual unit variant with BN after convolutions.\n\n This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for\n its definition. Note that we use here the bottleneck variant which has an\n extra bottleneck layer.\n\n When putting together two consecutive ResNet blocks that use this unit, one\n should use stride = 2 in the last unit of the first block.\n\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The depth of the ResNet unit output.\n depth_bottleneck: The depth of the bottleneck layers.\n stride: The ResNet unit's stride. 
Determines the amount of downsampling of\n the units output compared to its input.\n rate: An integer, rate for atrous convolution.\n outputs_collections: Collection to add the ResNet unit output.\n scope: Optional variable_scope.\n\n Returns:\n The ResNet unit's output.\n \"\"\"\n with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n if depth == depth_in:\n shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')\n else:\n shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride,\n activation_fn=None, scope='shortcut')\n\n residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n activation_fn=None, scope='conv3')\n\n output = tf.nn.relu(shortcut + residual)\n\n return slim.utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope,\n output)\n\n\ndef resnet_v1(inputs,\n blocks,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n include_root_block=True,\n reuse=None,\n scope=None):\n \"\"\"Generator for v1 ResNet models.\n\n This function generates a family of ResNet v1 models. See the resnet_v1_*()\n methods for specific model instantiations, obtained by selecting different\n block instantiations that produce ResNets of various depths.\n\n Training for image classification on Imagenet is usually done with [224, 224]\n inputs, resulting in [7, 7] feature maps at the output of the last ResNet\n block for the ResNets defined in [1] that have nominal stride equal to 32.\n However, for dense prediction tasks we advise that one uses inputs with\n spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In\n this case the feature maps at the ResNet output will have spatial shape\n [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]\n and corners exactly aligned with the input image corners, which greatly\n facilitates alignment of the features to the image. Using as input [225, 225]\n images results in [8, 8] feature maps at the output of the last ResNet block.\n\n For dense prediction tasks, the ResNet needs to run in fully-convolutional\n (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all\n have nominal stride equal to 32 and a good choice in FCN mode is to use\n output_stride=16 in order to increase the density of the computed features at\n small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.\n\n Args:\n inputs: A tensor of size [batch, height_in, width_in, channels].\n blocks: A list of length equal to the number of ResNet blocks. Each element\n is a resnet_utils.Block object describing the units in the block.\n num_classes: Number of predicted classes for classification tasks. If None\n we return the features before the logit layer.\n is_training: whether is training or not.\n global_pool: If True, we perform global average pooling before computing the\n logits. Set to True for image classification, False for dense prediction.\n output_stride: If None, then the output will be computed at the nominal\n network stride. 
If output_stride is not None, it specifies the requested\n ratio of input to output spatial resolution.\n include_root_block: If True, include the initial convolution followed by\n max-pooling, if False excludes it.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n Returns:\n net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].\n If global_pool is False, then height_out and width_out are reduced by a\n factor of output_stride compared to the respective height_in and width_in,\n else both height_out and width_out equal one. If num_classes is None, then\n net is the output of the last ResNet block, potentially after global\n average pooling. If num_classes is not None, net contains the pre-softmax\n activations.\n end_points: A dictionary from components of the network to the corresponding\n activation.\n\n Raises:\n ValueError: If the target output_stride is not valid.\n \"\"\"\n with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:\n end_points_collection = sc.name + '_end_points'\n with slim.arg_scope([slim.conv2d, bottleneck,\n resnet_utils.stack_blocks_dense],\n outputs_collections=end_points_collection):\n with slim.arg_scope([slim.batch_norm], is_training=is_training):\n net = inputs\n if include_root_block:\n if output_stride is not None:\n if output_stride % 4 != 0:\n raise ValueError('The output_stride needs to be a multiple of 4.')\n output_stride /= 4\n net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')\n net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')\n net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)\n if global_pool:\n # Global average pooling.\n net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)\n if num_classes is not None:\n net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='logits')\n # Convert end_points_collection into a dictionary of end_points.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if num_classes is not None:\n end_points['predictions'] = slim.softmax(net, scope='predictions')\n return net, end_points\nresnet_v1.default_image_size = 224\n\n\ndef resnet_v1_50(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_50'):\n \"\"\"ResNet-50 model of [1]. See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)\n ]\n return resnet_v1(inputs, blocks, num_classes, is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n\n\ndef resnet_v1_101(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_101'):\n \"\"\"ResNet-101 model of [1]. 
See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)\n ]\n return resnet_v1(inputs, blocks, num_classes, is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n\n\ndef resnet_v1_152(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_152'):\n \"\"\"ResNet-152 model of [1]. See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)]\n return resnet_v1(inputs, blocks, num_classes, is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n\n\ndef resnet_v1_200(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n reuse=None,\n scope='resnet_v1_200'):\n \"\"\"ResNet-200 model of [2]. See resnet_v1() for arg and return description.\"\"\"\n blocks = [\n resnet_utils.Block(\n 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),\n resnet_utils.Block(\n 'block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]),\n resnet_utils.Block(\n 'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),\n resnet_utils.Block(\n 'block4', bottleneck, [(2048, 512, 1)] * 3)]\n return resnet_v1(inputs, blocks, num_classes, is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, reuse=reuse, scope=scope)\n", "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains convenience wrappers for creating variables in TF-Slim.\n\nThe variables module is typically used for defining model variables from the\nops routines (see slim.ops). 
Such variables are used for training, evaluation\nand inference of models.\n\nAll the variables created through this module would be added to the\nMODEL_VARIABLES collection, if you create a model variable outside slim, it can\nbe added with slim.variables.add_variable(external_variable, reuse).\n\nUsage:\n weights_initializer = tf.truncated_normal_initializer(stddev=0.01)\n l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005)\n weights = variables.variable('weights',\n shape=[100, 100],\n initializer=weights_initializer,\n regularizer=l2_regularizer,\n device='/cpu:0')\n\n biases = variables.variable('biases',\n shape=[100],\n initializer=tf.zeros_initializer(),\n device='/cpu:0')\n\n # More complex example.\n\n net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1')\n net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')\n with slim.arg_scope([variables.variable], restore=False):\n net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3')\n\n # Get all model variables from all the layers.\n model_variables = slim.variables.get_variables()\n\n # Get all model variables from a specific the layer, i.e 'conv1'.\n conv1_variables = slim.variables.get_variables('conv1')\n\n # Get all weights from all the layers.\n weights = slim.variables.get_variables_by_name('weights')\n\n # Get all bias from all the layers.\n biases = slim.variables.get_variables_by_name('biases')\n\n # Get all variables to restore.\n # (i.e. only those created by 'conv1' and 'conv2')\n variables_to_restore = slim.variables.get_variables_to_restore()\n\n************************************************\n* Initializing model variables from a checkpoint\n************************************************\n\n# Create some variables.\nv1 = slim.variables.variable(name=\"v1\", ..., restore=False)\nv2 = slim.variables.variable(name=\"v2\", ...) 
# By default restore=True\n...\n# The list of variables to restore should only contain 'v2'.\nvariables_to_restore = slim.variables.get_variables_to_restore()\nrestorer = tf.train.Saver(variables_to_restore)\nwith tf.Session() as sess:\n # Restore variables from disk.\n restorer.restore(sess, \"/tmp/model.ckpt\")\n print(\"Model restored.\")\n # Do some work with the model\n ...\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom inception.slim import scopes\n\n# Collection containing all the variables created using slim.variables\nMODEL_VARIABLES = '_model_variables_'\n\n# Collection containing the slim.variables that are created with restore=True.\nVARIABLES_TO_RESTORE = '_variables_to_restore_'\n\n\ndef add_variable(var, restore=True):\n \"\"\"Adds a variable to the MODEL_VARIABLES collection.\n\n Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.\n Args:\n var: a variable.\n restore: whether the variable should be added to the\n VARIABLES_TO_RESTORE collection.\n\n \"\"\"\n collections = [MODEL_VARIABLES]\n if restore:\n collections.append(VARIABLES_TO_RESTORE)\n for collection in collections:\n if var not in tf.get_collection(collection):\n tf.add_to_collection(collection, var)\n\n\ndef get_variables(scope=None, suffix=None):\n \"\"\"Gets the list of variables, filtered by scope and/or suffix.\n\n Args:\n scope: an optional scope for filtering the variables to return.\n suffix: an optional suffix for filtering the variables to return.\n\n Returns:\n a copied list of variables with scope and suffix.\n \"\"\"\n candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]\n if suffix is not None:\n candidates = [var for var in candidates if var.op.name.endswith(suffix)]\n return candidates\n\n\ndef get_variables_to_restore():\n \"\"\"Gets the list of variables to restore.\n\n Returns:\n a copied list of variables.\n \"\"\"\n return tf.get_collection(VARIABLES_TO_RESTORE)[:]\n\n\ndef get_variables_by_name(given_name, scope=None):\n \"\"\"Gets the list of variables that were given that name.\n\n Args:\n given_name: name given to the variable without scope.\n scope: an optional scope for filtering the variables to return.\n\n Returns:\n a copied list of variables with the given name and prefix.\n \"\"\"\n return get_variables(scope=scope, suffix=given_name)\n\n\ndef get_unique_variable(name):\n \"\"\"Gets the variable uniquely identified by that name.\n\n Args:\n name: a name that uniquely identifies the variable.\n\n Returns:\n a tensorflow variable.\n\n Raises:\n ValueError: if no variable uniquely identified by the name exists.\n \"\"\"\n candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)\n if not candidates:\n raise ValueError('Couldnt find variable %s' % name)\n\n for candidate in candidates:\n if candidate.op.name == name:\n return candidate\n raise ValueError('Variable %s does not uniquely identify a variable', name)\n\n\nclass VariableDeviceChooser(object):\n \"\"\"Slim device chooser for variables.\n\n When using a parameter server it will assign them in a round-robin fashion.\n When not using a parameter server it allows GPU:0 placement otherwise CPU:0.\n \"\"\"\n\n def __init__(self,\n num_parameter_servers=0,\n ps_device='/job:ps',\n placement='CPU:0'):\n \"\"\"Initialize VariableDeviceChooser.\n\n Args:\n num_parameter_servers: number of parameter servers.\n ps_device: string representing the parameter server device.\n placement: string 
representing the placement of the variable either CPU:0\n or GPU:0. When using parameter servers forced to CPU:0.\n \"\"\"\n self._num_ps = num_parameter_servers\n self._ps_device = ps_device\n self._placement = placement if num_parameter_servers == 0 else 'CPU:0'\n self._next_task_id = 0\n\n def __call__(self, op):\n device_string = ''\n if self._num_ps > 0:\n task_id = self._next_task_id\n self._next_task_id = (self._next_task_id + 1) % self._num_ps\n device_string = '%s/task:%d' % (self._ps_device, task_id)\n device_string += '/%s' % self._placement\n return device_string\n\n\n# TODO(sguada) Remove once get_variable is able to colocate op.devices.\ndef variable_device(device, name):\n \"\"\"Fix the variable device to colocate its ops.\"\"\"\n if callable(device):\n var_name = tf.get_variable_scope().name + '/' + name\n var_def = tf.NodeDef(name=var_name, op='Variable')\n device = device(var_def)\n if device is None:\n device = ''\n return device\n\n\[email protected]_arg_scope\ndef global_step(device=''):\n \"\"\"Returns the global step variable.\n\n Args:\n device: Optional device to place the variable. It can be an string or a\n function that is called to get the device for the variable.\n\n Returns:\n the tensor representing the global step variable.\n \"\"\"\n global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)\n if global_step_ref:\n return global_step_ref[0]\n else:\n collections = [\n VARIABLES_TO_RESTORE,\n tf.GraphKeys.GLOBAL_VARIABLES,\n tf.GraphKeys.GLOBAL_STEP,\n ]\n # Get the device for the variable.\n with tf.device(variable_device(device, 'global_step')):\n return tf.get_variable('global_step', shape=[], dtype=tf.int64,\n initializer=tf.zeros_initializer,\n trainable=False, collections=collections)\n\n\[email protected]_arg_scope\ndef variable(name, shape=None, dtype=tf.float32, initializer=None,\n regularizer=None, trainable=True, collections=None, device='',\n restore=True):\n \"\"\"Gets an existing variable with these parameters or creates a new one.\n\n It also add itself to a group with its name.\n\n Args:\n name: the name of the new or existing variable.\n shape: shape of the new or existing variable.\n dtype: type of the new or existing variable (defaults to `DT_FLOAT`).\n initializer: initializer for the variable if one is created.\n regularizer: a (Tensor -> Tensor or None) function; the result of\n applying it on a newly created variable will be added to the collection\n GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.\n trainable: If `True` also add the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n collections: A list of collection names to which the Variable will be added.\n Note that the variable is always also added to the tf.GraphKeys.GLOBAL_VARIABLES\n and MODEL_VARIABLES collections.\n device: Optional device to place the variable. 
It can be an string or a\n function that is called to get the device for the variable.\n restore: whether the variable should be added to the\n VARIABLES_TO_RESTORE collection.\n\n Returns:\n The created or existing variable.\n \"\"\"\n collections = list(collections or [])\n\n # Make sure variables are added to tf.GraphKeys.GLOBAL_VARIABLES and MODEL_VARIABLES\n collections += [tf.GraphKeys.GLOBAL_VARIABLES, MODEL_VARIABLES]\n # Add to VARIABLES_TO_RESTORE if necessary\n if restore:\n collections.append(VARIABLES_TO_RESTORE)\n # Remove duplicates\n collections = set(collections)\n # Get the device for the variable.\n with tf.device(variable_device(device, name)):\n return tf.get_variable(name, shape=shape, dtype=dtype,\n initializer=initializer, regularizer=regularizer,\n trainable=trainable, collections=collections)\n", "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=line-too-long\n\"\"\"A binary to train Inception in a distributed manner using multiple systems.\n\nPlease see accompanying README.md for details and instructions.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom inception import inception_distributed_train\nfrom inception.imagenet_data import ImagenetData\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef main(unused_args):\n assert FLAGS.job_name in ['ps', 'worker'], 'job_name must be ps or worker'\n\n # Extract all the hostnames for the ps and worker jobs to construct the\n # cluster spec.\n ps_hosts = FLAGS.ps_hosts.split(',')\n worker_hosts = FLAGS.worker_hosts.split(',')\n tf.logging.info('PS hosts are: %s' % ps_hosts)\n tf.logging.info('Worker hosts are: %s' % worker_hosts)\n\n cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts,\n 'worker': worker_hosts})\n server = tf.train.Server(\n {'ps': ps_hosts,\n 'worker': worker_hosts},\n job_name=FLAGS.job_name,\n task_index=FLAGS.task_id)\n\n if FLAGS.job_name == 'ps':\n # `ps` jobs wait for incoming connections from the workers.\n server.join()\n else:\n # `worker` jobs will actually do the work.\n dataset = ImagenetData(subset=FLAGS.subset)\n assert dataset.data_files()\n # Only the chief checks for or creates train_dir.\n if FLAGS.task_id == 0:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MakeDirs(FLAGS.train_dir)\n inception_distributed_train.train(server.target, dataset, cluster_spec)\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.sign", "tensorflow.concat", "tensorflow.zeros", "tensorflow.nn.max_pool", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.diag", "tensorflow.nn.conv2d", "numpy.zeros", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.shape", "tensorflow.op_scope", "tensorflow.nn.relu", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.slice", "tensorflow.reshape", "tensorflow.abs" ], [ "tensorflow.variable_scope", "tensorflow.nn.relu", "tensorflow.reduce_mean" ], [ "tensorflow.get_variable", "tensorflow.get_collection", "tensorflow.get_variable_scope", "tensorflow.add_to_collection", "tensorflow.NodeDef" ], [ "tensorflow.train.Server", "tensorflow.gfile.Exists", "tensorflow.train.ClusterSpec", "tensorflow.gfile.MakeDirs", "tensorflow.logging.info", "tensorflow.logging.set_verbosity", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
jtpils/compression
[ "b758903c6df5eeafb5d444e8cec85d1b2bc132d3" ]
[ "tensorflow_compression/__init__.py" ]
[ "# Copyright 2018 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Data compression tools.\"\"\"\n\nfrom __future__ import absolute_import as _absolute_import\nfrom __future__ import division as _division\nfrom __future__ import print_function as _print_function\n\ntry:\n import tensorflow as _tensorflow\n _tf_version = [int(v) for v in _tensorflow.version.VERSION.split(\".\")]\n assert _tf_version[0] == 1 and _tf_version[1] >= 14\nexcept (ImportError, AssertionError):\n raise RuntimeError(\"For tensorflow_compression, please install TensorFlow \"\n \"1.14 or above. TensorFlow 2 is not yet supported.\")\n\n\n# pylint: disable=wildcard-import\nfrom tensorflow_compression.python.layers.entropy_models import *\nfrom tensorflow_compression.python.layers.gdn import *\nfrom tensorflow_compression.python.layers.initializers import *\nfrom tensorflow_compression.python.layers.parameterizers import *\nfrom tensorflow_compression.python.layers.signal_conv import *\nfrom tensorflow_compression.python.ops.math_ops import *\nfrom tensorflow_compression.python.ops.padding_ops import *\nfrom tensorflow_compression.python.ops.range_coding_ops import *\nfrom tensorflow_compression.python.ops.spectral_ops import *\n# pylint: enable=wildcard-import\n" ]
[ [ "tensorflow.version.VERSION.split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kali-Hac/SPC-MGR
[ "3eccceeba97e0dca62132187c6645b98620f3bd1" ]
[ "SPC-MGR.py" ]
[ "\"\"\"\r\nThe SPC-MGR is built based in part on graph attention mechanism (https://arxiv.org/abs/1710.10903),\r\npart on MG-SCR (https://www.ijcai.org/proceedings/2021/0135),\r\nand includes open-source codes provided by\r\nthe project of Graph Attention Network (GAT) at https://github.com/PetarV-/GAT,\r\nand the project of MG-SCR at https://github.com/Kali-Hac/MG-SCR.\r\n\"\"\"\r\n\r\nimport time\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport os, sys\r\nfrom models import GAT as MSRL # (Veličković et al.)\r\nfrom utils import process_L3 as process\r\nfrom utils.faiss_rerank import compute_jaccard_distance\r\nfrom tensorflow.python.layers.core import Dense\r\nfrom sklearn.preprocessing import label_binarize\r\nfrom sklearn.cluster import DBSCAN\r\nimport torch\r\nimport collections\r\nfrom sklearn.metrics import average_precision_score\r\n\r\ndataset = ''\r\nprobe = ''\r\npre_dir = 'ReID_Models/'\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\n\r\nnb_nodes = 20 # number of nodes in joint-scale graph\r\nnhood = 1 # structural relation learning (nhood=1 for neighbor nodes)\r\nfusion_lambda = 1 # collaboration fusion coefficient\r\nft_size = 3 # originial node feature dimension (D)\r\ntime_step = 6 # sequence length (f)\r\n\r\n\r\n# training params\r\nbatch_size = 256\r\nnb_epochs = 100000\r\npatience = 250 # patience for early stopping\r\nhid_units = [8] # numbers of hidden units per each attention head in each layer\r\nMs = [8, 1] # additional entry for the output layer\r\nk1, k2 = 20, 6 # parameters to compute feature distance matrix\r\nresidual = False\r\nnonlinearity = tf.nn.elu\r\n\r\n\r\ntf.app.flags.DEFINE_string('dataset', 'KS20', \"Dataset: IAS, KS20, BIWI, CASIA-B or KGBD\")\r\ntf.app.flags.DEFINE_string('length', '6', \"4, 6, 8, 10 or 12\")\r\ntf.app.flags.DEFINE_string('t', '0.07', \"temperature for contrastive learning\")\r\ntf.app.flags.DEFINE_string('lr', '0.00035', \"learning rate\")\r\ntf.app.flags.DEFINE_string('eps', '0.6', \"distance parameter in DBSCAN\")\r\ntf.app.flags.DEFINE_string('min_samples', '2', \"minimum sample number in DBSCAN\")\r\ntf.app.flags.DEFINE_string('probe', 'probe', \"for testing probe\")\r\ntf.app.flags.DEFINE_string('gpu', '0', \"GPU number\")\r\ntf.app.flags.DEFINE_string('probe_view', '', \"test different views on CASIA B or KS20\")\r\ntf.app.flags.DEFINE_string('gallery_view', '', \"test different views on CASIA B or KS20\")\r\ntf.app.flags.DEFINE_string('struct_only', '0', \"struct_only\")\r\ntf.app.flags.DEFINE_string('m', '8', \"structural relation heads\")\r\ntf.app.flags.DEFINE_string('probe_type', '', \"probe.gallery\")\r\ntf.app.flags.DEFINE_string('patience', '200', \"epochs for early stopping\")\r\ntf.app.flags.DEFINE_string('fusion_lambda', '1', \"collaboration fusion coefficient\")\r\ntf.app.flags.DEFINE_string('S_dataset', '', \"Source Dataset\")\r\ntf.app.flags.DEFINE_string('S_probe', '', \"Source Dataset probe\")\r\ntf.app.flags.DEFINE_string('mode', 'UF', \"Unsupervised Fine-tuning (UF) or Direct Generalization (DG)\")\r\ntf.app.flags.DEFINE_string('evaluate', '0', \"evaluate on the best model\")\r\nFLAGS = tf.app.flags.FLAGS\r\n\r\n\r\n# check parameters\r\nif FLAGS.dataset not in ['IAS', 'KGBD', 'KS20', 'BIWI', 'CASIA_B']:\r\n\traise Exception('Dataset must be IAS, KGBD, KS20, BIWI or CASIA B.')\r\nif not FLAGS.gpu.isdigit() or int(FLAGS.gpu) < 0:\r\n\traise Exception('GPU number must be a 
positive integer.')\r\nif FLAGS.dataset == 'CASIA_B':\r\n\tpass\r\nelse:\r\n\tif FLAGS.length not in ['4', '6', '8', '10', '12']:\r\n\t\traise Exception('Length number must be 4, 6, 8, 10 or 12.')\r\nif FLAGS.probe not in ['probe', 'Walking', 'Still', 'A', 'B']:\r\n\traise Exception('Dataset probe must be \"A\" (for IAS-A), \"B\" (for IAS-B), \"probe\" (for KS20, KGBD).')\r\nif float(FLAGS.fusion_lambda) < 0 or float(FLAGS.fusion_lambda) > 1:\r\n\traise Exception('Multi-Level Graph Fusion coefficient must be not less than 0 or not larger than 1.')\r\nif FLAGS.mode not in ['UF', 'DG']:\r\n\traise Exception('Mode must be UF or DG.')\r\nif FLAGS.mode == 'DG' and FLAGS.S_dataset == '':\r\n\traise Exception('DG mode must set a source dataset.')\r\nif FLAGS.mode == 'UF' and FLAGS.S_dataset != '':\r\n\traise Exception('UF mode does not use a source dataset.')\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = FLAGS.gpu\r\ndataset = FLAGS.dataset\r\n\r\n# optimal paramters\r\nif dataset == 'KGBD':\r\n\tbatch_size = 256\r\n\tFLAGS.lr = '0.00035'\r\n\tFLAGS.min_samples = '4'\r\n\tFLAGS.t = '0.06'\r\nelif dataset == 'CASIA_B':\r\n\tbatch_size = 128\r\n\tFLAGS.lr = '0.00035'\r\n\tFLAGS.min_samples = '2'\r\n\tFLAGS.eps = '0.75'\r\n\tFLAGS.t = '0.075'\r\nelse:\r\n\tbatch_size = 128\r\n\tFLAGS.lr = '0.00035'\r\nif dataset == 'KS20' or dataset == 'IAS':\r\n\tFLAGS.t = '0.08'\r\n\tFLAGS.eps = '0.8'\r\nelif dataset == 'BIWI':\r\n\tFLAGS.t = '0.07'\r\n\r\n\r\neps = float(FLAGS.eps)\r\nmin_samples = int(FLAGS.min_samples)\r\n\r\ntime_step = int(FLAGS.length)\r\nfusion_lambda = float(FLAGS.fusion_lambda)\r\nprobe = FLAGS.probe\r\npatience = int(FLAGS.patience)\r\n\r\n\r\nglobal_att = False\r\nstruct_only = False\r\nP = '8'\r\n\r\n\r\nchange = ''\r\n\r\n\r\n\r\nif FLAGS.probe_type != '':\r\n\tchange += '_CME'\r\nif FLAGS.fusion_lambda != '1':\r\n\tchange = '_lambda_' + FLAGS.fusion_lambda\r\n\r\nif FLAGS.struct_only == '1':\r\n\tstruct_only = True\r\n\r\n\r\nif FLAGS.dataset == 'KGBD':\r\n\tFLAGS.m = '16'\r\nif FLAGS.m != '8':\r\n\tm = FLAGS.m\r\n\tMs = [int(m), 1]\r\n\r\ntry:\r\n\tos.mkdir(pre_dir)\r\nexcept:\r\n\tpass\r\n\r\nif struct_only:\r\n\tpre_dir += '_struct_only'\r\nif P != '8':\r\n\tpre_dir += '_P_' + P\r\n\r\n\r\nif dataset == 'KS20':\r\n\tnb_nodes = 25\r\n\r\nif dataset == 'CASIA_B':\r\n\tnb_nodes = 14\r\n\r\n\r\n\r\nprint('----- Model hyperparams -----')\r\n# print('skeleton_nodes: ' + str(nb_nodes))\r\nprint('seqence_length: ' + str(time_step))\r\nprint('fusion_lambda: ' + str(fusion_lambda))\r\nprint('batch_size: ' + str(batch_size))\r\nprint('lr: ' + str(FLAGS.lr))\r\nprint('temperature: ' + FLAGS.t)\r\nprint('eps: ' + FLAGS.eps)\r\nprint('min_samples: ' + FLAGS.min_samples)\r\nprint('m: ' + FLAGS.m)\r\nprint('fusion_lambda: ' + FLAGS.fusion_lambda)\r\n# print('patience: ' + FLAGS.patience)\r\n\r\nprint('Mode: ' + FLAGS.mode)\r\nprint('Evaluate: ' + FLAGS.evaluate)\r\n\r\nif FLAGS.mode == 'DG':\r\n\tprint('----- Mode Information -----')\r\n\tprint('Source Dataset: ' + FLAGS.S_dataset)\r\n\tprint('Target Dataset: ' + FLAGS.dataset)\r\n\tprint('Target Probe: ' + FLAGS.probe)\r\nelif FLAGS.mode == 'UF':\r\n\tprint('----- Dataset Information -----')\r\n\tprint('Dataset: ' + dataset)\r\n\tif dataset == 'CASIA_B':\r\n\t\tprint('Probe.Gallery: ', FLAGS.probe_type.split('.')[0], FLAGS.probe_type.split('.')[1])\r\n\telse:\r\n\t\tprint('Probe: ' + FLAGS.probe)\r\n\r\n\"\"\"\r\n Obtain training and testing data in part-level, body-scale, and hyper-body-scale.\r\n Generate corresponding adjacent matrix and 
bias.\r\n\"\"\"\r\nif FLAGS.probe_type == '':\r\n\tif FLAGS.probe_view == '' and FLAGS.gallery_view == '':\r\n\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_test_P, X_test_B, X_test_H_B, _, y_test, \\\r\n\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\tprocess.gen_train_data(dataset=dataset, split=probe, time_step=time_step,\r\n\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size, )\r\n\telse:\r\n\t\tif dataset == 'KS20':\r\n\t\t\t_, _, _, _, _, _, _, X_test_P, X_test_B, X_test_H_B, _, y_test, \\\r\n\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\tprocess.gen_train_data(dataset=dataset, split='view_'+FLAGS.probe_view, time_step=time_step,\r\n\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t )\r\n\t\t\tX_train_P_all = []\r\n\t\t\tX_train_B_all = []\r\n\t\t\tX_train_H_B_all = []\r\n\t\t\ty_train_all = []\r\n\t\t\tfor i in range(5):\r\n\t\t\t\tif str(i) not in [FLAGS.probe_view, FLAGS.gallery_view]:\r\n\t\t\t\t\t_, _, _, _, _, _, _, X_train_P, X_train_B, X_train_H_B, _, y_train, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='view_' + str(i), time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\t\t\tX_train_H_B_all.extend(X_train_H_B)\r\n\t\t\t\t\tX_train_P_all.extend(X_train_P)\r\n\t\t\t\t\tX_train_B_all.extend(X_train_B)\r\n\t\t\t\t\ty_train_all.extend(y_train_all)\r\n\t\t\tX_train_P = np.array(X_train_P_all)\r\n\t\t\tX_train_B = np.array(X_train_B_all)\r\n\t\t\tX_train_H_B = np.array(X_train_H_B_all)\r\n\t\t\ty_train = np.array(y_train)\r\n\r\nelse:\r\n\tfrom utils import process_cme_L3 as process\r\n\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_test_P, X_test_B, X_test_H_B, _, y_test, \\\r\n\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\tprocess.gen_train_data(dataset=dataset, split=probe, time_step=time_step,\r\n\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size, PG_type=FLAGS.probe_type.split('.')[0])\r\n\tprint('## [Probe].[Gallery]', FLAGS.probe_type)\r\n\r\n\r\nall_ftr_size = hid_units[0] * (15 + 3)\r\nloaded_graph = tf.Graph()\r\n\r\ncluster_epochs = 15000\r\ndisplay = 20\r\n\r\nif FLAGS.evaluate == '1':\r\n\tFLAGS.S_dataset = FLAGS.dataset\r\n\tFLAGS.S_probe = FLAGS.probe\r\n\tFLAGS.mode = 'DG'\r\n\r\nif FLAGS.mode == 'UF':\r\n\twith tf.Graph().as_default():\r\n\t\twith tf.name_scope('Input'):\r\n\t\t\tP_in = tf.placeholder(dtype=tf.float32, shape=(batch_size * time_step, 10, ft_size))\r\n\t\t\tB_in = tf.placeholder(dtype=tf.float32, shape=(batch_size * time_step, 5, ft_size))\r\n\t\t\tH_B_in = tf.placeholder(dtype=tf.float32, shape=(batch_size * time_step, 3, ft_size))\r\n\t\t\tP_bias_in = tf.placeholder(dtype=tf.float32, shape=(1, 10, 10))\r\n\t\t\tB_bias_in = tf.placeholder(dtype=tf.float32, shape=(1, 5, 5))\r\n\t\t\tH_B_bias_in = tf.placeholder(dtype=tf.float32, shape=(1, 3, 3))\r\n\t\t\tattn_drop = tf.placeholder(dtype=tf.float32, shape=())\r\n\t\t\tffd_drop = tf.placeholder(dtype=tf.float32, shape=())\r\n\t\t\tis_train = tf.placeholder(dtype=tf.bool, shape=())\r\n\t\t\tpseudo_lab = tf.placeholder(dtype=tf.int32, shape=(batch_size,))\r\n\t\t\tcluster_ftr = tf.placeholder(dtype=tf.float32, 
shape=(None, all_ftr_size))\r\n\r\n\t\twith tf.name_scope(\"MG\"), tf.variable_scope(\"MG\", reuse=tf.AUTO_REUSE):\r\n\t\t\tdef SRL(J_in, J_bias_in, nb_nodes):\r\n\t\t\t\tW_h = tf.Variable(tf.random_normal([3, hid_units[-1]]))\r\n\t\t\t\tb_h = tf.Variable(tf.zeros(shape=[hid_units[-1], ]))\r\n\t\t\t\tJ_h = tf.reshape(J_in, [-1, ft_size])\r\n\r\n\t\t\t\tJ_h = tf.matmul(J_h, W_h) + b_h\r\n\t\t\t\tJ_h = tf.reshape(J_h, [batch_size*time_step, nb_nodes, hid_units[-1]])\r\n\t\t\t\tJ_seq_ftr = MSRL.inference(J_h, 0, nb_nodes, is_train,\r\n\t\t\t\t attn_drop, ffd_drop,\r\n\t\t\t\t bias_mat=J_bias_in,\r\n\t\t\t\t hid_units=hid_units, n_heads=Ms,\r\n\t\t\t\t residual=residual, activation=nonlinearity, r_pool=True)\r\n\t\t\t\treturn J_seq_ftr\r\n\r\n\r\n\t\t\tdef FCRL(s1, s2, s1_num, s2_num, hid_in):\r\n\t\t\t\tr_unorm = tf.matmul(s2, tf.transpose(s1, [0, 2, 1]))\r\n\t\t\t\tatt_w = tf.nn.softmax(r_unorm)\r\n\t\t\t\tatt_w = tf.expand_dims(att_w, axis=-1)\r\n\t\t\t\ts1 = tf.reshape(s1, [s1.shape[0], 1, s1.shape[1], hid_in])\r\n\t\t\t\tc_ftr = tf.reduce_sum(att_w * s1, axis=2)\r\n\t\t\t\tc_ftr = tf.reshape(c_ftr, [-1, hid_in])\r\n\t\t\t\tatt_w = tf.reshape(att_w, [-1, s1_num * s2_num])\r\n\t\t\t\treturn r_unorm, c_ftr\r\n\r\n\r\n\t\t\tdef MSC(P_in, B_in, H_B_in, P_bias_in, B_bias_in, H_B_bias_in, hid_in, hid_out):\r\n\t\t\t\th_P_seq_ftr = SRL(J_in=P_in, J_bias_in=P_bias_in, nb_nodes=10)\r\n\t\t\t\th_B_seq_ftr = SRL(J_in=B_in, J_bias_in=B_bias_in, nb_nodes=5)\r\n\t\t\t\th_H_B_seq_ftr = SRL(J_in=H_B_in, J_bias_in=H_B_bias_in, nb_nodes=3)\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, 10, hid_in])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, 5, hid_in])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, 3, hid_in])\r\n\r\n\r\n\t\t\t\tW_cs_23 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\t\t\t\tW_cs_24 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\t\t\t\tW_cs_34 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\r\n\r\n\t\t\t\tW_self_2 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\t\t\t\tW_self_3 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\t\t\t\tW_self_4 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\r\n\t\t\t\tself_a_2, self_r_2 = FCRL(h_P_seq_ftr, h_P_seq_ftr, 10, 10, hid_in)\r\n\t\t\t\tself_a_3, self_r_3 = FCRL(h_B_seq_ftr, h_B_seq_ftr, 5, 5, hid_in)\r\n\t\t\t\tself_a_4, self_r_4 = FCRL(h_H_B_seq_ftr, h_H_B_seq_ftr, 3, 3, hid_in)\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, hid_in])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, hid_in])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, hid_in])\r\n\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, 10, hid_in])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, 5, hid_in])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, 3, hid_in])\r\n\r\n\r\n\t\t\t\ta_23, r_23 = FCRL(h_B_seq_ftr, h_P_seq_ftr, 5, 10, hid_in)\r\n\t\t\t\ta_24, r_24 = FCRL(h_H_B_seq_ftr, h_P_seq_ftr, 3, 10, hid_in)\r\n\t\t\t\ta_34, r_34 = FCRL(h_H_B_seq_ftr, h_B_seq_ftr, 3, 5, hid_in)\r\n\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, hid_in])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, hid_in])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, hid_in])\r\n\r\n\t\t\t\tif not struct_only:\r\n\t\t\t\t\th_P_seq_ftr = h_P_seq_ftr + float(FLAGS.fusion_lambda) * (\r\n\t\t\t\t\t\t\t\ttf.matmul(self_r_2, W_self_2) + tf.matmul(r_23, W_cs_23) + tf.matmul(r_24, W_cs_24))\r\n\t\t\t\t\th_B_seq_ftr = h_B_seq_ftr + float(FLAGS.fusion_lambda) * 
(tf.matmul(self_r_3, W_self_3) + tf.matmul(r_34, W_cs_34))\r\n\t\t\t\t\th_H_B_seq_ftr = h_H_B_seq_ftr + float(FLAGS.fusion_lambda) * (tf.matmul(self_r_4, W_self_4))\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, 10, hid_out])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, 5, hid_out])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, 3, hid_out])\r\n\r\n\t\t\t\treturn h_H_B_seq_ftr, h_B_seq_ftr, h_P_seq_ftr\r\n\r\n\t\t\th_H_B_seq_ftr, h_B_seq_ftr, h_P_seq_ftr = MSC(P_in, B_in, H_B_in, P_bias_in, B_bias_in, H_B_bias_in,\r\n\t\t\t hid_units[-1], hid_units[-1])\r\n\r\n\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, hid_units[-1]])\r\n\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, hid_units[-1]])\r\n\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, hid_units[-1]])\r\n\r\n\t\t\toptimizer = tf.train.AdamOptimizer(learning_rate=float(FLAGS.lr))\r\n\t\t\tP_encode = tf.reduce_mean(tf.reshape(h_P_seq_ftr, [batch_size, time_step, -1]), axis=1)\r\n\t\t\tB_encode = tf.reduce_mean(tf.reshape(h_B_seq_ftr, [batch_size, time_step, -1]), axis=1)\r\n\t\t\tH_B_encode = tf.reduce_mean(tf.reshape(h_H_B_seq_ftr, [batch_size, time_step, -1]), axis=1)\r\n\r\n\t\t\tP_encode = tf.reshape(P_encode, [batch_size, -1])\r\n\t\t\tB_encode = tf.reshape(B_encode, [batch_size, -1])\r\n\t\t\tH_B_encode = tf.reshape(H_B_encode, [batch_size, -1])\r\n\r\n\t\t\tall_ftr = tf.concat([P_encode, B_encode, H_B_encode], axis=-1)\r\n\t\t\tall_ftr = tf.reshape(all_ftr, [batch_size, -1])\r\n\r\n\t\t\toutput = tf.matmul(all_ftr, tf.transpose(cluster_ftr))\r\n\r\n\t\t\tdef cluster_loss(pseudo_lab, all_ftr, cluster_ftr):\r\n\t\t\t\tall_ftr = tf.nn.l2_normalize(all_ftr, axis=-1)\r\n\t\t\t\tcluster_ftr = tf.nn.l2_normalize(cluster_ftr, axis=-1)\r\n\t\t\t\toutput = tf.matmul(all_ftr, tf.transpose(cluster_ftr))\r\n\t\t\t\toutput /= float(FLAGS.t)\r\n\t\t\t\tloss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=pseudo_lab, logits=output))\r\n\t\t\t\treturn loss\r\n\r\n\r\n\t\t\tdef empty_loss(b):\r\n\t\t\t\treturn tf.zeros([1])\r\n\r\n\r\n\t\t\tcontrastive_loss = tf.cond(tf.reduce_sum(pseudo_lab) > 0,\r\n\t\t\t lambda: cluster_loss(pseudo_lab, all_ftr, cluster_ftr),\r\n\t\t\t lambda: empty_loss(pseudo_lab))\r\n\t\t\tcluster_train_op = optimizer.minimize(contrastive_loss)\r\n\r\n\r\n\r\n\t\tsaver = tf.train.Saver()\r\n\t\tinit_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\r\n\r\n\t\twith tf.Session(config=config) as sess:\r\n\t\t\tsess.run(init_op)\r\n\t\t\tdef train_loader(X_train_P, X_train_B, X_train_H_B, y_train):\r\n\t\t\t\ttr_step = 0\r\n\t\t\t\ttr_size = X_train_P.shape[0]\r\n\t\t\t\ttrain_logits_all = []\r\n\t\t\t\ttrain_labels_all = []\r\n\t\t\t\ttrain_features_all = []\r\n\t\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tX_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\t\tX_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\t\tX_input_H_B = X_train_H_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\t\tlabels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t\t feed_dict={\r\n\t\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t\t B_in: 
X_input_B,\r\n\t\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\t is_train: True,\r\n\t\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\t\ttrain_features_all.extend(all_features.tolist())\r\n\t\t\t\t\ttrain_labels_all.extend(labels.tolist())\r\n\t\t\t\t\ttr_step += 1\r\n\r\n\t\t\t\ttrain_features_all = np.array(train_features_all).astype(np.float32)\r\n\t\t\t\ttrain_features_all = torch.from_numpy(train_features_all)\r\n\t\t\t\treturn train_features_all, train_labels_all\r\n\r\n\r\n\t\t\tdef gal_loader(X_train_P, X_train_B, X_train_H_B, y_train):\r\n\t\t\t\ttr_step = 0\r\n\t\t\t\ttr_size = X_train_P.shape[0]\r\n\t\t\t\tgal_logits_all = []\r\n\t\t\t\tgal_labels_all = []\r\n\t\t\t\tgal_features_all = []\r\n\r\n\t\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tX_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\t\tX_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\t\tX_input_H_B = X_train_H_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\t\tlabels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\r\n\t\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t\t feed_dict={\r\n\t\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\t is_train: True,\r\n\t\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\t\tgal_features_all.extend(all_features.tolist())\r\n\t\t\t\t\tgal_labels_all.extend(labels.tolist())\r\n\t\t\t\t\ttr_step += 1\r\n\r\n\t\t\t\treturn gal_features_all, gal_labels_all\r\n\r\n\r\n\t\t\tdef evaluation():\r\n\t\t\t\tvl_step = 0\r\n\t\t\t\tvl_size = X_test_P.shape[0]\r\n\t\t\t\tpro_labels_all = []\r\n\t\t\t\tpro_features_all = []\r\n\t\t\t\tloaded_graph = tf.get_default_graph()\r\n\t\t\t\twhile vl_step * batch_size < vl_size:\r\n\t\t\t\t\tif (vl_step + 1) * batch_size > vl_size:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tX_input_P = X_test_P[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\t\tX_input_B = X_test_B[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\t\tX_input_H_B = X_test_H_B[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\t\tlabels = y_test[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t\t feed_dict={\r\n\t\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\t is_train: False,\r\n\t\t\t\t\t\t 
attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\t\tpro_labels_all.extend(labels.tolist())\r\n\t\t\t\t\tpro_features_all.extend(all_features.tolist())\r\n\t\t\t\t\tvl_step += 1\r\n\t\t\t\tX = np.array(gal_features_all)\r\n\t\t\t\ty = np.array(gal_labels_all)\r\n\t\t\t\tt_X = np.array(pro_features_all)\r\n\t\t\t\tt_y = np.array(pro_labels_all)\r\n\t\t\t\t# print(X.shape, t_X.shape)\r\n\t\t\t\tt_y = np.argmax(t_y, axis=-1)\r\n\t\t\t\ty = np.argmax(y, axis=-1)\r\n\r\n\t\t\t\tdef mean_ap(distmat, query_ids=None, gallery_ids=None,\r\n\t\t\t\t query_cams=None, gallery_cams=None):\r\n\t\t\t\t\t# distmat = to_numpy(distmat)\r\n\t\t\t\t\tm, n = distmat.shape\r\n\t\t\t\t\t# Fill up default values\r\n\t\t\t\t\tif query_ids is None:\r\n\t\t\t\t\t\tquery_ids = np.arange(m)\r\n\t\t\t\t\tif gallery_ids is None:\r\n\t\t\t\t\t\tgallery_ids = np.arange(n)\r\n\t\t\t\t\tif query_cams is None:\r\n\t\t\t\t\t\tquery_cams = np.zeros(m).astype(np.int32)\r\n\t\t\t\t\tif gallery_cams is None:\r\n\t\t\t\t\t\tgallery_cams = np.ones(n).astype(np.int32)\r\n\t\t\t\t\t# Ensure numpy array\r\n\t\t\t\t\tquery_ids = np.asarray(query_ids)\r\n\t\t\t\t\tgallery_ids = np.asarray(gallery_ids)\r\n\t\t\t\t\tquery_cams = np.asarray(query_cams)\r\n\t\t\t\t\tgallery_cams = np.asarray(gallery_cams)\r\n\t\t\t\t\t# Sort and find correct matches\r\n\t\t\t\t\tindices = np.argsort(distmat, axis=1)\r\n\t\t\t\t\tmatches = (gallery_ids[indices] == query_ids[:, np.newaxis])\r\n\t\t\t\t\t# Compute AP for each query\r\n\t\t\t\t\taps = []\r\n\t\t\t\t\tif (FLAGS.probe_view != '' and (FLAGS.probe_view == FLAGS.gallery_view or FLAGS.probe_type == 'nm.nm')) or (FLAGS.probe_type == 'cl.cl' or FLAGS.probe_type == 'bg.bg'):\r\n\t\t\t\t\t\tfor i in range(1, m):\r\n\t\t\t\t\t\t\tvalid = ((gallery_ids[indices[i]] != query_ids[i]) |\r\n\t\t\t\t\t\t\t (gallery_cams[indices[i]] != query_cams[i]))\r\n\t\t\t\t\t\t\ty_true = matches[i, valid]\r\n\t\t\t\t\t\t\ty_score = -distmat[i][indices[i]][valid]\r\n\t\t\t\t\t\t\tif not np.any(y_true): continue\r\n\t\t\t\t\t\t\taps.append(average_precision_score(y_true, y_score))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tfor i in range(m):\r\n\t\t\t\t\t\t\tvalid = ((gallery_ids[indices[i]] != query_ids[i]) |\r\n\t\t\t\t\t\t\t (gallery_cams[indices[i]] != query_cams[i]))\r\n\t\t\t\t\t\t\ty_true = matches[i, valid]\r\n\t\t\t\t\t\t\ty_score = -distmat[i][indices[i]][valid]\r\n\t\t\t\t\t\t\tif not np.any(y_true): continue\r\n\t\t\t\t\t\t\taps.append(average_precision_score(y_true, y_score))\r\n\t\t\t\t\tif len(aps) == 0:\r\n\t\t\t\t\t\traise RuntimeError(\"No valid query\")\r\n\t\t\t\t\treturn np.mean(aps)\r\n\r\n\r\n\t\t\t\tdef metrics(X, y, t_X, t_y):\r\n\t\t\t\t\t# compute Euclidean distance\r\n\t\t\t\t\tif dataset != 'CASIA_B':\r\n\t\t\t\t\t\ta, b = torch.from_numpy(t_X), torch.from_numpy(X)\r\n\t\t\t\t\t\tm, n = a.size(0), b.size(0)\r\n\t\t\t\t\t\ta = a.view(m, -1)\r\n\t\t\t\t\t\tb = b.view(n, -1)\r\n\t\t\t\t\t\tdist_m = torch.pow(a, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\r\n\t\t\t\t\t\t torch.pow(b, 2).sum(dim=1, keepdim=True).expand(n, m).t()\r\n\t\t\t\t\t\tdist_m.addmm_(1, -2, a, b.t())\r\n\t\t\t\t\t\tdist_m = dist_m.sqrt()\r\n\t\t\t\t\t\tmAP = mean_ap(distmat=dist_m.numpy(), query_ids=t_y, gallery_ids=y)\r\n\t\t\t\t\t\t_, dist_sort = dist_m.sort(1)\r\n\t\t\t\t\t\tdist_sort = dist_sort.numpy()\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tX = np.array(X)\r\n\t\t\t\t\t\tt_X = np.array(t_X)\r\n\t\t\t\t\t\t# 
pred = [cp.argmin(cp.linalg.norm(X - i, axis=1)).tolist() for i in t_X]\r\n\t\t\t\t\t\tdist_m = [(np.linalg.norm(X - i, axis=1)).tolist() for i in t_X]\r\n\t\t\t\t\t\tdist_m = np.array(dist_m)\r\n\t\t\t\t\t\tmAP = mean_ap(distmat=dist_m, query_ids=t_y, gallery_ids=y)\r\n\t\t\t\t\t\tdist_sort = [np.argsort(np.linalg.norm(X - i, axis=1)).tolist() for i in t_X]\r\n\t\t\t\t\t\tdist_sort = np.array(dist_sort)\r\n\r\n\t\t\t\t\ttop_1 = top_5 = top_10 = 0\r\n\t\t\t\t\tprobe_num = dist_sort.shape[0]\r\n\t\t\t\t\tif (FLAGS.probe_view != '' and (FLAGS.probe_view == FLAGS.gallery_view or FLAGS.probe_type == 'nm.nm')) or (FLAGS.probe_type == 'cl.cl' or FLAGS.probe_type == 'bg.bg'):\r\n\t\t\t\t\t\tfor i in range(probe_num):\r\n\t\t\t\t\t\t\t# print(dist_sort[i, :10])\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:2]]:\r\n\t\t\t\t\t\t\t\ttop_1 += 1\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:6]]:\r\n\t\t\t\t\t\t\t\ttop_5 += 1\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:11]]:\r\n\t\t\t\t\t\t\t\ttop_10 += 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tfor i in range(probe_num):\r\n\t\t\t\t\t\t\t# print(dist_sort[i, :10])\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :1]]:\r\n\t\t\t\t\t\t\t\ttop_1 += 1\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :5]]:\r\n\t\t\t\t\t\t\t\ttop_5 += 1\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :10]]:\r\n\t\t\t\t\t\t\t\ttop_10 += 1\r\n\t\t\t\t\treturn mAP, top_1 / probe_num, top_5 / probe_num, top_10 / probe_num\r\n\r\n\t\t\t\tmAP, top_1, top_5, top_10 = metrics(X, y, t_X, t_y)\r\n\t\t\t\treturn mAP, top_1, top_5, top_10\r\n\r\n\t\t\tmax_acc_1 = 0\r\n\t\t\tmax_acc_2 = 0\r\n\t\t\tbest_cluster_info_1 = [0, 0]\r\n\t\t\tbest_cluster_info_2 = [0, 0]\r\n\t\t\tcur_patience = 0\r\n\t\t\tif dataset == 'KGBD' or dataset == 'KS20':\r\n\t\t\t\tif FLAGS.gallery_view == '' and FLAGS.probe_view == '':\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='gallery', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\t\telse:\r\n\t\t\t\t\t_, _, _, _, _, _, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, b_, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='view_'+FLAGS.gallery_view, time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\telif dataset == 'BIWI':\r\n\t\t\t\tif probe == 'Walking':\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='Still', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\t\telse:\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='Walking', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t\t 
batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\telif dataset == 'IAS':\r\n\t\t\t\tif probe == 'A':\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='B', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\t\telse:\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='A', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\telif dataset == 'CASIA_B':\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split=probe, time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t PG_type=FLAGS.probe_type.split('.')[1])\r\n\t\t\tfor epoch in range(cluster_epochs):\r\n\t\t\t\ttrain_features_all, train_labels_all = train_loader(X_train_P, X_train_B, X_train_H_B, y_train)\r\n\t\t\t\tgal_features_all, gal_labels_all = gal_loader(X_gal_P, X_gal_B, X_gal_H_B, y_gal)\r\n\t\t\t\tmAP, top_1, top_5, top_10 = evaluation()\r\n\t\t\t\tcur_patience += 1\r\n\t\t\t\tif epoch > 0 and top_1 > max_acc_2:\r\n\t\t\t\t\tmax_acc_1 = mAP\r\n\t\t\t\t\tbest_cluster_info_1[0] = num_cluster\r\n\t\t\t\t\tbest_cluster_info_1[1] = outlier_num\r\n\t\t\t\t\tcur_patience = 0\r\n\t\t\t\t\tif FLAGS.mode == 'UF' and FLAGS.S_dataset == '':\r\n\t\t\t\t\t\tif FLAGS.probe_view == '' and FLAGS.gallery_view == '' and FLAGS.dataset != 'CASIA_B':\r\n\t\t\t\t\t\t\t# checkpt_file = pre_dir + dataset + '/' + probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t\t\t\t\t\t\t# \tnhood) + '_' + str(\r\n\t\t\t\t\t\t\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t\t\t\t\t\t\t# FLAGS.t + '_' + change + '_best.ckpt'\r\n\t\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + change + '_best.ckpt'\r\n\t\t\t\t\t\telif FLAGS.dataset == 'CASIA_B':\r\n\t\t\t\t\t\t\t# checkpt_file = pre_dir + dataset + '/' + probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t\t\t\t\t\t\t# \tnhood) + '_' + str(\r\n\t\t\t\t\t\t\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t\t\t\t\t\t\t# FLAGS.t + '_' + change + '_' + FLAGS.probe_type + '_best.ckpt'\r\n\t\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + change + '_' + FLAGS.probe_type + '_best.ckpt'\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t# checkpt_file = pre_dir + dataset + '/' + probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t\t\t\t\t\t\t# \tnhood) + '_' + str(\r\n\t\t\t\t\t\t\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t\t\t\t\t\t\t# FLAGS.t + '_' + FLAGS.probe_view + 'v' + FLAGS.gallery_view + change + '_best.ckpt'\r\n\t\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + '_' + FLAGS.probe_view + 'v' + FLAGS.gallery_view + change + 
'_best.ckpt'\r\n\t\t\t\t\t\tprint(checkpt_file)\r\n\t\t\t\t\t\tsaver.save(sess, checkpt_file)\r\n\t\t\t\tif epoch > 0 and top_1 > max_acc_2:\r\n\t\t\t\t\tmax_acc_2 = top_1\r\n\t\t\t\t\tbest_cluster_info_2[0] = num_cluster\r\n\t\t\t\t\tbest_cluster_info_2[1] = outlier_num\r\n\t\t\t\t\tcur_patience = 0\r\n\t\t\t\tif epoch > 0:\r\n\t\t\t\t\tif FLAGS.probe_view != '' and FLAGS.gallery_view != '':\r\n\t\t\t\t\t\tprint('[UF] View: %s v %s | mAP: %.4f (%.4f) | Top-1: %.4f (%.4f) | Top-5: %.4f | Top-10: %.4f | % d + o: %d |' % (\r\n\t\t\t\t\t\t\tFLAGS.probe_view, FLAGS.gallery_view, mAP, max_acc_1,\r\n\t\t\t\t\t\t\ttop_1, max_acc_2, top_5, top_10,\r\n\t\t\t\t\t\t\tbest_cluster_info_2[0], best_cluster_info_2[1]))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tprint(\r\n\t\t\t\t\t\t\t'[UF] %s - %s | mAP: %.4f (%.4f) | Top-1: %.4f (%.4f) | Top-5: %.4f | Top-10: %.4f | % d + o: %d |' % (\r\n\t\t\t\t\t\t\tFLAGS.dataset, FLAGS.probe, mAP, max_acc_1,\r\n\t\t\t\t\t\t\ttop_1, max_acc_2, top_5, top_10,\r\n\t\t\t\t\t\t\tbest_cluster_info_2[0], best_cluster_info_2[1]))\r\n\t\t\t\tif cur_patience == patience:\r\n\t\t\t\t\tbreak\r\n\t\t\t\trerank_dist = compute_jaccard_distance(train_features_all, k1=k1, k2=k2)\r\n\r\n\t\t\t\tif dataset == 'IAS' or dataset == 'KS20':\r\n\t\t\t\t\tcluster = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1)\r\n\t\t\t\telse:\r\n\t\t\t\t\tcluster = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1)\r\n\t\t\t\tpseudo_labels = cluster.fit_predict(rerank_dist)\r\n\t\t\t\t# discard outliers\r\n\t\t\t\ttrain_features_all = train_features_all[np.where(pseudo_labels != -1)]\r\n\t\t\t\tX_train_P_new = X_train_P[np.where(pseudo_labels != -1)]\r\n\t\t\t\tX_train_B_new = X_train_B[np.where(pseudo_labels != -1)]\r\n\t\t\t\tX_train_H_B_new = X_train_H_B[np.where(pseudo_labels != -1)]\r\n\t\t\t\toutlier_num = np.sum(pseudo_labels == -1)\r\n\t\t\t\tpseudo_labels = pseudo_labels[np.where(pseudo_labels != -1)]\r\n\t\t\t\tnum_cluster = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)\r\n\r\n\r\n\t\t\t\tdef generate_cluster_features(labels, features):\r\n\t\t\t\t\tcenters = collections.defaultdict(list)\r\n\t\t\t\t\tfor i, label in enumerate(labels):\r\n\t\t\t\t\t\tif label == -1:\r\n\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\tcenters[labels[i]].append(features[i])\r\n\r\n\t\t\t\t\tcenters = [\r\n\t\t\t\t\t\ttorch.stack(centers[idx], dim=0).mean(0) for idx in sorted(centers.keys())\r\n\t\t\t\t\t]\r\n\t\t\t\t\tcenters = torch.stack(centers, dim=0)\r\n\t\t\t\t\treturn centers\r\n\r\n\r\n\t\t\t\tcluster_features = generate_cluster_features(pseudo_labels, train_features_all)\r\n\t\t\t\tcluster_features = cluster_features.numpy()\r\n\t\t\t\tcluster_features = cluster_features.astype(np.float64)\r\n\r\n\t\t\t\ttr_step = 0\r\n\t\t\t\ttr_size = X_train_P_new.shape[0]\r\n\t\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tX_input_P = X_train_P_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\t\tX_input_B = X_train_B_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\t\tX_input_H_B = X_train_H_B_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\t\tlabels = pseudo_labels[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\t_, loss, P_en, B_en, all_features = 
sess.run(\r\n\t\t\t\t\t\t[cluster_train_op, contrastive_loss, P_encode, B_encode, all_ftr],\r\n\t\t\t\t\t\tfeed_dict={\r\n\t\t\t\t\t\t\tP_in: X_input_P,\r\n\t\t\t\t\t\t\tB_in: X_input_B,\r\n\t\t\t\t\t\t\tH_B_in: X_input_H_B,\r\n\t\t\t\t\t\t\tP_bias_in: biases_P,\r\n\t\t\t\t\t\t\tB_bias_in: biases_B,\r\n\t\t\t\t\t\t\tH_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\t\tis_train: True,\r\n\t\t\t\t\t\t\tattn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\t\tpseudo_lab: labels,\r\n\t\t\t\t\t\t\tcluster_ftr: cluster_features})\r\n\t\t\t\t\tif tr_step % display == 0:\r\n\t\t\t\t\t\tprint('[%s] Batch num: %d | Cluser num: %d | Outlier: %d | Loss: %.5f |' %\r\n\t\t\t\t\t\t (str(epoch), tr_step, num_cluster, outlier_num, loss))\r\n\t\t\t\t\ttr_step += 1\r\n\t\t\tsess.close()\r\n\r\nelif FLAGS.mode == 'DG' and FLAGS.S_dataset != '':\r\n\tif FLAGS.S_dataset == 'KGBD':\r\n\t\tbatch_size = 256\r\n\t\tFLAGS.lr = '0.00035'\r\n\t\tFLAGS.min_samples = '4'\r\n\t\tFLAGS.eps = '0.6'\r\n\telif FLAGS.S_dataset == 'CASIA_B':\r\n\t\tbatch_size = 128\r\n\t\tFLAGS.lr = '0.00035'\r\n\t\tFLAGS.min_samples = '2'\r\n\t\tFLAGS.eps = '0.75'\r\n\telse:\r\n\t\tbatch_size = 128\r\n\t\tFLAGS.lr = '0.00035'\r\n\tif FLAGS.S_dataset == 'IAS' or FLAGS.S_dataset == 'KS20':\r\n\t\t# if FLAGS.mode != 'DG':\r\n\t\tFLAGS.eps = '0.8'\r\n\t\tFLAGS.min_samples = '2'\r\n\t\tif FLAGS.S_dataset == 'KS20':\r\n\t\t\tFLAGS.min_samples = '2'\r\n\r\n\tif FLAGS.S_dataset == 'BIWI':\r\n\t\tFLAGS.min_samples = '2'\r\n\t\tif FLAGS.S_probe == 'Walking':\r\n\t\t\tFLAGS.eps = '0.6'\r\n\t\telse:\r\n\t\t\tFLAGS.eps = '0.7'\r\n\t# checkpt_file = pre_dir + FLAGS.S_dataset + '/' + FLAGS.S_probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t# \tnhood) + '_' + str(\r\n\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t# \t\t\t\t\t\t FLAGS.t + '_' + change + '_best.ckpt'\r\n\tcheckpt_file = pre_dir + dataset + '/' + probe + change + '_best.ckpt'\r\n\tchange = '_DG'\r\n\twith tf.Session(graph=loaded_graph, config=config) as sess:\r\n\t\tloader = tf.train.import_meta_graph(checkpt_file + '.meta')\r\n\t\tP_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder:0\")\r\n\t\tB_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_1:0\")\r\n\t\tH_B_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_2:0\")\r\n\t\tP_bias_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_3:0\")\r\n\t\tB_bias_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_4:0\")\r\n\t\tH_B_bias_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_5:0\")\r\n\t\tattn_drop = loaded_graph.get_tensor_by_name(\"Input/Placeholder_6:0\")\r\n\t\tffd_drop = loaded_graph.get_tensor_by_name(\"Input/Placeholder_7:0\")\r\n\t\tis_train = loaded_graph.get_tensor_by_name(\"Input/Placeholder_8:0\")\r\n\t\tpseudo_lab = loaded_graph.get_tensor_by_name(\"Input/Placeholder_9:0\")\r\n\t\tcluster_ftr = loaded_graph.get_tensor_by_name(\"Input/Placeholder_10:0\")\r\n\r\n\t\tP_encode = loaded_graph.get_tensor_by_name(\"MG/MG/Reshape_45:0\")\r\n\t\tB_encode = loaded_graph.get_tensor_by_name(\"MG/MG/Reshape_46:0\")\r\n\t\tH_B_encode = loaded_graph.get_tensor_by_name(\"MG/MG/Reshape_47:0\")\r\n\t\tall_ftr = loaded_graph.get_tensor_by_name(\"MG/MG/Reshape_48:0\")\r\n\r\n\t\tcontrastive_loss = loaded_graph.get_tensor_by_name(\"MG/MG/cond/Merge:0\")\r\n\t\tcluster_train_op = loaded_graph.get_operation_by_name(\"MG/MG/Adam\")\r\n\r\n\t\tinit_op = tf.global_variables_initializer()\r\n\t\tsess.run(init_op)\r\n\t\tloader.restore(sess, checkpt_file)\r\n\t\tsaver = 
tf.train.Saver()\r\n\r\n\r\n\t\tdef train_loader(X_train_P, X_train_B, X_train_H_B, y_train):\r\n\t\t\ttr_step = 0\r\n\t\t\ttr_size = X_train_P.shape[0]\r\n\t\t\ttrain_logits_all = []\r\n\t\t\ttrain_labels_all = []\r\n\t\t\ttrain_features_all = []\r\n\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\tX_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\tX_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\tX_input_H_B = X_train_H_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\tlabels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t feed_dict={\r\n\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t is_train: True,\r\n\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\ttrain_features_all.extend(all_features.tolist())\r\n\t\t\t\ttrain_labels_all.extend(labels.tolist())\r\n\t\t\t\ttr_step += 1\r\n\r\n\t\t\ttrain_features_all = np.array(train_features_all).astype(np.float32)\r\n\t\t\ttrain_features_all = torch.from_numpy(train_features_all)\r\n\t\t\treturn train_features_all, train_labels_all\r\n\r\n\r\n\t\tdef gal_loader(X_train_P, X_train_B, X_train_H_B, y_train):\r\n\t\t\ttr_step = 0\r\n\t\t\ttr_size = X_train_P.shape[0]\r\n\t\t\tgal_logits_all = []\r\n\t\t\tgal_labels_all = []\r\n\t\t\tgal_features_all = []\r\n\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\tX_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\tX_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\tX_input_H_B = X_train_H_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\tlabels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t feed_dict={\r\n\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t is_train: True,\r\n\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\tgal_features_all.extend(all_features.tolist())\r\n\t\t\t\tgal_labels_all.extend(labels.tolist())\r\n\t\t\t\ttr_step += 1\r\n\r\n\t\t\treturn gal_features_all, gal_labels_all\r\n\r\n\r\n\t\tdef evaluation():\r\n\t\t\tvl_step = 0\r\n\t\t\tvl_size = X_test_P.shape[0]\r\n\t\t\tpro_labels_all = []\r\n\t\t\tpro_features_all = []\r\n\t\t\tloaded_graph = tf.get_default_graph()\r\n\t\t\twhile vl_step * batch_size < vl_size:\r\n\t\t\t\tif (vl_step + 1) * batch_size > 
vl_size:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\tX_input_P = X_test_P[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\tX_input_B = X_test_B[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\tX_input_H_B = X_test_H_B[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\tlabels = y_test[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t feed_dict={\r\n\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t is_train: False,\r\n\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\tpro_labels_all.extend(labels.tolist())\r\n\t\t\t\tpro_features_all.extend(all_features.tolist())\r\n\t\t\t\tvl_step += 1\r\n\t\t\tX = np.array(gal_features_all)\r\n\t\t\ty = np.array(gal_labels_all)\r\n\t\t\tt_X = np.array(pro_features_all)\r\n\t\t\tt_y = np.array(pro_labels_all)\r\n\t\t\t# print(X.shape, t_X.shape)\r\n\t\t\tt_y = np.argmax(t_y, axis=-1)\r\n\t\t\ty = np.argmax(y, axis=-1)\r\n\r\n\t\t\tdef mean_ap(distmat, query_ids=None, gallery_ids=None,\r\n\t\t\t query_cams=None, gallery_cams=None):\r\n\t\t\t\t# distmat = to_numpy(distmat)\r\n\t\t\t\tm, n = distmat.shape\r\n\t\t\t\t# Fill up default values\r\n\t\t\t\tif query_ids is None:\r\n\t\t\t\t\tquery_ids = np.arange(m)\r\n\t\t\t\tif gallery_ids is None:\r\n\t\t\t\t\tgallery_ids = np.arange(n)\r\n\t\t\t\tif query_cams is None:\r\n\t\t\t\t\tquery_cams = np.zeros(m).astype(np.int32)\r\n\t\t\t\tif gallery_cams is None:\r\n\t\t\t\t\tgallery_cams = np.ones(n).astype(np.int32)\r\n\t\t\t\t# Ensure numpy array\r\n\t\t\t\tquery_ids = np.asarray(query_ids)\r\n\t\t\t\tgallery_ids = np.asarray(gallery_ids)\r\n\t\t\t\tquery_cams = np.asarray(query_cams)\r\n\t\t\t\tgallery_cams = np.asarray(gallery_cams)\r\n\t\t\t\t# Sort and find correct matches\r\n\t\t\t\tindices = np.argsort(distmat, axis=1)\r\n\t\t\t\tmatches = (gallery_ids[indices] == query_ids[:, np.newaxis])\r\n\t\t\t\t# Compute AP for each query\r\n\t\t\t\taps = []\r\n\t\t\t\tif (FLAGS.probe_view != '' and FLAGS.probe_view == FLAGS.gallery_view) or (FLAGS.probe_type == 'nm.nm' or FLAGS.probe_type == 'cl.cl' or FLAGS.probe_type == 'bg.bg'):\r\n\t\t\t\t\tfor i in range(1, m):\r\n\t\t\t\t\t\tvalid = ((gallery_ids[indices[i]] != query_ids[i]) |\r\n\t\t\t\t\t\t (gallery_cams[indices[i]] != query_cams[i]))\r\n\t\t\t\t\t\ty_true = matches[i, valid]\r\n\t\t\t\t\t\ty_score = -distmat[i][indices[i]][valid]\r\n\t\t\t\t\t\tif not np.any(y_true): continue\r\n\t\t\t\t\t\taps.append(average_precision_score(y_true, y_score))\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor i in range(m):\r\n\t\t\t\t\t\tvalid = ((gallery_ids[indices[i]] != query_ids[i]) |\r\n\t\t\t\t\t\t (gallery_cams[indices[i]] != query_cams[i]))\r\n\t\t\t\t\t\ty_true = matches[i, valid]\r\n\t\t\t\t\t\ty_score = -distmat[i][indices[i]][valid]\r\n\t\t\t\t\t\tif not np.any(y_true): continue\r\n\t\t\t\t\t\taps.append(average_precision_score(y_true, y_score))\r\n\t\t\t\tif len(aps) == 0:\r\n\t\t\t\t\traise RuntimeError(\"No valid query\")\r\n\t\t\t\treturn np.mean(aps)\r\n\r\n\t\t\tdef metrics(X, y, t_X, t_y):\r\n\t\t\t\ta, b 
= torch.from_numpy(t_X), torch.from_numpy(X)\r\n\t\t\t\t# compute Euclidean distance\r\n\t\t\t\tm, n = a.size(0), b.size(0)\r\n\t\t\t\ta = a.view(m, -1)\r\n\t\t\t\tb = b.view(n, -1)\r\n\t\t\t\tdist_m = torch.pow(a, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\r\n\t\t\t\t torch.pow(b, 2).sum(dim=1, keepdim=True).expand(n, m).t()\r\n\t\t\t\tdist_m.addmm_(1, -2, a, b.t())\r\n\t\t\t\tdist_m = dist_m.sqrt()\r\n\r\n\t\t\t\tmAP = mean_ap(distmat=dist_m.numpy(), query_ids=t_y, gallery_ids=y)\r\n\t\t\t\t_, dist_sort = dist_m.sort(1)\r\n\t\t\t\tdist_sort = dist_sort.numpy()\r\n\r\n\t\t\t\ttop_1 = top_5 = top_10 = 0\r\n\t\t\t\tprobe_num = dist_sort.shape[0]\r\n\t\t\t\tif (FLAGS.probe_view != '' and FLAGS.probe_view == FLAGS.gallery_view) or (FLAGS.probe_type == 'nm.nm' or FLAGS.probe_type == 'cl.cl' or FLAGS.probe_type == 'bg.bg'):\r\n\t\t\t\t\tfor i in range(probe_num):\r\n\t\t\t\t\t\t# print(dist_sort[i, :10])\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:2]]:\r\n\t\t\t\t\t\t\ttop_1 += 1\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:6]]:\r\n\t\t\t\t\t\t\ttop_5 += 1\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:11]]:\r\n\t\t\t\t\t\t\ttop_10 += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor i in range(probe_num):\r\n\t\t\t\t\t\t# print(dist_sort[i, :10])\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :1]]:\r\n\t\t\t\t\t\t\ttop_1 += 1\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :5]]:\r\n\t\t\t\t\t\t\ttop_5 += 1\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :10]]:\r\n\t\t\t\t\t\t\ttop_10 += 1\r\n\t\t\t\treturn mAP, top_1 / probe_num, top_5 / probe_num, top_10 / probe_num\r\n\r\n\t\t\tmAP, top_1, top_5, top_10 = metrics(X, y, t_X, t_y)\r\n\t\t\treturn mAP, top_1, top_5, top_10\r\n\r\n\r\n\t\tmax_acc_1 = 0\r\n\t\tmax_acc_2 = 0\r\n\t\tbest_cluster_info_1 = [0, 0]\r\n\t\tbest_cluster_info_2 = [0, 0]\r\n\t\tcur_patience = 0\r\n\t\tif dataset == 'KGBD' or dataset == 'KS20':\r\n\t\t\tif FLAGS.gallery_view == '' and FLAGS.probe_view == '':\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='gallery', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\t\telse:\r\n\t\t\t\t_, _, _, _, _, _, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='view_' + FLAGS.gallery_view, time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\telif dataset == 'BIWI':\r\n\t\t\tif probe == 'Walking':\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='Still', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\t\telse:\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='Walking', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, 
nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\telif dataset == 'IAS':\r\n\t\t\tif probe == 'A':\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='B', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\t\telse:\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='A', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\telif dataset == 'CASIA_B':\r\n\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\tprocess.gen_train_data(dataset=dataset, split=probe, time_step=time_step,\r\n\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t PG_type=FLAGS.probe_type.split('.')[1])\r\n\t\tfor epoch in range(cluster_epochs):\r\n\t\t\ttrain_features_all, train_labels_all = train_loader(X_train_P, X_train_B, X_train_H_B, y_train)\r\n\t\t\t# train_features_all = train_features_all.numpy()\r\n\t\t\tgal_features_all, gal_labels_all = gal_loader(X_gal_P, X_gal_B, X_gal_H_B, y_gal)\r\n\t\t\tmAP, top_1, top_5, top_10 = evaluation()\r\n\t\t\tcur_patience += 1\r\n\t\t\tif epoch > 0 and top_1 > max_acc_2:\r\n\t\t\t\tmax_acc_1 = mAP\r\n\t\t\t\tbest_cluster_info_1[0] = num_cluster\r\n\t\t\t\tbest_cluster_info_1[1] = outlier_num\r\n\t\t\t\tcur_patience = 0\r\n\t\t\t\tif FLAGS.mode == 'DG' and FLAGS.S_dataset != '':\r\n\t\t\t\t\tif FLAGS.probe_view == '' and FLAGS.gallery_view == '':\r\n\t\t\t\t\t\t# checkpt_file = pre_dir + dataset + '/' + probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t\t\t\t\t\t# \tnhood) + '_' + str(\r\n\t\t\t\t\t\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t\t\t\t\t\t# FLAGS.density_lambda + '_' + change + '_best.ckpt'\r\n\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + change + '_best.ckpt'\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + '_' + FLAGS.probe_view + 'v' + \\\r\n\t\t\t\t\t\t FLAGS.gallery_view + change + '_best.ckpt'\r\n\t\t\t\t\tprint(checkpt_file)\r\n\t\t\t\t\tsaver.save(sess, checkpt_file)\r\n\t\t\tif epoch > 0 and top_1 > max_acc_2:\r\n\t\t\t\tmax_acc_2 = top_1\r\n\t\t\t\tbest_cluster_info_2[0] = num_cluster\r\n\t\t\t\tbest_cluster_info_2[1] = outlier_num\r\n\t\t\t\tcur_patience = 0\r\n\t\t\tif FLAGS.evaluate == '1':\r\n\t\t\t\tprint(\r\n\t\t\t\t\t\t'[Evaluate on %s - %s] | mAP: %.4f | Top-1: %.4f | Top-5: %.4f | Top-10: %.4f' % (\r\n\t\t\t\t\tFLAGS.dataset, FLAGS.probe, mAP,\r\n\t\t\t\t\ttop_1, top_5, top_10))\r\n\t\t\t\texit()\r\n\t\t\telse:\r\n\t\t\t\tif FLAGS.probe_view != '' and FLAGS.gallery_view != '':\r\n\t\t\t\t\tprint(\r\n\t\t\t\t\t'[DG] View: %s v %s | mAP: %.4f (%.4f) | Top-1: %.4f (%.4f) | Top-5: %.4f | Top-10: %.4f | % d + o: %d |' % (\r\n\t\t\t\t\t\tFLAGS.probe_view, FLAGS.gallery_view, mAP, max_acc_1,\r\n\t\t\t\t\t\ttop_1, max_acc_2, top_5, 
top_10,\r\n\t\t\t\t\t\tbest_cluster_info_2[0], best_cluster_info_2[1]))\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\r\n\t\t\t\t\t\t\t'[DG] %s - %s | mAP: %.4f (%.4f) | Top-1: %.4f (%.4f) | Top-5: %.4f | Top-10: %.4f | % d + o: %d |' % (\r\n\t\t\t\t\t\tFLAGS.dataset, FLAGS.probe, mAP, max_acc_1,\r\n\t\t\t\t\t\ttop_1, max_acc_2, top_5, top_10,\r\n\t\t\t\t\t\tbest_cluster_info_2[0], best_cluster_info_2[1]))\r\n\t\t\tif cur_patience == patience:\r\n\t\t\t\tbreak\r\n\t\t\trerank_dist = compute_jaccard_distance(train_features_all, k1=k1, k2=k2)\r\n\t\t\tif dataset == 'IAS' or dataset == 'KS20':\r\n\t\t\t\tcluster = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1)\r\n\t\t\telse:\r\n\t\t\t\tcluster = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1)\r\n\t\t\tpseudo_labels = cluster.fit_predict(rerank_dist)\r\n\t\t\t# discard outliers\r\n\t\t\ttrain_features_all = train_features_all[np.where(pseudo_labels != -1)]\r\n\r\n\t\t\tX_train_P_new = X_train_P[np.where(pseudo_labels != -1)]\r\n\t\t\tX_train_B_new = X_train_B[np.where(pseudo_labels != -1)]\r\n\t\t\tX_train_H_B_new = X_train_H_B[np.where(pseudo_labels != -1)]\r\n\t\t\toutlier_num = np.sum(pseudo_labels == -1)\r\n\t\t\tpseudo_labels = pseudo_labels[np.where(pseudo_labels != -1)]\r\n\t\t\t# print(pseudo_labels)\r\n\t\t\tnum_cluster = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)\r\n\r\n\r\n\t\t\tdef generate_cluster_features(labels, features):\r\n\t\t\t\tcenters = collections.defaultdict(list)\r\n\t\t\t\tfor i, label in enumerate(labels):\r\n\t\t\t\t\tif label == -1:\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\tcenters[labels[i]].append(features[i])\r\n\r\n\t\t\t\tcenters = [\r\n\t\t\t\t\ttorch.stack(centers[idx], dim=0).mean(0) for idx in sorted(centers.keys())\r\n\t\t\t\t]\r\n\t\t\t\t# print(centers)\r\n\r\n\t\t\t\tcenters = torch.stack(centers, dim=0)\r\n\t\t\t\treturn centers\r\n\r\n\r\n\t\t\tcluster_features = generate_cluster_features(pseudo_labels, train_features_all)\r\n\t\t\tcluster_features = cluster_features.numpy()\r\n\t\t\tcluster_features = cluster_features.astype(np.float64)\r\n\r\n\t\t\ttr_step = 0\r\n\t\t\ttr_size = X_train_P_new.shape[0]\r\n\t\t\t# pro_en_P = []\r\n\t\t\t# pro_en_B = []\r\n\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\tbreak\r\n\t\t\t\tX_input_P = X_train_P_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\tX_input_B = X_train_B_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\tX_input_H_B = X_train_H_B_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\tlabels = pseudo_labels[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t_, loss, P_en, B_en, all_features = sess.run(\r\n\t\t\t\t\t[cluster_train_op, contrastive_loss, P_encode, B_encode, all_ftr],\r\n\t\t\t\t\tfeed_dict={\r\n\t\t\t\t\t\tP_in: X_input_P,\r\n\t\t\t\t\t\tB_in: X_input_B,\r\n\t\t\t\t\t\tH_B_in: X_input_H_B,\r\n\t\t\t\t\t\tP_bias_in: biases_P,\r\n\t\t\t\t\t\tB_bias_in: biases_B,\r\n\t\t\t\t\t\tH_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\tis_train: True,\r\n\t\t\t\t\t\tattn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\tpseudo_lab: labels,\r\n\t\t\t\t\t\tcluster_ftr: cluster_features})\r\n\t\t\t\tif tr_step % display == 0:\r\n\t\t\t\t\tprint('[%s] Batch num: %d | Cluser num: %d | Outlier: %d | Loss: %.5f |' %\r\n\t\t\t\t\t (str(epoch), 
tr_step, num_cluster, outlier_num, loss))\r\n\t\t\t\ttr_step += 1\r\n\t\tsess.close()\r\n\r\n\r\nprint('----- Model hyperparams -----')\r\n# print('skeleton_nodes: ' + str(nb_nodes))\r\nprint('sequence_length: ' + str(time_step))\r\nprint('fusion_lambda: ' + str(fusion_lambda))\r\nprint('batch_size: ' + str(batch_size))\r\nprint('lr: ' + str(FLAGS.lr))\r\nprint('temperature: ' + FLAGS.t)\r\nprint('eps: ' + FLAGS.eps)\r\nprint('min_samples: ' + FLAGS.min_samples)\r\nprint('m: ' + FLAGS.m)\r\nprint('fusion_lambda: ' + FLAGS.fusion_lambda)\r\nprint('patience: ' + FLAGS.patience)\r\n\r\nprint('Mode: ' + FLAGS.mode)\r\n\r\nif FLAGS.mode == 'DG':\r\n\tprint('----- Mode Information -----')\r\n\tprint('Source Dataset: ' + FLAGS.S_dataset)\r\n\tprint('Target Dataset: ' + FLAGS.dataset)\r\n\tprint('Target Probe: ' + FLAGS.probe)\r\nelif FLAGS.mode == 'UF':\r\n\tprint('----- Dataset Information -----')\r\n\tprint('Dataset: ' + dataset)\r\n\tprint('Probe: ' + FLAGS.probe)\r\n\r\n\r\n" ]
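The training script stored in the code field above alternates between two steps each epoch: it clusters the current sequence-level features with DBSCAN on a precomputed (Jaccard re-ranked) distance matrix, discards the outlier samples, and averages the features of each cluster to obtain centroids that act as the "classes" in a temperature-scaled contrastive loss. Below is a minimal, self-contained sketch of only that pseudo-labelling step, assuming plain NumPy feature arrays; the function name pseudo_label_and_centroids and its default eps / min_samples values are illustrative assumptions, not taken from the record.

import numpy as np
from sklearn.cluster import DBSCAN

def pseudo_label_and_centroids(dist_mat, features, eps=0.75, min_samples=2):
    # dist_mat: (N, N) precomputed pairwise distances; features: (N, D) array.
    # Illustrative sketch only -- not the script's exact routine.
    labels = DBSCAN(eps=eps, min_samples=min_samples,
                    metric='precomputed', n_jobs=-1).fit_predict(dist_mat)
    keep = labels != -1                      # DBSCAN marks outliers with -1; drop them
    kept_labels, kept_feats = labels[keep], features[keep]
    # One centroid per cluster, ordered by cluster id; these are the vectors the
    # contrastive loss compares every sample against.
    centroids = np.stack([kept_feats[kept_labels == c].mean(axis=0)
                          for c in sorted(set(kept_labels.tolist()))])
    return kept_labels, kept_feats, centroids

In the record's script, the distance matrix comes from compute_jaccard_distance over the encoded features, and the centroids are re-estimated from scratch at the start of every epoch before the contrastive update.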
[ [ "tensorflow.concat", "tensorflow.zeros", "numpy.asarray", "tensorflow.reduce_sum", "sklearn.cluster.DBSCAN", "tensorflow.app.flags.DEFINE_string", "numpy.mean", "numpy.any", "tensorflow.get_default_graph", "numpy.where", "torch.pow", "tensorflow.Graph", "numpy.arange", "torch.from_numpy", "tensorflow.train.import_meta_graph", "tensorflow.ConfigProto", "numpy.argmax", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.train.Saver", "numpy.zeros", "tensorflow.nn.l2_normalize", "tensorflow.matmul", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "torch.stack", "numpy.argsort", "numpy.array", "numpy.sum", "tensorflow.nn.softmax", "tensorflow.local_variables_initializer", "tensorflow.transpose", "tensorflow.reshape", "tensorflow.expand_dims", "numpy.linalg.norm", "numpy.ones", "sklearn.metrics.average_precision_score", "tensorflow.variable_scope", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
EnjoyLifeFund/py36pkgs
[ "0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2", "0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2", "0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2", "0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2", "0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2", "0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2", "0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2" ]
[ "chainer/functions/pooling/pooling_2d.py", "pandas-0.21.0.dev0+412.g062f6f118-py3.6-macosx-10.12.6-x86_64.egg/pandas/tests/indexes/test_multi.py", "chainer/links/connection/n_step_gru.py", "pandas-0.21.0.dev0+412.g062f6f118-py3.6-macosx-10.12.6-x86_64.egg/pandas/core/categorical.py", "pandas-0.21.0.dev0+412.g062f6f118-py3.6-macosx-10.12.6-x86_64.egg/pandas/tests/groupby/test_whitelist.py", "pandas-0.21.0.dev0+412.g062f6f118-py3.6-macosx-10.12.6-x86_64.egg/pandas/tests/test_window.py", "theano/tensor/nnet/neighbours.py" ]
[ "import collections\n\nimport numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import conv\nfrom chainer.utils import type_check\n\n\nif cuda.cudnn_enabled:\n cudnn = cuda.cudnn\n libcudnn = cudnn.cudnn\n _cudnn_version = libcudnn.getVersion()\n\n\ndef _check_cudnn_acceptable_type(x_dtype):\n return _cudnn_version >= 3000 or x_dtype != numpy.float16\n\n\ndef _pair(x):\n if isinstance(x, collections.Iterable):\n return x\n return x, x\n\n\nclass Pooling2D(function.Function):\n\n \"\"\"Base class of pooling function over a set of 2d planes.\"\"\"\n\n def __init__(self, ksize, stride=None, pad=0, cover_all=True,\n use_cudnn=True):\n if stride is None:\n stride = ksize\n\n self.kh, self.kw = _pair(ksize)\n self.sy, self.sx = _pair(stride)\n self.ph, self.pw = _pair(pad)\n\n self.cover_all = cover_all\n self.use_cudnn = use_cudnn\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n in_types[0].dtype.kind == 'f',\n in_types[0].ndim == 4\n )\n\n def forward_gpu(self, x):\n # Implementation using cudnn\n x = cuda.cupy.ascontiguousarray(x[0])\n n, c, h, w = x.shape\n y_h = conv.get_conv_outsize(\n h, self.kh, self.sy, self.ph, self.cover_all)\n assert y_h > 0, 'Height in the output should be positive.'\n y_w = conv.get_conv_outsize(\n w, self.kw, self.sx, self.pw, self.cover_all)\n assert y_w > 0, 'Width in the output should be positive.'\n y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x.dtype)\n\n handle = cudnn.get_handle()\n pool_desc = self.create_pool_desc()\n x_desc = cudnn.create_tensor_descriptor(x)\n y_desc = cudnn.create_tensor_descriptor(y)\n\n oz_dtype = 'd' if x.dtype == 'd' else 'f'\n one = numpy.array(1, dtype=oz_dtype).ctypes\n zero = numpy.array(0, dtype=oz_dtype).ctypes\n libcudnn.poolingForward(\n handle, pool_desc.value, one.data, x_desc.value,\n x.data.ptr, zero.data, y_desc.value, y.data.ptr)\n self.y = y\n\n return y,\n\n def backward_gpu(self, x, gy):\n # Implementation using cudnn\n x = cuda.cupy.ascontiguousarray(x[0])\n handle = cudnn.get_handle()\n pool_desc = self.create_pool_desc()\n\n gy = cuda.cupy.ascontiguousarray(gy[0])\n\n x_desc = cudnn.create_tensor_descriptor(x)\n y_desc = cudnn.create_tensor_descriptor(gy)\n\n oz_dtype = 'd' if x.dtype == 'd' else 'f'\n one = numpy.array(1, dtype=oz_dtype).ctypes\n zero = numpy.array(0, dtype=oz_dtype).ctypes\n gx = cuda.cupy.empty_like(x)\n libcudnn.poolingBackward(\n handle, pool_desc.value, one.data, y_desc.value,\n self.y.data.ptr, y_desc.value, gy.data.ptr, x_desc.value,\n x.data.ptr, zero.data, x_desc.value, gx.data.ptr)\n return gx,\n\n def create_pool_desc(self):\n raise NotImplementedError()\n", "# -*- coding: utf-8 -*-\n\nimport re\nimport warnings\n\nfrom datetime import timedelta\nfrom itertools import product\n\nimport pytest\n\nimport numpy as np\n\nimport pandas as pd\n\nfrom pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,\n compat, date_range, period_range)\nfrom pandas.compat import PY3, long, lrange, lzip, range, u\nfrom pandas.errors import PerformanceWarning, UnsortedIndexError\nfrom pandas.core.indexes.base import InvalidIndexError\nfrom pandas._libs import lib\nfrom pandas._libs.lib import Timestamp\n\nimport pandas.util.testing as tm\n\nfrom pandas.util.testing import assert_almost_equal, assert_copy\n\nfrom .common import Base\n\n\nclass TestMultiIndex(Base):\n _holder = MultiIndex\n _compat_props = ['shape', 'ndim', 'size', 'itemsize']\n\n def setup_method(self, method):\n major_axis = Index(['foo', 'bar', 'baz', 
'qux'])\n minor_axis = Index(['one', 'two'])\n\n major_labels = np.array([0, 0, 1, 2, 3, 3])\n minor_labels = np.array([0, 1, 0, 1, 0, 1])\n self.index_names = ['first', 'second']\n self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels\n ], names=self.index_names,\n verify_integrity=False))\n self.setup_indices()\n\n def create_index(self):\n return self.index\n\n def test_boolean_context_compat2(self):\n\n # boolean context compat\n # GH7897\n i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])\n i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])\n common = i1.intersection(i2)\n\n def f():\n if common:\n pass\n\n tm.assert_raises_regex(ValueError, 'The truth value of a', f)\n\n def test_labels_dtypes(self):\n\n # GH 8456\n i = MultiIndex.from_tuples([('A', 1), ('A', 2)])\n assert i.labels[0].dtype == 'int8'\n assert i.labels[1].dtype == 'int8'\n\n i = MultiIndex.from_product([['a'], range(40)])\n assert i.labels[1].dtype == 'int8'\n i = MultiIndex.from_product([['a'], range(400)])\n assert i.labels[1].dtype == 'int16'\n i = MultiIndex.from_product([['a'], range(40000)])\n assert i.labels[1].dtype == 'int32'\n\n i = pd.MultiIndex.from_product([['a'], range(1000)])\n assert (i.labels[0] >= 0).all()\n assert (i.labels[1] >= 0).all()\n\n def test_where(self):\n i = MultiIndex.from_tuples([('A', 1), ('A', 2)])\n\n def f():\n i.where(True)\n\n pytest.raises(NotImplementedError, f)\n\n def test_where_array_like(self):\n i = MultiIndex.from_tuples([('A', 1), ('A', 2)])\n klasses = [list, tuple, np.array, pd.Series]\n cond = [False, True]\n\n for klass in klasses:\n f = lambda: i.where(klass(cond))\n pytest.raises(NotImplementedError, f)\n\n def test_repeat(self):\n reps = 2\n numbers = [1, 2, 3]\n names = np.array(['foo', 'bar'])\n\n m = MultiIndex.from_product([\n numbers, names], names=names)\n expected = MultiIndex.from_product([\n numbers, names.repeat(reps)], names=names)\n tm.assert_index_equal(m.repeat(reps), expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = m.repeat(n=reps)\n tm.assert_index_equal(result, expected)\n\n def test_numpy_repeat(self):\n reps = 2\n numbers = [1, 2, 3]\n names = np.array(['foo', 'bar'])\n\n m = MultiIndex.from_product([\n numbers, names], names=names)\n expected = MultiIndex.from_product([\n numbers, names.repeat(reps)], names=names)\n tm.assert_index_equal(np.repeat(m, reps), expected)\n\n msg = \"the 'axis' parameter is not supported\"\n tm.assert_raises_regex(\n ValueError, msg, np.repeat, m, reps, axis=1)\n\n def test_set_name_methods(self):\n # so long as these are synonyms, we don't need to test set_names\n assert self.index.rename == self.index.set_names\n new_names = [name + \"SUFFIX\" for name in self.index_names]\n ind = self.index.set_names(new_names)\n assert self.index.names == self.index_names\n assert ind.names == new_names\n with tm.assert_raises_regex(ValueError, \"^Length\"):\n ind.set_names(new_names + new_names)\n new_names2 = [name + \"SUFFIX2\" for name in new_names]\n res = ind.set_names(new_names2, inplace=True)\n assert res is None\n assert ind.names == new_names2\n\n # set names for specific level (# GH7792)\n ind = self.index.set_names(new_names[0], level=0)\n assert self.index.names == self.index_names\n assert ind.names == [new_names[0], self.index_names[1]]\n\n res = ind.set_names(new_names2[0], level=0, inplace=True)\n assert res is None\n assert ind.names == [new_names2[0], self.index_names[1]]\n\n # set names for multiple levels\n ind = 
self.index.set_names(new_names, level=[0, 1])\n assert self.index.names == self.index_names\n assert ind.names == new_names\n\n res = ind.set_names(new_names2, level=[0, 1], inplace=True)\n assert res is None\n assert ind.names == new_names2\n\n def test_set_levels(self):\n # side note - you probably wouldn't want to use levels and labels\n # directly like this - but it is possible.\n levels = self.index.levels\n new_levels = [[lev + 'a' for lev in level] for level in levels]\n\n def assert_matching(actual, expected, check_dtype=False):\n # avoid specifying internal representation\n # as much as possible\n assert len(actual) == len(expected)\n for act, exp in zip(actual, expected):\n act = np.asarray(act)\n exp = np.asarray(exp)\n tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)\n\n # level changing [w/o mutation]\n ind2 = self.index.set_levels(new_levels)\n assert_matching(ind2.levels, new_levels)\n assert_matching(self.index.levels, levels)\n\n # level changing [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_levels(new_levels, inplace=True)\n assert inplace_return is None\n assert_matching(ind2.levels, new_levels)\n\n # level changing specific level [w/o mutation]\n ind2 = self.index.set_levels(new_levels[0], level=0)\n assert_matching(ind2.levels, [new_levels[0], levels[1]])\n assert_matching(self.index.levels, levels)\n\n ind2 = self.index.set_levels(new_levels[1], level=1)\n assert_matching(ind2.levels, [levels[0], new_levels[1]])\n assert_matching(self.index.levels, levels)\n\n # level changing multiple levels [w/o mutation]\n ind2 = self.index.set_levels(new_levels, level=[0, 1])\n assert_matching(ind2.levels, new_levels)\n assert_matching(self.index.levels, levels)\n\n # level changing specific level [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)\n assert inplace_return is None\n assert_matching(ind2.levels, [new_levels[0], levels[1]])\n assert_matching(self.index.levels, levels)\n\n ind2 = self.index.copy()\n inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)\n assert inplace_return is None\n assert_matching(ind2.levels, [levels[0], new_levels[1]])\n assert_matching(self.index.levels, levels)\n\n # level changing multiple levels [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_levels(new_levels, level=[0, 1],\n inplace=True)\n assert inplace_return is None\n assert_matching(ind2.levels, new_levels)\n assert_matching(self.index.levels, levels)\n\n # illegal level changing should not change levels\n # GH 13754\n original_index = self.index.copy()\n for inplace in [True, False]:\n with tm.assert_raises_regex(ValueError, \"^On\"):\n self.index.set_levels(['c'], level=0, inplace=inplace)\n assert_matching(self.index.levels, original_index.levels,\n check_dtype=True)\n\n with tm.assert_raises_regex(ValueError, \"^On\"):\n self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,\n inplace=inplace)\n assert_matching(self.index.labels, original_index.labels,\n check_dtype=True)\n\n with tm.assert_raises_regex(TypeError, \"^Levels\"):\n self.index.set_levels('c', level=0, inplace=inplace)\n assert_matching(self.index.levels, original_index.levels,\n check_dtype=True)\n\n with tm.assert_raises_regex(TypeError, \"^Labels\"):\n self.index.set_labels(1, level=0, inplace=inplace)\n assert_matching(self.index.labels, original_index.labels,\n check_dtype=True)\n\n def test_set_labels(self):\n # side note - you probably wouldn't want to use levels and labels\n # 
directly like this - but it is possible.\n labels = self.index.labels\n major_labels, minor_labels = labels\n major_labels = [(x + 1) % 3 for x in major_labels]\n minor_labels = [(x + 1) % 1 for x in minor_labels]\n new_labels = [major_labels, minor_labels]\n\n def assert_matching(actual, expected):\n # avoid specifying internal representation\n # as much as possible\n assert len(actual) == len(expected)\n for act, exp in zip(actual, expected):\n act = np.asarray(act)\n exp = np.asarray(exp, dtype=np.int8)\n tm.assert_numpy_array_equal(act, exp)\n\n # label changing [w/o mutation]\n ind2 = self.index.set_labels(new_labels)\n assert_matching(ind2.labels, new_labels)\n assert_matching(self.index.labels, labels)\n\n # label changing [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_labels(new_labels, inplace=True)\n assert inplace_return is None\n assert_matching(ind2.labels, new_labels)\n\n # label changing specific level [w/o mutation]\n ind2 = self.index.set_labels(new_labels[0], level=0)\n assert_matching(ind2.labels, [new_labels[0], labels[1]])\n assert_matching(self.index.labels, labels)\n\n ind2 = self.index.set_labels(new_labels[1], level=1)\n assert_matching(ind2.labels, [labels[0], new_labels[1]])\n assert_matching(self.index.labels, labels)\n\n # label changing multiple levels [w/o mutation]\n ind2 = self.index.set_labels(new_labels, level=[0, 1])\n assert_matching(ind2.labels, new_labels)\n assert_matching(self.index.labels, labels)\n\n # label changing specific level [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)\n assert inplace_return is None\n assert_matching(ind2.labels, [new_labels[0], labels[1]])\n assert_matching(self.index.labels, labels)\n\n ind2 = self.index.copy()\n inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)\n assert inplace_return is None\n assert_matching(ind2.labels, [labels[0], new_labels[1]])\n assert_matching(self.index.labels, labels)\n\n # label changing multiple levels [w/ mutation]\n ind2 = self.index.copy()\n inplace_return = ind2.set_labels(new_labels, level=[0, 1],\n inplace=True)\n assert inplace_return is None\n assert_matching(ind2.labels, new_labels)\n assert_matching(self.index.labels, labels)\n\n def test_set_levels_labels_names_bad_input(self):\n levels, labels = self.index.levels, self.index.labels\n names = self.index.names\n\n with tm.assert_raises_regex(ValueError, 'Length of levels'):\n self.index.set_levels([levels[0]])\n\n with tm.assert_raises_regex(ValueError, 'Length of labels'):\n self.index.set_labels([labels[0]])\n\n with tm.assert_raises_regex(ValueError, 'Length of names'):\n self.index.set_names([names[0]])\n\n # shouldn't scalar data error, instead should demand list-like\n with tm.assert_raises_regex(TypeError, 'list of lists-like'):\n self.index.set_levels(levels[0])\n\n # shouldn't scalar data error, instead should demand list-like\n with tm.assert_raises_regex(TypeError, 'list of lists-like'):\n self.index.set_labels(labels[0])\n\n # shouldn't scalar data error, instead should demand list-like\n with tm.assert_raises_regex(TypeError, 'list-like'):\n self.index.set_names(names[0])\n\n # should have equal lengths\n with tm.assert_raises_regex(TypeError, 'list of lists-like'):\n self.index.set_levels(levels[0], level=[0, 1])\n\n with tm.assert_raises_regex(TypeError, 'list-like'):\n self.index.set_levels(levels, level=0)\n\n # should have equal lengths\n with tm.assert_raises_regex(TypeError, 'list of lists-like'):\n 
self.index.set_labels(labels[0], level=[0, 1])\n\n with tm.assert_raises_regex(TypeError, 'list-like'):\n self.index.set_labels(labels, level=0)\n\n # should have equal lengths\n with tm.assert_raises_regex(ValueError, 'Length of names'):\n self.index.set_names(names[0], level=[0, 1])\n\n with tm.assert_raises_regex(TypeError, 'string'):\n self.index.set_names(names, level=0)\n\n def test_set_levels_categorical(self):\n # GH13854\n index = MultiIndex.from_arrays([list(\"xyzx\"), [0, 1, 2, 3]])\n for ordered in [False, True]:\n cidx = CategoricalIndex(list(\"bac\"), ordered=ordered)\n result = index.set_levels(cidx, 0)\n expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],\n labels=index.labels)\n tm.assert_index_equal(result, expected)\n\n result_lvl = result.get_level_values(0)\n expected_lvl = CategoricalIndex(list(\"bacb\"),\n categories=cidx.categories,\n ordered=cidx.ordered)\n tm.assert_index_equal(result_lvl, expected_lvl)\n\n def test_metadata_immutable(self):\n levels, labels = self.index.levels, self.index.labels\n # shouldn't be able to set at either the top level or base level\n mutable_regex = re.compile('does not support mutable operations')\n with tm.assert_raises_regex(TypeError, mutable_regex):\n levels[0] = levels[0]\n with tm.assert_raises_regex(TypeError, mutable_regex):\n levels[0][0] = levels[0][0]\n # ditto for labels\n with tm.assert_raises_regex(TypeError, mutable_regex):\n labels[0] = labels[0]\n with tm.assert_raises_regex(TypeError, mutable_regex):\n labels[0][0] = labels[0][0]\n # and for names\n names = self.index.names\n with tm.assert_raises_regex(TypeError, mutable_regex):\n names[0] = names[0]\n\n def test_inplace_mutation_resets_values(self):\n levels = [['a', 'b', 'c'], [4]]\n levels2 = [[1, 2, 3], ['a']]\n labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]\n\n mi1 = MultiIndex(levels=levels, labels=labels)\n mi2 = MultiIndex(levels=levels2, labels=labels)\n vals = mi1.values.copy()\n vals2 = mi2.values.copy()\n\n assert mi1._tuples is not None\n\n # Make sure level setting works\n new_vals = mi1.set_levels(levels2).values\n tm.assert_almost_equal(vals2, new_vals)\n\n # Non-inplace doesn't kill _tuples [implementation detail]\n tm.assert_almost_equal(mi1._tuples, vals)\n\n # ...and values is still same too\n tm.assert_almost_equal(mi1.values, vals)\n\n # Inplace should kill _tuples\n mi1.set_levels(levels2, inplace=True)\n tm.assert_almost_equal(mi1.values, vals2)\n\n # Make sure label setting works too\n labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]\n exp_values = np.empty((6, ), dtype=object)\n exp_values[:] = [(long(1), 'a')] * 6\n\n # Must be 1d array of tuples\n assert exp_values.shape == (6, )\n new_values = mi2.set_labels(labels2).values\n\n # Not inplace shouldn't change\n tm.assert_almost_equal(mi2._tuples, vals2)\n\n # Should have correct values\n tm.assert_almost_equal(exp_values, new_values)\n\n # ...and again setting inplace should kill _tuples, etc\n mi2.set_labels(labels2, inplace=True)\n tm.assert_almost_equal(mi2.values, new_values)\n\n def test_copy_in_constructor(self):\n levels = np.array([\"a\", \"b\", \"c\"])\n labels = np.array([1, 1, 2, 0, 0, 1, 1])\n val = labels[0]\n mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],\n copy=True)\n assert mi.labels[0][0] == val\n labels[0] = 15\n assert mi.labels[0][0] == val\n val = levels[0]\n levels[0] = \"PANDA\"\n assert mi.levels[0][0] == val\n\n def test_set_value_keeps_names(self):\n # motivating example from #3742\n lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 
'grethe']\n lev2 = ['1', '2', '3'] * 2\n idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])\n df = pd.DataFrame(\n np.random.randn(6, 4),\n columns=['one', 'two', 'three', 'four'],\n index=idx)\n df = df.sort_index()\n assert df.is_copy is None\n assert df.index.names == ('Name', 'Number')\n df = df.set_value(('grethe', '4'), 'one', 99.34)\n assert df.is_copy is None\n assert df.index.names == ('Name', 'Number')\n\n def test_copy_names(self):\n # Check that adding a \"names\" parameter to the copy is honored\n # GH14302\n multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])\n multi_idx1 = multi_idx.copy()\n\n assert multi_idx.equals(multi_idx1)\n assert multi_idx.names == ['MyName1', 'MyName2']\n assert multi_idx1.names == ['MyName1', 'MyName2']\n\n multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])\n\n assert multi_idx.equals(multi_idx2)\n assert multi_idx.names == ['MyName1', 'MyName2']\n assert multi_idx2.names == ['NewName1', 'NewName2']\n\n multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])\n\n assert multi_idx.equals(multi_idx3)\n assert multi_idx.names == ['MyName1', 'MyName2']\n assert multi_idx3.names == ['NewName1', 'NewName2']\n\n def test_names(self):\n\n # names are assigned in setup\n names = self.index_names\n level_names = [level.name for level in self.index.levels]\n assert names == level_names\n\n # setting bad names on existing\n index = self.index\n tm.assert_raises_regex(ValueError, \"^Length of names\",\n setattr, index, \"names\",\n list(index.names) + [\"third\"])\n tm.assert_raises_regex(ValueError, \"^Length of names\",\n setattr, index, \"names\", [])\n\n # initializing with bad names (should always be equivalent)\n major_axis, minor_axis = self.index.levels\n major_labels, minor_labels = self.index.labels\n tm.assert_raises_regex(ValueError, \"^Length of names\", MultiIndex,\n levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels],\n names=['first'])\n tm.assert_raises_regex(ValueError, \"^Length of names\", MultiIndex,\n levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels],\n names=['first', 'second', 'third'])\n\n # names are assigned\n index.names = [\"a\", \"b\"]\n ind_names = list(index.names)\n level_names = [level.name for level in index.levels]\n assert ind_names == level_names\n\n def test_reference_duplicate_name(self):\n idx = MultiIndex.from_tuples(\n [('a', 'b'), ('c', 'd')], names=['x', 'x'])\n assert idx._reference_duplicate_name('x')\n\n idx = MultiIndex.from_tuples(\n [('a', 'b'), ('c', 'd')], names=['x', 'y'])\n assert not idx._reference_duplicate_name('x')\n\n def test_astype(self):\n expected = self.index.copy()\n actual = self.index.astype('O')\n assert_copy(actual.levels, expected.levels)\n assert_copy(actual.labels, expected.labels)\n self.check_level_names(actual, expected.names)\n\n with tm.assert_raises_regex(TypeError, \"^Setting.*dtype.*object\"):\n self.index.astype(np.dtype(int))\n\n def test_constructor_single_level(self):\n result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],\n labels=[[0, 1, 2, 3]], names=['first'])\n assert isinstance(result, MultiIndex)\n expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')\n tm.assert_index_equal(result.levels[0], expected)\n assert result.names == ['first']\n\n def test_constructor_no_levels(self):\n tm.assert_raises_regex(ValueError, \"non-zero number \"\n \"of levels/labels\",\n MultiIndex, levels=[], labels=[])\n both_re = re.compile('Must pass both levels and labels')\n with 
tm.assert_raises_regex(TypeError, both_re):\n MultiIndex(levels=[])\n with tm.assert_raises_regex(TypeError, both_re):\n MultiIndex(labels=[])\n\n def test_constructor_mismatched_label_levels(self):\n labels = [np.array([1]), np.array([2]), np.array([3])]\n levels = [\"a\"]\n tm.assert_raises_regex(ValueError, \"Length of levels and labels \"\n \"must be the same\", MultiIndex,\n levels=levels, labels=labels)\n length_error = re.compile('>= length of level')\n label_error = re.compile(r'Unequal label lengths: \\[4, 2\\]')\n\n # important to check that it's looking at the right thing.\n with tm.assert_raises_regex(ValueError, length_error):\n MultiIndex(levels=[['a'], ['b']],\n labels=[[0, 1, 2, 3], [0, 3, 4, 1]])\n\n with tm.assert_raises_regex(ValueError, label_error):\n MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])\n\n # external API\n with tm.assert_raises_regex(ValueError, length_error):\n self.index.copy().set_levels([['a'], ['b']])\n\n with tm.assert_raises_regex(ValueError, label_error):\n self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])\n\n # deprecated properties\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n with tm.assert_raises_regex(ValueError, length_error):\n self.index.copy().levels = [['a'], ['b']]\n\n with tm.assert_raises_regex(ValueError, label_error):\n self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]\n\n def assert_multiindex_copied(self, copy, original):\n # Levels should be (at least, shallow copied)\n tm.assert_copy(copy.levels, original.levels)\n tm.assert_almost_equal(copy.labels, original.labels)\n\n # Labels doesn't matter which way copied\n tm.assert_almost_equal(copy.labels, original.labels)\n assert copy.labels is not original.labels\n\n # Names doesn't matter which way copied\n assert copy.names == original.names\n assert copy.names is not original.names\n\n # Sort order should be copied\n assert copy.sortorder == original.sortorder\n\n def test_copy(self):\n i_copy = self.index.copy()\n\n self.assert_multiindex_copied(i_copy, self.index)\n\n def test_shallow_copy(self):\n i_copy = self.index._shallow_copy()\n\n self.assert_multiindex_copied(i_copy, self.index)\n\n def test_view(self):\n i_view = self.index.view()\n\n self.assert_multiindex_copied(i_view, self.index)\n\n def check_level_names(self, index, names):\n assert [level.name for level in index.levels] == list(names)\n\n def test_changing_names(self):\n\n # names should be applied to levels\n level_names = [level.name for level in self.index.levels]\n self.check_level_names(self.index, self.index.names)\n\n view = self.index.view()\n copy = self.index.copy()\n shallow_copy = self.index._shallow_copy()\n\n # changing names should change level names on object\n new_names = [name + \"a\" for name in self.index.names]\n self.index.names = new_names\n self.check_level_names(self.index, new_names)\n\n # but not on copies\n self.check_level_names(view, level_names)\n self.check_level_names(copy, level_names)\n self.check_level_names(shallow_copy, level_names)\n\n # and copies shouldn't change original\n shallow_copy.names = [name + \"c\" for name in shallow_copy.names]\n self.check_level_names(self.index, new_names)\n\n def test_duplicate_names(self):\n self.index.names = ['foo', 'foo']\n tm.assert_raises_regex(KeyError, 'Level foo not found',\n self.index._get_level_number, 'foo')\n\n def test_get_level_number_integer(self):\n self.index.names = [1, 0]\n assert self.index._get_level_number(1) == 0\n assert self.index._get_level_number(0) == 1\n 
pytest.raises(IndexError, self.index._get_level_number, 2)\n tm.assert_raises_regex(KeyError, 'Level fourth not found',\n self.index._get_level_number, 'fourth')\n\n def test_from_arrays(self):\n arrays = []\n for lev, lab in zip(self.index.levels, self.index.labels):\n arrays.append(np.asarray(lev).take(lab))\n\n result = MultiIndex.from_arrays(arrays)\n assert list(result) == list(self.index)\n\n # infer correctly\n result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],\n ['a', 'b']])\n assert result.levels[0].equals(Index([Timestamp('20130101')]))\n assert result.levels[1].equals(Index(['a', 'b']))\n\n def test_from_arrays_index_series_datetimetz(self):\n idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,\n tz='US/Eastern')\n idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,\n tz='Asia/Tokyo')\n result = pd.MultiIndex.from_arrays([idx1, idx2])\n tm.assert_index_equal(result.get_level_values(0), idx1)\n tm.assert_index_equal(result.get_level_values(1), idx2)\n\n result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])\n tm.assert_index_equal(result2.get_level_values(0), idx1)\n tm.assert_index_equal(result2.get_level_values(1), idx2)\n\n tm.assert_index_equal(result, result2)\n\n def test_from_arrays_index_series_timedelta(self):\n idx1 = pd.timedelta_range('1 days', freq='D', periods=3)\n idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)\n result = pd.MultiIndex.from_arrays([idx1, idx2])\n tm.assert_index_equal(result.get_level_values(0), idx1)\n tm.assert_index_equal(result.get_level_values(1), idx2)\n\n result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])\n tm.assert_index_equal(result2.get_level_values(0), idx1)\n tm.assert_index_equal(result2.get_level_values(1), idx2)\n\n tm.assert_index_equal(result, result2)\n\n def test_from_arrays_index_series_period(self):\n idx1 = pd.period_range('2011-01-01', freq='D', periods=3)\n idx2 = pd.period_range('2015-01-01', freq='H', periods=3)\n result = pd.MultiIndex.from_arrays([idx1, idx2])\n tm.assert_index_equal(result.get_level_values(0), idx1)\n tm.assert_index_equal(result.get_level_values(1), idx2)\n\n result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])\n tm.assert_index_equal(result2.get_level_values(0), idx1)\n tm.assert_index_equal(result2.get_level_values(1), idx2)\n\n tm.assert_index_equal(result, result2)\n\n def test_from_arrays_index_datetimelike_mixed(self):\n idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,\n tz='US/Eastern')\n idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)\n idx3 = pd.timedelta_range('1 days', freq='D', periods=3)\n idx4 = pd.period_range('2011-01-01', freq='D', periods=3)\n\n result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])\n tm.assert_index_equal(result.get_level_values(0), idx1)\n tm.assert_index_equal(result.get_level_values(1), idx2)\n tm.assert_index_equal(result.get_level_values(2), idx3)\n tm.assert_index_equal(result.get_level_values(3), idx4)\n\n result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),\n pd.Series(idx2),\n pd.Series(idx3),\n pd.Series(idx4)])\n tm.assert_index_equal(result2.get_level_values(0), idx1)\n tm.assert_index_equal(result2.get_level_values(1), idx2)\n tm.assert_index_equal(result2.get_level_values(2), idx3)\n tm.assert_index_equal(result2.get_level_values(3), idx4)\n\n tm.assert_index_equal(result, result2)\n\n def test_from_arrays_index_series_categorical(self):\n # GH13743\n idx1 = pd.CategoricalIndex(list(\"abcaab\"), 
categories=list(\"bac\"),\n ordered=False)\n idx2 = pd.CategoricalIndex(list(\"abcaab\"), categories=list(\"bac\"),\n ordered=True)\n\n result = pd.MultiIndex.from_arrays([idx1, idx2])\n tm.assert_index_equal(result.get_level_values(0), idx1)\n tm.assert_index_equal(result.get_level_values(1), idx2)\n\n result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])\n tm.assert_index_equal(result2.get_level_values(0), idx1)\n tm.assert_index_equal(result2.get_level_values(1), idx2)\n\n result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])\n tm.assert_index_equal(result3.get_level_values(0), idx1)\n tm.assert_index_equal(result3.get_level_values(1), idx2)\n\n def test_from_arrays_empty(self):\n # 0 levels\n with tm.assert_raises_regex(\n ValueError, \"Must pass non-zero number of levels/labels\"):\n MultiIndex.from_arrays(arrays=[])\n\n # 1 level\n result = MultiIndex.from_arrays(arrays=[[]], names=['A'])\n assert isinstance(result, MultiIndex)\n expected = Index([], name='A')\n tm.assert_index_equal(result.levels[0], expected)\n\n # N levels\n for N in [2, 3]:\n arrays = [[]] * N\n names = list('ABC')[:N]\n result = MultiIndex.from_arrays(arrays=arrays, names=names)\n expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,\n names=names)\n tm.assert_index_equal(result, expected)\n\n def test_from_arrays_invalid_input(self):\n invalid_inputs = [1, [1], [1, 2], [[1], 2],\n 'a', ['a'], ['a', 'b'], [['a'], 'b']]\n for i in invalid_inputs:\n pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)\n\n def test_from_arrays_different_lengths(self):\n # see gh-13599\n idx1 = [1, 2, 3]\n idx2 = ['a', 'b']\n tm.assert_raises_regex(ValueError, '^all arrays must '\n 'be same length$',\n MultiIndex.from_arrays, [idx1, idx2])\n\n idx1 = []\n idx2 = ['a', 'b']\n tm.assert_raises_regex(ValueError, '^all arrays must '\n 'be same length$',\n MultiIndex.from_arrays, [idx1, idx2])\n\n idx1 = [1, 2, 3]\n idx2 = []\n tm.assert_raises_regex(ValueError, '^all arrays must '\n 'be same length$',\n MultiIndex.from_arrays, [idx1, idx2])\n\n def test_from_product(self):\n\n first = ['foo', 'bar', 'buz']\n second = ['a', 'b', 'c']\n names = ['first', 'second']\n result = MultiIndex.from_product([first, second], names=names)\n\n tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),\n ('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),\n ('buz', 'c')]\n expected = MultiIndex.from_tuples(tuples, names=names)\n\n tm.assert_index_equal(result, expected)\n assert result.names == names\n\n def test_from_product_empty(self):\n # 0 levels\n with tm.assert_raises_regex(\n ValueError, \"Must pass non-zero number of levels/labels\"):\n MultiIndex.from_product([])\n\n # 1 level\n result = MultiIndex.from_product([[]], names=['A'])\n expected = pd.Index([], name='A')\n tm.assert_index_equal(result.levels[0], expected)\n\n # 2 levels\n l1 = [[], ['foo', 'bar', 'baz'], []]\n l2 = [[], [], ['a', 'b', 'c']]\n names = ['A', 'B']\n for first, second in zip(l1, l2):\n result = MultiIndex.from_product([first, second], names=names)\n expected = MultiIndex(levels=[first, second],\n labels=[[], []], names=names)\n tm.assert_index_equal(result, expected)\n\n # GH12258\n names = ['A', 'B', 'C']\n for N in range(4):\n lvl2 = lrange(N)\n result = MultiIndex.from_product([[], lvl2, []], names=names)\n expected = MultiIndex(levels=[[], lvl2, []],\n labels=[[], [], []], names=names)\n tm.assert_index_equal(result, expected)\n\n def test_from_product_invalid_input(self):\n invalid_inputs = [1, [1], [1, 2], [[1], 
2],\n 'a', ['a'], ['a', 'b'], [['a'], 'b']]\n for i in invalid_inputs:\n pytest.raises(TypeError, MultiIndex.from_product, iterables=i)\n\n def test_from_product_datetimeindex(self):\n dt_index = date_range('2000-01-01', periods=2)\n mi = pd.MultiIndex.from_product([[1, 2], dt_index])\n etalon = lib.list_to_object_array([(1, pd.Timestamp(\n '2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(\n '2000-01-01')), (2, pd.Timestamp('2000-01-02'))])\n tm.assert_numpy_array_equal(mi.values, etalon)\n\n def test_from_product_index_series_categorical(self):\n # GH13743\n first = ['foo', 'bar']\n for ordered in [False, True]:\n idx = pd.CategoricalIndex(list(\"abcaab\"), categories=list(\"bac\"),\n ordered=ordered)\n expected = pd.CategoricalIndex(list(\"abcaab\") + list(\"abcaab\"),\n categories=list(\"bac\"),\n ordered=ordered)\n\n for arr in [idx, pd.Series(idx), idx.values]:\n result = pd.MultiIndex.from_product([first, arr])\n tm.assert_index_equal(result.get_level_values(1), expected)\n\n def test_values_boxed(self):\n tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),\n (3, pd.Timestamp('2000-01-03')),\n (1, pd.Timestamp('2000-01-04')),\n (2, pd.Timestamp('2000-01-02')),\n (3, pd.Timestamp('2000-01-03'))]\n mi = pd.MultiIndex.from_tuples(tuples)\n tm.assert_numpy_array_equal(mi.values,\n lib.list_to_object_array(tuples))\n # Check that code branches for boxed values produce identical results\n tm.assert_numpy_array_equal(mi.values[:4], mi[:4].values)\n\n def test_append(self):\n result = self.index[:3].append(self.index[3:])\n assert result.equals(self.index)\n\n foos = [self.index[:1], self.index[1:3], self.index[3:]]\n result = foos[0].append(foos[1:])\n assert result.equals(self.index)\n\n # empty\n result = self.index.append([])\n assert result.equals(self.index)\n\n def test_append_mixed_dtypes(self):\n # GH 13660\n dti = date_range('2011-01-01', freq='M', periods=3,)\n dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')\n pi = period_range('2011-01', freq='M', periods=3)\n\n mi = MultiIndex.from_arrays([[1, 2, 3],\n [1.1, np.nan, 3.3],\n ['a', 'b', 'c'],\n dti, dti_tz, pi])\n assert mi.nlevels == 6\n\n res = mi.append(mi)\n exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],\n [1.1, np.nan, 3.3, 1.1, np.nan, 3.3],\n ['a', 'b', 'c', 'a', 'b', 'c'],\n dti.append(dti),\n dti_tz.append(dti_tz),\n pi.append(pi)])\n tm.assert_index_equal(res, exp)\n\n other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],\n ['x', 'y', 'z'], ['x', 'y', 'z'],\n ['x', 'y', 'z'], ['x', 'y', 'z']])\n\n res = mi.append(other)\n exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],\n [1.1, np.nan, 3.3, 'x', 'y', 'z'],\n ['a', 'b', 'c', 'x', 'y', 'z'],\n dti.append(pd.Index(['x', 'y', 'z'])),\n dti_tz.append(pd.Index(['x', 'y', 'z'])),\n pi.append(pd.Index(['x', 'y', 'z']))])\n tm.assert_index_equal(res, exp)\n\n def test_get_level_values(self):\n result = self.index.get_level_values(0)\n expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],\n name='first')\n tm.assert_index_equal(result, expected)\n assert result.name == 'first'\n\n result = self.index.get_level_values('first')\n expected = self.index.get_level_values(0)\n tm.assert_index_equal(result, expected)\n\n # GH 10460\n index = MultiIndex(levels=[CategoricalIndex(\n ['A', 'B']), CategoricalIndex([1, 2, 3])], labels=[np.array(\n [0, 0, 0, 1, 1, 1]), np.array([0, 1, 2, 0, 1, 2])])\n exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])\n tm.assert_index_equal(index.get_level_values(0), exp)\n exp = 
CategoricalIndex([1, 2, 3, 1, 2, 3])\n tm.assert_index_equal(index.get_level_values(1), exp)\n\n def test_get_level_values_na(self):\n arrays = [['a', 'b', 'b'], [1, np.nan, 2]]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(1)\n expected = np.array([1, np.nan, 2])\n tm.assert_numpy_array_equal(values.values.astype(float), expected)\n\n arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(1)\n expected = np.array([np.nan, np.nan, 2])\n tm.assert_numpy_array_equal(values.values.astype(float), expected)\n\n arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(0)\n expected = np.array([np.nan, np.nan, np.nan])\n tm.assert_numpy_array_equal(values.values.astype(float), expected)\n values = index.get_level_values(1)\n expected = np.array(['a', np.nan, 1], dtype=object)\n tm.assert_numpy_array_equal(values.values, expected)\n\n arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(1)\n expected = pd.DatetimeIndex([0, 1, pd.NaT])\n tm.assert_numpy_array_equal(values.values, expected.values)\n\n arrays = [[], []]\n index = pd.MultiIndex.from_arrays(arrays)\n values = index.get_level_values(0)\n assert values.shape == (0, )\n\n def test_reorder_levels(self):\n # this blows up\n tm.assert_raises_regex(IndexError, '^Too many levels',\n self.index.reorder_levels, [2, 1, 0])\n\n def test_nlevels(self):\n assert self.index.nlevels == 2\n\n def test_iter(self):\n result = list(self.index)\n expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),\n ('baz', 'two'), ('qux', 'one'), ('qux', 'two')]\n assert result == expected\n\n def test_legacy_pickle(self):\n if PY3:\n pytest.skip(\"testing for legacy pickles not \"\n \"support on py3\")\n\n path = tm.get_data_path('multiindex_v1.pickle')\n obj = pd.read_pickle(path)\n\n obj2 = MultiIndex.from_tuples(obj.values)\n assert obj.equals(obj2)\n\n res = obj.get_indexer(obj)\n exp = np.arange(len(obj), dtype=np.intp)\n assert_almost_equal(res, exp)\n\n res = obj.get_indexer(obj2[::-1])\n exp = obj.get_indexer(obj[::-1])\n exp2 = obj2.get_indexer(obj2[::-1])\n assert_almost_equal(res, exp)\n assert_almost_equal(exp, exp2)\n\n def test_legacy_v2_unpickle(self):\n\n # 0.7.3 -> 0.8.0 format manage\n path = tm.get_data_path('mindex_073.pickle')\n obj = pd.read_pickle(path)\n\n obj2 = MultiIndex.from_tuples(obj.values)\n assert obj.equals(obj2)\n\n res = obj.get_indexer(obj)\n exp = np.arange(len(obj), dtype=np.intp)\n assert_almost_equal(res, exp)\n\n res = obj.get_indexer(obj2[::-1])\n exp = obj.get_indexer(obj[::-1])\n exp2 = obj2.get_indexer(obj2[::-1])\n assert_almost_equal(res, exp)\n assert_almost_equal(exp, exp2)\n\n def test_roundtrip_pickle_with_tz(self):\n\n # GH 8367\n # round-trip of timezone\n index = MultiIndex.from_product(\n [[1, 2], ['a', 'b'], date_range('20130101', periods=3,\n tz='US/Eastern')\n ], names=['one', 'two', 'three'])\n unpickled = tm.round_trip_pickle(index)\n assert index.equal_levels(unpickled)\n\n def test_from_tuples_index_values(self):\n result = MultiIndex.from_tuples(self.index)\n assert (result.values == self.index.values).all()\n\n def test_contains(self):\n assert ('foo', 'two') in self.index\n assert ('bar', 'two') not in self.index\n assert None not in self.index\n\n def test_contains_top_level(self):\n midx = MultiIndex.from_product([['A', 'B'], [1, 2]])\n 
assert 'A' in midx\n assert 'A' not in midx._engine\n\n def test_contains_with_nat(self):\n # MI with a NaT\n mi = MultiIndex(levels=[['C'],\n pd.date_range('2012-01-01', periods=5)],\n labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],\n names=[None, 'B'])\n assert ('C', pd.Timestamp('2012-01-01')) in mi\n for val in mi.values:\n assert val in mi\n\n def test_is_all_dates(self):\n assert not self.index.is_all_dates\n\n def test_is_numeric(self):\n # MultiIndex is never numeric\n assert not self.index.is_numeric()\n\n def test_getitem(self):\n # scalar\n assert self.index[2] == ('bar', 'one')\n\n # slice\n result = self.index[2:5]\n expected = self.index[[2, 3, 4]]\n assert result.equals(expected)\n\n # boolean\n result = self.index[[True, False, True, False, True, True]]\n result2 = self.index[np.array([True, False, True, False, True, True])]\n expected = self.index[[0, 2, 4, 5]]\n assert result.equals(expected)\n assert result2.equals(expected)\n\n def test_getitem_group_select(self):\n sorted_idx, _ = self.index.sortlevel(0)\n assert sorted_idx.get_loc('baz') == slice(3, 4)\n assert sorted_idx.get_loc('foo') == slice(0, 2)\n\n def test_get_loc(self):\n assert self.index.get_loc(('foo', 'two')) == 1\n assert self.index.get_loc(('baz', 'two')) == 3\n pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))\n pytest.raises(KeyError, self.index.get_loc, 'quux')\n\n pytest.raises(NotImplementedError, self.index.get_loc, 'foo',\n method='nearest')\n\n # 3 levels\n index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(\n lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(\n [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])\n pytest.raises(KeyError, index.get_loc, (1, 1))\n assert index.get_loc((2, 0)) == slice(3, 5)\n\n def test_get_loc_duplicates(self):\n index = Index([2, 2, 2, 2])\n result = index.get_loc(2)\n expected = slice(0, 4)\n assert result == expected\n # pytest.raises(Exception, index.get_loc, 2)\n\n index = Index(['c', 'a', 'a', 'b', 'b'])\n rs = index.get_loc('c')\n xp = 0\n assert rs == xp\n\n def test_get_value_duplicates(self):\n index = MultiIndex(levels=[['D', 'B', 'C'],\n [0, 26, 27, 37, 57, 67, 75, 82]],\n labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],\n [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],\n names=['tag', 'day'])\n\n assert index.get_loc('D') == slice(0, 3)\n with pytest.raises(KeyError):\n index._engine.get_value(np.array([]), 'D')\n\n def test_get_loc_level(self):\n index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(\n lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(\n [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])\n\n loc, new_index = index.get_loc_level((0, 1))\n expected = slice(1, 2)\n exp_index = index[expected].droplevel(0).droplevel(0)\n assert loc == expected\n assert new_index.equals(exp_index)\n\n loc, new_index = index.get_loc_level((0, 1, 0))\n expected = 1\n assert loc == expected\n assert new_index is None\n\n pytest.raises(KeyError, index.get_loc_level, (2, 2))\n\n index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(\n [0, 0, 0, 0]), np.array([0, 1, 2, 3])])\n result, new_index = index.get_loc_level((2000, slice(None, None)))\n expected = slice(None, None)\n assert result == expected\n assert new_index.equals(index.droplevel(0))\n\n def test_get_loc_missing_nan(self):\n # GH 8569\n idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])\n assert isinstance(idx.get_loc(1), slice)\n pytest.raises(KeyError, idx.get_loc, 3)\n pytest.raises(KeyError, 
idx.get_loc, np.nan)\n pytest.raises(KeyError, idx.get_loc, [np.nan])\n\n def test_slice_locs(self):\n df = tm.makeTimeDataFrame()\n stacked = df.stack()\n idx = stacked.index\n\n slob = slice(*idx.slice_locs(df.index[5], df.index[15]))\n sliced = stacked[slob]\n expected = df[5:16].stack()\n tm.assert_almost_equal(sliced.values, expected.values)\n\n slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),\n df.index[15] - timedelta(seconds=30)))\n sliced = stacked[slob]\n expected = df[6:15].stack()\n tm.assert_almost_equal(sliced.values, expected.values)\n\n def test_slice_locs_with_type_mismatch(self):\n df = tm.makeTimeDataFrame()\n stacked = df.stack()\n idx = stacked.index\n tm.assert_raises_regex(TypeError, '^Level type mismatch',\n idx.slice_locs, (1, 3))\n tm.assert_raises_regex(TypeError, '^Level type mismatch',\n idx.slice_locs,\n df.index[5] + timedelta(\n seconds=30), (5, 2))\n df = tm.makeCustomDataframe(5, 5)\n stacked = df.stack()\n idx = stacked.index\n with tm.assert_raises_regex(TypeError, '^Level type mismatch'):\n idx.slice_locs(timedelta(seconds=30))\n # TODO: Try creating a UnicodeDecodeError in exception message\n with tm.assert_raises_regex(TypeError, '^Level type mismatch'):\n idx.slice_locs(df.index[1], (16, \"a\"))\n\n def test_slice_locs_not_sorted(self):\n index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(\n lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(\n [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])\n\n tm.assert_raises_regex(KeyError, \"[Kk]ey length.*greater than \"\n \"MultiIndex lexsort depth\",\n index.slice_locs, (1, 0, 1), (2, 1, 0))\n\n # works\n sorted_index, _ = index.sortlevel(0)\n # should there be a test case here???\n sorted_index.slice_locs((1, 0, 1), (2, 1, 0))\n\n def test_slice_locs_partial(self):\n sorted_idx, _ = self.index.sortlevel(0)\n\n result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))\n assert result == (1, 5)\n\n result = sorted_idx.slice_locs(None, ('qux', 'one'))\n assert result == (0, 5)\n\n result = sorted_idx.slice_locs(('foo', 'two'), None)\n assert result == (1, len(sorted_idx))\n\n result = sorted_idx.slice_locs('bar', 'baz')\n assert result == (2, 4)\n\n def test_slice_locs_not_contained(self):\n # some searchsorted action\n\n index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],\n labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],\n [0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)\n\n result = index.slice_locs((1, 0), (5, 2))\n assert result == (3, 6)\n\n result = index.slice_locs(1, 5)\n assert result == (3, 6)\n\n result = index.slice_locs((2, 2), (5, 2))\n assert result == (3, 6)\n\n result = index.slice_locs(2, 5)\n assert result == (3, 6)\n\n result = index.slice_locs((1, 0), (6, 3))\n assert result == (3, 8)\n\n result = index.slice_locs(-1, 10)\n assert result == (0, len(index))\n\n def test_consistency(self):\n # need to construct an overflow\n major_axis = lrange(70000)\n minor_axis = lrange(10)\n\n major_labels = np.arange(70000)\n minor_labels = np.repeat(lrange(10), 7000)\n\n # the fact that is works means it's consistent\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n\n # inconsistent\n major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])\n minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n\n assert not index.is_unique\n\n def test_truncate(self):\n major_axis = Index(lrange(4))\n minor_axis = 
Index(lrange(2))\n\n major_labels = np.array([0, 0, 1, 2, 3, 3])\n minor_labels = np.array([0, 1, 0, 1, 0, 1])\n\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n\n result = index.truncate(before=1)\n assert 'foo' not in result.levels[0]\n assert 1 in result.levels[0]\n\n result = index.truncate(after=1)\n assert 2 not in result.levels[0]\n assert 1 in result.levels[0]\n\n result = index.truncate(before=1, after=2)\n assert len(result.levels[0]) == 2\n\n # after < before\n pytest.raises(ValueError, index.truncate, 3, 1)\n\n def test_get_indexer(self):\n major_axis = Index(lrange(4))\n minor_axis = Index(lrange(2))\n\n major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)\n minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)\n\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n idx1 = index[:5]\n idx2 = index[[1, 3, 5]]\n\n r1 = idx1.get_indexer(idx2)\n assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))\n\n r1 = idx2.get_indexer(idx1, method='pad')\n e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)\n assert_almost_equal(r1, e1)\n\n r2 = idx2.get_indexer(idx1[::-1], method='pad')\n assert_almost_equal(r2, e1[::-1])\n\n rffill1 = idx2.get_indexer(idx1, method='ffill')\n assert_almost_equal(r1, rffill1)\n\n r1 = idx2.get_indexer(idx1, method='backfill')\n e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)\n assert_almost_equal(r1, e1)\n\n r2 = idx2.get_indexer(idx1[::-1], method='backfill')\n assert_almost_equal(r2, e1[::-1])\n\n rbfill1 = idx2.get_indexer(idx1, method='bfill')\n assert_almost_equal(r1, rbfill1)\n\n # pass non-MultiIndex\n r1 = idx1.get_indexer(idx2.values)\n rexp1 = idx1.get_indexer(idx2)\n assert_almost_equal(r1, rexp1)\n\n r1 = idx1.get_indexer([1, 2, 3])\n assert (r1 == [-1, -1, -1]).all()\n\n # create index with duplicates\n idx1 = Index(lrange(10) + lrange(10))\n idx2 = Index(lrange(20))\n\n msg = \"Reindexing only valid with uniquely valued Index objects\"\n with tm.assert_raises_regex(InvalidIndexError, msg):\n idx1.get_indexer(idx2)\n\n def test_get_indexer_nearest(self):\n midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])\n with pytest.raises(NotImplementedError):\n midx.get_indexer(['a'], method='nearest')\n with pytest.raises(NotImplementedError):\n midx.get_indexer(['a'], method='pad', tolerance=2)\n\n def test_hash_collisions(self):\n # non-smoke test that we don't get hash collisions\n\n index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],\n names=['one', 'two'])\n result = index.get_indexer(index.values)\n tm.assert_numpy_array_equal(result, np.arange(\n len(index), dtype='intp'))\n\n for i in [0, 1, len(index) - 2, len(index) - 1]:\n result = index.get_loc(index[i])\n assert result == i\n\n def test_format(self):\n self.index.format()\n self.index[:0].format()\n\n def test_format_integer_names(self):\n index = MultiIndex(levels=[[0, 1], [0, 1]],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])\n index.format(names=True)\n\n def test_format_sparse_display(self):\n index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],\n labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])\n\n result = index.format()\n assert result[3] == '1 0 0 0'\n\n def test_format_sparse_config(self):\n warn_filters = warnings.filters\n warnings.filterwarnings('ignore', category=FutureWarning,\n module=\".*format\")\n # GH1538\n pd.set_option('display.multi_sparse', False)\n\n result = self.index.format()\n assert result[1] 
== 'foo two'\n\n tm.reset_display_options()\n\n warnings.filters = warn_filters\n\n def test_to_frame(self):\n tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]\n\n index = MultiIndex.from_tuples(tuples)\n result = index.to_frame(index=False)\n expected = DataFrame(tuples)\n tm.assert_frame_equal(result, expected)\n\n result = index.to_frame()\n expected.index = index\n tm.assert_frame_equal(result, expected)\n\n tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]\n index = MultiIndex.from_tuples(tuples, names=['first', 'second'])\n result = index.to_frame(index=False)\n expected = DataFrame(tuples)\n expected.columns = ['first', 'second']\n tm.assert_frame_equal(result, expected)\n\n result = index.to_frame()\n expected.index = index\n tm.assert_frame_equal(result, expected)\n\n index = MultiIndex.from_product([range(5),\n pd.date_range('20130101', periods=3)])\n result = index.to_frame(index=False)\n expected = DataFrame(\n {0: np.repeat(np.arange(5, dtype='int64'), 3),\n 1: np.tile(pd.date_range('20130101', periods=3), 5)})\n tm.assert_frame_equal(result, expected)\n\n index = MultiIndex.from_product([range(5),\n pd.date_range('20130101', periods=3)])\n result = index.to_frame()\n expected.index = index\n tm.assert_frame_equal(result, expected)\n\n def test_to_hierarchical(self):\n index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (\n 2, 'two')])\n result = index.to_hierarchical(3)\n expected = MultiIndex(levels=[[1, 2], ['one', 'two']],\n labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])\n tm.assert_index_equal(result, expected)\n assert result.names == index.names\n\n # K > 1\n result = index.to_hierarchical(3, 2)\n expected = MultiIndex(levels=[[1, 2], ['one', 'two']],\n labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])\n tm.assert_index_equal(result, expected)\n assert result.names == index.names\n\n # non-sorted\n index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),\n (2, 'a'), (2, 'b')],\n names=['N1', 'N2'])\n\n result = index.to_hierarchical(2)\n expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),\n (1, 'b'),\n (2, 'a'), (2, 'a'),\n (2, 'b'), (2, 'b')],\n names=['N1', 'N2'])\n tm.assert_index_equal(result, expected)\n assert result.names == index.names\n\n def test_bounds(self):\n self.index._bounds\n\n def test_equals_multi(self):\n assert self.index.equals(self.index)\n assert not self.index.equals(self.index.values)\n assert self.index.equals(Index(self.index.values))\n\n assert self.index.equal_levels(self.index)\n assert not self.index.equals(self.index[:-1])\n assert not self.index.equals(self.index[-1])\n\n # different number of levels\n index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(\n lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(\n [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])\n\n index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])\n assert not index.equals(index2)\n assert not index.equal_levels(index2)\n\n # levels are different\n major_axis = Index(lrange(4))\n minor_axis = Index(lrange(2))\n\n major_labels = np.array([0, 0, 1, 2, 2, 3])\n minor_labels = np.array([0, 1, 0, 0, 1, 0])\n\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n assert not self.index.equals(index)\n assert not self.index.equal_levels(index)\n\n # some of the labels are different\n major_axis = Index(['foo', 'bar', 'baz', 'qux'])\n minor_axis = 
Index(['one', 'two'])\n\n major_labels = np.array([0, 0, 2, 2, 3, 3])\n minor_labels = np.array([0, 1, 0, 1, 0, 1])\n\n index = MultiIndex(levels=[major_axis, minor_axis],\n labels=[major_labels, minor_labels])\n assert not self.index.equals(index)\n\n def test_equals_missing_values(self):\n # make sure take is not using -1\n i = pd.MultiIndex.from_tuples([(0, pd.NaT),\n (0, pd.Timestamp('20130101'))])\n result = i[0:1].equals(i[0])\n assert not result\n result = i[1:2].equals(i[1])\n assert not result\n\n def test_identical(self):\n mi = self.index.copy()\n mi2 = self.index.copy()\n assert mi.identical(mi2)\n\n mi = mi.set_names(['new1', 'new2'])\n assert mi.equals(mi2)\n assert not mi.identical(mi2)\n\n mi2 = mi2.set_names(['new1', 'new2'])\n assert mi.identical(mi2)\n\n mi3 = Index(mi.tolist(), names=mi.names)\n mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)\n assert mi.identical(mi3)\n assert not mi.identical(mi4)\n assert mi.equals(mi4)\n\n def test_is_(self):\n mi = MultiIndex.from_tuples(lzip(range(10), range(10)))\n assert mi.is_(mi)\n assert mi.is_(mi.view())\n assert mi.is_(mi.view().view().view().view())\n mi2 = mi.view()\n # names are metadata, they don't change id\n mi2.names = [\"A\", \"B\"]\n assert mi2.is_(mi)\n assert mi.is_(mi2)\n\n assert mi.is_(mi.set_names([\"C\", \"D\"]))\n mi2 = mi.view()\n mi2.set_names([\"E\", \"F\"], inplace=True)\n assert mi.is_(mi2)\n # levels are inherent properties, they change identity\n mi3 = mi2.set_levels([lrange(10), lrange(10)])\n assert not mi3.is_(mi2)\n # shouldn't change\n assert mi2.is_(mi)\n mi4 = mi3.view()\n mi4.set_levels([[1 for _ in range(10)], lrange(10)], inplace=True)\n assert not mi4.is_(mi3)\n mi5 = mi.view()\n mi5.set_levels(mi5.levels, inplace=True)\n assert not mi5.is_(mi)\n\n def test_union(self):\n piece1 = self.index[:5][::-1]\n piece2 = self.index[3:]\n\n the_union = piece1 | piece2\n\n tups = sorted(self.index.values)\n expected = MultiIndex.from_tuples(tups)\n\n assert the_union.equals(expected)\n\n # corner case, pass self or empty thing:\n the_union = self.index.union(self.index)\n assert the_union is self.index\n\n the_union = self.index.union(self.index[:0])\n assert the_union is self.index\n\n # won't work in python 3\n # tuples = self.index.values\n # result = self.index[:4] | tuples[4:]\n # assert result.equals(tuples)\n\n # not valid for python 3\n # def test_union_with_regular_index(self):\n # other = Index(['A', 'B', 'C'])\n\n # result = other.union(self.index)\n # assert ('foo', 'one') in result\n # assert 'B' in result\n\n # result2 = self.index.union(other)\n # assert result.equals(result2)\n\n def test_intersection(self):\n piece1 = self.index[:5][::-1]\n piece2 = self.index[3:]\n\n the_int = piece1 & piece2\n tups = sorted(self.index[3:5].values)\n expected = MultiIndex.from_tuples(tups)\n assert the_int.equals(expected)\n\n # corner case, pass self\n the_int = self.index.intersection(self.index)\n assert the_int is self.index\n\n # empty intersection: disjoint\n empty = self.index[:2] & self.index[2:]\n expected = self.index[:0]\n assert empty.equals(expected)\n\n # can't do in python 3\n # tuples = self.index.values\n # result = self.index & tuples\n # assert result.equals(tuples)\n\n def test_sub(self):\n\n first = self.index\n\n # - now raises (previously was set op difference)\n with pytest.raises(TypeError):\n first - self.index[-3:]\n with pytest.raises(TypeError):\n self.index[-3:] - first\n with pytest.raises(TypeError):\n self.index[-3:] - first.tolist()\n with 
pytest.raises(TypeError):\n first.tolist() - self.index[-3:]\n\n def test_difference(self):\n\n first = self.index\n result = first.difference(self.index[-3:])\n expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),\n sortorder=0,\n names=self.index.names)\n\n assert isinstance(result, MultiIndex)\n assert result.equals(expected)\n assert result.names == self.index.names\n\n # empty difference: reflexive\n result = self.index.difference(self.index)\n expected = self.index[:0]\n assert result.equals(expected)\n assert result.names == self.index.names\n\n # empty difference: superset\n result = self.index[-3:].difference(self.index)\n expected = self.index[:0]\n assert result.equals(expected)\n assert result.names == self.index.names\n\n # empty difference: degenerate\n result = self.index[:0].difference(self.index)\n expected = self.index[:0]\n assert result.equals(expected)\n assert result.names == self.index.names\n\n # names not the same\n chunklet = self.index[-3:]\n chunklet.names = ['foo', 'baz']\n result = first.difference(chunklet)\n assert result.names == (None, None)\n\n # empty, but non-equal\n result = self.index.difference(self.index.sortlevel(1)[0])\n assert len(result) == 0\n\n # raise Exception called with non-MultiIndex\n result = first.difference(first.values)\n assert result.equals(first[:0])\n\n # name from empty array\n result = first.difference([])\n assert first.equals(result)\n assert first.names == result.names\n\n # name from non-empty array\n result = first.difference([('foo', 'one')])\n expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (\n 'foo', 'two'), ('qux', 'one'), ('qux', 'two')])\n expected.names = first.names\n assert first.names == result.names\n tm.assert_raises_regex(TypeError, \"other must be a MultiIndex \"\n \"or a list of tuples\",\n first.difference, [1, 2, 3, 4, 5])\n\n def test_from_tuples(self):\n tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '\n 'from empty list',\n MultiIndex.from_tuples, [])\n\n idx = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])\n assert len(idx) == 2\n\n def test_from_tuples_empty(self):\n # GH 16777\n result = MultiIndex.from_tuples([], names=['a', 'b'])\n expected = MultiIndex.from_arrays(arrays=[[], []],\n names=['a', 'b'])\n tm.assert_index_equal(result, expected)\n\n def test_argsort(self):\n result = self.index.argsort()\n expected = self.index.values.argsort()\n tm.assert_numpy_array_equal(result, expected)\n\n def test_sortlevel(self):\n import random\n\n tuples = list(self.index)\n random.shuffle(tuples)\n\n index = MultiIndex.from_tuples(tuples)\n\n sorted_idx, _ = index.sortlevel(0)\n expected = MultiIndex.from_tuples(sorted(tuples))\n assert sorted_idx.equals(expected)\n\n sorted_idx, _ = index.sortlevel(0, ascending=False)\n assert sorted_idx.equals(expected[::-1])\n\n sorted_idx, _ = index.sortlevel(1)\n by1 = sorted(tuples, key=lambda x: (x[1], x[0]))\n expected = MultiIndex.from_tuples(by1)\n assert sorted_idx.equals(expected)\n\n sorted_idx, _ = index.sortlevel(1, ascending=False)\n assert sorted_idx.equals(expected[::-1])\n\n def test_sortlevel_not_sort_remaining(self):\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)\n assert sorted_idx.equals(mi)\n\n def test_sortlevel_deterministic(self):\n tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),\n ('foo', 'one'), ('baz', 'two'), ('qux', 'one')]\n\n index = MultiIndex.from_tuples(tuples)\n\n sorted_idx, _ = 
index.sortlevel(0)\n expected = MultiIndex.from_tuples(sorted(tuples))\n assert sorted_idx.equals(expected)\n\n sorted_idx, _ = index.sortlevel(0, ascending=False)\n assert sorted_idx.equals(expected[::-1])\n\n sorted_idx, _ = index.sortlevel(1)\n by1 = sorted(tuples, key=lambda x: (x[1], x[0]))\n expected = MultiIndex.from_tuples(by1)\n assert sorted_idx.equals(expected)\n\n sorted_idx, _ = index.sortlevel(1, ascending=False)\n assert sorted_idx.equals(expected[::-1])\n\n def test_dims(self):\n pass\n\n def test_drop(self):\n dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])\n\n index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])\n dropped2 = self.index.drop(index)\n\n expected = self.index[[0, 2, 3, 5]]\n tm.assert_index_equal(dropped, expected)\n tm.assert_index_equal(dropped2, expected)\n\n dropped = self.index.drop(['bar'])\n expected = self.index[[0, 1, 3, 4, 5]]\n tm.assert_index_equal(dropped, expected)\n\n dropped = self.index.drop('foo')\n expected = self.index[[2, 3, 4, 5]]\n tm.assert_index_equal(dropped, expected)\n\n index = MultiIndex.from_tuples([('bar', 'two')])\n pytest.raises(KeyError, self.index.drop, [('bar', 'two')])\n pytest.raises(KeyError, self.index.drop, index)\n pytest.raises(KeyError, self.index.drop, ['foo', 'two'])\n\n # partially correct argument\n mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])\n pytest.raises(KeyError, self.index.drop, mixed_index)\n\n # error='ignore'\n dropped = self.index.drop(index, errors='ignore')\n expected = self.index[[0, 1, 2, 3, 4, 5]]\n tm.assert_index_equal(dropped, expected)\n\n dropped = self.index.drop(mixed_index, errors='ignore')\n expected = self.index[[0, 1, 2, 3, 5]]\n tm.assert_index_equal(dropped, expected)\n\n dropped = self.index.drop(['foo', 'two'], errors='ignore')\n expected = self.index[[2, 3, 4, 5]]\n tm.assert_index_equal(dropped, expected)\n\n # mixed partial / full drop\n dropped = self.index.drop(['foo', ('qux', 'one')])\n expected = self.index[[2, 3, 5]]\n tm.assert_index_equal(dropped, expected)\n\n # mixed partial / full drop / error='ignore'\n mixed_index = ['foo', ('qux', 'one'), 'two']\n pytest.raises(KeyError, self.index.drop, mixed_index)\n dropped = self.index.drop(mixed_index, errors='ignore')\n expected = self.index[[2, 3, 5]]\n tm.assert_index_equal(dropped, expected)\n\n def test_droplevel_with_names(self):\n index = self.index[self.index.get_loc('foo')]\n dropped = index.droplevel(0)\n assert dropped.name == 'second'\n\n index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(\n lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(\n [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],\n names=['one', 'two', 'three'])\n dropped = index.droplevel(0)\n assert dropped.names == ('two', 'three')\n\n dropped = index.droplevel('two')\n expected = index.droplevel(1)\n assert dropped.equals(expected)\n\n def test_droplevel_multiple(self):\n index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(\n lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(\n [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],\n names=['one', 'two', 'three'])\n\n dropped = index[:2].droplevel(['three', 'one'])\n expected = index[:2].droplevel(2).droplevel(0)\n assert dropped.equals(expected)\n\n def test_drop_not_lexsorted(self):\n # GH 12078\n\n # define the lexsorted version of the multi-index\n tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]\n lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 
'c'])\n assert lexsorted_mi.is_lexsorted()\n\n # and the not-lexsorted version\n df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],\n data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])\n df = df.pivot_table(index='a', columns=['b', 'c'], values='d')\n df = df.reset_index()\n not_lexsorted_mi = df.columns\n assert not not_lexsorted_mi.is_lexsorted()\n\n # compare the results\n tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)\n with tm.assert_produces_warning(PerformanceWarning):\n tm.assert_index_equal(lexsorted_mi.drop('a'),\n not_lexsorted_mi.drop('a'))\n\n def test_insert(self):\n # key contained in all levels\n new_index = self.index.insert(0, ('bar', 'two'))\n assert new_index.equal_levels(self.index)\n assert new_index[0] == ('bar', 'two')\n\n # key not contained in all levels\n new_index = self.index.insert(0, ('abc', 'three'))\n\n exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')\n tm.assert_index_equal(new_index.levels[0], exp0)\n\n exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')\n tm.assert_index_equal(new_index.levels[1], exp1)\n assert new_index[0] == ('abc', 'three')\n\n # key wrong length\n msg = \"Item must have length equal to number of levels\"\n with tm.assert_raises_regex(ValueError, msg):\n self.index.insert(0, ('foo2', ))\n\n left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],\n columns=['1st', '2nd', '3rd'])\n left.set_index(['1st', '2nd'], inplace=True)\n ts = left['3rd'].copy(deep=True)\n\n left.loc[('b', 'x'), '3rd'] = 2\n left.loc[('b', 'a'), '3rd'] = -1\n left.loc[('b', 'b'), '3rd'] = 3\n left.loc[('a', 'x'), '3rd'] = 4\n left.loc[('a', 'w'), '3rd'] = 5\n left.loc[('a', 'a'), '3rd'] = 6\n\n ts.loc[('b', 'x')] = 2\n ts.loc['b', 'a'] = -1\n ts.loc[('b', 'b')] = 3\n ts.loc['a', 'x'] = 4\n ts.loc[('a', 'w')] = 5\n ts.loc['a', 'a'] = 6\n\n right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],\n ['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],\n ['a', 'w', 5], ['a', 'a', 6]],\n columns=['1st', '2nd', '3rd'])\n right.set_index(['1st', '2nd'], inplace=True)\n # FIXME data types changes to float because\n # of intermediate nan insertion;\n tm.assert_frame_equal(left, right, check_dtype=False)\n tm.assert_series_equal(ts, right['3rd'])\n\n # GH9250\n idx = [('test1', i) for i in range(5)] + \\\n [('test2', i) for i in range(6)] + \\\n [('test', 17), ('test', 18)]\n\n left = pd.Series(np.linspace(0, 10, 11),\n pd.MultiIndex.from_tuples(idx[:-2]))\n\n left.loc[('test', 17)] = 11\n left.loc[('test', 18)] = 12\n\n right = pd.Series(np.linspace(0, 12, 13),\n pd.MultiIndex.from_tuples(idx))\n\n tm.assert_series_equal(left, right)\n\n def test_take_preserve_name(self):\n taken = self.index.take([3, 0, 1])\n assert taken.names == self.index.names\n\n def test_take_fill_value(self):\n # GH 12631\n vals = [['A', 'B'],\n [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]\n idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])\n\n result = idx.take(np.array([1, 0, -1]))\n exp_vals = [('A', pd.Timestamp('2011-01-02')),\n ('A', pd.Timestamp('2011-01-01')),\n ('B', pd.Timestamp('2011-01-02'))]\n expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n exp_vals = [('A', pd.Timestamp('2011-01-02')),\n ('A', pd.Timestamp('2011-01-01')),\n (np.nan, pd.NaT)]\n expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = 
idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n exp_vals = [('A', pd.Timestamp('2011-01-02')),\n ('A', pd.Timestamp('2011-01-01')),\n ('B', pd.Timestamp('2011-01-02'))]\n expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n def take_invalid_kwargs(self):\n vals = [['A', 'B'],\n [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]\n idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])\n indices = [1, 2]\n\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n tm.assert_raises_regex(TypeError, msg, idx.take,\n indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n tm.assert_raises_regex(ValueError, msg, idx.take,\n indices, mode='clip')\n\n def test_join_level(self):\n def _check_how(other, how):\n join_index, lidx, ridx = other.join(self.index, how=how,\n level='second',\n return_indexers=True)\n\n exp_level = other.join(self.index.levels[1], how=how)\n assert join_index.levels[0].equals(self.index.levels[0])\n assert join_index.levels[1].equals(exp_level)\n\n # pare down levels\n mask = np.array(\n [x[1] in exp_level for x in self.index], dtype=bool)\n exp_values = self.index.values[mask]\n tm.assert_numpy_array_equal(join_index.values, exp_values)\n\n if how in ('outer', 'inner'):\n join_index2, ridx2, lidx2 = \\\n self.index.join(other, how=how, level='second',\n return_indexers=True)\n\n assert join_index.equals(join_index2)\n tm.assert_numpy_array_equal(lidx, lidx2)\n tm.assert_numpy_array_equal(ridx, ridx2)\n tm.assert_numpy_array_equal(join_index2.values, exp_values)\n\n def _check_all(other):\n _check_how(other, 'outer')\n _check_how(other, 'inner')\n _check_how(other, 'left')\n _check_how(other, 'right')\n\n _check_all(Index(['three', 'one', 'two']))\n _check_all(Index(['one']))\n _check_all(Index(['one', 'three']))\n\n # some corner cases\n idx = Index(['three', 'one', 'two'])\n result = idx.join(self.index, level='second')\n assert isinstance(result, MultiIndex)\n\n tm.assert_raises_regex(TypeError, \"Join.*MultiIndex.*ambiguous\",\n self.index.join, self.index, level=1)\n\n def test_join_self(self):\n kinds = 'outer', 'inner', 'left', 'right'\n for kind in kinds:\n res = self.index\n joined = res.join(res, how=kind)\n assert res is joined\n\n def test_join_multi(self):\n # GH 10665\n midx = pd.MultiIndex.from_product(\n [np.arange(4), np.arange(4)], names=['a', 'b'])\n idx = pd.Index([1, 2, 5], name='b')\n\n # inner\n jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)\n exp_idx = pd.MultiIndex.from_product(\n [np.arange(4), [1, 2]], names=['a', 'b'])\n exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)\n exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)\n tm.assert_index_equal(jidx, exp_idx)\n tm.assert_numpy_array_equal(lidx, exp_lidx)\n tm.assert_numpy_array_equal(ridx, exp_ridx)\n # flip\n jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)\n tm.assert_index_equal(jidx, exp_idx)\n tm.assert_numpy_array_equal(lidx, 
exp_lidx)\n tm.assert_numpy_array_equal(ridx, exp_ridx)\n\n # keep MultiIndex\n jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)\n exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,\n 1, -1], dtype=np.intp)\n tm.assert_index_equal(jidx, midx)\n assert lidx is None\n tm.assert_numpy_array_equal(ridx, exp_ridx)\n # flip\n jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)\n tm.assert_index_equal(jidx, midx)\n assert lidx is None\n tm.assert_numpy_array_equal(ridx, exp_ridx)\n\n def test_reindex(self):\n result, indexer = self.index.reindex(list(self.index[:4]))\n assert isinstance(result, MultiIndex)\n self.check_level_names(result, self.index[:4].names)\n\n result, indexer = self.index.reindex(list(self.index))\n assert isinstance(result, MultiIndex)\n assert indexer is None\n self.check_level_names(result, self.index.names)\n\n def test_reindex_level(self):\n idx = Index(['one'])\n\n target, indexer = self.index.reindex(idx, level='second')\n target2, indexer2 = idx.reindex(self.index, level='second')\n\n exp_index = self.index.join(idx, level='second', how='right')\n exp_index2 = self.index.join(idx, level='second', how='left')\n\n assert target.equals(exp_index)\n exp_indexer = np.array([0, 2, 4])\n tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)\n\n assert target2.equals(exp_index2)\n exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])\n tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)\n\n tm.assert_raises_regex(TypeError, \"Fill method not supported\",\n self.index.reindex, self.index,\n method='pad', level='second')\n\n tm.assert_raises_regex(TypeError, \"Fill method not supported\",\n idx.reindex, idx, method='bfill',\n level='first')\n\n def test_duplicates(self):\n assert not self.index.has_duplicates\n assert self.index.append(self.index).has_duplicates\n\n index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[\n [0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])\n assert index.has_duplicates\n\n # GH 9075\n t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),\n (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),\n (u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),\n (u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),\n (u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),\n (u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),\n (u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),\n (u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),\n (u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),\n (u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),\n (u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),\n (u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),\n (u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),\n (u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),\n (u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),\n (u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),\n (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),\n (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]\n\n index = pd.MultiIndex.from_tuples(t)\n assert not index.has_duplicates\n\n # handle int64 overflow if possible\n def check(nlevels, with_nulls):\n labels = np.tile(np.arange(500), 2)\n level = np.arange(500)\n\n if with_nulls: # inject some null values\n labels[500] = -1 # common nan value\n labels = list(labels.copy() for i in range(nlevels))\n for i in 
range(nlevels):\n labels[i][500 + i - nlevels // 2] = -1\n\n labels += [np.array([-1, 1]).repeat(500)]\n else:\n labels = [labels] * nlevels + [np.arange(2).repeat(500)]\n\n levels = [level] * nlevels + [[0, 1]]\n\n # no dups\n index = MultiIndex(levels=levels, labels=labels)\n assert not index.has_duplicates\n\n # with a dup\n if with_nulls:\n f = lambda a: np.insert(a, 1000, a[0])\n labels = list(map(f, labels))\n index = MultiIndex(levels=levels, labels=labels)\n else:\n values = index.values.tolist()\n index = MultiIndex.from_tuples(values + [values[0]])\n\n assert index.has_duplicates\n\n # no overflow\n check(4, False)\n check(4, True)\n\n # overflow possible\n check(8, False)\n check(8, True)\n\n # GH 9125\n n, k = 200, 5000\n levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]\n labels = [np.random.choice(n, k * n) for lev in levels]\n mi = MultiIndex(levels=levels, labels=labels)\n\n for keep in ['first', 'last', False]:\n left = mi.duplicated(keep=keep)\n right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)\n tm.assert_numpy_array_equal(left, right)\n\n # GH5873\n for a in [101, 102]:\n mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])\n assert not mi.has_duplicates\n assert mi.get_duplicates() == []\n tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(\n 2, dtype='bool'))\n\n for n in range(1, 6): # 1st level shape\n for m in range(1, 5): # 2nd level shape\n # all possible unique combinations, including nan\n lab = product(range(-1, n), range(-1, m))\n mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],\n labels=np.random.permutation(list(lab)).T)\n assert len(mi) == (n + 1) * (m + 1)\n assert not mi.has_duplicates\n assert mi.get_duplicates() == []\n tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(\n len(mi), dtype='bool'))\n\n def test_duplicate_meta_data(self):\n # GH 10115\n index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[\n [0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])\n for idx in [index,\n index.set_names([None, None]),\n index.set_names([None, 'Num']),\n index.set_names(['Upper', 'Num']), ]:\n assert idx.has_duplicates\n assert idx.drop_duplicates().names == idx.names\n\n def test_get_unique_index(self):\n idx = self.index[[0, 1, 0, 1, 1, 0, 0]]\n expected = self.index._shallow_copy(idx[[0, 1]])\n\n for dropna in [False, True]:\n result = idx._get_unique_index(dropna=dropna)\n assert result.unique\n tm.assert_index_equal(result, expected)\n\n def test_unique(self):\n mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]])\n\n res = mi.unique()\n exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]])\n tm.assert_index_equal(res, exp)\n\n mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')])\n res = mi.unique()\n exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')])\n tm.assert_index_equal(res, exp)\n\n mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')])\n res = mi.unique()\n exp = pd.MultiIndex.from_arrays([['a'], ['a']])\n tm.assert_index_equal(res, exp)\n\n def test_unique_datetimelike(self):\n idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',\n '2015-01-01', 'NaT', 'NaT'])\n idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',\n '2015-01-02', 'NaT', '2015-01-01'],\n tz='Asia/Tokyo')\n result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()\n\n eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])\n eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',\n 'NaT', '2015-01-01'],\n tz='Asia/Tokyo')\n exp = pd.MultiIndex.from_arrays([eidx1, 
eidx2])\n tm.assert_index_equal(result, exp)\n\n def test_tolist(self):\n result = self.index.tolist()\n exp = list(self.index.values)\n assert result == exp\n\n def test_repr_with_unicode_data(self):\n with pd.core.config.option_context(\"display.encoding\", 'UTF-8'):\n d = {\"a\": [u(\"\\u05d0\"), 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n index = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n assert \"\\\\u\" not in repr(index) # we don't want unicode-escaped\n\n def test_repr_roundtrip(self):\n\n mi = MultiIndex.from_product([list('ab'), range(3)],\n names=['first', 'second'])\n str(mi)\n\n if PY3:\n tm.assert_index_equal(eval(repr(mi)), mi, exact=True)\n else:\n result = eval(repr(mi))\n # string coerces to unicode\n tm.assert_index_equal(result, mi, exact=False)\n assert mi.get_level_values('first').inferred_type == 'string'\n assert result.get_level_values('first').inferred_type == 'unicode'\n\n mi_u = MultiIndex.from_product(\n [list(u'ab'), range(3)], names=['first', 'second'])\n result = eval(repr(mi_u))\n tm.assert_index_equal(result, mi_u, exact=True)\n\n # formatting\n if PY3:\n str(mi)\n else:\n compat.text_type(mi)\n\n # long format\n mi = MultiIndex.from_product([list('abcdefg'), range(10)],\n names=['first', 'second'])\n\n if PY3:\n tm.assert_index_equal(eval(repr(mi)), mi, exact=True)\n else:\n result = eval(repr(mi))\n # string coerces to unicode\n tm.assert_index_equal(result, mi, exact=False)\n assert mi.get_level_values('first').inferred_type == 'string'\n assert result.get_level_values('first').inferred_type == 'unicode'\n\n result = eval(repr(mi_u))\n tm.assert_index_equal(result, mi_u, exact=True)\n\n def test_str(self):\n # tested elsewhere\n pass\n\n def test_unicode_string_with_unicode(self):\n d = {\"a\": [u(\"\\u05d0\"), 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n idx = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n\n if PY3:\n str(idx)\n else:\n compat.text_type(idx)\n\n def test_bytestring_with_unicode(self):\n d = {\"a\": [u(\"\\u05d0\"), 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n idx = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n\n if PY3:\n bytes(idx)\n else:\n str(idx)\n\n def test_slice_keep_name(self):\n x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],\n names=['x', 'y'])\n assert x[1:].names == x.names\n\n def test_isna_behavior(self):\n # should not segfault GH5123\n # NOTE: if MI representation changes, may make sense to allow\n # isna(MI)\n with pytest.raises(NotImplementedError):\n pd.isna(self.index)\n\n def test_level_setting_resets_attributes(self):\n ind = MultiIndex.from_arrays([\n ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]\n ])\n assert ind.is_monotonic\n ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],\n inplace=True)\n\n # if this fails, probably didn't reset the cache correctly.\n assert not ind.is_monotonic\n\n def test_is_monotonic(self):\n i = MultiIndex.from_product([np.arange(10),\n np.arange(10)], names=['one', 'two'])\n assert i.is_monotonic\n assert i._is_strictly_monotonic_increasing\n assert Index(i.values).is_monotonic\n assert i._is_strictly_monotonic_increasing\n\n i = MultiIndex.from_product([np.arange(10, 0, -1),\n np.arange(10)], names=['one', 'two'])\n assert not i.is_monotonic\n assert not i._is_strictly_monotonic_increasing\n assert not Index(i.values).is_monotonic\n assert not Index(i.values)._is_strictly_monotonic_increasing\n\n i = MultiIndex.from_product([np.arange(10),\n np.arange(10, 0, -1)],\n names=['one', 'two'])\n assert not i.is_monotonic\n assert not 
i._is_strictly_monotonic_increasing\n assert not Index(i.values).is_monotonic\n assert not Index(i.values)._is_strictly_monotonic_increasing\n\n i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])\n assert not i.is_monotonic\n assert not i._is_strictly_monotonic_increasing\n assert not Index(i.values).is_monotonic\n assert not Index(i.values)._is_strictly_monotonic_increasing\n\n # string ordering\n i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n assert not i.is_monotonic\n assert not Index(i.values).is_monotonic\n assert not i._is_strictly_monotonic_increasing\n assert not Index(i.values)._is_strictly_monotonic_increasing\n\n i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],\n ['mom', 'next', 'zenith']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n assert i.is_monotonic\n assert Index(i.values).is_monotonic\n assert i._is_strictly_monotonic_increasing\n assert Index(i.values)._is_strictly_monotonic_increasing\n\n # mixed levels, hits the TypeError\n i = MultiIndex(\n levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237',\n 'nl0000289783',\n 'nl0000289965', 'nl0000301109']],\n labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],\n names=['household_id', 'asset_id'])\n\n assert not i.is_monotonic\n assert not i._is_strictly_monotonic_increasing\n\n def test_is_strictly_monotonic(self):\n idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']],\n labels=[[0, 0, 1, 1], [0, 0, 0, 1]])\n assert idx.is_monotonic_increasing\n assert not idx._is_strictly_monotonic_increasing\n\n @pytest.mark.xfail(reason=\"buggy MultiIndex.is_monotonic_decresaing.\")\n def test__is_strictly_monotonic_decreasing(self):\n idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']],\n labels=[[0, 0, 1, 1], [0, 0, 0, 1]])\n assert idx.is_monotonic_decreasing\n assert not idx._is_strictly_monotonic_decreasing\n\n def test_reconstruct_sort(self):\n\n # starts off lexsorted & monotonic\n mi = MultiIndex.from_arrays([\n ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]\n ])\n assert mi.is_lexsorted()\n assert mi.is_monotonic\n\n recons = mi._sort_levels_monotonic()\n assert recons.is_lexsorted()\n assert recons.is_monotonic\n assert mi is recons\n\n assert mi.equals(recons)\n assert Index(mi.values).equals(Index(recons.values))\n\n # cannot convert to lexsorted\n mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'),\n ('x', 'b'), ('y', 'a'), ('z', 'b')],\n names=['one', 'two'])\n assert not mi.is_lexsorted()\n assert not mi.is_monotonic\n\n recons = mi._sort_levels_monotonic()\n assert not recons.is_lexsorted()\n assert not recons.is_monotonic\n\n assert mi.equals(recons)\n assert Index(mi.values).equals(Index(recons.values))\n\n # cannot convert to lexsorted\n mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],\n labels=[[0, 1, 0, 2], [2, 0, 0, 1]],\n names=['col1', 'col2'])\n assert not mi.is_lexsorted()\n assert not mi.is_monotonic\n\n recons = mi._sort_levels_monotonic()\n assert not recons.is_lexsorted()\n assert not recons.is_monotonic\n\n assert mi.equals(recons)\n assert Index(mi.values).equals(Index(recons.values))\n\n def test_reconstruct_remove_unused(self):\n # xref to GH 2770\n df = DataFrame([['deleteMe', 1, 9],\n ['keepMe', 2, 9],\n ['keepMeToo', 3, 9]],\n columns=['first', 'second', 'third'])\n df2 = df.set_index(['first', 'second'], drop=False)\n df2 = df2[df2['first'] 
!= 'deleteMe']\n\n # removed levels are there\n expected = MultiIndex(levels=[['deleteMe', 'keepMe', 'keepMeToo'],\n [1, 2, 3]],\n labels=[[1, 2], [1, 2]],\n names=['first', 'second'])\n result = df2.index\n tm.assert_index_equal(result, expected)\n\n expected = MultiIndex(levels=[['keepMe', 'keepMeToo'],\n [2, 3]],\n labels=[[0, 1], [0, 1]],\n names=['first', 'second'])\n result = df2.index.remove_unused_levels()\n tm.assert_index_equal(result, expected)\n\n # idempotent\n result2 = result.remove_unused_levels()\n tm.assert_index_equal(result2, expected)\n assert result2.is_(result)\n\n @pytest.mark.parametrize('first_type,second_type', [\n ('int64', 'int64'),\n ('datetime64[D]', 'str')])\n def test_remove_unused_levels_large(self, first_type, second_type):\n # GH16556\n\n # because tests should be deterministic (and this test in particular\n # checks that levels are removed, which is not the case for every\n # random input):\n rng = np.random.RandomState(4) # seed is arbitrary value that works\n\n size = 1 << 16\n df = DataFrame(dict(\n first=rng.randint(0, 1 << 13, size).astype(first_type),\n second=rng.randint(0, 1 << 10, size).astype(second_type),\n third=rng.rand(size)))\n df = df.groupby(['first', 'second']).sum()\n df = df[df.third < 0.1]\n\n result = df.index.remove_unused_levels()\n assert len(result.levels[0]) < len(df.index.levels[0])\n assert len(result.levels[1]) < len(df.index.levels[1])\n assert result.equals(df.index)\n\n expected = df.reset_index().set_index(['first', 'second']).index\n tm.assert_index_equal(result, expected)\n\n def test_isin(self):\n values = [('foo', 2), ('bar', 3), ('quux', 4)]\n\n idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(\n 4)])\n result = idx.isin(values)\n expected = np.array([False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n # empty, return dtype bool\n idx = MultiIndex.from_arrays([[], []])\n result = idx.isin(values)\n assert len(result) == 0\n assert result.dtype == np.bool_\n\n def test_isin_nan(self):\n idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])\n tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),\n np.array([False, False]))\n tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),\n np.array([False, False]))\n\n def test_isin_level_kwarg(self):\n idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(\n 4)])\n\n vals_0 = ['foo', 'bar', 'quux']\n vals_1 = [2, 3, 10]\n\n expected = np.array([False, False, True, True])\n tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))\n tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))\n\n tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))\n tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))\n\n pytest.raises(IndexError, idx.isin, vals_0, level=5)\n pytest.raises(IndexError, idx.isin, vals_0, level=-5)\n\n pytest.raises(KeyError, idx.isin, vals_0, level=1.0)\n pytest.raises(KeyError, idx.isin, vals_1, level=-1.0)\n pytest.raises(KeyError, idx.isin, vals_1, level='A')\n\n idx.names = ['A', 'B']\n tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A'))\n tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B'))\n\n pytest.raises(KeyError, idx.isin, vals_1, level='C')\n\n def test_reindex_preserves_names_when_target_is_list_or_ndarray(self):\n # GH6552\n idx = self.index.copy()\n target = idx.copy()\n idx.names = target.names = [None, None]\n\n other_dtype = pd.MultiIndex.from_product([[1, 2], [3, 4]])\n\n # list & ndarray cases\n 
assert idx.reindex([])[0].names == [None, None]\n assert idx.reindex(np.array([]))[0].names == [None, None]\n assert idx.reindex(target.tolist())[0].names == [None, None]\n assert idx.reindex(target.values)[0].names == [None, None]\n assert idx.reindex(other_dtype.tolist())[0].names == [None, None]\n assert idx.reindex(other_dtype.values)[0].names == [None, None]\n\n idx.names = ['foo', 'bar']\n assert idx.reindex([])[0].names == ['foo', 'bar']\n assert idx.reindex(np.array([]))[0].names == ['foo', 'bar']\n assert idx.reindex(target.tolist())[0].names == ['foo', 'bar']\n assert idx.reindex(target.values)[0].names == ['foo', 'bar']\n assert idx.reindex(other_dtype.tolist())[0].names == ['foo', 'bar']\n assert idx.reindex(other_dtype.values)[0].names == ['foo', 'bar']\n\n def test_reindex_lvl_preserves_names_when_target_is_list_or_array(self):\n # GH7774\n idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']],\n names=['foo', 'bar'])\n assert idx.reindex([], level=0)[0].names == ['foo', 'bar']\n assert idx.reindex([], level=1)[0].names == ['foo', 'bar']\n\n def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array(self):\n # GH7774\n idx = pd.MultiIndex.from_product([[0, 1], ['a', 'b']])\n assert idx.reindex([], level=0)[0].levels[0].dtype.type == np.int64\n assert idx.reindex([], level=1)[0].levels[1].dtype.type == np.object_\n\n def test_groupby(self):\n groups = self.index.groupby(np.array([1, 1, 1, 2, 2, 2]))\n labels = self.index.get_values().tolist()\n exp = {1: labels[:3], 2: labels[3:]}\n tm.assert_dict_equal(groups, exp)\n\n # GH5620\n groups = self.index.groupby(self.index)\n exp = dict((key, [key]) for key in self.index)\n tm.assert_dict_equal(groups, exp)\n\n def test_index_name_retained(self):\n # GH9857\n result = pd.DataFrame({'x': [1, 2, 6],\n 'y': [2, 2, 8],\n 'z': [-5, 0, 5]})\n result = result.set_index('z')\n result.loc[10] = [9, 10]\n df_expected = pd.DataFrame({'x': [1, 2, 6, 9],\n 'y': [2, 2, 8, 10],\n 'z': [-5, 0, 5, 10]})\n df_expected = df_expected.set_index('z')\n tm.assert_frame_equal(result, df_expected)\n\n def test_equals_operator(self):\n # GH9785\n assert (self.index == self.index).all()\n\n def test_large_multiindex_error(self):\n # GH12527\n df_below_1000000 = pd.DataFrame(\n 1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]),\n columns=['dest'])\n with pytest.raises(KeyError):\n df_below_1000000.loc[(-1, 0), 'dest']\n with pytest.raises(KeyError):\n df_below_1000000.loc[(3, 0), 'dest']\n df_above_1000000 = pd.DataFrame(\n 1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]),\n columns=['dest'])\n with pytest.raises(KeyError):\n df_above_1000000.loc[(-1, 0), 'dest']\n with pytest.raises(KeyError):\n df_above_1000000.loc[(3, 0), 'dest']\n\n def test_partial_string_timestamp_multiindex(self):\n # GH10331\n dr = pd.date_range('2016-01-01', '2016-01-03', freq='12H')\n abc = ['a', 'b', 'c']\n ix = pd.MultiIndex.from_product([dr, abc])\n df = pd.DataFrame({'c1': range(0, 15)}, index=ix)\n idx = pd.IndexSlice\n\n # c1\n # 2016-01-01 00:00:00 a 0\n # b 1\n # c 2\n # 2016-01-01 12:00:00 a 3\n # b 4\n # c 5\n # 2016-01-02 00:00:00 a 6\n # b 7\n # c 8\n # 2016-01-02 12:00:00 a 9\n # b 10\n # c 11\n # 2016-01-03 00:00:00 a 12\n # b 13\n # c 14\n\n # partial string matching on a single index\n for df_swap in (df.swaplevel(),\n df.swaplevel(0),\n df.swaplevel(0, 1)):\n df_swap = df_swap.sort_index()\n just_a = df_swap.loc['a']\n result = just_a.loc['2016-01-01']\n expected = df.loc[idx[:, 'a'], :].iloc[0:2]\n expected.index = 
expected.index.droplevel(1)\n tm.assert_frame_equal(result, expected)\n\n # indexing with IndexSlice\n result = df.loc[idx['2016-01-01':'2016-02-01', :], :]\n expected = df\n tm.assert_frame_equal(result, expected)\n\n # match on secondary index\n result = df_swap.loc[idx[:, '2016-01-01':'2016-01-01'], :]\n expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]]\n tm.assert_frame_equal(result, expected)\n\n # Even though this syntax works on a single index, this is somewhat\n # ambiguous and we don't want to extend this behavior forward to work\n # in multi-indexes. This would amount to selecting a scalar from a\n # column.\n with pytest.raises(KeyError):\n df['2016-01-01']\n\n # partial string match on year only\n result = df.loc['2016']\n expected = df\n tm.assert_frame_equal(result, expected)\n\n # partial string match on date\n result = df.loc['2016-01-01']\n expected = df.iloc[0:6]\n tm.assert_frame_equal(result, expected)\n\n # partial string match on date and hour, from middle\n result = df.loc['2016-01-02 12']\n expected = df.iloc[9:12]\n tm.assert_frame_equal(result, expected)\n\n # partial string match on secondary index\n result = df_swap.loc[idx[:, '2016-01-02'], :]\n expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]]\n tm.assert_frame_equal(result, expected)\n\n # tuple selector with partial string match on date\n result = df.loc[('2016-01-01', 'a'), :]\n expected = df.iloc[[0, 3]]\n tm.assert_frame_equal(result, expected)\n\n # Slicing date on first level should break (of course)\n with pytest.raises(KeyError):\n df_swap.loc['2016-01-01']\n\n # GH12685 (partial string with daily resolution or below)\n dr = date_range('2013-01-01', periods=100, freq='D')\n ix = MultiIndex.from_product([dr, ['a', 'b']])\n df = DataFrame(np.random.randn(200, 1), columns=['A'], index=ix)\n\n result = df.loc[idx['2013-03':'2013-03', :], :]\n expected = df.iloc[118:180]\n tm.assert_frame_equal(result, expected)\n\n def test_rangeindex_fallback_coercion_bug(self):\n # GH 12893\n foo = pd.DataFrame(np.arange(100).reshape((10, 10)))\n bar = pd.DataFrame(np.arange(100).reshape((10, 10)))\n df = pd.concat({'foo': foo.stack(), 'bar': bar.stack()}, axis=1)\n df.index.names = ['fizz', 'buzz']\n\n str(df)\n expected = pd.DataFrame({'bar': np.arange(100),\n 'foo': np.arange(100)},\n index=pd.MultiIndex.from_product(\n [range(10), range(10)],\n names=['fizz', 'buzz']))\n tm.assert_frame_equal(df, expected, check_like=True)\n\n result = df.index.get_level_values('fizz')\n expected = pd.Int64Index(np.arange(10), name='fizz').repeat(10)\n tm.assert_index_equal(result, expected)\n\n result = df.index.get_level_values('buzz')\n expected = pd.Int64Index(np.tile(np.arange(10), 10), name='buzz')\n tm.assert_index_equal(result, expected)\n\n def test_dropna(self):\n # GH 6194\n idx = pd.MultiIndex.from_arrays([[1, np.nan, 3, np.nan, 5],\n [1, 2, np.nan, np.nan, 5],\n ['a', 'b', 'c', np.nan, 'e']])\n\n exp = pd.MultiIndex.from_arrays([[1, 5],\n [1, 5],\n ['a', 'e']])\n tm.assert_index_equal(idx.dropna(), exp)\n tm.assert_index_equal(idx.dropna(how='any'), exp)\n\n exp = pd.MultiIndex.from_arrays([[1, np.nan, 3, 5],\n [1, 2, np.nan, 5],\n ['a', 'b', 'c', 'e']])\n tm.assert_index_equal(idx.dropna(how='all'), exp)\n\n msg = \"invalid how option: xxx\"\n with tm.assert_raises_regex(ValueError, msg):\n idx.dropna(how='xxx')\n\n def test_unsortedindex(self):\n # GH 11897\n mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'),\n ('x', 'b'), ('y', 'a'), ('z', 'b')],\n names=['one', 'two'])\n df = pd.DataFrame([[i, 10 * i] for i 
in lrange(6)], index=mi,\n columns=['one', 'two'])\n\n # GH 16734: not sorted, but no real slicing\n result = df.loc(axis=0)['z', 'a']\n expected = df.iloc[0]\n tm.assert_series_equal(result, expected)\n\n with pytest.raises(UnsortedIndexError):\n df.loc(axis=0)['z', slice('a')]\n df.sort_index(inplace=True)\n assert len(df.loc(axis=0)['z', :]) == 2\n\n with pytest.raises(KeyError):\n df.loc(axis=0)['q', :]\n\n def test_unsortedindex_doc_examples(self):\n # http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex # noqa\n dfm = DataFrame({'jim': [0, 0, 1, 1],\n 'joe': ['x', 'x', 'z', 'y'],\n 'jolie': np.random.rand(4)})\n\n dfm = dfm.set_index(['jim', 'joe'])\n with tm.assert_produces_warning(PerformanceWarning):\n dfm.loc[(1, 'z')]\n\n with pytest.raises(UnsortedIndexError):\n dfm.loc[(0, 'y'):(1, 'z')]\n\n assert not dfm.index.is_lexsorted()\n assert dfm.index.lexsort_depth == 1\n\n # sort it\n dfm = dfm.sort_index()\n dfm.loc[(1, 'z')]\n dfm.loc[(0, 'y'):(1, 'z')]\n\n assert dfm.index.is_lexsorted()\n assert dfm.index.lexsort_depth == 2\n\n def test_tuples_with_name_string(self):\n # GH 15110 and GH 14848\n\n li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]\n with pytest.raises(ValueError):\n pd.Index(li, name='abc')\n with pytest.raises(ValueError):\n pd.Index(li, name='a')\n\n def test_nan_stays_float(self):\n\n # GH 7031\n idx0 = pd.MultiIndex(levels=[[\"A\", \"B\"], []],\n labels=[[1, 0], [-1, -1]],\n names=[0, 1])\n idx1 = pd.MultiIndex(levels=[[\"C\"], [\"D\"]],\n labels=[[0], [0]],\n names=[0, 1])\n idxm = idx0.join(idx1, how='outer')\n assert pd.isna(idx0.get_level_values(1)).all()\n # the following failed in 0.14.1\n assert pd.isna(idxm.get_level_values(1)[:-1]).all()\n\n df0 = pd.DataFrame([[1, 2]], index=idx0)\n df1 = pd.DataFrame([[3, 4]], index=idx1)\n dfm = df0 - df1\n assert pd.isna(df0.index.get_level_values(1)).all()\n # the following failed in 0.14.1\n assert pd.isna(dfm.index.get_level_values(1)[:-1]).all()\n", "import numpy\nimport six\n\nimport chainer\nfrom chainer import cuda\nfrom chainer.functions.array import permutate\nfrom chainer.functions.array import transpose_sequence\nfrom chainer.functions.connection import n_step_gru as rnn\nfrom chainer import link\nfrom chainer.links.connection.n_step_rnn import argsort_list_descent\nfrom chainer.links.connection.n_step_rnn import permutate_list\n\n\nclass NStepGRUBase(link.ChainList):\n\n \"\"\"Base link class for Stacked GRU/BiGRU links.\n\n This link is base link class for :func:`chainer.links.NStepRNN` and\n :func:`chainer.links.NStepBiRNN`.\n This link's behavior depends on argument, ``use_bi_direction``.\n\n Args:\n n_layers (int): Number of layers.\n in_size (int): Dimensionality of input vectors.\n out_size (int): Dimensionality of hidden states and output vectors.\n dropout (float): Dropout ratio.\n use_cudnn (bool): Use cuDNN.\n use_bi_direction (bool): if ``True``, use Bi-directional GRU.\n if ``False``, use Uni-directional GRU.\n .. 
seealso::\n :func:`chainer.links.NStepGRU`\n :func:`chainer.links.NStepBiGRU`\n\n \"\"\"\n\n def __init__(self, n_layers, in_size, out_size, dropout, use_cudnn,\n use_bi_direction):\n weights = []\n direction = 2 if use_bi_direction else 1\n for i in six.moves.range(n_layers):\n for di in six.moves.range(direction):\n weight = link.Link()\n for j in six.moves.range(6):\n if i == 0 and j < 3:\n w_in = in_size\n elif i > 0 and j < 3:\n w_in = out_size * direction\n else:\n w_in = out_size\n weight.add_param('w%d' % j, (out_size, w_in))\n weight.add_param('b%d' % j, (out_size,))\n getattr(weight, 'w%d' % j).data[...] = numpy.random.normal(\n 0, numpy.sqrt(1. / w_in), (out_size, w_in))\n getattr(weight, 'b%d' % j).data[...] = 0\n weights.append(weight)\n\n super(NStepGRUBase, self).__init__(*weights)\n\n self.n_layers = n_layers\n self.dropout = dropout\n self.use_cudnn = use_cudnn\n self.out_size = out_size\n self.direction = direction\n self.rnn = rnn.n_step_bigru if use_bi_direction else rnn.n_step_gru\n\n def init_hx(self, xs):\n with cuda.get_device_from_id(self._device_id):\n hx = chainer.Variable(\n self.xp.zeros((self.n_layers * self.direction,\n len(xs), self.out_size),\n dtype=xs[0].dtype),\n volatile='auto')\n return hx\n\n def __call__(self, hx, xs, train=True):\n \"\"\"Calculate all hidden states and cell states.\n\n Args:\n hx (~chainer.Variable or None): Initial hidden states. If ``None``\n is specified zero-vector is used.\n xs (list of ~chianer.Variable): List of input sequences.\n Each element ``xs[i]`` is a :class:`chainer.Variable` holding\n a sequence.\n\n \"\"\"\n assert isinstance(xs, (list, tuple))\n indices = argsort_list_descent(xs)\n\n xs = permutate_list(xs, indices, inv=False)\n if hx is None:\n hx = self.init_hx(xs)\n else:\n hx = permutate.permutate(hx, indices, axis=1, inv=False)\n\n trans_x = transpose_sequence.transpose_sequence(xs)\n\n ws = [[w.w0, w.w1, w.w2, w.w3, w.w4, w.w5] for w in self]\n bs = [[w.b0, w.b1, w.b2, w.b3, w.b4, w.b5] for w in self]\n\n hy, trans_y = self.rnn(\n self.n_layers, self.dropout, hx, ws, bs, trans_x,\n train=train, use_cudnn=self.use_cudnn)\n\n hy = permutate.permutate(hy, indices, axis=1, inv=True)\n ys = transpose_sequence.transpose_sequence(trans_y)\n ys = permutate_list(ys, indices, inv=True)\n\n return hy, ys\n\n\nclass NStepGRU(NStepGRUBase):\n\n \"\"\"Stacked Uni-directional GRU for sequnces.\n\n This link is stacked version of Uni-directional GRU for sequences.\n It calculates hidden and cell states of all layer at end-of-string,\n and all hidden states of the last layer for each time.\n\n Unlike :func:`chainer.functions.n_step_gru`, this function automatically\n sort inputs in descending order by length, and transpose the seuqnece.\n Users just need to call the link with a list of :class:`chainer.Variable`\n holding sequences.\n\n Args:\n n_layers (int): Number of layers.\n in_size (int): Dimensionality of input vectors.\n out_size (int): Dimensionality of hidden states and output vectors.\n dropout (float): Dropout ratio.\n use_cudnn (bool): Use cuDNN.\n\n .. 
seealso::\n :func:`chainer.functions.n_step_gru`\n\n \"\"\"\n\n def __init__(self, n_layers, in_size, out_size, dropout, use_cudnn=True):\n NStepGRUBase.__init__(self, n_layers, in_size, out_size, dropout,\n use_cudnn, use_bi_direction=False)\n\n\nclass NStepBiGRU(NStepGRUBase):\n\n \"\"\"Stacked Bi-directional GRU for sequnces.\n\n This link is stacked version of Bi-directional GRU for sequences.\n It calculates hidden and cell states of all layer at end-of-string,\n and all hidden states of the last layer for each time.\n\n Unlike :func:`chainer.functions.n_step_bigru`, this function automatically\n sort inputs in descending order by length, and transpose the seuqnece.\n Users just need to call the link with a list of :class:`chainer.Variable`\n holding sequences.\n\n Args:\n n_layers (int): Number of layers.\n in_size (int): Dimensionality of input vectors.\n out_size (int): Dimensionality of hidden states and output vectors.\n dropout (float): Dropout ratio.\n use_cudnn (bool): Use cuDNN.\n\n .. seealso::\n :func:`chainer.functions.n_step_bigru`\n\n \"\"\"\n\n def __init__(self, n_layers, in_size, out_size, dropout, use_cudnn=True):\n NStepGRUBase.__init__(self, n_layers, in_size, out_size, dropout,\n use_cudnn, use_bi_direction=True)\n", "# pylint: disable=E1101,W0232\n\nimport numpy as np\nfrom warnings import warn\nimport types\n\nfrom pandas import compat\nfrom pandas.compat import u, lzip\nfrom pandas._libs import lib, algos as libalgos\n\nfrom pandas.core.dtypes.generic import (\n ABCSeries, ABCIndexClass, ABCCategoricalIndex)\nfrom pandas.core.dtypes.missing import isna, notna\nfrom pandas.core.dtypes.cast import (\n maybe_infer_to_datetimelike,\n coerce_indexer_dtype)\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas.core.dtypes.common import (\n _ensure_int64,\n _ensure_object,\n _ensure_platform_int,\n is_dtype_equal,\n is_datetimelike,\n is_categorical,\n is_categorical_dtype,\n is_integer_dtype, is_bool,\n is_list_like, is_sequence,\n is_scalar)\nfrom pandas.core.common import is_null_slice\n\nfrom pandas.core.algorithms import factorize, take_1d, unique1d\nfrom pandas.core.base import (PandasObject, PandasDelegate,\n NoNewAttributesMixin, _shared_docs)\nimport pandas.core.common as com\nfrom pandas.core.missing import interpolate_2d\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import (\n Appender, cache_readonly, deprecate_kwarg, Substitution)\n\nfrom pandas.io.formats.terminal import get_terminal_size\nfrom pandas.util._validators import validate_bool_kwarg\nfrom pandas.core.config import get_option\n\n\ndef _cat_compare_op(op):\n def f(self, other):\n # On python2, you can usually compare any type to any type, and\n # Categoricals can be seen as a custom type, but having different\n # results depending whether categories are the same or not is kind of\n # insane, so be a bit stricter here and use the python3 idea of\n # comparing only things of equal type.\n if not self.ordered:\n if op in ['__lt__', '__gt__', '__le__', '__ge__']:\n raise TypeError(\"Unordered Categoricals can only compare \"\n \"equality or not\")\n if isinstance(other, Categorical):\n # Two Categoricals can only be be compared if the categories are\n # the same (maybe up to ordering, depending on ordered)\n\n msg = (\"Categoricals can only be compared if \"\n \"'categories' are the same.\")\n if len(self.categories) != len(other.categories):\n raise TypeError(msg + \" Categories are different lengths\")\n elif (self.ordered and not (self.categories 
==\n other.categories).all()):\n raise TypeError(msg)\n elif not set(self.categories) == set(other.categories):\n raise TypeError(msg)\n\n if not (self.ordered == other.ordered):\n raise TypeError(\"Categoricals can only be compared if \"\n \"'ordered' is the same\")\n if not self.ordered and not self.categories.equals(\n other.categories):\n # both unordered and different order\n other_codes = _get_codes_for_values(other, self.categories)\n else:\n other_codes = other._codes\n\n na_mask = (self._codes == -1) | (other_codes == -1)\n f = getattr(self._codes, op)\n ret = f(other_codes)\n if na_mask.any():\n # In other series, the leads to False, so do that here too\n ret[na_mask] = False\n return ret\n\n # Numpy-1.9 and earlier may convert a scalar to a zerodim array during\n # comparison operation when second arg has higher priority, e.g.\n #\n # cat[0] < cat\n #\n # With cat[0], for example, being ``np.int64(1)`` by the time it gets\n # into this function would become ``np.array(1)``.\n other = lib.item_from_zerodim(other)\n if is_scalar(other):\n if other in self.categories:\n i = self.categories.get_loc(other)\n return getattr(self._codes, op)(i)\n else:\n if op == '__eq__':\n return np.repeat(False, len(self))\n elif op == '__ne__':\n return np.repeat(True, len(self))\n else:\n msg = (\"Cannot compare a Categorical for op {op} with a \"\n \"scalar, which is not a category.\")\n raise TypeError(msg.format(op=op))\n else:\n\n # allow categorical vs object dtype array comparisons for equality\n # these are only positional comparisons\n if op in ['__eq__', '__ne__']:\n return getattr(np.array(self), op)(np.array(other))\n\n msg = (\"Cannot compare a Categorical for op {op} with type {typ}.\"\n \"\\nIf you want to compare values, use 'np.asarray(cat) \"\n \"<op> other'.\")\n raise TypeError(msg.format(op=op, typ=type(other)))\n\n f.__name__ = op\n\n return f\n\n\ndef maybe_to_categorical(array):\n \"\"\" coerce to a categorical if a series is given \"\"\"\n if isinstance(array, (ABCSeries, ABCCategoricalIndex)):\n return array._values\n return array\n\n\n_codes_doc = \"\"\"The category codes of this categorical.\n\nLevel codes are an array if integer which are the positions of the real\nvalues in the categories array.\n\nThere is not setter, use the other categorical methods and the normal item\nsetter to change values in the categorical.\n\"\"\"\n\n_categories_doc = \"\"\"The categories of this categorical.\n\nSetting assigns new values to each category (effectively a rename of\neach individual category).\n\nThe assigned value has to be a list-like object. All items must be unique and\nthe number of items in the new categories must be the same as the number of\nitems in the old categories.\n\nAssigning to `categories` is a inplace operation!\n\nRaises\n------\nValueError\n If the new categories do not validate as categories or if the number of new\n categories is unequal the number of old categories\n\nSee also\n--------\nrename_categories\nreorder_categories\nadd_categories\nremove_categories\nremove_unused_categories\nset_categories\n\"\"\"\n\n\nclass Categorical(PandasObject):\n \"\"\"\n Represents a categorical variable in classic R / S-plus fashion\n\n `Categoricals` can only take on only a limited, and usually fixed, number\n of possible values (`categories`). In contrast to statistical categorical\n variables, a `Categorical` might have an order, but numerical operations\n (additions, divisions, ...) 
are not possible.\n\n All values of the `Categorical` are either in `categories` or `np.nan`.\n Assigning values outside of `categories` will raise a `ValueError`. Order\n is defined by the order of the `categories`, not lexical order of the\n values.\n\n Parameters\n ----------\n values : list-like\n The values of the categorical. If categories are given, values not in\n categories will be replaced with NaN.\n categories : Index-like (unique), optional\n The unique categories for this categorical. If not given, the\n categories are assumed to be the unique values of values.\n ordered : boolean, (default False)\n Whether or not this categorical is treated as a ordered categorical.\n If not given, the resulting categorical will not be ordered.\n\n Attributes\n ----------\n categories : Index\n The categories of this categorical\n codes : ndarray\n The codes (integer positions, which point to the categories) of this\n categorical, read only.\n ordered : boolean\n Whether or not this Categorical is ordered.\n\n Raises\n ------\n ValueError\n If the categories do not validate.\n TypeError\n If an explicit ``ordered=True`` is given but no `categories` and the\n `values` are not sortable.\n\n\n Examples\n --------\n >>> from pandas import Categorical\n >>> Categorical([1, 2, 3, 1, 2, 3])\n [1, 2, 3, 1, 2, 3]\n Categories (3, int64): [1 < 2 < 3]\n\n >>> Categorical(['a', 'b', 'c', 'a', 'b', 'c'])\n [a, b, c, a, b, c]\n Categories (3, object): [a < b < c]\n\n >>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a'],\n ordered=True)\n >>> a.min()\n 'c'\n \"\"\"\n dtype = CategoricalDtype()\n \"\"\"The dtype (always \"category\")\"\"\"\n \"\"\"Whether or not this Categorical is ordered.\n\n Only ordered `Categoricals` can be sorted (according to the order\n of the categories) and have a min and max value.\n\n See also\n --------\n Categorical.sort\n Categorical.order\n Categorical.min\n Categorical.max\n \"\"\"\n\n # For comparisons, so that numpy uses our implementation if the compare\n # ops, which raise\n __array_priority__ = 1000\n _typ = 'categorical'\n\n def __init__(self, values, categories=None, ordered=False, fastpath=False):\n\n self._validate_ordered(ordered)\n\n if fastpath:\n # fast path\n self._codes = coerce_indexer_dtype(values, categories)\n self._categories = self._validate_categories(\n categories, fastpath=isinstance(categories, ABCIndexClass))\n self._ordered = ordered\n return\n\n # sanitize input\n if is_categorical_dtype(values):\n\n # we are either a Series or a CategoricalIndex\n if isinstance(values, (ABCSeries, ABCCategoricalIndex)):\n values = values._values\n\n if ordered is None:\n ordered = values.ordered\n if categories is None:\n categories = values.categories\n values = values.get_values()\n\n elif isinstance(values, (ABCIndexClass, ABCSeries)):\n pass\n\n else:\n\n # on numpy < 1.6 datetimelike get inferred to all i8 by\n # _sanitize_array which is fine, but since factorize does this\n # correctly no need here this is an issue because _sanitize_array\n # also coerces np.nan to a string under certain versions of numpy\n # as well\n values = maybe_infer_to_datetimelike(values, convert_dates=True)\n if not isinstance(values, np.ndarray):\n values = _convert_to_list_like(values)\n from pandas.core.series import _sanitize_array\n # On list with NaNs, int values will be converted to float. Use\n # \"object\" dtype to prevent this. In the end objects will be\n # casted to int/... 
in the category assignment step.\n if len(values) == 0 or isna(values).any():\n dtype = 'object'\n else:\n dtype = None\n values = _sanitize_array(values, None, dtype=dtype)\n\n if categories is None:\n try:\n codes, categories = factorize(values, sort=True)\n except TypeError:\n codes, categories = factorize(values, sort=False)\n if ordered:\n # raise, as we don't have a sortable data structure and so\n # the user should give us one by specifying categories\n raise TypeError(\"'values' is not ordered, please \"\n \"explicitly specify the categories order \"\n \"by passing in a categories argument.\")\n except ValueError:\n\n # FIXME\n raise NotImplementedError(\"> 1 ndim Categorical are not \"\n \"supported at this time\")\n\n categories = self._validate_categories(categories)\n\n else:\n # there were two ways if categories are present\n # - the old one, where each value is a int pointer to the levels\n # array -> not anymore possible, but code outside of pandas could\n # call us like that, so make some checks\n # - the new one, where each value is also in the categories array\n # (or np.nan)\n\n # make sure that we always have the same type here, no matter what\n # we get passed in\n categories = self._validate_categories(categories)\n codes = _get_codes_for_values(values, categories)\n\n # TODO: check for old style usage. These warnings should be removes\n # after 0.18/ in 2016\n if is_integer_dtype(values) and not is_integer_dtype(categories):\n warn(\"Values and categories have different dtypes. Did you \"\n \"mean to use\\n'Categorical.from_codes(codes, \"\n \"categories)'?\", RuntimeWarning, stacklevel=2)\n\n if (len(values) and is_integer_dtype(values) and\n (codes == -1).all()):\n warn(\"None of the categories were found in values. Did you \"\n \"mean to use\\n'Categorical.from_codes(codes, \"\n \"categories)'?\", RuntimeWarning, stacklevel=2)\n\n self.set_ordered(ordered or False, inplace=True)\n self._categories = categories\n self._codes = coerce_indexer_dtype(codes, categories)\n\n def __dir__(self):\n # Avoid IPython warnings for deprecated properties\n # https://github.com/pandas-dev/pandas/issues/16409\n rv = set(dir(type(self)))\n rv.discard(\"labels\")\n return sorted(rv)\n\n @property\n def _constructor(self):\n return Categorical\n\n def copy(self):\n \"\"\" Copy constructor. \"\"\"\n return self._constructor(values=self._codes.copy(),\n categories=self.categories,\n ordered=self.ordered,\n fastpath=True)\n\n def astype(self, dtype, copy=True):\n \"\"\"\n Coerce this type to another dtype\n\n Parameters\n ----------\n dtype : numpy dtype or pandas type\n copy : bool, default True\n By default, astype always returns a newly allocated object.\n If copy is set to False and dtype is categorical, the original\n object is returned.\n\n .. versionadded:: 0.19.0\n\n \"\"\"\n if is_categorical_dtype(dtype):\n if copy is True:\n return self.copy()\n return self\n return np.array(self, dtype=dtype, copy=copy)\n\n @cache_readonly\n def ndim(self):\n \"\"\"Number of dimensions of the Categorical \"\"\"\n return self._codes.ndim\n\n @cache_readonly\n def size(self):\n \"\"\" return the len of myself \"\"\"\n return len(self)\n\n @cache_readonly\n def itemsize(self):\n \"\"\" return the size of a single category \"\"\"\n return self.categories.itemsize\n\n def reshape(self, new_shape, *args, **kwargs):\n \"\"\"\n .. 
deprecated:: 0.19.0\n Calling this method will raise an error in a future release.\n\n An ndarray-compatible method that returns `self` because\n `Categorical` instances cannot actually be reshaped.\n\n Parameters\n ----------\n new_shape : int or tuple of ints\n A 1-D array of integers that correspond to the new\n shape of the `Categorical`. For more information on\n the parameter, please refer to `np.reshape`.\n \"\"\"\n warn(\"reshape is deprecated and will raise \"\n \"in a subsequent release\", FutureWarning, stacklevel=2)\n\n nv.validate_reshape(args, kwargs)\n\n # while the 'new_shape' parameter has no effect,\n # we should still enforce valid shape parameters\n np.reshape(self.codes, new_shape)\n\n return self\n\n @property\n def base(self):\n \"\"\" compat, we are always our own object \"\"\"\n return None\n\n @classmethod\n def from_array(cls, data, **kwargs):\n \"\"\"\n .. deprecated:: 0.19.0\n Use ``Categorical`` instead.\n\n Make a Categorical type from a single array-like object.\n\n For internal compatibility with numpy arrays.\n\n Parameters\n ----------\n data : array-like\n Can be an Index or array-like. The categories are assumed to be\n the unique values of `data`.\n \"\"\"\n warn(\"Categorical.from_array is deprecated, use Categorical instead\",\n FutureWarning, stacklevel=2)\n return cls(data, **kwargs)\n\n @classmethod\n def from_codes(cls, codes, categories, ordered=False):\n \"\"\"\n Make a Categorical type from codes and categories arrays.\n\n This constructor is useful if you already have codes and categories and\n so do not need the (computation intensive) factorization step, which is\n usually done on the constructor.\n\n If your data does not follow this convention, please use the normal\n constructor.\n\n Parameters\n ----------\n codes : array-like, integers\n An integer array, where each integer points to a category in\n categories or -1 for NaN\n categories : index-like\n The categories for the categorical. Items need to be unique.\n ordered : boolean, (default False)\n Whether or not this categorical is treated as a ordered\n categorical. If not given, the resulting categorical will be\n unordered.\n \"\"\"\n try:\n codes = np.asarray(codes, np.int64)\n except:\n raise ValueError(\n \"codes need to be convertible to an arrays of integers\")\n\n categories = cls._validate_categories(categories)\n\n if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):\n raise ValueError(\"codes need to be between -1 and \"\n \"len(categories)-1\")\n\n return cls(codes, categories=categories, ordered=ordered,\n fastpath=True)\n\n _codes = None\n\n def _get_codes(self):\n \"\"\" Get the codes.\n\n Returns\n -------\n codes : integer array view\n A non writable view of the `codes` array.\n \"\"\"\n v = self._codes.view()\n v.flags.writeable = False\n return v\n\n def _set_codes(self, codes):\n \"\"\"\n Not settable by the user directly\n \"\"\"\n raise ValueError(\"cannot set Categorical codes directly\")\n\n codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)\n\n def _get_labels(self):\n \"\"\"\n Get the category labels (deprecated).\n\n Deprecated, use .codes!\n \"\"\"\n warn(\"'labels' is deprecated. Use 'codes' instead\", FutureWarning,\n stacklevel=2)\n return self.codes\n\n labels = property(fget=_get_labels, fset=_set_codes)\n\n _categories = None\n\n @classmethod\n def _validate_ordered(cls, ordered):\n \"\"\"\n Validates that we have a valid ordered parameter. 
If\n it is not a boolean, a TypeError will be raised.\n\n Parameters\n ----------\n ordered : object\n The parameter to be verified.\n\n Raises\n ------\n TypeError\n If 'ordered' is not a boolean.\n \"\"\"\n if not is_bool(ordered):\n raise TypeError(\"'ordered' must either be 'True' or 'False'\")\n\n @classmethod\n def _validate_categories(cls, categories, fastpath=False):\n \"\"\"\n Validates that we have good categories\n\n Parameters\n ----------\n fastpath : boolean (default: False)\n Don't perform validation of the categories for uniqueness or nulls\n\n \"\"\"\n if not isinstance(categories, ABCIndexClass):\n dtype = None\n if not hasattr(categories, \"dtype\"):\n if not is_list_like(categories):\n raise TypeError(\"`categories` must be list-like. \"\n \"Got {} instead\".format(repr(categories)))\n categories = _convert_to_list_like(categories)\n # On categories with NaNs, int values would be converted to\n # float. Use \"object\" dtype to prevent this.\n if isna(categories).any():\n without_na = np.array([x for x in categories\n if notna(x)])\n with_na = np.array(categories)\n if with_na.dtype != without_na.dtype:\n dtype = \"object\"\n\n from pandas import Index\n categories = Index(categories, dtype=dtype)\n\n if not fastpath:\n\n # Categories cannot contain NaN.\n if categories.hasnans:\n raise ValueError('Categorial categories cannot be null')\n\n # Categories must be unique.\n if not categories.is_unique:\n raise ValueError('Categorical categories must be unique')\n\n return categories\n\n def _set_categories(self, categories, fastpath=False):\n \"\"\" Sets new categories\n\n Parameters\n ----------\n fastpath : boolean (default: False)\n Don't perform validation of the categories for uniqueness or nulls\n\n \"\"\"\n\n categories = self._validate_categories(categories, fastpath=fastpath)\n if (not fastpath and self._categories is not None and\n len(categories) != len(self._categories)):\n raise ValueError(\"new categories need to have the same number of \"\n \"items than the old categories!\")\n\n self._categories = categories\n\n def _get_categories(self):\n \"\"\" Gets the categories \"\"\"\n # categories is an Index, which is immutable -> no need to copy\n return self._categories\n\n categories = property(fget=_get_categories, fset=_set_categories,\n doc=_categories_doc)\n\n def _codes_for_groupby(self, sort):\n \"\"\"\n If sort=False, return a copy of self, coded with categories as\n returned by .unique(), followed by any categories not appearing in\n the data. 
If sort=True, return self.\n\n This method is needed solely to ensure the categorical index of the\n GroupBy result has categories in the order of appearance in the data\n (GH-8868).\n\n Parameters\n ----------\n sort : boolean\n The value of the sort paramter groupby was called with.\n\n Returns\n -------\n Categorical\n If sort=False, the new categories are set to the order of\n appearance in codes (unless ordered=True, in which case the\n original order is preserved), followed by any unrepresented\n categories in the original order.\n \"\"\"\n\n # Already sorted according to self.categories; all is fine\n if sort:\n return self\n\n # sort=False should order groups in as-encountered order (GH-8868)\n cat = self.unique()\n\n # But for groupby to work, all categories should be present,\n # including those missing from the data (GH-13179), which .unique()\n # above dropped\n cat.add_categories(\n self.categories[~self.categories.isin(cat.categories)],\n inplace=True)\n\n return self.reorder_categories(cat.categories)\n\n _ordered = None\n\n def set_ordered(self, value, inplace=False):\n \"\"\"\n Sets the ordered attribute to the boolean value\n\n Parameters\n ----------\n value : boolean to set whether this categorical is ordered (True) or\n not (False)\n inplace : boolean (default: False)\n Whether or not to set the ordered attribute inplace or return a copy\n of this categorical with ordered set to the value\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n self._validate_ordered(value)\n cat = self if inplace else self.copy()\n cat._ordered = value\n if not inplace:\n return cat\n\n def as_ordered(self, inplace=False):\n \"\"\"\n Sets the Categorical to be ordered\n\n Parameters\n ----------\n inplace : boolean (default: False)\n Whether or not to set the ordered attribute inplace or return a copy\n of this categorical with ordered set to True\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n return self.set_ordered(True, inplace=inplace)\n\n def as_unordered(self, inplace=False):\n \"\"\"\n Sets the Categorical to be unordered\n\n Parameters\n ----------\n inplace : boolean (default: False)\n Whether or not to set the ordered attribute inplace or return a copy\n of this categorical with ordered set to False\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n return self.set_ordered(False, inplace=inplace)\n\n def _get_ordered(self):\n \"\"\" Gets the ordered attribute \"\"\"\n return self._ordered\n\n ordered = property(fget=_get_ordered)\n\n def set_categories(self, new_categories, ordered=None, rename=False,\n inplace=False):\n \"\"\" Sets the categories to the specified new_categories.\n\n `new_categories` can include new categories (which will result in\n unused categories) or remove old categories (which results in values\n set to NaN). 
If `rename==True`, the categories will simple be renamed\n (less or more items than in old categories will result in values set to\n NaN or in unused categories respectively).\n\n This method can be used to perform more than one action of adding,\n removing, and reordering simultaneously and is therefore faster than\n performing the individual steps via the more specialised methods.\n\n On the other hand this methods does not do checks (e.g., whether the\n old categories are included in the new categories on a reorder), which\n can result in surprising changes, for example when using special string\n dtypes on python3, which does not considers a S1 string equal to a\n single char python string.\n\n Raises\n ------\n ValueError\n If new_categories does not validate as categories\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : boolean, (default: False)\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n rename : boolean (default: False)\n Whether or not the new_categories should be considered as a rename\n of the old categories or as reordered categories.\n inplace : boolean (default: False)\n Whether or not to reorder the categories inplace or return a copy of\n this categorical with reordered categories.\n\n Returns\n -------\n cat : Categorical with reordered categories or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n new_categories = self._validate_categories(new_categories)\n cat = self if inplace else self.copy()\n if rename:\n if (cat._categories is not None and\n len(new_categories) < len(cat._categories)):\n # remove all _codes which are larger and set to -1/NaN\n self._codes[self._codes >= len(new_categories)] = -1\n else:\n values = cat.__array__()\n cat._codes = _get_codes_for_values(values, new_categories)\n cat._categories = new_categories\n\n if ordered is None:\n ordered = self.ordered\n cat.set_ordered(ordered, inplace=True)\n\n if not inplace:\n return cat\n\n def rename_categories(self, new_categories, inplace=False):\n \"\"\" Renames categories.\n\n The new categories has to be a list-like object. 
All items must be\n unique and the number of items in the new categories must be the same\n as the number of items in the old categories.\n\n Raises\n ------\n ValueError\n If the new categories do not have the same number of items than the\n current categories or do not validate as categories\n\n Parameters\n ----------\n new_categories : Index-like\n The renamed categories.\n inplace : boolean (default: False)\n Whether or not to rename the categories inplace or return a copy of\n this categorical with renamed categories.\n\n Returns\n -------\n cat : Categorical with renamed categories added or None if inplace.\n\n See also\n --------\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n cat = self if inplace else self.copy()\n cat.categories = new_categories\n if not inplace:\n return cat\n\n def reorder_categories(self, new_categories, ordered=None, inplace=False):\n \"\"\" Reorders categories as specified in new_categories.\n\n `new_categories` need to include all old categories and no new category\n items.\n\n Raises\n ------\n ValueError\n If the new categories do not contain all old category items or any\n new ones\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : boolean, optional\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n inplace : boolean (default: False)\n Whether or not to reorder the categories inplace or return a copy of\n this categorical with reordered categories.\n\n Returns\n -------\n cat : Categorical with reordered categories or None if inplace.\n\n See also\n --------\n rename_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if set(self._categories) != set(new_categories):\n raise ValueError(\"items in new_categories are not the same as in \"\n \"old categories\")\n return self.set_categories(new_categories, ordered=ordered,\n inplace=inplace)\n\n def add_categories(self, new_categories, inplace=False):\n \"\"\" Add new categories.\n\n `new_categories` will be included at the last/highest place in the\n categories and will be unused directly after this call.\n\n Raises\n ------\n ValueError\n If the new categories include old categories or do not validate as\n categories\n\n Parameters\n ----------\n new_categories : category or list-like of category\n The new categories to be included.\n inplace : boolean (default: False)\n Whether or not to add the categories inplace or return a copy of\n this categorical with added categories.\n\n Returns\n -------\n cat : Categorical with new categories added or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n remove_categories\n remove_unused_categories\n set_categories\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if not is_list_like(new_categories):\n new_categories = [new_categories]\n already_included = set(new_categories) & set(self._categories)\n if len(already_included) != 0:\n msg = (\"new categories must not include old categories: %s\" %\n str(already_included))\n raise ValueError(msg)\n new_categories = list(self._categories) + list(new_categories)\n cat = self if inplace else self.copy()\n cat._categories = self._validate_categories(new_categories)\n cat._codes = coerce_indexer_dtype(cat._codes, new_categories)\n if not 
inplace:\n return cat\n\n def remove_categories(self, removals, inplace=False):\n \"\"\" Removes the specified categories.\n\n `removals` must be included in the old categories. Values which were in\n the removed categories will be set to NaN\n\n Raises\n ------\n ValueError\n If the removals are not contained in the categories\n\n Parameters\n ----------\n removals : category or list of categories\n The categories which should be removed.\n inplace : boolean (default: False)\n Whether or not to remove the categories inplace or return a copy of\n this categorical with removed categories.\n\n Returns\n -------\n cat : Categorical with removed categories or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_unused_categories\n set_categories\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if not is_list_like(removals):\n removals = [removals]\n\n removal_set = set(list(removals))\n not_included = removal_set - set(self._categories)\n new_categories = [c for c in self._categories if c not in removal_set]\n\n # GH 10156\n if any(isna(removals)):\n not_included = [x for x in not_included if notna(x)]\n new_categories = [x for x in new_categories if notna(x)]\n\n if len(not_included) != 0:\n raise ValueError(\"removals must all be in old categories: %s\" %\n str(not_included))\n\n return self.set_categories(new_categories, ordered=self.ordered,\n rename=False, inplace=inplace)\n\n def remove_unused_categories(self, inplace=False):\n \"\"\" Removes categories which are not used.\n\n Parameters\n ----------\n inplace : boolean (default: False)\n Whether or not to drop unused categories inplace or return a copy of\n this categorical with unused categories dropped.\n\n Returns\n -------\n cat : Categorical with unused categories dropped or None if inplace.\n\n See also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n set_categories\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n cat = self if inplace else self.copy()\n idx, inv = np.unique(cat._codes, return_inverse=True)\n\n if idx.size != 0 and idx[0] == -1: # na sentinel\n idx, inv = idx[1:], inv - 1\n\n cat._categories = cat.categories.take(idx)\n cat._codes = coerce_indexer_dtype(inv, self._categories)\n\n if not inplace:\n return cat\n\n def map(self, mapper):\n \"\"\"Apply mapper function to its categories (not codes).\n\n Parameters\n ----------\n mapper : callable\n Function to be applied. When all categories are mapped\n to different categories, the result will be Categorical which has\n the same order property as the original. 
Otherwise, the result will\n be np.ndarray.\n\n Returns\n -------\n applied : Categorical or Index.\n\n \"\"\"\n new_categories = self.categories.map(mapper)\n try:\n return self.from_codes(self._codes.copy(),\n categories=new_categories,\n ordered=self.ordered)\n except ValueError:\n return np.take(new_categories, self._codes)\n\n __eq__ = _cat_compare_op('__eq__')\n __ne__ = _cat_compare_op('__ne__')\n __lt__ = _cat_compare_op('__lt__')\n __gt__ = _cat_compare_op('__gt__')\n __le__ = _cat_compare_op('__le__')\n __ge__ = _cat_compare_op('__ge__')\n\n # for Series/ndarray like compat\n @property\n def shape(self):\n \"\"\" Shape of the Categorical.\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n shape : tuple\n \"\"\"\n\n return tuple([len(self._codes)])\n\n def shift(self, periods):\n \"\"\"\n Shift Categorical by desired number of periods.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n\n Returns\n -------\n shifted : Categorical\n \"\"\"\n # since categoricals always have ndim == 1, an axis parameter\n # doesnt make any sense here.\n codes = self.codes\n if codes.ndim > 1:\n raise NotImplementedError(\"Categorical with ndim > 1.\")\n if np.prod(codes.shape) and (periods != 0):\n codes = np.roll(codes, _ensure_platform_int(periods), axis=0)\n if periods > 0:\n codes[:periods] = -1\n else:\n codes[periods:] = -1\n\n return self.from_codes(codes, categories=self.categories,\n ordered=self.ordered)\n\n def __array__(self, dtype=None):\n \"\"\"\n The numpy array interface.\n\n Returns\n -------\n values : numpy array\n A numpy array of either the specified dtype or,\n if dtype==None (default), the same dtype as\n categorical.categories.dtype\n \"\"\"\n ret = take_1d(self.categories.values, self._codes)\n if dtype and not is_dtype_equal(dtype, self.categories.dtype):\n return np.asarray(ret, dtype)\n return ret\n\n def __setstate__(self, state):\n \"\"\"Necessary for making this object picklable\"\"\"\n if not isinstance(state, dict):\n raise Exception('invalid pickle state')\n\n # Provide compatibility with pre-0.15.0 Categoricals.\n if '_categories' not in state and '_levels' in state:\n state['_categories'] = self._validate_categories(state.pop(\n '_levels'))\n if '_codes' not in state and 'labels' in state:\n state['_codes'] = coerce_indexer_dtype(\n state.pop('labels'), state['_categories'])\n\n # 0.16.0 ordered change\n if '_ordered' not in state:\n\n # >=15.0 < 0.16.0\n if 'ordered' in state:\n state['_ordered'] = state.pop('ordered')\n else:\n state['_ordered'] = False\n\n for k, v in compat.iteritems(state):\n setattr(self, k, v)\n\n @property\n def T(self):\n return self\n\n @property\n def nbytes(self):\n return self._codes.nbytes + self._categories.values.nbytes\n\n def memory_usage(self, deep=False):\n \"\"\"\n Memory usage of my values\n\n Parameters\n ----------\n deep : bool\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption\n\n Returns\n -------\n bytes used\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False\n\n See Also\n --------\n numpy.ndarray.nbytes\n \"\"\"\n return self._codes.nbytes + self._categories.memory_usage(deep=deep)\n\n @Substitution(klass='Categorical')\n @Appender(_shared_docs['searchsorted'])\n @deprecate_kwarg(old_arg_name='v', new_arg_name='value')\n def searchsorted(self, value, side='left', sorter=None):\n if not self.ordered:\n raise 
ValueError(\"Categorical not ordered\\nyou can use \"\n \".as_ordered() to change the Categorical to an \"\n \"ordered one\")\n\n from pandas.core.series import Series\n\n values_as_codes = _get_codes_for_values(Series(value).values,\n self.categories)\n\n if -1 in values_as_codes:\n raise ValueError(\"Value(s) to be inserted must be in categories.\")\n\n return self.codes.searchsorted(values_as_codes, side=side,\n sorter=sorter)\n\n def isna(self):\n \"\"\"\n Detect missing values\n\n Both missing values (-1 in .codes) and NA as a category are detected.\n\n Returns\n -------\n a boolean array of whether my values are null\n\n See also\n --------\n isna : top-level isna\n isnull : alias of isna\n Categorical.notna : boolean inverse of Categorical.isna\n\n \"\"\"\n\n ret = self._codes == -1\n\n # String/object and float categories can hold np.nan\n if self.categories.dtype.kind in ['S', 'O', 'f']:\n if np.nan in self.categories:\n nan_pos = np.where(isna(self.categories))[0]\n # we only have one NA in categories\n ret = np.logical_or(ret, self._codes == nan_pos)\n return ret\n isnull = isna\n\n def notna(self):\n \"\"\"\n Inverse of isna\n\n Both missing values (-1 in .codes) and NA as a category are detected as\n null.\n\n Returns\n -------\n a boolean array of whether my values are not null\n\n See also\n --------\n notna : top-level notna\n notnull : alias of notna\n Categorical.isna : boolean inverse of Categorical.notna\n\n \"\"\"\n return ~self.isna()\n notnull = notna\n\n def put(self, *args, **kwargs):\n \"\"\"\n Replace specific elements in the Categorical with given values.\n \"\"\"\n raise NotImplementedError((\"'put' is not yet implemented \"\n \"for Categorical\"))\n\n def dropna(self):\n \"\"\"\n Return the Categorical without null values.\n\n Both missing values (-1 in .codes) and NA as a category are detected.\n NA is removed from the categories if present.\n\n Returns\n -------\n valid : Categorical\n \"\"\"\n result = self[self.notna()]\n if isna(result.categories).any():\n result = result.remove_categories([np.nan])\n return result\n\n def value_counts(self, dropna=True):\n \"\"\"\n Returns a Series containing counts of each category.\n\n Every category will have an entry, even those with a count of 0.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't include counts of NaN, even if NaN is a category.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.value_counts\n\n \"\"\"\n from numpy import bincount\n from pandas import isna, Series, CategoricalIndex\n\n obj = (self.remove_categories([np.nan]) if dropna and\n isna(self.categories).any() else self)\n code, cat = obj._codes, obj.categories\n ncat, mask = len(cat), 0 <= code\n ix, clean = np.arange(ncat), mask.all()\n\n if dropna or clean:\n obs = code if clean else code[mask]\n count = bincount(obs, minlength=ncat or None)\n else:\n count = bincount(np.where(mask, code, ncat))\n ix = np.append(ix, -1)\n\n ix = self._constructor(ix, categories=cat, ordered=obj.ordered,\n fastpath=True)\n\n return Series(count, index=CategoricalIndex(ix), dtype='int64')\n\n def get_values(self):\n \"\"\" Return the values.\n\n For internal compatibility with pandas formatting.\n\n Returns\n -------\n values : numpy array\n A numpy array of the same dtype as categorical.categories.dtype or\n Index if datetime / periods\n \"\"\"\n # if we are a datetime and period index, return Index to keep metadata\n if is_datetimelike(self.categories):\n return self.categories.take(self._codes, fill_value=np.nan)\n 
return np.array(self)\n\n def check_for_ordered(self, op):\n \"\"\" assert that we are ordered \"\"\"\n if not self.ordered:\n raise TypeError(\"Categorical is not ordered for operation {op}\\n\"\n \"you can use .as_ordered() to change the \"\n \"Categorical to an ordered one\\n\".format(op=op))\n\n def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):\n \"\"\"\n Returns the indices that would sort the Categorical instance if\n 'sort_values' was called. This function is implemented to provide\n compatibility with numpy ndarray objects.\n\n While an ordering is applied to the category values, arg-sorting\n in this context refers more to organizing and grouping together\n based on matching category values. Thus, this function can be\n called on an unordered Categorical instance unlike the functions\n 'Categorical.min' and 'Categorical.max'.\n\n Returns\n -------\n argsorted : numpy array\n\n See also\n --------\n numpy.ndarray.argsort\n \"\"\"\n ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)\n result = np.argsort(self._codes.copy(), kind=kind, **kwargs)\n if not ascending:\n result = result[::-1]\n return result\n\n def sort_values(self, inplace=False, ascending=True, na_position='last'):\n \"\"\" Sorts the Categorical by category value returning a new\n Categorical by default.\n\n While an ordering is applied to the category values, sorting in this\n context refers more to organizing and grouping together based on\n matching category values. Thus, this function can be called on an\n unordered Categorical instance unlike the functions 'Categorical.min'\n and 'Categorical.max'.\n\n Parameters\n ----------\n inplace : boolean, default False\n Do operation in place.\n ascending : boolean, default True\n Order ascending. Passing False orders descending. The\n ordering parameter provides the method by which the\n category values are organized.\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n\n Returns\n -------\n y : Categorical or None\n\n See Also\n --------\n Categorical.sort\n Series.sort_values\n\n Examples\n --------\n >>> c = pd.Categorical([1, 2, 2, 1, 5])\n >>> c\n [1, 2, 2, 1, 5]\n Categories (3, int64): [1, 2, 5]\n >>> c.sort_values()\n [1, 1, 2, 2, 5]\n Categories (3, int64): [1, 2, 5]\n >>> c.sort_values(ascending=False)\n [5, 2, 2, 1, 1]\n Categories (3, int64): [1, 2, 5]\n\n Inplace sorting can be done as well:\n\n >>> c.sort_values(inplace=True)\n >>> c\n [1, 1, 2, 2, 5]\n Categories (3, int64): [1, 2, 5]\n >>>\n >>> c = pd.Categorical([1, 2, 2, 1, 5])\n\n 'sort_values' behaviour with NaNs. 
Note that 'na_position'\n is independent of the 'ascending' parameter:\n\n >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])\n >>> c\n [NaN, 2.0, 2.0, NaN, 5.0]\n Categories (2, int64): [2, 5]\n >>> c.sort_values()\n [2.0, 2.0, 5.0, NaN, NaN]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(ascending=False)\n [5.0, 2.0, 2.0, NaN, NaN]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(na_position='first')\n [NaN, NaN, 2.0, 2.0, 5.0]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(ascending=False, na_position='first')\n [NaN, NaN, 5.0, 2.0, 2.0]\n Categories (2, int64): [2, 5]\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n if na_position not in ['last', 'first']:\n raise ValueError('invalid na_position: {!r}'.format(na_position))\n\n codes = np.sort(self._codes)\n if not ascending:\n codes = codes[::-1]\n\n # NaN handling\n na_mask = (codes == -1)\n if na_mask.any():\n n_nans = len(codes[na_mask])\n if na_position == \"first\":\n # in this case sort to the front\n new_codes = codes.copy()\n new_codes[0:n_nans] = -1\n new_codes[n_nans:] = codes[~na_mask]\n codes = new_codes\n elif na_position == \"last\":\n # ... and to the end\n new_codes = codes.copy()\n pos = len(codes) - n_nans\n new_codes[0:pos] = codes[~na_mask]\n new_codes[pos:] = -1\n codes = new_codes\n if inplace:\n self._codes = codes\n return\n else:\n return self._constructor(values=codes, categories=self.categories,\n ordered=self.ordered, fastpath=True)\n\n def _values_for_rank(self):\n \"\"\"\n For correctly ranking ordered categorical data. See GH#15420\n\n Ordered categorical data should be ranked on the basis of\n codes with -1 translated to NaN.\n\n Returns\n -------\n numpy array\n\n \"\"\"\n from pandas import Series\n if self.ordered:\n values = self.codes\n mask = values == -1\n if mask.any():\n values = values.astype('float64')\n values[mask] = np.nan\n elif self.categories.is_numeric():\n values = np.array(self)\n else:\n # reorder the categories (so rank can use the float codes)\n # instead of passing an object array to rank\n values = np.array(\n self.rename_categories(Series(self.categories).rank())\n )\n return values\n\n def ravel(self, order='C'):\n \"\"\" Return a flattened (numpy) array.\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n raveled : numpy array\n \"\"\"\n return np.array(self)\n\n def view(self):\n \"\"\"Return a view of myself.\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n view : Categorical\n Returns `self`!\n \"\"\"\n return self\n\n def to_dense(self):\n \"\"\"Return my 'dense' representation\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n dense : array\n \"\"\"\n return np.asarray(self)\n\n @deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')\n def fillna(self, value=None, method=None, limit=None):\n \"\"\" Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n value : scalar\n Value to use to fill holes (e.g. 0)\n limit : int, default None\n (Not implemented yet for Categorical!)\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. 
If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled.\n\n Returns\n -------\n filled : Categorical with NA/NaN filled\n \"\"\"\n\n if value is None:\n value = np.nan\n if limit is not None:\n raise NotImplementedError(\"specifying a limit for fillna has not \"\n \"been implemented yet\")\n\n values = self._codes\n\n # Make sure that we also get NA in categories\n if self.categories.dtype.kind in ['S', 'O', 'f']:\n if np.nan in self.categories:\n values = values.copy()\n nan_pos = np.where(isna(self.categories))[0]\n # we only have one NA in categories\n values[values == nan_pos] = -1\n\n # pad / bfill\n if method is not None:\n\n values = self.to_dense().reshape(-1, len(self))\n values = interpolate_2d(values, method, 0, None,\n value).astype(self.categories.dtype)[0]\n values = _get_codes_for_values(values, self.categories)\n\n else:\n\n if not isna(value) and value not in self.categories:\n raise ValueError(\"fill value must be in categories\")\n\n mask = values == -1\n if mask.any():\n values = values.copy()\n if isna(value):\n values[mask] = -1\n else:\n values[mask] = self.categories.get_loc(value)\n\n return self._constructor(values, categories=self.categories,\n ordered=self.ordered, fastpath=True)\n\n def take_nd(self, indexer, allow_fill=True, fill_value=None):\n \"\"\" Take the codes by the indexer, fill with the fill_value.\n\n For internal compatibility with numpy arrays.\n \"\"\"\n\n # filling must always be None/nan here\n # but is passed thru internally\n assert isna(fill_value)\n\n codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)\n result = self._constructor(codes, categories=self.categories,\n ordered=self.ordered, fastpath=True)\n return result\n\n take = take_nd\n\n def _slice(self, slicer):\n \"\"\" Return a slice of myself.\n\n For internal compatibility with numpy arrays.\n \"\"\"\n\n # only allow 1 dimensional slicing, but can\n # in a 2-d case be passd (slice(None),....)\n if isinstance(slicer, tuple) and len(slicer) == 2:\n if not is_null_slice(slicer[0]):\n raise AssertionError(\"invalid slicing for a 1-ndim \"\n \"categorical\")\n slicer = slicer[1]\n\n _codes = self._codes[slicer]\n return self._constructor(values=_codes, categories=self.categories,\n ordered=self.ordered, fastpath=True)\n\n def __len__(self):\n \"\"\"The length of this Categorical.\"\"\"\n return len(self._codes)\n\n def __iter__(self):\n \"\"\"Returns an Iterator over the values of this Categorical.\"\"\"\n return iter(self.get_values())\n\n def _tidy_repr(self, max_vals=10, footer=True):\n \"\"\" a short repr displaying only max_vals and an optional (but default\n footer)\n \"\"\"\n num = max_vals // 2\n head = self[:num]._get_repr(length=False, footer=False)\n tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)\n\n result = '%s, ..., %s' % (head[:-1], tail[1:])\n if footer:\n result = '%s\\n%s' % (result, self._repr_footer())\n\n return compat.text_type(result)\n\n def _repr_categories(self):\n \"\"\" return the base repr for the categories \"\"\"\n max_categories = (10 if get_option(\"display.max_categories\") == 0 else\n get_option(\"display.max_categories\"))\n from pandas.io.formats import format as fmt\n if len(self.categories) > max_categories:\n num = max_categories // 2\n head = fmt.format_array(self.categories[:num], None)\n tail = fmt.format_array(self.categories[-num:], None)\n category_strs = head + [\"...\"] + tail\n else:\n category_strs = fmt.format_array(self.categories, 
None)\n\n # Strip all leading spaces, which format_array adds for columns...\n category_strs = [x.strip() for x in category_strs]\n return category_strs\n\n def _repr_categories_info(self):\n \"\"\" Returns a string representation of the footer.\"\"\"\n\n category_strs = self._repr_categories()\n dtype = getattr(self.categories, 'dtype_str',\n str(self.categories.dtype))\n\n levheader = \"Categories (%d, %s): \" % (len(self.categories), dtype)\n width, height = get_terminal_size()\n max_width = get_option(\"display.width\") or width\n if com.in_ipython_frontend():\n # 0 = no breaks\n max_width = 0\n levstring = \"\"\n start = True\n cur_col_len = len(levheader) # header\n sep_len, sep = (3, \" < \") if self.ordered else (2, \", \")\n linesep = sep.rstrip() + \"\\n\" # remove whitespace\n for val in category_strs:\n if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:\n levstring += linesep + (\" \" * (len(levheader) + 1))\n cur_col_len = len(levheader) + 1 # header + a whitespace\n elif not start:\n levstring += sep\n cur_col_len += len(val)\n levstring += val\n start = False\n # replace to simple save space by\n return levheader + \"[\" + levstring.replace(\" < ... < \", \" ... \") + \"]\"\n\n def _repr_footer(self):\n\n return u('Length: %d\\n%s') % (len(self), self._repr_categories_info())\n\n def _get_repr(self, length=True, na_rep='NaN', footer=True):\n from pandas.io.formats import format as fmt\n formatter = fmt.CategoricalFormatter(self, length=length,\n na_rep=na_rep, footer=footer)\n result = formatter.to_string()\n return compat.text_type(result)\n\n def __unicode__(self):\n \"\"\" Unicode representation. \"\"\"\n _maxlen = 10\n if len(self._codes) > _maxlen:\n result = self._tidy_repr(_maxlen)\n elif len(self._codes) > 0:\n result = self._get_repr(length=len(self) > _maxlen)\n else:\n result = ('[], %s' %\n self._get_repr(length=False,\n footer=True, ).replace(\"\\n\", \", \"))\n\n return result\n\n def _maybe_coerce_indexer(self, indexer):\n \"\"\" return an indexer coerced to the codes dtype \"\"\"\n if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':\n indexer = indexer.astype(self._codes.dtype)\n return indexer\n\n def __getitem__(self, key):\n \"\"\" Return an item. 
\"\"\"\n if isinstance(key, (int, np.integer)):\n i = self._codes[key]\n if i == -1:\n return np.nan\n else:\n return self.categories[i]\n else:\n return self._constructor(values=self._codes[key],\n categories=self.categories,\n ordered=self.ordered, fastpath=True)\n\n def __setitem__(self, key, value):\n \"\"\" Item assignment.\n\n\n Raises\n ------\n ValueError\n If (one or more) Value is not in categories or if a assigned\n `Categorical` does not have the same categories\n \"\"\"\n\n # require identical categories set\n if isinstance(value, Categorical):\n if not value.categories.equals(self.categories):\n raise ValueError(\"Cannot set a Categorical with another, \"\n \"without identical categories\")\n\n rvalue = value if is_list_like(value) else [value]\n\n from pandas import Index\n to_add = Index(rvalue).difference(self.categories)\n\n # no assignments of values not in categories, but it's always ok to set\n # something to np.nan\n if len(to_add) and not isna(to_add).all():\n raise ValueError(\"Cannot setitem on a Categorical with a new \"\n \"category, set the categories first\")\n\n # set by position\n if isinstance(key, (int, np.integer)):\n pass\n\n # tuple of indexers (dataframe)\n elif isinstance(key, tuple):\n # only allow 1 dimensional slicing, but can\n # in a 2-d case be passd (slice(None),....)\n if len(key) == 2:\n if not is_null_slice(key[0]):\n raise AssertionError(\"invalid slicing for a 1-ndim \"\n \"categorical\")\n key = key[1]\n elif len(key) == 1:\n key = key[0]\n else:\n raise AssertionError(\"invalid slicing for a 1-ndim \"\n \"categorical\")\n\n # slicing in Series or Categorical\n elif isinstance(key, slice):\n pass\n\n # Array of True/False in Series or Categorical\n else:\n # There is a bug in numpy, which does not accept a Series as a\n # indexer\n # https://github.com/pandas-dev/pandas/issues/6168\n # https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9\n # FIXME: remove when numpy 1.9 is the lowest numpy version pandas\n # accepts...\n key = np.asarray(key)\n\n lindexer = self.categories.get_indexer(rvalue)\n\n # FIXME: the following can be removed after GH7820 is fixed:\n # https://github.com/pandas-dev/pandas/issues/7820\n # float categories do currently return -1 for np.nan, even if np.nan is\n # included in the index -> \"repair\" this here\n if isna(rvalue).any() and isna(self.categories).any():\n nan_pos = np.where(isna(self.categories))[0]\n lindexer[lindexer == -1] = nan_pos\n\n lindexer = self._maybe_coerce_indexer(lindexer)\n self._codes[key] = lindexer\n\n def _reverse_indexer(self):\n \"\"\"\n Compute the inverse of a categorical, returning\n a dict of categories -> indexers.\n\n *This is an internal function*\n\n Returns\n -------\n dict of categories -> indexers\n\n Example\n -------\n In [1]: c = pd.Categorical(list('aabca'))\n\n In [2]: c\n Out[2]:\n [a, a, b, c, a]\n Categories (3, object): [a, b, c]\n\n In [3]: c.categories\n Out[3]: Index([u'a', u'b', u'c'], dtype='object')\n\n In [4]: c.codes\n Out[4]: array([0, 0, 1, 2, 0], dtype=int8)\n\n In [5]: c._reverse_indexer()\n Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}\n\n \"\"\"\n categories = self.categories\n r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),\n categories.size)\n counts = counts.cumsum()\n result = [r[counts[indexer]:counts[indexer + 1]]\n for indexer in range(len(counts) - 1)]\n result = dict(zip(categories, result))\n return result\n\n # reduction ops #\n def _reduce(self, op, name, axis=0, skipna=True, 
numeric_only=None,\n filter_type=None, **kwds):\n \"\"\" perform the reduction type operation \"\"\"\n func = getattr(self, name, None)\n if func is None:\n raise TypeError(\"Categorical cannot perform the operation \"\n \"{op}\".format(op=name))\n return func(numeric_only=numeric_only, **kwds)\n\n def min(self, numeric_only=None, **kwargs):\n \"\"\" The minimum value of the object.\n\n Only ordered `Categoricals` have a minimum!\n\n Raises\n ------\n TypeError\n If the `Categorical` is not `ordered`.\n\n Returns\n -------\n min : the minimum of this `Categorical`\n \"\"\"\n self.check_for_ordered('min')\n if numeric_only:\n good = self._codes != -1\n pointer = self._codes[good].min(**kwargs)\n else:\n pointer = self._codes.min(**kwargs)\n if pointer == -1:\n return np.nan\n else:\n return self.categories[pointer]\n\n def max(self, numeric_only=None, **kwargs):\n \"\"\" The maximum value of the object.\n\n Only ordered `Categoricals` have a maximum!\n\n Raises\n ------\n TypeError\n If the `Categorical` is not `ordered`.\n\n Returns\n -------\n max : the maximum of this `Categorical`\n \"\"\"\n self.check_for_ordered('max')\n if numeric_only:\n good = self._codes != -1\n pointer = self._codes[good].max(**kwargs)\n else:\n pointer = self._codes.max(**kwargs)\n if pointer == -1:\n return np.nan\n else:\n return self.categories[pointer]\n\n def mode(self):\n \"\"\"\n Returns the mode(s) of the Categorical.\n\n Always returns `Categorical` even if only one value.\n\n Returns\n -------\n modes : `Categorical` (sorted)\n \"\"\"\n\n import pandas._libs.hashtable as htable\n good = self._codes != -1\n values = sorted(htable.mode_int64(_ensure_int64(self._codes[good])))\n result = self._constructor(values=values, categories=self.categories,\n ordered=self.ordered, fastpath=True)\n return result\n\n def unique(self):\n \"\"\"\n Return the ``Categorical`` which ``categories`` and ``codes`` are\n unique. Unused categories are NOT returned.\n\n - unordered category: values and categories are sorted by appearance\n order.\n - ordered category: values are sorted by appearance order, categories\n keeps existing order.\n\n Returns\n -------\n unique values : ``Categorical``\n\n Examples\n --------\n An unordered Categorical will return categories in the\n order of appearance.\n\n >>> pd.Categorical(list('baabc'))\n [b, a, c]\n Categories (3, object): [b, a, c]\n\n >>> pd.Categorical(list('baabc'), categories=list('abc'))\n [b, a, c]\n Categories (3, object): [b, a, c]\n\n An ordered Categorical preserves the category ordering.\n\n >>> pd.Categorical(list('baabc'),\n ... categories=list('abc'),\n ... 
ordered=True)\n [b, a, c]\n Categories (3, object): [a < b < c]\n\n See Also\n --------\n unique\n CategoricalIndex.unique\n Series.unique\n\n \"\"\"\n\n # unlike np.unique, unique1d does not sort\n unique_codes = unique1d(self.codes)\n cat = self.copy()\n\n # keep nan in codes\n cat._codes = unique_codes\n\n # exclude nan from indexer for categories\n take_codes = unique_codes[unique_codes != -1]\n if self.ordered:\n take_codes = sorted(take_codes)\n return cat.set_categories(cat.categories.take(take_codes))\n\n def equals(self, other):\n \"\"\"\n Returns True if categorical arrays are equal.\n\n Parameters\n ----------\n other : `Categorical`\n\n Returns\n -------\n are_equal : boolean\n \"\"\"\n return (self.is_dtype_equal(other) and\n np.array_equal(self._codes, other._codes))\n\n def is_dtype_equal(self, other):\n \"\"\"\n Returns True if categoricals are the same dtype\n same categories, and same ordered\n\n Parameters\n ----------\n other : Categorical\n\n Returns\n -------\n are_equal : boolean\n \"\"\"\n\n try:\n return (self.categories.equals(other.categories) and\n self.ordered == other.ordered)\n except (AttributeError, TypeError):\n return False\n\n def describe(self):\n \"\"\" Describes this Categorical\n\n Returns\n -------\n description: `DataFrame`\n A dataframe with frequency and counts by category.\n \"\"\"\n counts = self.value_counts(dropna=False)\n freqs = counts / float(counts.sum())\n\n from pandas.core.reshape.concat import concat\n result = concat([counts, freqs], axis=1)\n result.columns = ['counts', 'freqs']\n result.index.name = 'categories'\n\n return result\n\n def repeat(self, repeats, *args, **kwargs):\n \"\"\"\n Repeat elements of a Categorical.\n\n See also\n --------\n numpy.ndarray.repeat\n\n \"\"\"\n nv.validate_repeat(args, kwargs)\n codes = self._codes.repeat(repeats)\n return self._constructor(values=codes, categories=self.categories,\n ordered=self.ordered, fastpath=True)\n\n# The Series.cat accessor\n\n\nclass CategoricalAccessor(PandasDelegate, NoNewAttributesMixin):\n \"\"\"\n Accessor object for categorical properties of the Series values.\n\n Be aware that assigning to `categories` is a inplace operation, while all\n methods return new categorical data per default (but can be called with\n `inplace=True`).\n\n Examples\n --------\n >>> s.cat.categories\n >>> s.cat.categories = list('abc')\n >>> s.cat.rename_categories(list('cab'))\n >>> s.cat.reorder_categories(list('cab'))\n >>> s.cat.add_categories(['d','e'])\n >>> s.cat.remove_categories(['d'])\n >>> s.cat.remove_unused_categories()\n >>> s.cat.set_categories(list('abcde'))\n >>> s.cat.as_ordered()\n >>> s.cat.as_unordered()\n\n \"\"\"\n\n def __init__(self, values, index):\n self.categorical = values\n self.index = index\n self._freeze()\n\n def _delegate_property_get(self, name):\n return getattr(self.categorical, name)\n\n def _delegate_property_set(self, name, new_values):\n return setattr(self.categorical, name, new_values)\n\n @property\n def codes(self):\n from pandas import Series\n return Series(self.categorical.codes, index=self.index)\n\n def _delegate_method(self, name, *args, **kwargs):\n from pandas import Series\n method = getattr(self.categorical, name)\n res = method(*args, **kwargs)\n if res is not None:\n return Series(res, index=self.index)\n\n @classmethod\n def _make_accessor(cls, data):\n if not is_categorical_dtype(data.dtype):\n raise AttributeError(\"Can only use .cat accessor with a \"\n \"'category' dtype\")\n return CategoricalAccessor(data.values, 
data.index)\n\n\nCategoricalAccessor._add_delegate_accessors(delegate=Categorical,\n accessors=[\"categories\",\n \"ordered\"],\n typ='property')\nCategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=[\n \"rename_categories\", \"reorder_categories\", \"add_categories\",\n \"remove_categories\", \"remove_unused_categories\", \"set_categories\",\n \"as_ordered\", \"as_unordered\"], typ='method')\n\n# utility routines\n\n\ndef _get_codes_for_values(values, categories):\n \"\"\"\n utility routine to turn values into codes given the specified categories\n \"\"\"\n\n from pandas.core.algorithms import _get_data_algo, _hashtables\n if not is_dtype_equal(values.dtype, categories.dtype):\n values = _ensure_object(values)\n categories = _ensure_object(categories)\n\n (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)\n (_, _), cats = _get_data_algo(categories, _hashtables)\n t = hash_klass(len(cats))\n t.map_locations(cats)\n return coerce_indexer_dtype(t.lookup(vals), cats)\n\n\ndef _convert_to_list_like(list_like):\n if hasattr(list_like, \"dtype\"):\n return list_like\n if isinstance(list_like, list):\n return list_like\n if (is_sequence(list_like) or isinstance(list_like, tuple) or\n isinstance(list_like, types.GeneratorType)):\n return list(list_like)\n elif is_scalar(list_like):\n return [list_like]\n else:\n # is this reached?\n return [list_like]\n\n\ndef _factorize_from_iterable(values):\n \"\"\"\n Factorize an input `values` into `categories` and `codes`. Preserves\n categorical dtype in `categories`.\n\n *This is an internal function*\n\n Parameters\n ----------\n values : list-like\n\n Returns\n -------\n codes : ndarray\n categories : Index\n If `values` has a categorical dtype, then `categories` is\n a CategoricalIndex keeping the categories and order of `values`.\n \"\"\"\n from pandas.core.indexes.category import CategoricalIndex\n\n if not is_list_like(values):\n raise TypeError(\"Input must be list-like\")\n\n if is_categorical(values):\n if isinstance(values, (ABCCategoricalIndex, ABCSeries)):\n values = values._values\n categories = CategoricalIndex(values.categories,\n categories=values.categories,\n ordered=values.ordered)\n codes = values.codes\n else:\n cat = Categorical(values, ordered=True)\n categories = cat.categories\n codes = cat.codes\n return codes, categories\n\n\ndef _factorize_from_iterables(iterables):\n \"\"\"\n A higher-level wrapper over `_factorize_from_iterable`.\n\n *This is an internal function*\n\n Parameters\n ----------\n iterables : list-like of list-likes\n\n Returns\n -------\n codes_list : list of ndarrays\n categories_list : list of Indexes\n\n Notes\n -----\n See `_factorize_from_iterable` for more info.\n \"\"\"\n if len(iterables) == 0:\n # For consistency, it should return a list of 2 lists.\n return [[], []]\n return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))\n", "\"\"\"\ntest methods relating to generic function evaluation\nthe so-called white/black lists\n\"\"\"\n\nimport pytest\nfrom string import ascii_lowercase\nimport numpy as np\nfrom pandas import DataFrame, Series, compat, date_range, Index, MultiIndex\nfrom pandas.util import testing as tm\nfrom pandas.compat import lrange, product\n\nAGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',\n 'mad', 'std', 'var', 'sem']\nAGG_FUNCTIONS_WITH_SKIPNA = ['skew', 'mad']\n\ndf_whitelist = frozenset([\n 'last',\n 'first',\n 'mean',\n 'sum',\n 'min',\n 'max',\n 'head',\n 'tail',\n 'cumcount',\n 'ngroup',\n 
'resample',\n 'rank',\n 'quantile',\n 'fillna',\n 'mad',\n 'any',\n 'all',\n 'take',\n 'idxmax',\n 'idxmin',\n 'shift',\n 'tshift',\n 'ffill',\n 'bfill',\n 'pct_change',\n 'skew',\n 'plot',\n 'hist',\n 'median',\n 'dtypes',\n 'corrwith',\n 'corr',\n 'cov',\n 'diff',\n])\n\ns_whitelist = frozenset([\n 'last',\n 'first',\n 'mean',\n 'sum',\n 'min',\n 'max',\n 'head',\n 'tail',\n 'cumcount',\n 'ngroup',\n 'resample',\n 'rank',\n 'quantile',\n 'fillna',\n 'mad',\n 'any',\n 'all',\n 'take',\n 'idxmax',\n 'idxmin',\n 'shift',\n 'tshift',\n 'ffill',\n 'bfill',\n 'pct_change',\n 'skew',\n 'plot',\n 'hist',\n 'median',\n 'dtype',\n 'corr',\n 'cov',\n 'diff',\n 'unique',\n 'nlargest',\n 'nsmallest',\n])\n\n\[email protected]\ndef mframe():\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',\n 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n return DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n\[email protected]\ndef df():\n return DataFrame(\n {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],\n 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],\n 'C': np.random.randn(8),\n 'D': np.random.randn(8)})\n\n\[email protected]\ndef df_letters():\n letters = np.array(list(ascii_lowercase))\n N = 10\n random_letters = letters.take(np.random.randint(0, 26, N))\n df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),\n 'letters': Series(random_letters)})\n return df\n\n\[email protected](\n \"obj, whitelist\", zip((df_letters(), df_letters().floats),\n (df_whitelist, s_whitelist)))\ndef test_groupby_whitelist(df_letters, obj, whitelist):\n df = df_letters\n\n # these are aliases so ok to have the alias __name__\n alias = {'bfill': 'backfill',\n 'ffill': 'pad',\n 'boxplot': None}\n\n gb = obj.groupby(df.letters)\n\n assert whitelist == gb._apply_whitelist\n for m in whitelist:\n\n m = alias.get(m, m)\n if m is None:\n continue\n\n f = getattr(type(gb), m)\n\n # name\n try:\n n = f.__name__\n except AttributeError:\n continue\n assert n == m\n\n # qualname\n if compat.PY3:\n try:\n n = f.__qualname__\n except AttributeError:\n continue\n assert n.endswith(m)\n\n\[email protected]\ndef raw_frame():\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',\n 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n raw_frame = DataFrame(np.random.randn(10, 3), index=index,\n columns=Index(['A', 'B', 'C'], name='exp'))\n raw_frame.iloc[1, [1, 2]] = np.nan\n raw_frame.iloc[7, [0, 1]] = np.nan\n return raw_frame\n\n\[email protected](\n \"op, level, axis, skipna\",\n product(AGG_FUNCTIONS,\n lrange(2), lrange(2),\n [True, False]))\ndef test_regression_whitelist_methods(raw_frame, op, level, axis, skipna):\n # GH6944\n # explicity test the whitelest methods\n\n if axis == 0:\n frame = raw_frame\n else:\n frame = raw_frame.T\n\n if op in AGG_FUNCTIONS_WITH_SKIPNA:\n grouped = frame.groupby(level=level, axis=axis)\n result = getattr(grouped, op)(skipna=skipna)\n expected = getattr(frame, op)(level=level, axis=axis,\n skipna=skipna)\n tm.assert_frame_equal(result, expected)\n else:\n grouped = frame.groupby(level=level, axis=axis)\n result = getattr(grouped, op)()\n expected = getattr(frame, op)(level=level, axis=axis)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_blacklist(df_letters):\n df = df_letters\n s = df_letters.floats\n\n blacklist = [\n 'eval', 'query', 
'abs', 'where',\n 'mask', 'align', 'groupby', 'clip', 'astype',\n 'at', 'combine', 'consolidate', 'convert_objects',\n ]\n to_methods = [method for method in dir(df) if method.startswith('to_')]\n\n blacklist.extend(to_methods)\n\n # e.g., to_csv\n defined_but_not_allowed = (\"(?:^Cannot.+{0!r}.+{1!r}.+try using the \"\n \"'apply' method$)\")\n\n # e.g., query, eval\n not_defined = \"(?:^{1!r} object has no attribute {0!r}$)\"\n fmt = defined_but_not_allowed + '|' + not_defined\n for bl in blacklist:\n for obj in (df, s):\n gb = obj.groupby(df.letters)\n msg = fmt.format(bl, type(gb).__name__)\n with tm.assert_raises_regex(AttributeError, msg):\n getattr(gb, bl)\n\n\ndef test_tab_completion(mframe):\n grp = mframe.groupby(level='second')\n results = set([v for v in dir(grp) if not v.startswith('_')])\n expected = set(\n ['A', 'B', 'C', 'agg', 'aggregate', 'apply', 'boxplot', 'filter',\n 'first', 'get_group', 'groups', 'hist', 'indices', 'last', 'max',\n 'mean', 'median', 'min', 'ngroups', 'nth', 'ohlc', 'plot',\n 'prod', 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count',\n 'nunique', 'head', 'describe', 'cummax', 'quantile',\n 'rank', 'cumprod', 'tail', 'resample', 'cummin', 'fillna',\n 'cumsum', 'cumcount', 'ngroup', 'all', 'shift', 'skew',\n 'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',\n 'cov', 'dtypes', 'ndim', 'diff', 'idxmax', 'idxmin',\n 'ffill', 'bfill', 'pad', 'backfill', 'rolling', 'expanding'])\n assert results == expected\n\n\ndef test_groupby_function_rename(mframe):\n grp = mframe.groupby(level='second')\n for name in ['sum', 'prod', 'min', 'max', 'first', 'last']:\n f = getattr(grp, name)\n assert f.__name__ == name\n\n\ndef test_groupby_selection_with_methods(df):\n # some methods which require DatetimeIndex\n rng = date_range('2014', periods=len(df))\n df.index = rng\n\n g = df.groupby(['A'])[['C']]\n g_exp = df[['C']].groupby(df['A'])\n # TODO check groupby with > 1 col ?\n\n # methods which are called as .foo()\n methods = ['count',\n 'corr',\n 'cummax',\n 'cummin',\n 'cumprod',\n 'describe',\n 'rank',\n 'quantile',\n 'diff',\n 'shift',\n 'all',\n 'any',\n 'idxmin',\n 'idxmax',\n 'ffill',\n 'bfill',\n 'pct_change',\n 'tshift']\n\n for m in methods:\n res = getattr(g, m)()\n exp = getattr(g_exp, m)()\n\n # should always be frames!\n tm.assert_frame_equal(res, exp)\n\n # methods which aren't just .foo()\n tm.assert_frame_equal(g.fillna(0), g_exp.fillna(0))\n tm.assert_frame_equal(g.dtypes, g_exp.dtypes)\n tm.assert_frame_equal(g.apply(lambda x: x.sum()),\n g_exp.apply(lambda x: x.sum()))\n\n tm.assert_frame_equal(g.resample('D').mean(), g_exp.resample('D').mean())\n tm.assert_frame_equal(g.resample('D').ohlc(),\n g_exp.resample('D').ohlc())\n\n tm.assert_frame_equal(g.filter(lambda x: len(x) == 3),\n g_exp.filter(lambda x: len(x) == 3))\n", "from itertools import product\nimport pytest\nimport sys\nimport warnings\nfrom warnings import catch_warnings\n\nfrom datetime import datetime, timedelta\nfrom numpy.random import randn\nimport numpy as np\nfrom distutils.version import LooseVersion\n\nimport pandas as pd\nfrom pandas import (Series, DataFrame, bdate_range, isna,\n notna, concat, Timestamp, Index)\nimport pandas.stats.moments as mom\nimport pandas.core.window as rwindow\nimport pandas.tseries.offsets as offsets\nfrom pandas.core.base import SpecificationError\nfrom pandas.errors import UnsupportedFunctionCall\nimport pandas.util.testing as tm\nfrom pandas.compat import range, zip, PY3\n\nN, K = 100, 10\n\n\ndef assert_equal(left, right):\n 
if isinstance(left, Series):\n tm.assert_series_equal(left, right)\n else:\n tm.assert_frame_equal(left, right)\n\n\nclass Base(object):\n\n _nan_locs = np.arange(20, 40)\n _inf_locs = np.array([])\n\n def _create_data(self):\n arr = randn(N)\n arr[self._nan_locs] = np.NaN\n\n self.arr = arr\n self.rng = bdate_range(datetime(2009, 1, 1), periods=N)\n self.series = Series(arr.copy(), index=self.rng)\n self.frame = DataFrame(randn(N, K), index=self.rng,\n columns=np.arange(K))\n\n\nclass TestApi(Base):\n\n def setup_method(self, method):\n self._create_data()\n\n def test_getitem(self):\n\n r = self.frame.rolling(window=5)\n tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)\n\n r = self.frame.rolling(window=5)[1]\n assert r._selected_obj.name == self.frame.columns[1]\n\n # technically this is allowed\n r = self.frame.rolling(window=5)[1, 3]\n tm.assert_index_equal(r._selected_obj.columns,\n self.frame.columns[[1, 3]])\n\n r = self.frame.rolling(window=5)[[1, 3]]\n tm.assert_index_equal(r._selected_obj.columns,\n self.frame.columns[[1, 3]])\n\n def test_select_bad_cols(self):\n df = DataFrame([[1, 2]], columns=['A', 'B'])\n g = df.rolling(window=5)\n pytest.raises(KeyError, g.__getitem__, ['C']) # g[['C']]\n\n pytest.raises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']]\n with tm.assert_raises_regex(KeyError, '^[^A]+$'):\n # A should not be referenced as a bad column...\n # will have to rethink regex if you change message!\n g[['A', 'C']]\n\n def test_attribute_access(self):\n\n df = DataFrame([[1, 2]], columns=['A', 'B'])\n r = df.rolling(window=5)\n tm.assert_series_equal(r.A.sum(), r['A'].sum())\n pytest.raises(AttributeError, lambda: r.F)\n\n def tests_skip_nuisance(self):\n\n df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'})\n\n r = df.rolling(window=3)\n result = r[['A', 'B']].sum()\n expected = DataFrame({'A': [np.nan, np.nan, 3, 6, 9],\n 'B': [np.nan, np.nan, 18, 21, 24]},\n columns=list('AB'))\n tm.assert_frame_equal(result, expected)\n\n expected = pd.concat([r[['A', 'B']].sum(), df[['C']]], axis=1)\n result = r.sum()\n tm.assert_frame_equal(result, expected, check_like=True)\n\n def test_agg(self):\n df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})\n\n r = df.rolling(window=3)\n a_mean = r['A'].mean()\n a_std = r['A'].std()\n a_sum = r['A'].sum()\n b_mean = r['B'].mean()\n b_std = r['B'].std()\n b_sum = r['B'].sum()\n\n result = r.aggregate([np.mean, np.std])\n expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)\n expected.columns = pd.MultiIndex.from_product([['A', 'B'], ['mean',\n 'std']])\n tm.assert_frame_equal(result, expected)\n\n result = r.aggregate({'A': np.mean, 'B': np.std})\n\n expected = pd.concat([a_mean, b_std], axis=1)\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = r.aggregate({'A': ['mean', 'std']})\n expected = pd.concat([a_mean, a_std], axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), ('A',\n 'std')])\n tm.assert_frame_equal(result, expected)\n\n result = r['A'].aggregate(['mean', 'sum'])\n expected = pd.concat([a_mean, a_sum], axis=1)\n expected.columns = ['mean', 'sum']\n tm.assert_frame_equal(result, expected)\n\n with catch_warnings(record=True):\n result = r.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})\n expected = pd.concat([a_mean, a_sum], axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),\n ('A', 'sum')])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n with catch_warnings(record=True):\n result = r.aggregate({'A': 
{'mean': 'mean',\n 'sum': 'sum'},\n 'B': {'mean2': 'mean',\n 'sum2': 'sum'}})\n expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)\n exp_cols = [('A', 'mean'), ('A', 'sum'), ('B', 'mean2'), ('B', 'sum2')]\n expected.columns = pd.MultiIndex.from_tuples(exp_cols)\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = r.aggregate({'A': ['mean', 'std'], 'B': ['mean', 'std']})\n expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)\n\n exp_cols = [('A', 'mean'), ('A', 'std'), ('B', 'mean'), ('B', 'std')]\n expected.columns = pd.MultiIndex.from_tuples(exp_cols)\n tm.assert_frame_equal(result, expected, check_like=True)\n\n # passed lambda\n result = r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})\n rcustom = r['B'].apply(lambda x: np.std(x, ddof=1))\n expected = pd.concat([a_sum, rcustom], axis=1)\n tm.assert_frame_equal(result, expected, check_like=True)\n\n def test_agg_consistency(self):\n\n df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})\n r = df.rolling(window=3)\n\n result = r.agg([np.sum, np.mean]).columns\n expected = pd.MultiIndex.from_product([list('AB'), ['sum', 'mean']])\n tm.assert_index_equal(result, expected)\n\n result = r['A'].agg([np.sum, np.mean]).columns\n expected = Index(['sum', 'mean'])\n tm.assert_index_equal(result, expected)\n\n result = r.agg({'A': [np.sum, np.mean]}).columns\n expected = pd.MultiIndex.from_tuples([('A', 'sum'), ('A', 'mean')])\n tm.assert_index_equal(result, expected)\n\n def test_agg_nested_dicts(self):\n\n # API change for disallowing these types of nested dicts\n df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})\n r = df.rolling(window=3)\n\n def f():\n r.aggregate({'r1': {'A': ['mean', 'sum']},\n 'r2': {'B': ['mean', 'sum']}})\n\n pytest.raises(SpecificationError, f)\n\n expected = pd.concat([r['A'].mean(), r['A'].std(), r['B'].mean(),\n r['B'].std()], axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (\n 'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])\n with catch_warnings(record=True):\n result = r[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},\n 'B': {'rb': ['mean', 'std']}})\n tm.assert_frame_equal(result, expected, check_like=True)\n\n with catch_warnings(record=True):\n result = r.agg({'A': {'ra': ['mean', 'std']},\n 'B': {'rb': ['mean', 'std']}})\n expected.columns = pd.MultiIndex.from_tuples([('A', 'ra', 'mean'), (\n 'A', 'ra', 'std'), ('B', 'rb', 'mean'), ('B', 'rb', 'std')])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n def test_count_nonnumeric_types(self):\n # GH12541\n cols = ['int', 'float', 'string', 'datetime', 'timedelta', 'periods',\n 'fl_inf', 'fl_nan', 'str_nan', 'dt_nat', 'periods_nat']\n\n df = DataFrame(\n {'int': [1, 2, 3],\n 'float': [4., 5., 6.],\n 'string': list('abc'),\n 'datetime': pd.date_range('20170101', periods=3),\n 'timedelta': pd.timedelta_range('1 s', periods=3, freq='s'),\n 'periods': [pd.Period('2012-01'), pd.Period('2012-02'),\n pd.Period('2012-03')],\n 'fl_inf': [1., 2., np.Inf],\n 'fl_nan': [1., 2., np.NaN],\n 'str_nan': ['aa', 'bb', np.NaN],\n 'dt_nat': [pd.Timestamp('20170101'), pd.Timestamp('20170203'),\n pd.Timestamp(None)],\n 'periods_nat': [pd.Period('2012-01'), pd.Period('2012-02'),\n pd.Period(None)]},\n columns=cols)\n\n expected = DataFrame(\n {'int': [1., 2., 2.],\n 'float': [1., 2., 2.],\n 'string': [1., 2., 2.],\n 'datetime': [1., 2., 2.],\n 'timedelta': [1., 2., 2.],\n 'periods': [1., 2., 2.],\n 'fl_inf': [1., 2., 2.],\n 'fl_nan': [1., 2., 1.],\n 'str_nan': [1., 2., 1.],\n 'dt_nat': [1., 2., 1.],\n 
'periods_nat': [1., 2., 1.]},\n columns=cols)\n\n result = df.rolling(window=2).count()\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(1).count()\n expected = df.notna().astype(float)\n tm.assert_frame_equal(result, expected)\n\n def test_window_with_args(self):\n tm._skip_if_no_scipy()\n\n # make sure that we are aggregating window functions correctly with arg\n r = Series(np.random.randn(100)).rolling(window=10, min_periods=1,\n win_type='gaussian')\n expected = pd.concat([r.mean(std=10), r.mean(std=.01)], axis=1)\n expected.columns = ['<lambda>', '<lambda>']\n result = r.aggregate([lambda x: x.mean(std=10),\n lambda x: x.mean(std=.01)])\n tm.assert_frame_equal(result, expected)\n\n def a(x):\n return x.mean(std=10)\n\n def b(x):\n return x.mean(std=0.01)\n\n expected = pd.concat([r.mean(std=10), r.mean(std=.01)], axis=1)\n expected.columns = ['a', 'b']\n result = r.aggregate([a, b])\n tm.assert_frame_equal(result, expected)\n\n def test_preserve_metadata(self):\n # GH 10565\n s = Series(np.arange(100), name='foo')\n\n s2 = s.rolling(30).sum()\n s3 = s.rolling(20).sum()\n assert s2.name == 'foo'\n assert s3.name == 'foo'\n\n def test_how_compat(self):\n # in prior versions, we would allow how to be used in the resample\n # now that its deprecated, we need to handle this in the actual\n # aggregation functions\n s = pd.Series(\n np.random.randn(20),\n index=pd.date_range('1/1/2000', periods=20, freq='12H'))\n\n for how in ['min', 'max', 'median']:\n for op in ['mean', 'sum', 'std', 'var', 'kurt', 'skew']:\n for t in ['rolling', 'expanding']:\n\n with catch_warnings(record=True):\n\n dfunc = getattr(pd, \"{0}_{1}\".format(t, op))\n if dfunc is None:\n continue\n\n if t == 'rolling':\n kwargs = {'window': 5}\n else:\n kwargs = {}\n result = dfunc(s, freq='D', how=how, **kwargs)\n\n expected = getattr(\n getattr(s, t)(freq='D', **kwargs), op)(how=how)\n tm.assert_series_equal(result, expected)\n\n\nclass TestWindow(Base):\n\n def setup_method(self, method):\n self._create_data()\n\n def test_constructor(self):\n # GH 12669\n tm._skip_if_no_scipy()\n\n for o in [self.series, self.frame]:\n c = o.rolling\n\n # valid\n c(win_type='boxcar', window=2, min_periods=1)\n c(win_type='boxcar', window=2, min_periods=1, center=True)\n c(win_type='boxcar', window=2, min_periods=1, center=False)\n\n for wt in ['boxcar', 'triang', 'blackman', 'hamming', 'bartlett',\n 'bohman', 'blackmanharris', 'nuttall', 'barthann']:\n c(win_type=wt, window=2)\n\n # not valid\n for w in [2., 'foo', np.array([2])]:\n with pytest.raises(ValueError):\n c(win_type='boxcar', window=2, min_periods=w)\n with pytest.raises(ValueError):\n c(win_type='boxcar', window=2, min_periods=1, center=w)\n\n for wt in ['foobar', 1]:\n with pytest.raises(ValueError):\n c(win_type=wt, window=2)\n\n def test_numpy_compat(self):\n # see gh-12811\n w = rwindow.Window(Series([2, 4, 6]), window=[0, 2])\n\n msg = \"numpy operations are not valid with window objects\"\n\n for func in ('sum', 'mean'):\n tm.assert_raises_regex(UnsupportedFunctionCall, msg,\n getattr(w, func), 1, 2, 3)\n tm.assert_raises_regex(UnsupportedFunctionCall, msg,\n getattr(w, func), dtype=np.float64)\n\n\nclass TestRolling(Base):\n\n def setup_method(self, method):\n self._create_data()\n\n def test_doc_string(self):\n\n df = DataFrame({'B': [0, 1, 2, np.nan, 4]})\n df\n df.rolling(2).sum()\n df.rolling(2, min_periods=1).sum()\n\n def test_constructor(self):\n # GH 12669\n\n for o in [self.series, self.frame]:\n c = o.rolling\n\n # valid\n c(window=2)\n 
c(window=2, min_periods=1)\n c(window=2, min_periods=1, center=True)\n c(window=2, min_periods=1, center=False)\n\n # GH 13383\n c(0)\n with pytest.raises(ValueError):\n c(-1)\n\n # not valid\n for w in [2., 'foo', np.array([2])]:\n with pytest.raises(ValueError):\n c(window=w)\n with pytest.raises(ValueError):\n c(window=2, min_periods=w)\n with pytest.raises(ValueError):\n c(window=2, min_periods=1, center=w)\n\n def test_constructor_with_win_type(self):\n # GH 13383\n tm._skip_if_no_scipy()\n for o in [self.series, self.frame]:\n c = o.rolling\n c(0, win_type='boxcar')\n with pytest.raises(ValueError):\n c(-1, win_type='boxcar')\n\n def test_constructor_with_timedelta_window(self):\n # GH 15440\n n = 10\n df = pd.DataFrame({'value': np.arange(n)},\n index=pd.date_range('2015-12-24',\n periods=n,\n freq=\"D\"))\n expected_data = np.append([0., 1.], np.arange(3., 27., 3))\n for window in [timedelta(days=3), pd.Timedelta(days=3)]:\n result = df.rolling(window=window).sum()\n expected = pd.DataFrame({'value': expected_data},\n index=pd.date_range('2015-12-24',\n periods=n,\n freq=\"D\"))\n tm.assert_frame_equal(result, expected)\n expected = df.rolling('3D').sum()\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n 'window', [timedelta(days=3), pd.Timedelta(days=3), '3D'])\n def test_constructor_with_timedelta_window_and_minperiods(self, window):\n # GH 15305\n n = 10\n df = pd.DataFrame({'value': np.arange(n)},\n index=pd.date_range('2017-08-08',\n periods=n,\n freq=\"D\"))\n expected = pd.DataFrame({'value': np.append([np.NaN, 1.],\n np.arange(3., 27., 3))},\n index=pd.date_range('2017-08-08',\n periods=n,\n freq=\"D\"))\n result_roll_sum = df.rolling(window=window, min_periods=2).sum()\n result_roll_generic = df.rolling(window=window,\n min_periods=2).apply(sum)\n tm.assert_frame_equal(result_roll_sum, expected)\n tm.assert_frame_equal(result_roll_generic, expected)\n\n def test_numpy_compat(self):\n # see gh-12811\n r = rwindow.Rolling(Series([2, 4, 6]), window=2)\n\n msg = \"numpy operations are not valid with window objects\"\n\n for func in ('std', 'mean', 'sum', 'max', 'min', 'var'):\n tm.assert_raises_regex(UnsupportedFunctionCall, msg,\n getattr(r, func), 1, 2, 3)\n tm.assert_raises_regex(UnsupportedFunctionCall, msg,\n getattr(r, func), dtype=np.float64)\n\n def test_closed(self):\n df = DataFrame({'A': [0, 1, 2, 3, 4]})\n # closed only allowed for datetimelike\n with pytest.raises(ValueError):\n df.rolling(window=3, closed='neither')\n\n @pytest.mark.parametrize('roller', ['1s', 1])\n def tests_empty_df_rolling(self, roller):\n # GH 15819 Verifies that datetime and integer rolling windows can be\n # applied to empty DataFrames\n expected = DataFrame()\n result = DataFrame().rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n # Verifies that datetime and integer rolling windows can be applied to\n # empty DataFrames with datetime index\n expected = DataFrame(index=pd.DatetimeIndex([]))\n result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_multi_index_names(self):\n\n # GH 16789, 16825\n cols = pd.MultiIndex.from_product([['A', 'B'], ['C', 'D', 'E']],\n names=['1', '2'])\n df = pd.DataFrame(np.ones((10, 6)), columns=cols)\n result = df.rolling(3).cov()\n\n tm.assert_index_equal(result.columns, df.columns)\n assert result.index.names == [None, '1', '2']\n\n\nclass TestExpanding(Base):\n\n def setup_method(self, method):\n self._create_data()\n\n def 
test_doc_string(self):\n\n df = DataFrame({'B': [0, 1, 2, np.nan, 4]})\n df\n df.expanding(2).sum()\n\n def test_constructor(self):\n # GH 12669\n\n for o in [self.series, self.frame]:\n c = o.expanding\n\n # valid\n c(min_periods=1)\n c(min_periods=1, center=True)\n c(min_periods=1, center=False)\n\n # not valid\n for w in [2., 'foo', np.array([2])]:\n with pytest.raises(ValueError):\n c(min_periods=w)\n with pytest.raises(ValueError):\n c(min_periods=1, center=w)\n\n def test_numpy_compat(self):\n # see gh-12811\n e = rwindow.Expanding(Series([2, 4, 6]), window=2)\n\n msg = \"numpy operations are not valid with window objects\"\n\n for func in ('std', 'mean', 'sum', 'max', 'min', 'var'):\n tm.assert_raises_regex(UnsupportedFunctionCall, msg,\n getattr(e, func), 1, 2, 3)\n tm.assert_raises_regex(UnsupportedFunctionCall, msg,\n getattr(e, func), dtype=np.float64)\n\n @pytest.mark.parametrize(\n 'expander',\n [1, pytest.param('ls', marks=pytest.mark.xfail(\n reason='GH 16425 expanding with '\n 'offset not supported'))])\n def test_empty_df_expanding(self, expander):\n # GH 15819 Verifies that datetime and integer expanding windows can be\n # applied to empty DataFrames\n\n expected = DataFrame()\n result = DataFrame().expanding(expander).sum()\n tm.assert_frame_equal(result, expected)\n\n # Verifies that datetime and integer expanding windows can be applied\n # to empty DataFrames with datetime index\n expected = DataFrame(index=pd.DatetimeIndex([]))\n result = DataFrame(\n index=pd.DatetimeIndex([])).expanding(expander).sum()\n tm.assert_frame_equal(result, expected)\n\n\nclass TestEWM(Base):\n\n def setup_method(self, method):\n self._create_data()\n\n def test_doc_string(self):\n\n df = DataFrame({'B': [0, 1, 2, np.nan, 4]})\n df\n df.ewm(com=0.5).mean()\n\n def test_constructor(self):\n for o in [self.series, self.frame]:\n c = o.ewm\n\n # valid\n c(com=0.5)\n c(span=1.5)\n c(alpha=0.5)\n c(halflife=0.75)\n c(com=0.5, span=None)\n c(alpha=0.5, com=None)\n c(halflife=0.75, alpha=None)\n\n # not valid: mutually exclusive\n with pytest.raises(ValueError):\n c(com=0.5, alpha=0.5)\n with pytest.raises(ValueError):\n c(span=1.5, halflife=0.75)\n with pytest.raises(ValueError):\n c(alpha=0.5, span=1.5)\n\n # not valid: com < 0\n with pytest.raises(ValueError):\n c(com=-0.5)\n\n # not valid: span < 1\n with pytest.raises(ValueError):\n c(span=0.5)\n\n # not valid: halflife <= 0\n with pytest.raises(ValueError):\n c(halflife=0)\n\n # not valid: alpha <= 0 or alpha > 1\n for alpha in (-0.5, 1.5):\n with pytest.raises(ValueError):\n c(alpha=alpha)\n\n def test_numpy_compat(self):\n # see gh-12811\n e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5)\n\n msg = \"numpy operations are not valid with window objects\"\n\n for func in ('std', 'mean', 'var'):\n tm.assert_raises_regex(UnsupportedFunctionCall, msg,\n getattr(e, func), 1, 2, 3)\n tm.assert_raises_regex(UnsupportedFunctionCall, msg,\n getattr(e, func), dtype=np.float64)\n\n\nclass TestDeprecations(Base):\n \"\"\" test that we are catching deprecation warnings \"\"\"\n\n def setup_method(self, method):\n self._create_data()\n\n def test_deprecations(self):\n\n with catch_warnings(record=True):\n mom.rolling_mean(np.ones(10), 3, center=True, axis=0)\n mom.rolling_mean(Series(np.ones(10)), 3, center=True, axis=0)\n\n\n# gh-12373 : rolling functions error on float32 data\n# make sure rolling functions works for different dtypes\n#\n# NOTE that these are yielded tests and so _create_data\n# is explicitly called.\n#\n# further note that we are only 
checking rolling for fully dtype\n# compliance (though both expanding and ewm inherit)\nclass Dtype(object):\n window = 2\n\n funcs = {\n 'count': lambda v: v.count(),\n 'max': lambda v: v.max(),\n 'min': lambda v: v.min(),\n 'sum': lambda v: v.sum(),\n 'mean': lambda v: v.mean(),\n 'std': lambda v: v.std(),\n 'var': lambda v: v.var(),\n 'median': lambda v: v.median()\n }\n\n def get_expects(self):\n expects = {\n 'sr1': {\n 'count': Series([1, 2, 2, 2, 2], dtype='float64'),\n 'max': Series([np.nan, 1, 2, 3, 4], dtype='float64'),\n 'min': Series([np.nan, 0, 1, 2, 3], dtype='float64'),\n 'sum': Series([np.nan, 1, 3, 5, 7], dtype='float64'),\n 'mean': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64'),\n 'std': Series([np.nan] + [np.sqrt(.5)] * 4, dtype='float64'),\n 'var': Series([np.nan, .5, .5, .5, .5], dtype='float64'),\n 'median': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64')\n },\n 'sr2': {\n 'count': Series([1, 2, 2, 2, 2], dtype='float64'),\n 'max': Series([np.nan, 10, 8, 6, 4], dtype='float64'),\n 'min': Series([np.nan, 8, 6, 4, 2], dtype='float64'),\n 'sum': Series([np.nan, 18, 14, 10, 6], dtype='float64'),\n 'mean': Series([np.nan, 9, 7, 5, 3], dtype='float64'),\n 'std': Series([np.nan] + [np.sqrt(2)] * 4, dtype='float64'),\n 'var': Series([np.nan, 2, 2, 2, 2], dtype='float64'),\n 'median': Series([np.nan, 9, 7, 5, 3], dtype='float64')\n },\n 'df': {\n 'count': DataFrame({0: Series([1, 2, 2, 2, 2]),\n 1: Series([1, 2, 2, 2, 2])},\n dtype='float64'),\n 'max': DataFrame({0: Series([np.nan, 2, 4, 6, 8]),\n 1: Series([np.nan, 3, 5, 7, 9])},\n dtype='float64'),\n 'min': DataFrame({0: Series([np.nan, 0, 2, 4, 6]),\n 1: Series([np.nan, 1, 3, 5, 7])},\n dtype='float64'),\n 'sum': DataFrame({0: Series([np.nan, 2, 6, 10, 14]),\n 1: Series([np.nan, 4, 8, 12, 16])},\n dtype='float64'),\n 'mean': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),\n 1: Series([np.nan, 2, 4, 6, 8])},\n dtype='float64'),\n 'std': DataFrame({0: Series([np.nan] + [np.sqrt(2)] * 4),\n 1: Series([np.nan] + [np.sqrt(2)] * 4)},\n dtype='float64'),\n 'var': DataFrame({0: Series([np.nan, 2, 2, 2, 2]),\n 1: Series([np.nan, 2, 2, 2, 2])},\n dtype='float64'),\n 'median': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),\n 1: Series([np.nan, 2, 4, 6, 8])},\n dtype='float64'),\n }\n }\n return expects\n\n def _create_dtype_data(self, dtype):\n sr1 = Series(range(5), dtype=dtype)\n sr2 = Series(range(10, 0, -2), dtype=dtype)\n df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype)\n\n data = {\n 'sr1': sr1,\n 'sr2': sr2,\n 'df': df\n }\n\n return data\n\n def _create_data(self):\n self.data = self._create_dtype_data(self.dtype)\n self.expects = self.get_expects()\n\n def test_dtypes(self):\n self._create_data()\n for f_name, d_name in product(self.funcs.keys(), self.data.keys()):\n\n f = self.funcs[f_name]\n d = self.data[d_name]\n exp = self.expects[d_name][f_name]\n self.check_dtypes(f, f_name, d, d_name, exp)\n\n def check_dtypes(self, f, f_name, d, d_name, exp):\n roll = d.rolling(window=self.window)\n result = f(roll)\n tm.assert_almost_equal(result, exp)\n\n\nclass TestDtype_object(Dtype):\n dtype = object\n\n\nclass Dtype_integer(Dtype):\n pass\n\n\nclass TestDtype_int8(Dtype_integer):\n dtype = np.int8\n\n\nclass TestDtype_int16(Dtype_integer):\n dtype = np.int16\n\n\nclass TestDtype_int32(Dtype_integer):\n dtype = np.int32\n\n\nclass TestDtype_int64(Dtype_integer):\n dtype = np.int64\n\n\nclass Dtype_uinteger(Dtype):\n pass\n\n\nclass TestDtype_uint8(Dtype_uinteger):\n dtype = np.uint8\n\n\nclass 
TestDtype_uint16(Dtype_uinteger):\n dtype = np.uint16\n\n\nclass TestDtype_uint32(Dtype_uinteger):\n dtype = np.uint32\n\n\nclass TestDtype_uint64(Dtype_uinteger):\n dtype = np.uint64\n\n\nclass Dtype_float(Dtype):\n pass\n\n\nclass TestDtype_float16(Dtype_float):\n dtype = np.float16\n\n\nclass TestDtype_float32(Dtype_float):\n dtype = np.float32\n\n\nclass TestDtype_float64(Dtype_float):\n dtype = np.float64\n\n\nclass TestDtype_category(Dtype):\n dtype = 'category'\n include_df = False\n\n def _create_dtype_data(self, dtype):\n sr1 = Series(range(5), dtype=dtype)\n sr2 = Series(range(10, 0, -2), dtype=dtype)\n\n data = {\n 'sr1': sr1,\n 'sr2': sr2\n }\n\n return data\n\n\nclass DatetimeLike(Dtype):\n\n def check_dtypes(self, f, f_name, d, d_name, exp):\n\n roll = d.rolling(window=self.window)\n\n if f_name == 'count':\n result = f(roll)\n tm.assert_almost_equal(result, exp)\n\n else:\n\n # other methods not Implemented ATM\n with pytest.raises(NotImplementedError):\n f(roll)\n\n\nclass TestDtype_timedelta(DatetimeLike):\n dtype = np.dtype('m8[ns]')\n\n\nclass TestDtype_datetime(DatetimeLike):\n dtype = np.dtype('M8[ns]')\n\n\nclass TestDtype_datetime64UTC(DatetimeLike):\n dtype = 'datetime64[ns, UTC]'\n\n def _create_data(self):\n pytest.skip(\"direct creation of extension dtype \"\n \"datetime64[ns, UTC] is not supported ATM\")\n\n\nclass TestMoments(Base):\n\n def setup_method(self, method):\n self._create_data()\n\n def test_centered_axis_validation(self):\n\n # ok\n Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()\n\n # bad axis\n with pytest.raises(ValueError):\n Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()\n\n # ok ok\n DataFrame(np.ones((10, 10))).rolling(window=3, center=True,\n axis=0).mean()\n DataFrame(np.ones((10, 10))).rolling(window=3, center=True,\n axis=1).mean()\n\n # bad axis\n with pytest.raises(ValueError):\n (DataFrame(np.ones((10, 10)))\n .rolling(window=3, center=True, axis=2).mean())\n\n def test_rolling_sum(self):\n self._check_moment_func(mom.rolling_sum, np.sum, name='sum')\n\n def test_rolling_count(self):\n counter = lambda x: np.isfinite(x).astype(float).sum()\n self._check_moment_func(mom.rolling_count, counter, name='count',\n has_min_periods=False, preserve_nan=False,\n fill_value=0)\n\n def test_rolling_mean(self):\n self._check_moment_func(mom.rolling_mean, np.mean, name='mean')\n\n def test_cmov_mean(self):\n # GH 8238\n tm._skip_if_no_scipy()\n\n vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,\n 10.63, 14.48])\n xp = np.array([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, 12.818,\n 12.952, np.nan, np.nan])\n\n with catch_warnings(record=True):\n rs = mom.rolling_mean(vals, 5, center=True)\n tm.assert_almost_equal(xp, rs)\n\n xp = Series(rs)\n rs = Series(vals).rolling(5, center=True).mean()\n tm.assert_series_equal(xp, rs)\n\n def test_cmov_window(self):\n # GH 8238\n tm._skip_if_no_scipy()\n\n vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,\n 10.63, 14.48])\n xp = np.array([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, 12.818,\n 12.952, np.nan, np.nan])\n\n with catch_warnings(record=True):\n rs = mom.rolling_window(vals, 5, 'boxcar', center=True)\n tm.assert_almost_equal(xp, rs)\n\n xp = Series(rs)\n rs = Series(vals).rolling(5, win_type='boxcar', center=True).mean()\n tm.assert_series_equal(xp, rs)\n\n def test_cmov_window_corner(self):\n # GH 8238\n tm._skip_if_no_scipy()\n\n # all nan\n vals = np.empty(10, dtype=float)\n vals.fill(np.nan)\n with 
catch_warnings(record=True):\n rs = mom.rolling_window(vals, 5, 'boxcar', center=True)\n assert np.isnan(rs).all()\n\n # empty\n vals = np.array([])\n with catch_warnings(record=True):\n rs = mom.rolling_window(vals, 5, 'boxcar', center=True)\n assert len(rs) == 0\n\n # shorter than window\n vals = np.random.randn(5)\n with catch_warnings(record=True):\n rs = mom.rolling_window(vals, 10, 'boxcar')\n assert np.isnan(rs).all()\n assert len(rs) == 5\n\n def test_cmov_window_frame(self):\n # Gh 8238\n tm._skip_if_no_scipy()\n\n vals = np.array([[12.18, 3.64], [10.18, 9.16], [13.24, 14.61],\n [4.51, 8.11], [6.15, 11.44], [9.14, 6.21],\n [11.31, 10.67], [2.94, 6.51], [9.42, 8.39], [12.44,\n 7.34]])\n\n xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [9.252, 9.392],\n [8.644, 9.906], [8.87, 10.208], [6.81, 8.588],\n [7.792, 8.644], [9.05, 7.824], [np.nan, np.nan\n ], [np.nan, np.nan]])\n\n # DataFrame\n rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).mean()\n tm.assert_frame_equal(DataFrame(xp), rs)\n\n # invalid method\n with pytest.raises(AttributeError):\n (DataFrame(vals).rolling(5, win_type='boxcar', center=True)\n .std())\n\n # sum\n xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [46.26, 46.96],\n [43.22, 49.53], [44.35, 51.04], [34.05, 42.94],\n [38.96, 43.22], [45.25, 39.12], [np.nan, np.nan\n ], [np.nan, np.nan]])\n\n rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).sum()\n tm.assert_frame_equal(DataFrame(xp), rs)\n\n def test_cmov_window_na_min_periods(self):\n tm._skip_if_no_scipy()\n\n # min_periods\n vals = Series(np.random.randn(10))\n vals[4] = np.nan\n vals[8] = np.nan\n\n xp = vals.rolling(5, min_periods=4, center=True).mean()\n rs = vals.rolling(5, win_type='boxcar', min_periods=4,\n center=True).mean()\n tm.assert_series_equal(xp, rs)\n\n def test_cmov_window_regular(self):\n # GH 8238\n tm._skip_if_no_scipy()\n\n win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',\n 'blackmanharris', 'nuttall', 'barthann']\n\n vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,\n 10.63, 14.48])\n xps = {\n 'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009, 14.03687,\n 13.8567, 11.81473, np.nan, np.nan],\n 'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556, 13.33889,\n 13.38, 12.33667, np.nan, np.nan],\n 'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,\n 14.0825, 11.5675, np.nan, np.nan],\n 'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559, 14.17267,\n 14.65923, 11.10401, np.nan, np.nan],\n 'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,\n 14.02156, 15.10512, 10.74574, np.nan, np.nan],\n 'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671, 14.03559,\n 15.05657, 10.78514, np.nan, np.nan],\n 'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607, 14.20036,\n 14.57726, 11.16988, np.nan, np.nan],\n 'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,\n 14.0825, 11.5675, np.nan, np.nan]\n }\n\n for wt in win_types:\n xp = Series(xps[wt])\n rs = Series(vals).rolling(5, win_type=wt, center=True).mean()\n tm.assert_series_equal(xp, rs)\n\n def test_cmov_window_regular_linear_range(self):\n # GH 8238\n tm._skip_if_no_scipy()\n\n win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',\n 'blackmanharris', 'nuttall', 'barthann']\n\n vals = np.array(range(10), dtype=np.float)\n xp = vals.copy()\n xp[:2] = np.nan\n xp[-2:] = np.nan\n xp = Series(xp)\n\n for wt in win_types:\n rs = Series(vals).rolling(5, win_type=wt, center=True).mean()\n tm.assert_series_equal(xp, rs)\n\n 
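# The win_type means above are weighted moving averages: pandas builds the\n    # weights (presumably via scipy.signal.get_window(wt, 5)) and computes roughly\n    # np.dot(w, window_vals) / w.sum() for each centered window. For\n    # win_type='boxcar' every weight is 1, which is why test_cmov_window expects\n    # the same values as a plain Series(vals).rolling(5, center=True).mean().\n\n    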
def test_cmov_window_regular_missing_data(self):\n # GH 8238\n tm._skip_if_no_scipy()\n\n win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman',\n 'blackmanharris', 'nuttall', 'barthann']\n\n vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan,\n 10.63, 14.48])\n xps = {\n 'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,\n 12.5575, 14.3675, 15.61667, 13.655],\n 'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345, 9.17869,\n 12.79607, 14.20036, 15.8706, 13.655],\n 'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,\n 12.5575, 14.3675, 15.61667, 13.655],\n 'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599, 9.1764,\n 12.83559, 14.17267, 15.90976, 13.655],\n 'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384, 9.56348,\n 12.38009, 14.20565, 15.24694, 13.69758],\n 'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618, 9.16786,\n 13.02671, 14.03673, 16.08759, 13.65553],\n 'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667, 10.34667,\n 12.00556, 13.82125, 14.49429, 13.765],\n 'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,\n 9.16438, 13.05052, 14.02175, 16.1098, 13.65509]\n }\n\n for wt in win_types:\n xp = Series(xps[wt])\n rs = Series(vals).rolling(5, win_type=wt, min_periods=3).mean()\n tm.assert_series_equal(xp, rs)\n\n def test_cmov_window_special(self):\n # GH 8238\n tm._skip_if_no_scipy()\n\n win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']\n kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2.,\n 'width': 2.}, {'width': 0.5}]\n\n vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,\n 10.63, 14.48])\n\n xps = {\n 'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763, 13.89053,\n 13.65671, 12.01002, np.nan, np.nan],\n 'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589, 11.73161,\n 13.08516, 12.95111, 12.74577, np.nan, np.nan],\n 'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284, 12.88331,\n 12.96079, 12.77008, np.nan, np.nan],\n 'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161, 12.75129,\n 12.90702, 12.83757, np.nan, np.nan]\n }\n\n for wt, k in zip(win_types, kwds):\n xp = Series(xps[wt])\n rs = Series(vals).rolling(5, win_type=wt, center=True).mean(**k)\n tm.assert_series_equal(xp, rs)\n\n def test_cmov_window_special_linear_range(self):\n # GH 8238\n tm._skip_if_no_scipy()\n\n win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian']\n kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2.,\n 'width': 2.}, {'width': 0.5}]\n\n vals = np.array(range(10), dtype=np.float)\n xp = vals.copy()\n xp[:2] = np.nan\n xp[-2:] = np.nan\n xp = Series(xp)\n\n for wt, k in zip(win_types, kwds):\n rs = Series(vals).rolling(5, win_type=wt, center=True).mean(**k)\n tm.assert_series_equal(xp, rs)\n\n def test_rolling_median(self):\n with catch_warnings(record=True):\n self._check_moment_func(mom.rolling_median, np.median,\n name='median')\n\n def test_rolling_min(self):\n\n with catch_warnings(record=True):\n self._check_moment_func(mom.rolling_min, np.min, name='min')\n\n with catch_warnings(record=True):\n a = np.array([1, 2, 3, 4, 5])\n b = mom.rolling_min(a, window=100, min_periods=1)\n tm.assert_almost_equal(b, np.ones(len(a)))\n\n pytest.raises(ValueError, mom.rolling_min, np.array([1, 2, 3]),\n window=3, min_periods=5)\n\n def test_rolling_max(self):\n\n with catch_warnings(record=True):\n self._check_moment_func(mom.rolling_max, np.max, name='max')\n\n with catch_warnings(record=True):\n a = np.array([1, 2, 3, 4, 5], dtype=np.float64)\n b = mom.rolling_max(a, window=100, 
min_periods=1)\n tm.assert_almost_equal(a, b)\n\n pytest.raises(ValueError, mom.rolling_max, np.array([1, 2, 3]),\n window=3, min_periods=5)\n\n def test_rolling_quantile(self):\n qs = [0.0, .1, .5, .9, 1.0]\n\n def scoreatpercentile(a, per):\n values = np.sort(a, axis=0)\n\n idx = int(per / 1. * (values.shape[0] - 1))\n\n if idx == values.shape[0] - 1:\n retval = values[-1]\n\n else:\n qlow = float(idx) / float(values.shape[0] - 1)\n qhig = float(idx + 1) / float(values.shape[0] - 1)\n vlow = values[idx]\n vhig = values[idx + 1]\n retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)\n\n return retval\n\n for q in qs:\n\n def f(x, window, quantile, min_periods=None, freq=None,\n center=False):\n return mom.rolling_quantile(x, window, quantile,\n min_periods=min_periods, freq=freq,\n center=center)\n\n def alt(x):\n return scoreatpercentile(x, q)\n\n self._check_moment_func(f, alt, name='quantile', quantile=q)\n\n def test_rolling_quantile_np_percentile(self):\n # #9413: Tests that rolling window's quantile default behavior\n # is analogus to Numpy's percentile\n row = 10\n col = 5\n idx = pd.date_range(20100101, periods=row, freq='B')\n df = pd.DataFrame(np.random.rand(row * col).reshape((row, -1)),\n index=idx)\n\n df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)\n np_percentile = np.percentile(df, [25, 50, 75], axis=0)\n\n tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))\n\n def test_rolling_quantile_series(self):\n # #16211: Tests that rolling window's quantile default behavior\n # is analogus to pd.Series' quantile\n arr = np.arange(100)\n s = pd.Series(arr)\n q1 = s.quantile(0.1)\n q2 = s.rolling(100).quantile(0.1).iloc[-1]\n\n tm.assert_almost_equal(q1, q2)\n\n def test_rolling_quantile_param(self):\n ser = Series([0.0, .1, .5, .9, 1.0])\n\n with pytest.raises(ValueError):\n ser.rolling(3).quantile(-0.1)\n\n with pytest.raises(ValueError):\n ser.rolling(3).quantile(10.0)\n\n with pytest.raises(TypeError):\n ser.rolling(3).quantile('foo')\n\n def test_rolling_apply(self):\n # suppress warnings about empty slices, as we are deliberately testing\n # with a 0-length Series\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\",\n message=\".*(empty slice|0 for slice).*\",\n category=RuntimeWarning)\n\n ser = Series([])\n tm.assert_series_equal(ser,\n ser.rolling(10).apply(lambda x: x.mean()))\n\n f = lambda x: x[np.isfinite(x)].mean()\n\n def roll_mean(x, window, min_periods=None, freq=None, center=False,\n **kwargs):\n return mom.rolling_apply(x, window, func=f,\n min_periods=min_periods, freq=freq,\n center=center)\n\n self._check_moment_func(roll_mean, np.mean, name='apply', func=f)\n\n # GH 8080\n s = Series([None, None, None])\n result = s.rolling(2, min_periods=0).apply(lambda x: len(x))\n expected = Series([1., 2., 2.])\n tm.assert_series_equal(result, expected)\n\n result = s.rolling(2, min_periods=0).apply(len)\n tm.assert_series_equal(result, expected)\n\n def test_rolling_apply_out_of_bounds(self):\n # #1850\n arr = np.arange(4)\n\n # it works!\n with catch_warnings(record=True):\n result = mom.rolling_apply(arr, 10, np.sum)\n assert isna(result).all()\n\n with catch_warnings(record=True):\n result = mom.rolling_apply(arr, 10, np.sum, min_periods=1)\n tm.assert_almost_equal(result, result)\n\n def test_rolling_std(self):\n self._check_moment_func(mom.rolling_std, lambda x: np.std(x, ddof=1),\n name='std')\n self._check_moment_func(mom.rolling_std, lambda x: np.std(x, ddof=0),\n name='std', ddof=0)\n\n def 
test_rolling_std_1obs(self):\n with catch_warnings(record=True):\n result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),\n 1, min_periods=1)\n expected = np.array([np.nan] * 5)\n tm.assert_almost_equal(result, expected)\n\n with catch_warnings(record=True):\n result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]),\n 1, min_periods=1, ddof=0)\n expected = np.zeros(5)\n tm.assert_almost_equal(result, expected)\n\n with catch_warnings(record=True):\n result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]),\n 3, min_periods=2)\n assert np.isnan(result[2])\n\n def test_rolling_std_neg_sqrt(self):\n # unit test from Bottleneck\n\n # Test move_nanstd for neg sqrt.\n\n a = np.array([0.0011448196318903589, 0.00028718669878572767,\n 0.00028718669878572767, 0.00028718669878572767,\n 0.00028718669878572767])\n with catch_warnings(record=True):\n b = mom.rolling_std(a, window=3)\n assert np.isfinite(b[2:]).all()\n\n with catch_warnings(record=True):\n b = mom.ewmstd(a, span=3)\n assert np.isfinite(b[2:]).all()\n\n def test_rolling_var(self):\n self._check_moment_func(mom.rolling_var, lambda x: np.var(x, ddof=1),\n test_stable=True, name='var')\n self._check_moment_func(mom.rolling_var, lambda x: np.var(x, ddof=0),\n name='var', ddof=0)\n\n def test_rolling_skew(self):\n try:\n from scipy.stats import skew\n except ImportError:\n pytest.skip('no scipy')\n self._check_moment_func(mom.rolling_skew,\n lambda x: skew(x, bias=False), name='skew')\n\n def test_rolling_kurt(self):\n try:\n from scipy.stats import kurtosis\n except ImportError:\n pytest.skip('no scipy')\n self._check_moment_func(mom.rolling_kurt,\n lambda x: kurtosis(x, bias=False), name='kurt')\n\n def test_fperr_robustness(self):\n # TODO: remove this once python 2.5 out of picture\n if PY3:\n pytest.skip(\"doesn't work on python 3\")\n\n # #2114\n data = 
'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x1a@\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\x02@8\\x8e\\xe38\\x8e\\xe3\\xe8?z\\t\\xed%\\xb4\\x97\\xd0?\\xa2\\x0c<\\xdd\\x9a\\x1f\\xb6?\\x82\\xbb\\xfa&y\\x7f\\x9d?\\xac\\'\\xa7\\xc4P\\xaa\\x83?\\x90\\xdf\\xde\\xb0k8j?`\\xea\\xe9u\\xf2zQ?*\\xe37\\x9d\\x98N7?\\xe2.\\xf5&v\\x13\\x1f?\\xec\\xc9\\xf8\\x19\\xa4\\xb7\\x04?\\x90b\\xf6w\\x85\\x9f\\xeb>\\xb5A\\xa4\\xfaXj\\xd2>F\\x02\\xdb\\xf8\\xcb\\x8d\\xb8>.\\xac<\\xfb\\x87^\\xa0>\\xe8:\\xa6\\xf9_\\xd3\\x85>\\xfb?\\xe2cUU\\xfd?\\xfc\\x7fA\\xed8\\x8e\\xe3?\\xa5\\xaa\\xac\\x91\\xf6\\x12\\xca?n\\x1cs\\xb6\\xf9a\\xb1?\\xe8%D\\xf3L-\\x97?5\\xddZD\\x11\\xe7~?#>\\xe7\\x82\\x0b\\x9ad?\\xd9R4Y\\x0fxK?;7x;\\nP2?N\\xf4JO\\xb8j\\x18?4\\xf81\\x8a%G\\x00?\\x9a\\xf5\\x97\\r2\\xb4\\xe5>\\xcd\\x9c\\xca\\xbcB\\xf0\\xcc>3\\x13\\x87(\\xd7J\\xb3>\\x99\\x19\\xb4\\xe0\\x1e\\xb9\\x99>ff\\xcd\\x95\\x14&\\x81>\\x88\\x88\\xbc\\xc7p\\xddf>`\\x0b\\xa6_\\x96|N>@\\xb2n\\xea\\x0eS4>U\\x98\\x938i\\x19\\x1b>\\x8eeb\\xd0\\xf0\\x10\\x02>\\xbd\\xdc-k\\x96\\x16\\xe8=(\\x93\\x1e\\xf2\\x0e\\x0f\\xd0=\\xe0n\\xd3Bii\\xb5=*\\xe9\\x19Y\\x8c\\x8c\\x9c=\\xc6\\xf0\\xbb\\x90]\\x08\\x83=]\\x96\\xfa\\xc0|`i=>d\\xfc\\xd5\\xfd\\xeaP=R0\\xfb\\xc7\\xa7\\x8e6=\\xc2\\x95\\xf9_\\x8a\\x13\\x1e=\\xd6c\\xa6\\xea\\x06\\r\\x04=r\\xda\\xdd8\\t\\xbc\\xea<\\xf6\\xe6\\x93\\xd0\\xb0\\xd2\\xd1<\\x9d\\xdeok\\x96\\xc3\\xb7<&~\\xea9s\\xaf\\x9f<UUUUUU\\x13@q\\x1c\\xc7q\\x1c\\xc7\\xf9?\\xf6\\x12\\xdaKh/\\xe1?\\xf2\\xc3\"e\\xe0\\xe9\\xc6?\\xed\\xaf\\x831+\\x8d\\xae?\\xf3\\x1f\\xad\\xcb\\x1c^\\x94?\\x15\\x1e\\xdd\\xbd>\\xb8\\x02@\\xc6\\xd2&\\xfd\\xa8\\xf5\\xe8?\\xd9\\xe1\\x19\\xfe\\xc5\\xa3\\xd0?v\\x82\"\\xa8\\xb2/\\xb6?\\x9dX\\x835\\xee\\x94\\x9d?h\\x90W\\xce\\x9e\\xb8\\x83?\\x8a\\xc0th~Kj?\\\\\\x80\\xf8\\x9a\\xa9\\x87Q?%\\xab\\xa0\\xce\\x8c_7?1\\xe4\\x80\\x13\\x11*\\x1f? 
\\x98\\x00\\r\\xb6\\xc6\\x04?\\x80u\\xabf\\x9d\\xb3\\xeb>UNrD\\xbew\\xd2>\\x1c\\x13C[\\xa8\\x9f\\xb8>\\x12b\\xd7<pj\\xa0>m-\\x1fQ@\\xe3\\x85>\\xe6\\x91)l\\x00/m>Da\\xc6\\xf2\\xaatS>\\x05\\xd7]\\xee\\xe3\\xf09>' # noqa\n\n arr = np.frombuffer(data, dtype='<f8')\n if sys.byteorder != \"little\":\n arr = arr.byteswap().newbyteorder()\n\n with catch_warnings(record=True):\n result = mom.rolling_sum(arr, 2)\n assert (result[1:] >= 0).all()\n\n with catch_warnings(record=True):\n result = mom.rolling_mean(arr, 2)\n assert (result[1:] >= 0).all()\n\n with catch_warnings(record=True):\n result = mom.rolling_var(arr, 2)\n assert (result[1:] >= 0).all()\n\n # #2527, ugh\n arr = np.array([0.00012456, 0.0003, 0])\n with catch_warnings(record=True):\n result = mom.rolling_mean(arr, 1)\n assert result[-1] >= 0\n\n with catch_warnings(record=True):\n result = mom.rolling_mean(-arr, 1)\n assert result[-1] <= 0\n\n def _check_moment_func(self, f, static_comp, name=None, window=50,\n has_min_periods=True, has_center=True,\n has_time_rule=True, preserve_nan=True,\n fill_value=None, test_stable=False, **kwargs):\n\n with warnings.catch_warnings(record=True):\n self._check_ndarray(f, static_comp, window=window,\n has_min_periods=has_min_periods,\n preserve_nan=preserve_nan,\n has_center=has_center, fill_value=fill_value,\n test_stable=test_stable, **kwargs)\n\n with warnings.catch_warnings(record=True):\n self._check_structures(f, static_comp,\n has_min_periods=has_min_periods,\n has_time_rule=has_time_rule,\n fill_value=fill_value,\n has_center=has_center, **kwargs)\n\n # new API\n if name is not None:\n self._check_structures(f, static_comp, name=name,\n has_min_periods=has_min_periods,\n has_time_rule=has_time_rule,\n fill_value=fill_value,\n has_center=has_center, **kwargs)\n\n def _check_ndarray(self, f, static_comp, window=50, has_min_periods=True,\n preserve_nan=True, has_center=True, fill_value=None,\n test_stable=False, test_window=True, **kwargs):\n def get_result(arr, window, min_periods=None, center=False):\n return f(arr, window, min_periods=min_periods, center=center, **\n kwargs)\n\n result = get_result(self.arr, window)\n tm.assert_almost_equal(result[-1], static_comp(self.arr[-50:]))\n\n if preserve_nan:\n assert (np.isnan(result[self._nan_locs]).all())\n\n # excluding NaNs correctly\n arr = randn(50)\n arr[:10] = np.NaN\n arr[-10:] = np.NaN\n\n if has_min_periods:\n result = get_result(arr, 50, min_periods=30)\n tm.assert_almost_equal(result[-1], static_comp(arr[10:-10]))\n\n # min_periods is working correctly\n result = get_result(arr, 20, min_periods=15)\n assert np.isnan(result[23])\n assert not np.isnan(result[24])\n\n assert not np.isnan(result[-6])\n assert np.isnan(result[-5])\n\n arr2 = randn(20)\n result = get_result(arr2, 10, min_periods=5)\n assert isna(result[3])\n assert notna(result[4])\n\n # min_periods=0\n result0 = get_result(arr, 20, min_periods=0)\n result1 = get_result(arr, 20, min_periods=1)\n tm.assert_almost_equal(result0, result1)\n else:\n result = get_result(arr, 50)\n tm.assert_almost_equal(result[-1], static_comp(arr[10:-10]))\n\n # GH 7925\n if has_center:\n if has_min_periods:\n result = get_result(arr, 20, min_periods=15, center=True)\n expected = get_result(\n np.concatenate((arr, np.array([np.NaN] * 9))), 20,\n min_periods=15)[9:]\n else:\n result = get_result(arr, 20, center=True)\n expected = get_result(\n np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:]\n\n tm.assert_numpy_array_equal(result, expected)\n\n if test_stable:\n result = 
get_result(self.arr + 1e9, window)\n tm.assert_almost_equal(result[-1],\n static_comp(self.arr[-50:] + 1e9))\n\n # Test window larger than array, #7297\n if test_window:\n if has_min_periods:\n for minp in (0, len(self.arr) - 1, len(self.arr)):\n result = get_result(self.arr, len(self.arr) + 1,\n min_periods=minp)\n expected = get_result(self.arr, len(self.arr),\n min_periods=minp)\n nan_mask = np.isnan(result)\n tm.assert_numpy_array_equal(nan_mask, np.isnan(expected))\n\n nan_mask = ~nan_mask\n tm.assert_almost_equal(result[nan_mask],\n expected[nan_mask])\n else:\n result = get_result(self.arr, len(self.arr) + 1)\n expected = get_result(self.arr, len(self.arr))\n nan_mask = np.isnan(result)\n tm.assert_numpy_array_equal(nan_mask, np.isnan(expected))\n\n nan_mask = ~nan_mask\n tm.assert_almost_equal(result[nan_mask], expected[nan_mask])\n\n def _check_structures(self, f, static_comp, name=None,\n has_min_periods=True, has_time_rule=True,\n has_center=True, fill_value=None, **kwargs):\n def get_result(obj, window, min_periods=None, freq=None, center=False):\n\n # check via the API calls if name is provided\n if name is not None:\n\n # catch a freq deprecation warning if freq is provided and not\n # None\n with catch_warnings(record=True):\n r = obj.rolling(window=window, min_periods=min_periods,\n freq=freq, center=center)\n return getattr(r, name)(**kwargs)\n\n # check via the moments API\n with catch_warnings(record=True):\n return f(obj, window=window, min_periods=min_periods,\n freq=freq, center=center, **kwargs)\n\n series_result = get_result(self.series, window=50)\n frame_result = get_result(self.frame, window=50)\n\n assert isinstance(series_result, Series)\n assert type(frame_result) == DataFrame\n\n # check time_rule works\n if has_time_rule:\n win = 25\n minp = 10\n\n if has_min_periods:\n series_result = get_result(self.series[::2], window=win,\n min_periods=minp, freq='B')\n frame_result = get_result(self.frame[::2], window=win,\n min_periods=minp, freq='B')\n else:\n series_result = get_result(self.series[::2], window=win,\n freq='B')\n frame_result = get_result(self.frame[::2], window=win,\n freq='B')\n\n last_date = series_result.index[-1]\n prev_date = last_date - 24 * offsets.BDay()\n\n trunc_series = self.series[::2].truncate(prev_date, last_date)\n trunc_frame = self.frame[::2].truncate(prev_date, last_date)\n\n tm.assert_almost_equal(series_result[-1],\n static_comp(trunc_series))\n\n tm.assert_series_equal(frame_result.xs(last_date),\n trunc_frame.apply(static_comp),\n check_names=False)\n\n # GH 7925\n if has_center:\n\n # shifter index\n s = ['x%d' % x for x in range(12)]\n\n if has_min_periods:\n minp = 10\n\n series_xp = get_result(\n self.series.reindex(list(self.series.index) + s),\n window=25,\n min_periods=minp).shift(-12).reindex(self.series.index)\n frame_xp = get_result(\n self.frame.reindex(list(self.frame.index) + s),\n window=25,\n min_periods=minp).shift(-12).reindex(self.frame.index)\n\n series_rs = get_result(self.series, window=25,\n min_periods=minp, center=True)\n frame_rs = get_result(self.frame, window=25, min_periods=minp,\n center=True)\n\n else:\n series_xp = get_result(\n self.series.reindex(list(self.series.index) + s),\n window=25).shift(-12).reindex(self.series.index)\n frame_xp = get_result(\n self.frame.reindex(list(self.frame.index) + s),\n window=25).shift(-12).reindex(self.frame.index)\n\n series_rs = get_result(self.series, window=25, center=True)\n frame_rs = get_result(self.frame, window=25, center=True)\n\n if fill_value is not 
None:\n series_xp = series_xp.fillna(fill_value)\n frame_xp = frame_xp.fillna(fill_value)\n tm.assert_series_equal(series_xp, series_rs)\n tm.assert_frame_equal(frame_xp, frame_rs)\n\n def test_ewma(self):\n self._check_ew(mom.ewma, name='mean')\n\n arr = np.zeros(1000)\n arr[5] = 1\n with catch_warnings(record=True):\n result = mom.ewma(arr, span=100, adjust=False).sum()\n assert np.abs(result - 1) < 1e-2\n\n s = Series([1.0, 2.0, 4.0, 8.0])\n\n expected = Series([1.0, 1.6, 2.736842, 4.923077])\n for f in [lambda s: s.ewm(com=2.0, adjust=True).mean(),\n lambda s: s.ewm(com=2.0, adjust=True,\n ignore_na=False).mean(),\n lambda s: s.ewm(com=2.0, adjust=True, ignore_na=True).mean(),\n ]:\n result = f(s)\n tm.assert_series_equal(result, expected)\n\n expected = Series([1.0, 1.333333, 2.222222, 4.148148])\n for f in [lambda s: s.ewm(com=2.0, adjust=False).mean(),\n lambda s: s.ewm(com=2.0, adjust=False,\n ignore_na=False).mean(),\n lambda s: s.ewm(com=2.0, adjust=False,\n ignore_na=True).mean(),\n ]:\n result = f(s)\n tm.assert_series_equal(result, expected)\n\n def test_ewma_nan_handling(self):\n s = Series([1.] + [np.nan] * 5 + [1.])\n result = s.ewm(com=5).mean()\n tm.assert_series_equal(result, Series([1.] * len(s)))\n\n s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])\n result = s.ewm(com=5).mean()\n tm.assert_series_equal(result, Series([np.nan] * 2 + [1.] * 4))\n\n # GH 7603\n s0 = Series([np.nan, 1., 101.])\n s1 = Series([1., np.nan, 101.])\n s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])\n s3 = Series([1., np.nan, 101., 50.])\n com = 2.\n alpha = 1. / (1. + com)\n\n def simple_wma(s, w):\n return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')\n\n for (s, adjust, ignore_na, w) in [\n (s0, True, False, [np.nan, (1. - alpha), 1.]),\n (s0, True, True, [np.nan, (1. - alpha), 1.]),\n (s0, False, False, [np.nan, (1. - alpha), alpha]),\n (s0, False, True, [np.nan, (1. - alpha), alpha]),\n (s1, True, False, [(1. - alpha) ** 2, np.nan, 1.]),\n (s1, True, True, [(1. - alpha), np.nan, 1.]),\n (s1, False, False, [(1. - alpha) ** 2, np.nan, alpha]),\n (s1, False, True, [(1. - alpha), np.nan, alpha]),\n (s2, True, False, [np.nan, (1. - alpha) **\n 3, np.nan, np.nan, 1., np.nan]),\n (s2, True, True, [np.nan, (1. - alpha),\n np.nan, np.nan, 1., np.nan]),\n (s2, False, False, [np.nan, (1. - alpha) **\n 3, np.nan, np.nan, alpha, np.nan]),\n (s2, False, True, [np.nan, (1. - alpha),\n np.nan, np.nan, alpha, np.nan]),\n (s3, True, False, [(1. - alpha) **\n 3, np.nan, (1. - alpha), 1.]),\n (s3, True, True, [(1. - alpha) **\n 2, np.nan, (1. - alpha), 1.]),\n (s3, False, False, [(1. - alpha) ** 3, np.nan,\n (1. - alpha) * alpha,\n alpha * ((1. - alpha) ** 2 + alpha)]),\n (s3, False, True, [(1. - alpha) ** 2,\n np.nan, (1. 
- alpha) * alpha, alpha])]:\n expected = simple_wma(s, Series(w))\n result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()\n\n tm.assert_series_equal(result, expected)\n if ignore_na is False:\n # check that ignore_na defaults to False\n result = s.ewm(com=com, adjust=adjust).mean()\n tm.assert_series_equal(result, expected)\n\n def test_ewmvar(self):\n self._check_ew(mom.ewmvar, name='var')\n\n def test_ewmvol(self):\n self._check_ew(mom.ewmvol, name='vol')\n\n def test_ewma_span_com_args(self):\n with catch_warnings(record=True):\n A = mom.ewma(self.arr, com=9.5)\n B = mom.ewma(self.arr, span=20)\n tm.assert_almost_equal(A, B)\n\n pytest.raises(ValueError, mom.ewma, self.arr, com=9.5, span=20)\n pytest.raises(ValueError, mom.ewma, self.arr)\n\n def test_ewma_halflife_arg(self):\n with catch_warnings(record=True):\n A = mom.ewma(self.arr, com=13.932726172912965)\n B = mom.ewma(self.arr, halflife=10.0)\n tm.assert_almost_equal(A, B)\n\n pytest.raises(ValueError, mom.ewma, self.arr, span=20,\n halflife=50)\n pytest.raises(ValueError, mom.ewma, self.arr, com=9.5,\n halflife=50)\n pytest.raises(ValueError, mom.ewma, self.arr, com=9.5, span=20,\n halflife=50)\n pytest.raises(ValueError, mom.ewma, self.arr)\n\n def test_ewma_alpha_old_api(self):\n # GH 10789\n with catch_warnings(record=True):\n a = mom.ewma(self.arr, alpha=0.61722699889169674)\n b = mom.ewma(self.arr, com=0.62014947789973052)\n c = mom.ewma(self.arr, span=2.240298955799461)\n d = mom.ewma(self.arr, halflife=0.721792864318)\n tm.assert_numpy_array_equal(a, b)\n tm.assert_numpy_array_equal(a, c)\n tm.assert_numpy_array_equal(a, d)\n\n def test_ewma_alpha_arg_old_api(self):\n # GH 10789\n with catch_warnings(record=True):\n pytest.raises(ValueError, mom.ewma, self.arr)\n pytest.raises(ValueError, mom.ewma, self.arr,\n com=10.0, alpha=0.5)\n pytest.raises(ValueError, mom.ewma, self.arr,\n span=10.0, alpha=0.5)\n pytest.raises(ValueError, mom.ewma, self.arr,\n halflife=10.0, alpha=0.5)\n\n def test_ewm_alpha(self):\n # GH 10789\n s = Series(self.arr)\n a = s.ewm(alpha=0.61722699889169674).mean()\n b = s.ewm(com=0.62014947789973052).mean()\n c = s.ewm(span=2.240298955799461).mean()\n d = s.ewm(halflife=0.721792864318).mean()\n tm.assert_series_equal(a, b)\n tm.assert_series_equal(a, c)\n tm.assert_series_equal(a, d)\n\n def test_ewm_alpha_arg(self):\n # GH 10789\n s = Series(self.arr)\n pytest.raises(ValueError, s.ewm)\n pytest.raises(ValueError, s.ewm, com=10.0, alpha=0.5)\n pytest.raises(ValueError, s.ewm, span=10.0, alpha=0.5)\n pytest.raises(ValueError, s.ewm, halflife=10.0, alpha=0.5)\n\n def test_ewm_domain_checks(self):\n # GH 12492\n s = Series(self.arr)\n # com must satisfy: com >= 0\n pytest.raises(ValueError, s.ewm, com=-0.1)\n s.ewm(com=0.0)\n s.ewm(com=0.1)\n # span must satisfy: span >= 1\n pytest.raises(ValueError, s.ewm, span=-0.1)\n pytest.raises(ValueError, s.ewm, span=0.0)\n pytest.raises(ValueError, s.ewm, span=0.9)\n s.ewm(span=1.0)\n s.ewm(span=1.1)\n # halflife must satisfy: halflife > 0\n pytest.raises(ValueError, s.ewm, halflife=-0.1)\n pytest.raises(ValueError, s.ewm, halflife=0.0)\n s.ewm(halflife=0.1)\n # alpha must satisfy: 0 < alpha <= 1\n pytest.raises(ValueError, s.ewm, alpha=-0.1)\n pytest.raises(ValueError, s.ewm, alpha=0.0)\n s.ewm(alpha=0.1)\n s.ewm(alpha=1.0)\n pytest.raises(ValueError, s.ewm, alpha=1.1)\n\n def test_ew_empty_arrays(self):\n arr = np.array([], dtype=np.float64)\n\n funcs = [mom.ewma, mom.ewmvol, mom.ewmvar]\n for f in funcs:\n with catch_warnings(record=True):\n 
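# an empty float array should pass straight through each legacy ewm* function\n                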
result = f(arr, 3)\n tm.assert_almost_equal(result, arr)\n\n def _check_ew(self, func, name=None):\n with catch_warnings(record=True):\n self._check_ew_ndarray(func, name=name)\n self._check_ew_structures(func, name=name)\n\n def _check_ew_ndarray(self, func, preserve_nan=False, name=None):\n result = func(self.arr, com=10)\n if preserve_nan:\n assert (np.isnan(result[self._nan_locs]).all())\n\n # excluding NaNs correctly\n arr = randn(50)\n arr[:10] = np.NaN\n arr[-10:] = np.NaN\n s = Series(arr)\n\n # check min_periods\n # GH 7898\n result = func(s, 50, min_periods=2)\n assert np.isnan(result.values[:11]).all()\n assert not np.isnan(result.values[11:]).any()\n\n for min_periods in (0, 1):\n result = func(s, 50, min_periods=min_periods)\n if func == mom.ewma:\n assert np.isnan(result.values[:10]).all()\n assert not np.isnan(result.values[10:]).any()\n else:\n # ewmstd, ewmvol, ewmvar (with bias=False) require at least two\n # values\n assert np.isnan(result.values[:11]).all()\n assert not np.isnan(result.values[11:]).any()\n\n # check series of length 0\n result = func(Series([]), 50, min_periods=min_periods)\n tm.assert_series_equal(result, Series([]))\n\n # check series of length 1\n result = func(Series([1.]), 50, min_periods=min_periods)\n if func == mom.ewma:\n tm.assert_series_equal(result, Series([1.]))\n else:\n # ewmstd, ewmvol, ewmvar with bias=False require at least two\n # values\n tm.assert_series_equal(result, Series([np.NaN]))\n\n # pass in ints\n result2 = func(np.arange(50), span=10)\n assert result2.dtype == np.float_\n\n def _check_ew_structures(self, func, name):\n series_result = getattr(self.series.ewm(com=10), name)()\n assert isinstance(series_result, Series)\n\n frame_result = getattr(self.frame.ewm(com=10), name)()\n assert type(frame_result) == DataFrame\n\n\nclass TestPairwise(object):\n\n # GH 7738\n df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]],\n columns=['C', 'C']),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1., 0]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0., 1]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=['C', 1]),\n DataFrame([[2., 4.], [1., 2.], [5., 2.], [8., 1.]],\n columns=[1, 0.]),\n DataFrame([[2, 4.], [1, 2.], [5, 2.], [8, 1.]],\n columns=[0, 1.]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.]],\n columns=[1., 'X']), ]\n df2 = DataFrame([[None, 1, 1], [None, 1, 2],\n [None, 3, 2], [None, 8, 1]], columns=['Y', 'Z', 'X'])\n s = Series([1, 1, 3, 8])\n\n def compare(self, result, expected):\n\n # since we have sorted the results\n # we can only compare non-nans\n result = result.dropna().values\n expected = expected.dropna().values\n\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize('f', [lambda x: x.cov(), lambda x: x.corr()])\n def test_no_flex(self, f):\n\n # DataFrame methods (which do not call _flex_binary_moment())\n\n results = [f(df) for df in self.df1s]\n for (df, result) in zip(self.df1s, results):\n tm.assert_index_equal(result.index, df.columns)\n tm.assert_index_equal(result.columns, df.columns)\n for i, result in enumerate(results):\n if i > 0:\n self.compare(result, results[0])\n\n @pytest.mark.parametrize(\n 'f', [lambda x: x.expanding().cov(pairwise=True),\n lambda x: x.expanding().corr(pairwise=True),\n lambda x: x.rolling(window=3).cov(pairwise=True),\n lambda x: 
x.rolling(window=3).corr(pairwise=True),\n lambda x: x.ewm(com=3).cov(pairwise=True),\n lambda x: x.ewm(com=3).corr(pairwise=True)])\n def test_pairwise_with_self(self, f):\n\n # DataFrame with itself, pairwise=True\n results = [f(df) for df in self.df1s]\n for (df, result) in zip(self.df1s, results):\n tm.assert_index_equal(result.index.levels[0],\n df.index,\n check_names=False)\n tm.assert_index_equal(result.index.levels[1],\n df.columns,\n check_names=False)\n tm.assert_index_equal(result.columns, df.columns)\n for i, result in enumerate(results):\n if i > 0:\n self.compare(result, results[0])\n\n @pytest.mark.parametrize(\n 'f', [lambda x: x.expanding().cov(pairwise=False),\n lambda x: x.expanding().corr(pairwise=False),\n lambda x: x.rolling(window=3).cov(pairwise=False),\n lambda x: x.rolling(window=3).corr(pairwise=False),\n lambda x: x.ewm(com=3).cov(pairwise=False),\n lambda x: x.ewm(com=3).corr(pairwise=False), ])\n def test_no_pairwise_with_self(self, f):\n\n # DataFrame with itself, pairwise=False\n results = [f(df) for df in self.df1s]\n for (df, result) in zip(self.df1s, results):\n tm.assert_index_equal(result.index, df.index)\n tm.assert_index_equal(result.columns, df.columns)\n for i, result in enumerate(results):\n if i > 0:\n self.compare(result, results[0])\n\n @pytest.mark.parametrize(\n 'f', [lambda x, y: x.expanding().cov(y, pairwise=True),\n lambda x, y: x.expanding().corr(y, pairwise=True),\n lambda x, y: x.rolling(window=3).cov(y, pairwise=True),\n lambda x, y: x.rolling(window=3).corr(y, pairwise=True),\n lambda x, y: x.ewm(com=3).cov(y, pairwise=True),\n lambda x, y: x.ewm(com=3).corr(y, pairwise=True), ])\n def test_pairwise_with_other(self, f):\n\n # DataFrame with another DataFrame, pairwise=True\n results = [f(df, self.df2) for df in self.df1s]\n for (df, result) in zip(self.df1s, results):\n tm.assert_index_equal(result.index.levels[0],\n df.index,\n check_names=False)\n tm.assert_index_equal(result.index.levels[1],\n self.df2.columns,\n check_names=False)\n for i, result in enumerate(results):\n if i > 0:\n self.compare(result, results[0])\n\n @pytest.mark.parametrize(\n 'f', [lambda x, y: x.expanding().cov(y, pairwise=False),\n lambda x, y: x.expanding().corr(y, pairwise=False),\n lambda x, y: x.rolling(window=3).cov(y, pairwise=False),\n lambda x, y: x.rolling(window=3).corr(y, pairwise=False),\n lambda x, y: x.ewm(com=3).cov(y, pairwise=False),\n lambda x, y: x.ewm(com=3).corr(y, pairwise=False), ])\n def test_no_pairwise_with_other(self, f):\n\n # DataFrame with another DataFrame, pairwise=False\n results = [f(df, self.df2) if df.columns.is_unique else None\n for df in self.df1s]\n for (df, result) in zip(self.df1s, results):\n if result is not None:\n with catch_warnings(record=True):\n # we can have int and str columns\n expected_index = df.index.union(self.df2.index)\n expected_columns = df.columns.union(self.df2.columns)\n tm.assert_index_equal(result.index, expected_index)\n tm.assert_index_equal(result.columns, expected_columns)\n else:\n tm.assert_raises_regex(\n ValueError, \"'arg1' columns are not unique\", f, df,\n self.df2)\n tm.assert_raises_regex(\n ValueError, \"'arg2' columns are not unique\", f,\n self.df2, df)\n\n @pytest.mark.parametrize(\n 'f', [lambda x, y: x.expanding().cov(y),\n lambda x, y: x.expanding().corr(y),\n lambda x, y: x.rolling(window=3).cov(y),\n lambda x, y: x.rolling(window=3).corr(y),\n lambda x, y: x.ewm(com=3).cov(y),\n lambda x, y: x.ewm(com=3).corr(y), ])\n def test_pairwise_with_series(self, f):\n\n # 
DataFrame with a Series\n results = ([f(df, self.s) for df in self.df1s] +\n [f(self.s, df) for df in self.df1s])\n for (df, result) in zip(self.df1s, results):\n tm.assert_index_equal(result.index, df.index)\n tm.assert_index_equal(result.columns, df.columns)\n for i, result in enumerate(results):\n if i > 0:\n self.compare(result, results[0])\n\n\n# create the data only once as we are not setting it\ndef _create_consistency_data():\n def create_series():\n return [Series(),\n Series([np.nan]),\n Series([np.nan, np.nan]),\n Series([3.]),\n Series([np.nan, 3.]),\n Series([3., np.nan]),\n Series([1., 3.]),\n Series([2., 2.]),\n Series([3., 1.]),\n Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan,\n np.nan]),\n Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5.,\n np.nan, np.nan]),\n Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5.,\n np.nan, np.nan]),\n Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,\n 12., 13., 14., 15.]),\n Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,\n 12., 13., 14., 15.]),\n Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,\n 12., 13., 14., 15.]),\n Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,\n 12., 13., 14., 15.]),\n Series(range(10)),\n Series(range(20, 0, -2)), ]\n\n def create_dataframes():\n return ([DataFrame(),\n DataFrame(columns=['a']),\n DataFrame(columns=['a', 'a']),\n DataFrame(columns=['a', 'b']),\n DataFrame(np.arange(10).reshape((5, 2))),\n DataFrame(np.arange(25).reshape((5, 5))),\n DataFrame(np.arange(25).reshape((5, 5)),\n columns=['a', 'b', 99, 'd', 'd'])] +\n [DataFrame(s) for s in create_series()])\n\n def is_constant(x):\n values = x.values.ravel()\n return len(set(values[notna(values)])) == 1\n\n def no_nans(x):\n return x.notna().all().all()\n\n # data is a tuple(object, is_contant, no_nans)\n data = create_series() + create_dataframes()\n\n return [(x, is_constant(x), no_nans(x)) for x in data]\n\n\n_consistency_data = _create_consistency_data()\n\n\ndef _rolling_consistency_cases():\n for window in [1, 2, 3, 10, 20]:\n for min_periods in set([0, 1, 2, 3, 4, window]):\n if min_periods and (min_periods > window):\n continue\n for center in [False, True]:\n yield window, min_periods, center\n\n\nclass TestMomentsConsistency(Base):\n base_functions = [\n (lambda v: Series(v).count(), None, 'count'),\n (lambda v: Series(v).max(), None, 'max'),\n (lambda v: Series(v).min(), None, 'min'),\n (lambda v: Series(v).sum(), None, 'sum'),\n (lambda v: Series(v).mean(), None, 'mean'),\n (lambda v: Series(v).std(), 1, 'std'),\n (lambda v: Series(v).cov(Series(v)), None, 'cov'),\n (lambda v: Series(v).corr(Series(v)), None, 'corr'),\n (lambda v: Series(v).var(), 1, 'var'),\n\n # restore once GH 8086 is fixed\n # lambda v: Series(v).skew(), 3, 'skew'),\n # (lambda v: Series(v).kurt(), 4, 'kurt'),\n\n # (lambda x, min_periods: mom.expanding_quantile(x, 0.3,\n # min_periods=min_periods, 'quantile'),\n\n # restore once GH 8084 is fixed\n # lambda v: Series(v).quantile(0.3), None, 'quantile'),\n\n (lambda v: Series(v).median(), None, 'median'),\n (np.nanmax, 1, 'max'),\n (np.nanmin, 1, 'min'),\n (np.nansum, 1, 'sum'),\n ]\n if np.__version__ >= LooseVersion('1.8.0'):\n base_functions += [\n (np.nanmean, 1, 'mean'),\n (lambda v: np.nanstd(v, ddof=1), 1, 'std'),\n (lambda v: np.nanvar(v, ddof=1), 1, 'var'),\n ]\n if np.__version__ >= LooseVersion('1.9.0'):\n base_functions += [(np.nanmedian, 1, 'median'), ]\n no_nan_functions = [\n (np.max, None, 'max'),\n (np.min, None, 
'min'),\n (np.sum, None, 'sum'),\n (np.mean, None, 'mean'),\n (lambda v: np.std(v, ddof=1), 1, 'std'),\n (lambda v: np.var(v, ddof=1), 1, 'var'),\n (np.median, None, 'median'),\n ]\n\n def _create_data(self):\n super(TestMomentsConsistency, self)._create_data()\n self.data = _consistency_data\n\n def setup_method(self, method):\n self._create_data()\n\n def _test_moments_consistency(self, min_periods, count, mean, mock_mean,\n corr, var_unbiased=None, std_unbiased=None,\n cov_unbiased=None, var_biased=None,\n std_biased=None, cov_biased=None,\n var_debiasing_factors=None):\n def _non_null_values(x):\n values = x.values.ravel()\n return set(values[notna(values)].tolist())\n\n for (x, is_constant, no_nans) in self.data:\n count_x = count(x)\n mean_x = mean(x)\n\n if mock_mean:\n # check that mean equals mock_mean\n expected = mock_mean(x)\n assert_equal(mean_x, expected.astype('float64'))\n\n # check that correlation of a series with itself is either 1 or NaN\n corr_x_x = corr(x, x)\n\n # assert _non_null_values(corr_x_x).issubset(set([1.]))\n # restore once rolling_cov(x, x) is identically equal to var(x)\n\n if is_constant:\n exp = x.max() if isinstance(x, Series) else x.max().max()\n\n # check mean of constant series\n expected = x * np.nan\n expected[count_x >= max(min_periods, 1)] = exp\n assert_equal(mean_x, expected)\n\n # check correlation of constant series with itself is NaN\n expected[:] = np.nan\n assert_equal(corr_x_x, expected)\n\n if var_unbiased and var_biased and var_debiasing_factors:\n # check variance debiasing factors\n var_unbiased_x = var_unbiased(x)\n var_biased_x = var_biased(x)\n var_debiasing_factors_x = var_debiasing_factors(x)\n assert_equal(var_unbiased_x, var_biased_x *\n var_debiasing_factors_x)\n\n for (std, var, cov) in [(std_biased, var_biased, cov_biased),\n (std_unbiased, var_unbiased, cov_unbiased)\n ]:\n\n # check that var(x), std(x), and cov(x) are all >= 0\n var_x = var(x)\n std_x = std(x)\n assert not (var_x < 0).any().any()\n assert not (std_x < 0).any().any()\n if cov:\n cov_x_x = cov(x, x)\n assert not (cov_x_x < 0).any().any()\n\n # check that var(x) == cov(x, x)\n assert_equal(var_x, cov_x_x)\n\n # check that var(x) == std(x)^2\n assert_equal(var_x, std_x * std_x)\n\n if var is var_biased:\n # check that biased var(x) == mean(x^2) - mean(x)^2\n mean_x2 = mean(x * x)\n assert_equal(var_x, mean_x2 - (mean_x * mean_x))\n\n if is_constant:\n # check that variance of constant series is identically 0\n assert not (var_x > 0).any().any()\n expected = x * np.nan\n expected[count_x >= max(min_periods, 1)] = 0.\n if var is var_unbiased:\n expected[count_x < 2] = np.nan\n assert_equal(var_x, expected)\n\n if isinstance(x, Series):\n for (y, is_constant, no_nans) in self.data:\n if not x.isna().equals(y.isna()):\n # can only easily test two Series with similar\n # structure\n continue\n\n # check that cor(x, y) is symmetric\n corr_x_y = corr(x, y)\n corr_y_x = corr(y, x)\n assert_equal(corr_x_y, corr_y_x)\n\n if cov:\n # check that cov(x, y) is symmetric\n cov_x_y = cov(x, y)\n cov_y_x = cov(y, x)\n assert_equal(cov_x_y, cov_y_x)\n\n # check that cov(x, y) == (var(x+y) - var(x) -\n # var(y)) / 2\n var_x_plus_y = var(x + y)\n var_y = var(y)\n assert_equal(cov_x_y, 0.5 *\n (var_x_plus_y - var_x - var_y))\n\n # check that corr(x, y) == cov(x, y) / (std(x) *\n # std(y))\n std_y = std(y)\n assert_equal(corr_x_y, cov_x_y / (std_x * std_y))\n\n if cov is cov_biased:\n # check that biased cov(x, y) == mean(x*y) -\n # mean(x)*mean(y)\n mean_y = mean(y)\n 
mean_x_times_y = mean(x * y)\n assert_equal(cov_x_y, mean_x_times_y -\n (mean_x * mean_y))\n\n @pytest.mark.slow\n @pytest.mark.parametrize(\n 'min_periods, adjust, ignore_na', product([0, 1, 2, 3, 4],\n [True, False],\n [False, True]))\n def test_ewm_consistency(self, min_periods, adjust, ignore_na):\n def _weights(s, com, adjust, ignore_na):\n if isinstance(s, DataFrame):\n if not len(s.columns):\n return DataFrame(index=s.index, columns=s.columns)\n w = concat([\n _weights(s.iloc[:, i], com=com, adjust=adjust,\n ignore_na=ignore_na)\n for i, _ in enumerate(s.columns)], axis=1)\n w.index = s.index\n w.columns = s.columns\n return w\n\n w = Series(np.nan, index=s.index)\n alpha = 1. / (1. + com)\n if ignore_na:\n w[s.notna()] = _weights(s[s.notna()], com=com,\n adjust=adjust, ignore_na=False)\n elif adjust:\n for i in range(len(s)):\n if s.iat[i] == s.iat[i]:\n w.iat[i] = pow(1. / (1. - alpha), i)\n else:\n sum_wts = 0.\n prev_i = -1\n for i in range(len(s)):\n if s.iat[i] == s.iat[i]:\n if prev_i == -1:\n w.iat[i] = 1.\n else:\n w.iat[i] = alpha * sum_wts / pow(1. - alpha,\n i - prev_i)\n sum_wts += w.iat[i]\n prev_i = i\n return w\n\n def _variance_debiasing_factors(s, com, adjust, ignore_na):\n weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)\n cum_sum = weights.cumsum().fillna(method='ffill')\n cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')\n numerator = cum_sum * cum_sum\n denominator = numerator - cum_sum_sq\n denominator[denominator <= 0.] = np.nan\n return numerator / denominator\n\n def _ewma(s, com, min_periods, adjust, ignore_na):\n weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)\n result = s.multiply(weights).cumsum().divide(weights.cumsum(\n )).fillna(method='ffill')\n result[s.expanding().count() < (max(min_periods, 1) if min_periods\n else 1)] = np.nan\n return result\n\n com = 3.\n # test consistency between different ewm* moments\n self._test_moments_consistency(\n min_periods=min_periods,\n count=lambda x: x.expanding().count(),\n mean=lambda x: x.ewm(com=com, min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na).mean(),\n mock_mean=lambda x: _ewma(x, com=com,\n min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na),\n corr=lambda x, y: x.ewm(com=com, min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na).corr(y),\n var_unbiased=lambda x: (\n x.ewm(com=com, min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na).var(bias=False)),\n std_unbiased=lambda x: (\n x.ewm(com=com, min_periods=min_periods,\n adjust=adjust, ignore_na=ignore_na)\n .std(bias=False)),\n cov_unbiased=lambda x, y: (\n x.ewm(com=com, min_periods=min_periods,\n adjust=adjust, ignore_na=ignore_na)\n .cov(y, bias=False)),\n var_biased=lambda x: (\n x.ewm(com=com, min_periods=min_periods,\n adjust=adjust, ignore_na=ignore_na)\n .var(bias=True)),\n std_biased=lambda x: x.ewm(com=com, min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na).std(bias=True),\n cov_biased=lambda x, y: (\n x.ewm(com=com, min_periods=min_periods,\n adjust=adjust, ignore_na=ignore_na)\n .cov(y, bias=True)),\n var_debiasing_factors=lambda x: (\n _variance_debiasing_factors(x, com=com, adjust=adjust,\n ignore_na=ignore_na)))\n\n @pytest.mark.slow\n @pytest.mark.parametrize(\n 'min_periods', [0, 1, 2, 3, 4])\n def test_expanding_consistency(self, min_periods):\n\n # suppress warnings about empty slices, as we are deliberately testing\n # with empty/0-length Series/DataFrames\n with warnings.catch_warnings():\n 
warnings.filterwarnings(\"ignore\",\n message=\".*(empty slice|0 for slice).*\",\n category=RuntimeWarning)\n\n # test consistency between different expanding_* moments\n self._test_moments_consistency(\n min_periods=min_periods,\n count=lambda x: x.expanding().count(),\n mean=lambda x: x.expanding(\n min_periods=min_periods).mean(),\n mock_mean=lambda x: x.expanding(\n min_periods=min_periods).sum() / x.expanding().count(),\n corr=lambda x, y: x.expanding(\n min_periods=min_periods).corr(y),\n var_unbiased=lambda x: x.expanding(\n min_periods=min_periods).var(),\n std_unbiased=lambda x: x.expanding(\n min_periods=min_periods).std(),\n cov_unbiased=lambda x, y: x.expanding(\n min_periods=min_periods).cov(y),\n var_biased=lambda x: x.expanding(\n min_periods=min_periods).var(ddof=0),\n std_biased=lambda x: x.expanding(\n min_periods=min_periods).std(ddof=0),\n cov_biased=lambda x, y: x.expanding(\n min_periods=min_periods).cov(y, ddof=0),\n var_debiasing_factors=lambda x: (\n x.expanding().count() /\n (x.expanding().count() - 1.)\n .replace(0., np.nan)))\n\n # test consistency between expanding_xyz() and either (a)\n # expanding_apply of Series.xyz(), or (b) expanding_apply of\n # np.nanxyz()\n for (x, is_constant, no_nans) in self.data:\n functions = self.base_functions\n\n # GH 8269\n if no_nans:\n functions = self.base_functions + self.no_nan_functions\n for (f, require_min_periods, name) in functions:\n expanding_f = getattr(\n x.expanding(min_periods=min_periods), name)\n\n if (require_min_periods and\n (min_periods is not None) and\n (min_periods < require_min_periods)):\n continue\n\n if name == 'count':\n expanding_f_result = expanding_f()\n expanding_apply_f_result = x.expanding(\n min_periods=0).apply(func=f)\n else:\n if name in ['cov', 'corr']:\n expanding_f_result = expanding_f(\n pairwise=False)\n else:\n expanding_f_result = expanding_f()\n expanding_apply_f_result = x.expanding(\n min_periods=min_periods).apply(func=f)\n\n if not tm._incompat_bottleneck_version(name):\n assert_equal(expanding_f_result,\n expanding_apply_f_result)\n\n @pytest.mark.slow\n @pytest.mark.parametrize(\n 'window,min_periods,center', list(_rolling_consistency_cases()))\n def test_rolling_consistency(self, window, min_periods, center):\n\n # suppress warnings about empty slices, as we are deliberately testing\n # with empty/0-length Series/DataFrames\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\",\n message=\".*(empty slice|0 for slice).*\",\n category=RuntimeWarning)\n\n # test consistency between different rolling_* moments\n self._test_moments_consistency(\n min_periods=min_periods,\n count=lambda x: (\n x.rolling(window=window, center=center)\n .count()),\n mean=lambda x: (\n x.rolling(window=window, min_periods=min_periods,\n center=center).mean()),\n mock_mean=lambda x: (\n x.rolling(window=window,\n min_periods=min_periods,\n center=center).sum()\n .divide(x.rolling(window=window,\n min_periods=min_periods,\n center=center).count())),\n corr=lambda x, y: (\n x.rolling(window=window, min_periods=min_periods,\n center=center).corr(y)),\n\n var_unbiased=lambda x: (\n x.rolling(window=window, min_periods=min_periods,\n center=center).var()),\n\n std_unbiased=lambda x: (\n x.rolling(window=window, min_periods=min_periods,\n center=center).std()),\n\n cov_unbiased=lambda x, y: (\n x.rolling(window=window, min_periods=min_periods,\n center=center).cov(y)),\n\n var_biased=lambda x: (\n x.rolling(window=window, min_periods=min_periods,\n center=center).var(ddof=0)),\n\n 
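# var(ddof=0) above is the biased variance; var_debiasing_factors below\n            # supplies n / (n - 1) (n = in-window count) so that the consistency check\n            # var_unbiased == var_biased * factor in _test_moments_consistency holds.\n            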
std_biased=lambda x: (\n x.rolling(window=window, min_periods=min_periods,\n center=center).std(ddof=0)),\n\n cov_biased=lambda x, y: (\n x.rolling(window=window, min_periods=min_periods,\n center=center).cov(y, ddof=0)),\n var_debiasing_factors=lambda x: (\n x.rolling(window=window, center=center).count()\n .divide((x.rolling(window=window, center=center)\n .count() - 1.)\n .replace(0., np.nan))))\n\n # test consistency between rolling_xyz() and either (a)\n # rolling_apply of Series.xyz(), or (b) rolling_apply of\n # np.nanxyz()\n for (x, is_constant, no_nans) in self.data:\n functions = self.base_functions\n\n # GH 8269\n if no_nans:\n functions = self.base_functions + self.no_nan_functions\n for (f, require_min_periods, name) in functions:\n rolling_f = getattr(\n x.rolling(window=window, center=center,\n min_periods=min_periods), name)\n\n if require_min_periods and (\n min_periods is not None) and (\n min_periods < require_min_periods):\n continue\n\n if name == 'count':\n rolling_f_result = rolling_f()\n rolling_apply_f_result = x.rolling(\n window=window, min_periods=0,\n center=center).apply(func=f)\n else:\n if name in ['cov', 'corr']:\n rolling_f_result = rolling_f(\n pairwise=False)\n else:\n rolling_f_result = rolling_f()\n rolling_apply_f_result = x.rolling(\n window=window, min_periods=min_periods,\n center=center).apply(func=f)\n if not tm._incompat_bottleneck_version(name):\n assert_equal(rolling_f_result,\n rolling_apply_f_result)\n\n # binary moments\n def test_rolling_cov(self):\n A = self.series\n B = A + randn(len(A))\n\n result = A.rolling(window=50, min_periods=25).cov(B)\n tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])\n\n def test_rolling_cov_pairwise(self):\n self._check_pairwise_moment('rolling', 'cov', window=10, min_periods=5)\n\n def test_rolling_corr(self):\n A = self.series\n B = A + randn(len(A))\n\n result = A.rolling(window=50, min_periods=25).corr(B)\n tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])\n\n # test for correct bias correction\n a = tm.makeTimeSeries()\n b = tm.makeTimeSeries()\n a[:5] = np.nan\n b[:10] = np.nan\n\n result = a.rolling(window=len(a), min_periods=1).corr(b)\n tm.assert_almost_equal(result[-1], a.corr(b))\n\n def test_rolling_corr_pairwise(self):\n self._check_pairwise_moment('rolling', 'corr', window=10,\n min_periods=5)\n\n def _check_pairwise_moment(self, dispatch, name, **kwargs):\n def get_result(obj, obj2=None):\n return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)\n\n result = get_result(self.frame)\n result = result.loc[(slice(None), 1), 5]\n result.index = result.index.droplevel(1)\n expected = get_result(self.frame[1], self.frame[5])\n tm.assert_series_equal(result, expected, check_names=False)\n\n def test_flex_binary_moment(self):\n # GH3155\n # don't blow the stack\n pytest.raises(TypeError, rwindow._flex_binary_moment, 5, 6, None)\n\n def test_corr_sanity(self):\n # GH 3155\n df = DataFrame(np.array(\n [[0.87024726, 0.18505595], [0.64355431, 0.3091617],\n [0.92372966, 0.50552513], [0.00203756, 0.04520709],\n [0.84780328, 0.33394331], [0.78369152, 0.63919667]]))\n\n res = df[0].rolling(5, center=True).corr(df[1])\n assert all([np.abs(np.nan_to_num(x)) <= 1 for x in res])\n\n # and some fuzzing\n for _ in range(10):\n df = DataFrame(np.random.rand(30, 2))\n res = df[0].rolling(5, center=True).corr(df[1])\n try:\n assert all([np.abs(np.nan_to_num(x)) <= 1 for x in res])\n except AssertionError:\n print(res)\n\n def test_flex_binary_frame(self):\n def 
_check(method):\n series = self.frame[1]\n\n res = getattr(series.rolling(window=10), method)(self.frame)\n res2 = getattr(self.frame.rolling(window=10), method)(series)\n exp = self.frame.apply(lambda x: getattr(\n series.rolling(window=10), method)(x))\n\n tm.assert_frame_equal(res, exp)\n tm.assert_frame_equal(res2, exp)\n\n frame2 = self.frame.copy()\n frame2.values[:] = np.random.randn(*frame2.shape)\n\n res3 = getattr(self.frame.rolling(window=10), method)(frame2)\n exp = DataFrame(dict((k, getattr(self.frame[k].rolling(\n window=10), method)(frame2[k])) for k in self.frame))\n tm.assert_frame_equal(res3, exp)\n\n methods = ['corr', 'cov']\n for meth in methods:\n _check(meth)\n\n def test_ewmcov(self):\n self._check_binary_ew('cov')\n\n def test_ewmcov_pairwise(self):\n self._check_pairwise_moment('ewm', 'cov', span=10, min_periods=5)\n\n def test_ewmcorr(self):\n self._check_binary_ew('corr')\n\n def test_ewmcorr_pairwise(self):\n self._check_pairwise_moment('ewm', 'corr', span=10, min_periods=5)\n\n def _check_binary_ew(self, name):\n def func(A, B, com, **kwargs):\n return getattr(A.ewm(com, **kwargs), name)(B)\n\n A = Series(randn(50), index=np.arange(50))\n B = A[2:] + randn(48)\n\n A[:10] = np.NaN\n B[-10:] = np.NaN\n\n result = func(A, B, 20, min_periods=5)\n assert np.isnan(result.values[:14]).all()\n assert not np.isnan(result.values[14:]).any()\n\n # GH 7898\n for min_periods in (0, 1, 2):\n result = func(A, B, 20, min_periods=min_periods)\n # binary functions (ewmcov, ewmcorr) with bias=False require at\n # least two values\n assert np.isnan(result.values[:11]).all()\n assert not np.isnan(result.values[11:]).any()\n\n # check series of length 0\n result = func(Series([]), Series([]), 50, min_periods=min_periods)\n tm.assert_series_equal(result, Series([]))\n\n # check series of length 1\n result = func(\n Series([1.]), Series([1.]), 50, min_periods=min_periods)\n tm.assert_series_equal(result, Series([np.NaN]))\n\n pytest.raises(Exception, func, A, randn(50), 20, min_periods=5)\n\n def test_expanding_apply(self):\n ser = Series([])\n tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean()))\n\n def expanding_mean(x, min_periods=1, freq=None):\n return mom.expanding_apply(x, lambda x: x.mean(),\n min_periods=min_periods, freq=freq)\n\n self._check_expanding(expanding_mean, np.mean)\n\n # GH 8080\n s = Series([None, None, None])\n result = s.expanding(min_periods=0).apply(lambda x: len(x))\n expected = Series([1., 2., 3.])\n tm.assert_series_equal(result, expected)\n\n def test_expanding_apply_args_kwargs(self):\n def mean_w_arg(x, const):\n return np.mean(x) + const\n\n df = DataFrame(np.random.rand(20, 3))\n\n expected = df.expanding().apply(np.mean) + 20.\n\n tm.assert_frame_equal(df.expanding().apply(mean_w_arg, args=(20, )),\n expected)\n tm.assert_frame_equal(df.expanding().apply(mean_w_arg,\n kwargs={'const': 20}),\n expected)\n\n def test_expanding_corr(self):\n A = self.series.dropna()\n B = (A + randn(len(A)))[:-5]\n\n result = A.expanding().corr(B)\n\n rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)\n\n tm.assert_almost_equal(rolling_result, result)\n\n def test_expanding_count(self):\n result = self.series.expanding().count()\n tm.assert_almost_equal(result, self.series.rolling(\n window=len(self.series)).count())\n\n def test_expanding_quantile(self):\n result = self.series.expanding().quantile(0.5)\n\n rolling_result = self.series.rolling(window=len(self.series),\n min_periods=1).quantile(0.5)\n\n tm.assert_almost_equal(result, 
rolling_result)\n\n def test_expanding_cov(self):\n A = self.series\n B = (A + randn(len(A)))[:-5]\n\n result = A.expanding().cov(B)\n\n rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)\n\n tm.assert_almost_equal(rolling_result, result)\n\n def test_expanding_max(self):\n self._check_expanding(mom.expanding_max, np.max, preserve_nan=False)\n\n def test_expanding_cov_pairwise(self):\n result = self.frame.expanding().corr()\n\n rolling_result = self.frame.rolling(window=len(self.frame),\n min_periods=1).corr()\n\n tm.assert_frame_equal(result, rolling_result)\n\n def test_expanding_corr_pairwise(self):\n result = self.frame.expanding().corr()\n\n rolling_result = self.frame.rolling(window=len(self.frame),\n min_periods=1).corr()\n tm.assert_frame_equal(result, rolling_result)\n\n def test_expanding_cov_diff_index(self):\n # GH 7512\n s1 = Series([1, 2, 3], index=[0, 1, 2])\n s2 = Series([1, 3], index=[0, 2])\n result = s1.expanding().cov(s2)\n expected = Series([None, None, 2.0])\n tm.assert_series_equal(result, expected)\n\n s2a = Series([1, None, 3], index=[0, 1, 2])\n result = s1.expanding().cov(s2a)\n tm.assert_series_equal(result, expected)\n\n s1 = Series([7, 8, 10], index=[0, 1, 3])\n s2 = Series([7, 9, 10], index=[0, 2, 3])\n result = s1.expanding().cov(s2)\n expected = Series([None, None, None, 4.5])\n tm.assert_series_equal(result, expected)\n\n def test_expanding_corr_diff_index(self):\n # GH 7512\n s1 = Series([1, 2, 3], index=[0, 1, 2])\n s2 = Series([1, 3], index=[0, 2])\n result = s1.expanding().corr(s2)\n expected = Series([None, None, 1.0])\n tm.assert_series_equal(result, expected)\n\n s2a = Series([1, None, 3], index=[0, 1, 2])\n result = s1.expanding().corr(s2a)\n tm.assert_series_equal(result, expected)\n\n s1 = Series([7, 8, 10], index=[0, 1, 3])\n s2 = Series([7, 9, 10], index=[0, 2, 3])\n result = s1.expanding().corr(s2)\n expected = Series([None, None, None, 1.])\n tm.assert_series_equal(result, expected)\n\n def test_rolling_cov_diff_length(self):\n # GH 7512\n s1 = Series([1, 2, 3], index=[0, 1, 2])\n s2 = Series([1, 3], index=[0, 2])\n result = s1.rolling(window=3, min_periods=2).cov(s2)\n expected = Series([None, None, 2.0])\n tm.assert_series_equal(result, expected)\n\n s2a = Series([1, None, 3], index=[0, 1, 2])\n result = s1.rolling(window=3, min_periods=2).cov(s2a)\n tm.assert_series_equal(result, expected)\n\n def test_rolling_corr_diff_length(self):\n # GH 7512\n s1 = Series([1, 2, 3], index=[0, 1, 2])\n s2 = Series([1, 3], index=[0, 2])\n result = s1.rolling(window=3, min_periods=2).corr(s2)\n expected = Series([None, None, 1.0])\n tm.assert_series_equal(result, expected)\n\n s2a = Series([1, None, 3], index=[0, 1, 2])\n result = s1.rolling(window=3, min_periods=2).corr(s2a)\n tm.assert_series_equal(result, expected)\n\n def test_rolling_functions_window_non_shrinkage(self):\n # GH 7764\n s = Series(range(4))\n s_expected = Series(np.nan, index=s.index)\n df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=['A', 'B'])\n df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)\n\n functions = [lambda x: (x.rolling(window=10, min_periods=5)\n .cov(x, pairwise=False)),\n lambda x: (x.rolling(window=10, min_periods=5)\n .corr(x, pairwise=False)),\n lambda x: x.rolling(window=10, min_periods=5).max(),\n lambda x: x.rolling(window=10, min_periods=5).min(),\n lambda x: x.rolling(window=10, min_periods=5).sum(),\n lambda x: x.rolling(window=10, min_periods=5).mean(),\n lambda x: x.rolling(window=10, min_periods=5).std(),\n lambda x: 
x.rolling(window=10, min_periods=5).var(),\n lambda x: x.rolling(window=10, min_periods=5).skew(),\n lambda x: x.rolling(window=10, min_periods=5).kurt(),\n lambda x: x.rolling(\n window=10, min_periods=5).quantile(quantile=0.5),\n lambda x: x.rolling(window=10, min_periods=5).median(),\n lambda x: x.rolling(window=10, min_periods=5).apply(sum),\n lambda x: x.rolling(win_type='boxcar',\n window=10, min_periods=5).mean()]\n for f in functions:\n try:\n s_result = f(s)\n tm.assert_series_equal(s_result, s_expected)\n\n df_result = f(df)\n tm.assert_frame_equal(df_result, df_expected)\n except (ImportError):\n\n # scipy needed for rolling_window\n continue\n\n def test_rolling_functions_window_non_shrinkage_binary(self):\n\n # corr/cov return a MI DataFrame\n df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]],\n columns=Index(['A', 'B'], name='foo'),\n index=Index(range(4), name='bar'))\n df_expected = DataFrame(\n columns=Index(['A', 'B'], name='foo'),\n index=pd.MultiIndex.from_product([df.index, df.columns],\n names=['bar', 'foo']),\n dtype='float64')\n functions = [lambda x: (x.rolling(window=10, min_periods=5)\n .cov(x, pairwise=True)),\n lambda x: (x.rolling(window=10, min_periods=5)\n .corr(x, pairwise=True))]\n for f in functions:\n df_result = f(df)\n tm.assert_frame_equal(df_result, df_expected)\n\n def test_moment_functions_zero_length(self):\n # GH 8056\n s = Series()\n s_expected = s\n df1 = DataFrame()\n df1_expected = df1\n df2 = DataFrame(columns=['a'])\n df2['a'] = df2['a'].astype('float64')\n df2_expected = df2\n\n functions = [lambda x: x.expanding().count(),\n lambda x: x.expanding(min_periods=5).cov(\n x, pairwise=False),\n lambda x: x.expanding(min_periods=5).corr(\n x, pairwise=False),\n lambda x: x.expanding(min_periods=5).max(),\n lambda x: x.expanding(min_periods=5).min(),\n lambda x: x.expanding(min_periods=5).sum(),\n lambda x: x.expanding(min_periods=5).mean(),\n lambda x: x.expanding(min_periods=5).std(),\n lambda x: x.expanding(min_periods=5).var(),\n lambda x: x.expanding(min_periods=5).skew(),\n lambda x: x.expanding(min_periods=5).kurt(),\n lambda x: x.expanding(min_periods=5).quantile(0.5),\n lambda x: x.expanding(min_periods=5).median(),\n lambda x: x.expanding(min_periods=5).apply(sum),\n lambda x: x.rolling(window=10).count(),\n lambda x: x.rolling(window=10, min_periods=5).cov(\n x, pairwise=False),\n lambda x: x.rolling(window=10, min_periods=5).corr(\n x, pairwise=False),\n lambda x: x.rolling(window=10, min_periods=5).max(),\n lambda x: x.rolling(window=10, min_periods=5).min(),\n lambda x: x.rolling(window=10, min_periods=5).sum(),\n lambda x: x.rolling(window=10, min_periods=5).mean(),\n lambda x: x.rolling(window=10, min_periods=5).std(),\n lambda x: x.rolling(window=10, min_periods=5).var(),\n lambda x: x.rolling(window=10, min_periods=5).skew(),\n lambda x: x.rolling(window=10, min_periods=5).kurt(),\n lambda x: x.rolling(\n window=10, min_periods=5).quantile(0.5),\n lambda x: x.rolling(window=10, min_periods=5).median(),\n lambda x: x.rolling(window=10, min_periods=5).apply(sum),\n lambda x: x.rolling(win_type='boxcar',\n window=10, min_periods=5).mean(),\n ]\n for f in functions:\n try:\n s_result = f(s)\n tm.assert_series_equal(s_result, s_expected)\n\n df1_result = f(df1)\n tm.assert_frame_equal(df1_result, df1_expected)\n\n df2_result = f(df2)\n tm.assert_frame_equal(df2_result, df2_expected)\n except (ImportError):\n\n # scipy needed for rolling_window\n continue\n\n def test_moment_functions_zero_length_pairwise(self):\n\n df1 = 
DataFrame()\n df1_expected = df1\n df2 = DataFrame(columns=Index(['a'], name='foo'),\n index=Index([], name='bar'))\n df2['a'] = df2['a'].astype('float64')\n\n df1_expected = DataFrame(\n index=pd.MultiIndex.from_product([df1.index, df1.columns]),\n columns=Index([]))\n df2_expected = DataFrame(\n index=pd.MultiIndex.from_product([df2.index, df2.columns],\n names=['bar', 'foo']),\n columns=Index(['a'], name='foo'),\n dtype='float64')\n\n functions = [lambda x: (x.expanding(min_periods=5)\n .cov(x, pairwise=True)),\n lambda x: (x.expanding(min_periods=5)\n .corr(x, pairwise=True)),\n lambda x: (x.rolling(window=10, min_periods=5)\n .cov(x, pairwise=True)),\n lambda x: (x.rolling(window=10, min_periods=5)\n .corr(x, pairwise=True)),\n ]\n for f in functions:\n df1_result = f(df1)\n tm.assert_frame_equal(df1_result, df1_expected)\n\n df2_result = f(df2)\n tm.assert_frame_equal(df2_result, df2_expected)\n\n def test_expanding_cov_pairwise_diff_length(self):\n # GH 7512\n df1 = DataFrame([[1, 5], [3, 2], [3, 9]],\n columns=Index(['A', 'B'], name='foo'))\n df1a = DataFrame([[1, 5], [3, 9]],\n index=[0, 2],\n columns=Index(['A', 'B'], name='foo'))\n df2 = DataFrame([[5, 6], [None, None], [2, 1]],\n columns=Index(['X', 'Y'], name='foo'))\n df2a = DataFrame([[5, 6], [2, 1]],\n index=[0, 2],\n columns=Index(['X', 'Y'], name='foo'))\n # TODO: xref gh-15826\n # .loc is not preserving the names\n result1 = df1.expanding().cov(df2a, pairwise=True).loc[2]\n result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]\n result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]\n result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]\n expected = DataFrame([[-3.0, -6.0], [-5.0, -10.0]],\n columns=Index(['A', 'B'], name='foo'),\n index=Index(['X', 'Y'], name='foo'))\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, expected)\n tm.assert_frame_equal(result3, expected)\n tm.assert_frame_equal(result4, expected)\n\n def test_expanding_corr_pairwise_diff_length(self):\n # GH 7512\n df1 = DataFrame([[1, 2], [3, 2], [3, 4]],\n columns=['A', 'B'],\n index=Index(range(3), name='bar'))\n df1a = DataFrame([[1, 2], [3, 4]],\n index=Index([0, 2], name='bar'),\n columns=['A', 'B'])\n df2 = DataFrame([[5, 6], [None, None], [2, 1]],\n columns=['X', 'Y'],\n index=Index(range(3), name='bar'))\n df2a = DataFrame([[5, 6], [2, 1]],\n index=Index([0, 2], name='bar'),\n columns=['X', 'Y'])\n result1 = df1.expanding().corr(df2, pairwise=True).loc[2]\n result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]\n result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]\n result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]\n expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]],\n columns=['A', 'B'],\n index=Index(['X', 'Y']))\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, expected)\n tm.assert_frame_equal(result3, expected)\n tm.assert_frame_equal(result4, expected)\n\n def test_rolling_skew_edge_cases(self):\n\n all_nan = Series([np.NaN] * 5)\n\n # yields all NaN (0 variance)\n d = Series([1] * 5)\n x = d.rolling(window=5).skew()\n tm.assert_series_equal(all_nan, x)\n\n # yields all NaN (window too small)\n d = Series(np.random.randn(5))\n x = d.rolling(window=2).skew()\n tm.assert_series_equal(all_nan, x)\n\n # yields [NaN, NaN, NaN, 0.177994, 1.548824]\n d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401\n ])\n expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])\n x = d.rolling(window=4).skew()\n tm.assert_series_equal(expected, x)\n\n 
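    # A minimal standalone sketch (an assumed helper, not part of the original
    # suite): it restates why the skew edge cases above are all-NaN. Sample
    # skewness needs at least three observations and nonzero variance, so a
    # constant window or a window shorter than 3 cannot be computed.
    def _sketch_rolling_skew_edge_cases(self):
        # constant data -> zero variance -> skewness undefined -> all NaN
        assert Series([1.0] * 5).rolling(window=5).skew().isnull().all()
        # window of size 2 -> fewer than 3 observations -> all NaN
        assert Series(np.random.randn(5)).rolling(window=2).skew().isnull().all()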
def test_rolling_kurt_edge_cases(self):\n\n all_nan = Series([np.NaN] * 5)\n\n # yields all NaN (0 variance)\n d = Series([1] * 5)\n x = d.rolling(window=5).kurt()\n tm.assert_series_equal(all_nan, x)\n\n # yields all NaN (window too small)\n d = Series(np.random.randn(5))\n x = d.rolling(window=3).kurt()\n tm.assert_series_equal(all_nan, x)\n\n # yields [NaN, NaN, NaN, 1.224307, 2.671499]\n d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401\n ])\n expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])\n x = d.rolling(window=4).kurt()\n tm.assert_series_equal(expected, x)\n\n def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,\n has_time_rule=True, preserve_nan=True):\n result = func(self.arr)\n\n tm.assert_almost_equal(result[10], static_comp(self.arr[:11]))\n\n if preserve_nan:\n assert (np.isnan(result[self._nan_locs]).all())\n\n arr = randn(50)\n\n if has_min_periods:\n result = func(arr, min_periods=30)\n assert (np.isnan(result[:29]).all())\n tm.assert_almost_equal(result[-1], static_comp(arr[:50]))\n\n # min_periods is working correctly\n result = func(arr, min_periods=15)\n assert np.isnan(result[13])\n assert not np.isnan(result[14])\n\n arr2 = randn(20)\n result = func(arr2, min_periods=5)\n assert isna(result[3])\n assert notna(result[4])\n\n # min_periods=0\n result0 = func(arr, min_periods=0)\n result1 = func(arr, min_periods=1)\n tm.assert_almost_equal(result0, result1)\n else:\n result = func(arr)\n tm.assert_almost_equal(result[-1], static_comp(arr[:50]))\n\n def _check_expanding_structures(self, func):\n series_result = func(self.series)\n assert isinstance(series_result, Series)\n frame_result = func(self.frame)\n assert type(frame_result) == DataFrame\n\n def _check_expanding(self, func, static_comp, has_min_periods=True,\n has_time_rule=True, preserve_nan=True):\n with warnings.catch_warnings(record=True):\n self._check_expanding_ndarray(func, static_comp,\n has_min_periods=has_min_periods,\n has_time_rule=has_time_rule,\n preserve_nan=preserve_nan)\n with warnings.catch_warnings(record=True):\n self._check_expanding_structures(func)\n\n def test_rolling_max_gh6297(self):\n \"\"\"Replicate result expected in GH #6297\"\"\"\n\n indices = [datetime(1975, 1, i) for i in range(1, 6)]\n # So that we can have 2 datapoints on one of the days\n indices.append(datetime(1975, 1, 3, 6, 0))\n series = Series(range(1, 7), index=indices)\n # Use floats instead of ints as values\n series = series.map(lambda x: float(x))\n # Sort chronologically\n series = series.sort_index()\n\n expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],\n index=[datetime(1975, 1, i, 0) for i in range(1, 6)])\n with catch_warnings(record=True):\n x = series.rolling(window=1, freq='D').max()\n tm.assert_series_equal(expected, x)\n\n def test_rolling_max_how_resample(self):\n\n indices = [datetime(1975, 1, i) for i in range(1, 6)]\n # So that we can have 3 datapoints on last day (4, 10, and 20)\n indices.append(datetime(1975, 1, 5, 1))\n indices.append(datetime(1975, 1, 5, 2))\n series = Series(list(range(0, 5)) + [10, 20], index=indices)\n # Use floats instead of ints as values\n series = series.map(lambda x: float(x))\n # Sort chronologically\n series = series.sort_index()\n\n # Default how should be max\n expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],\n index=[datetime(1975, 1, i, 0) for i in range(1, 6)])\n with catch_warnings(record=True):\n x = series.rolling(window=1, freq='D').max()\n tm.assert_series_equal(expected, x)\n\n # Now specify median (10.0)\n 
expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],\n index=[datetime(1975, 1, i, 0) for i in range(1, 6)])\n with catch_warnings(record=True):\n x = series.rolling(window=1, freq='D').max(how='median')\n tm.assert_series_equal(expected, x)\n\n # Now specify mean (4+10+20)/3\n v = (4.0 + 10.0 + 20.0) / 3.0\n expected = Series([0.0, 1.0, 2.0, 3.0, v],\n index=[datetime(1975, 1, i, 0) for i in range(1, 6)])\n with catch_warnings(record=True):\n x = series.rolling(window=1, freq='D').max(how='mean')\n tm.assert_series_equal(expected, x)\n\n def test_rolling_min_how_resample(self):\n\n indices = [datetime(1975, 1, i) for i in range(1, 6)]\n # So that we can have 3 datapoints on last day (4, 10, and 20)\n indices.append(datetime(1975, 1, 5, 1))\n indices.append(datetime(1975, 1, 5, 2))\n series = Series(list(range(0, 5)) + [10, 20], index=indices)\n # Use floats instead of ints as values\n series = series.map(lambda x: float(x))\n # Sort chronologically\n series = series.sort_index()\n\n # Default how should be min\n expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],\n index=[datetime(1975, 1, i, 0) for i in range(1, 6)])\n with catch_warnings(record=True):\n r = series.rolling(window=1, freq='D')\n tm.assert_series_equal(expected, r.min())\n\n def test_rolling_median_how_resample(self):\n\n indices = [datetime(1975, 1, i) for i in range(1, 6)]\n # So that we can have 3 datapoints on last day (4, 10, and 20)\n indices.append(datetime(1975, 1, 5, 1))\n indices.append(datetime(1975, 1, 5, 2))\n series = Series(list(range(0, 5)) + [10, 20], index=indices)\n # Use floats instead of ints as values\n series = series.map(lambda x: float(x))\n # Sort chronologically\n series = series.sort_index()\n\n # Default how should be median\n expected = Series([0.0, 1.0, 2.0, 3.0, 10],\n index=[datetime(1975, 1, i, 0) for i in range(1, 6)])\n with catch_warnings(record=True):\n x = series.rolling(window=1, freq='D').median()\n tm.assert_series_equal(expected, x)\n\n def test_rolling_median_memory_error(self):\n # GH11722\n n = 20000\n Series(np.random.randn(n)).rolling(window=2, center=False).median()\n Series(np.random.randn(n)).rolling(window=2, center=False).median()\n\n def test_rolling_min_max_numeric_types(self):\n\n # GH12373\n types_test = [np.dtype(\"f{}\".format(width)) for width in [4, 8]]\n types_test.extend([np.dtype(\"{}{}\".format(sign, width))\n for width in [1, 2, 4, 8] for sign in \"ui\"])\n for data_type in types_test:\n # Just testing that these don't throw exceptions and that\n # the return type is float64. 
Other tests will cover quantitative\n # correctness\n result = (DataFrame(np.arange(20, dtype=data_type))\n .rolling(window=5).max())\n assert result.dtypes[0] == np.dtype(\"f8\")\n result = (DataFrame(np.arange(20, dtype=data_type))\n .rolling(window=5).min())\n assert result.dtypes[0] == np.dtype(\"f8\")\n\n\nclass TestGrouperGrouping(object):\n\n def setup_method(self, method):\n self.series = Series(np.arange(10))\n self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,\n 'B': np.arange(40)})\n\n def test_mutated(self):\n\n def f():\n self.frame.groupby('A', foo=1)\n pytest.raises(TypeError, f)\n\n g = self.frame.groupby('A')\n assert not g.mutated\n g = self.frame.groupby('A', mutated=True)\n assert g.mutated\n\n def test_getitem(self):\n g = self.frame.groupby('A')\n g_mutated = self.frame.groupby('A', mutated=True)\n\n expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())\n\n result = g.rolling(2).mean().B\n tm.assert_series_equal(result, expected)\n\n result = g.rolling(2).B.mean()\n tm.assert_series_equal(result, expected)\n\n result = g.B.rolling(2).mean()\n tm.assert_series_equal(result, expected)\n\n result = self.frame.B.groupby(self.frame.A).rolling(2).mean()\n tm.assert_series_equal(result, expected)\n\n def test_getitem_multiple(self):\n\n # GH 13174\n g = self.frame.groupby('A')\n r = g.rolling(2)\n g_mutated = self.frame.groupby('A', mutated=True)\n expected = g_mutated.B.apply(lambda x: x.rolling(2).count())\n\n result = r.B.count()\n tm.assert_series_equal(result, expected)\n\n result = r.B.count()\n tm.assert_series_equal(result, expected)\n\n def test_rolling(self):\n g = self.frame.groupby('A')\n r = g.rolling(window=4)\n\n for f in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:\n\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.rolling(4), f)())\n tm.assert_frame_equal(result, expected)\n\n for f in ['std', 'var']:\n result = getattr(r, f)(ddof=1)\n expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))\n tm.assert_frame_equal(result, expected)\n\n result = r.quantile(0.5)\n expected = g.apply(lambda x: x.rolling(4).quantile(0.5))\n tm.assert_frame_equal(result, expected)\n\n def test_rolling_corr_cov(self):\n g = self.frame.groupby('A')\n r = g.rolling(window=4)\n\n for f in ['corr', 'cov']:\n result = getattr(r, f)(self.frame)\n\n def func(x):\n return getattr(x.rolling(4), f)(self.frame)\n expected = g.apply(func)\n tm.assert_frame_equal(result, expected)\n\n result = getattr(r.B, f)(pairwise=True)\n\n def func(x):\n return getattr(x.B.rolling(4), f)(pairwise=True)\n expected = g.apply(func)\n tm.assert_series_equal(result, expected)\n\n def test_rolling_apply(self):\n g = self.frame.groupby('A')\n r = g.rolling(window=4)\n\n # reduction\n result = r.apply(lambda x: x.sum())\n expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum()))\n tm.assert_frame_equal(result, expected)\n\n def test_expanding(self):\n g = self.frame.groupby('A')\n r = g.expanding()\n\n for f in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:\n\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.expanding(), f)())\n tm.assert_frame_equal(result, expected)\n\n for f in ['std', 'var']:\n result = getattr(r, f)(ddof=0)\n expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))\n tm.assert_frame_equal(result, expected)\n\n result = r.quantile(0.5)\n expected = g.apply(lambda x: x.expanding().quantile(0.5))\n tm.assert_frame_equal(result, expected)\n\n def test_expanding_corr_cov(self):\n g = 
self.frame.groupby('A')\n r = g.expanding()\n\n for f in ['corr', 'cov']:\n result = getattr(r, f)(self.frame)\n\n def func(x):\n return getattr(x.expanding(), f)(self.frame)\n expected = g.apply(func)\n tm.assert_frame_equal(result, expected)\n\n result = getattr(r.B, f)(pairwise=True)\n\n def func(x):\n return getattr(x.B.expanding(), f)(pairwise=True)\n expected = g.apply(func)\n tm.assert_series_equal(result, expected)\n\n def test_expanding_apply(self):\n g = self.frame.groupby('A')\n r = g.expanding()\n\n # reduction\n result = r.apply(lambda x: x.sum())\n expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum()))\n tm.assert_frame_equal(result, expected)\n\n\nclass TestRollingTS(object):\n\n # rolling time-series friendly\n # xref GH13327\n\n def setup_method(self, method):\n\n self.regular = DataFrame({'A': pd.date_range('20130101',\n periods=5,\n freq='s'),\n 'B': range(5)}).set_index('A')\n\n self.ragged = DataFrame({'B': range(5)})\n self.ragged.index = [Timestamp('20130101 09:00:00'),\n Timestamp('20130101 09:00:02'),\n Timestamp('20130101 09:00:03'),\n Timestamp('20130101 09:00:05'),\n Timestamp('20130101 09:00:06')]\n\n def test_doc_string(self):\n\n df = DataFrame({'B': [0, 1, 2, np.nan, 4]},\n index=[Timestamp('20130101 09:00:00'),\n Timestamp('20130101 09:00:02'),\n Timestamp('20130101 09:00:03'),\n Timestamp('20130101 09:00:05'),\n Timestamp('20130101 09:00:06')])\n df\n df.rolling('2s').sum()\n\n def test_valid(self):\n\n df = self.regular\n\n # not a valid freq\n with pytest.raises(ValueError):\n df.rolling(window='foobar')\n\n # not a datetimelike index\n with pytest.raises(ValueError):\n df.reset_index().rolling(window='foobar')\n\n # non-fixed freqs\n for freq in ['2MS', pd.offsets.MonthBegin(2)]:\n with pytest.raises(ValueError):\n df.rolling(window=freq)\n\n for freq in ['1D', pd.offsets.Day(2), '2ms']:\n df.rolling(window=freq)\n\n # non-integer min_periods\n for minp in [1.0, 'foo', np.array([1, 2, 3])]:\n with pytest.raises(ValueError):\n df.rolling(window='1D', min_periods=minp)\n\n # center is not implemented\n with pytest.raises(NotImplementedError):\n df.rolling(window='1D', center=True)\n\n def test_on(self):\n\n df = self.regular\n\n # not a valid column\n with pytest.raises(ValueError):\n df.rolling(window='2s', on='foobar')\n\n # column is valid\n df = df.copy()\n df['C'] = pd.date_range('20130101', periods=len(df))\n df.rolling(window='2d', on='C').sum()\n\n # invalid columns\n with pytest.raises(ValueError):\n df.rolling(window='2d', on='B')\n\n # ok even though on non-selected\n df.rolling(window='2d', on='C').B.sum()\n\n def test_monotonic_on(self):\n\n # on/index must be monotonic\n df = DataFrame({'A': pd.date_range('20130101',\n periods=5,\n freq='s'),\n 'B': range(5)})\n\n assert df.A.is_monotonic\n df.rolling('2s', on='A').sum()\n\n df = df.set_index('A')\n assert df.index.is_monotonic\n df.rolling('2s').sum()\n\n # non-monotonic\n df.index = reversed(df.index.tolist())\n assert not df.index.is_monotonic\n\n with pytest.raises(ValueError):\n df.rolling('2s').sum()\n\n df = df.reset_index()\n with pytest.raises(ValueError):\n df.rolling('2s', on='A').sum()\n\n def test_frame_on(self):\n\n df = DataFrame({'B': range(5),\n 'C': pd.date_range('20130101 09:00:00',\n periods=5,\n freq='3s')})\n\n df['A'] = [Timestamp('20130101 09:00:00'),\n Timestamp('20130101 09:00:02'),\n Timestamp('20130101 09:00:03'),\n Timestamp('20130101 09:00:05'),\n Timestamp('20130101 09:00:06')]\n\n # we are doing simulating using 'on'\n expected = 
(df.set_index('A')\n .rolling('2s')\n .B\n .sum()\n .reset_index(drop=True)\n )\n\n result = (df.rolling('2s', on='A')\n .B\n .sum()\n )\n tm.assert_series_equal(result, expected)\n\n # test as a frame\n # we should be ignoring the 'on' as an aggregation column\n # note that the expected is setting, computing, and reseting\n # so the columns need to be switched compared\n # to the actual result where they are ordered as in the\n # original\n expected = (df.set_index('A')\n .rolling('2s')[['B']]\n .sum()\n .reset_index()[['B', 'A']]\n )\n\n result = (df.rolling('2s', on='A')[['B']]\n .sum()\n )\n tm.assert_frame_equal(result, expected)\n\n def test_frame_on2(self):\n\n # using multiple aggregation columns\n df = DataFrame({'A': [0, 1, 2, 3, 4],\n 'B': [0, 1, 2, np.nan, 4],\n 'C': pd.Index([pd.Timestamp('20130101 09:00:00'),\n pd.Timestamp('20130101 09:00:02'),\n pd.Timestamp('20130101 09:00:03'),\n pd.Timestamp('20130101 09:00:05'),\n pd.Timestamp('20130101 09:00:06')])},\n columns=['A', 'C', 'B'])\n\n expected1 = DataFrame({'A': [0., 1, 3, 3, 7],\n 'B': [0, 1, 3, np.nan, 4],\n 'C': df['C']},\n columns=['A', 'C', 'B'])\n\n result = df.rolling('2s', on='C').sum()\n expected = expected1\n tm.assert_frame_equal(result, expected)\n\n expected = Series([0, 1, 3, np.nan, 4], name='B')\n result = df.rolling('2s', on='C').B.sum()\n tm.assert_series_equal(result, expected)\n\n expected = expected1[['A', 'B', 'C']]\n result = df.rolling('2s', on='C')[['A', 'B', 'C']].sum()\n tm.assert_frame_equal(result, expected)\n\n def test_basic_regular(self):\n\n df = self.regular.copy()\n\n df.index = pd.date_range('20130101', periods=5, freq='D')\n expected = df.rolling(window=1, min_periods=1).sum()\n result = df.rolling(window='1D').sum()\n tm.assert_frame_equal(result, expected)\n\n df.index = pd.date_range('20130101', periods=5, freq='2D')\n expected = df.rolling(window=1, min_periods=1).sum()\n result = df.rolling(window='2D', min_periods=1).sum()\n tm.assert_frame_equal(result, expected)\n\n expected = df.rolling(window=1, min_periods=1).sum()\n result = df.rolling(window='2D', min_periods=1).sum()\n tm.assert_frame_equal(result, expected)\n\n expected = df.rolling(window=1).sum()\n result = df.rolling(window='2D').sum()\n tm.assert_frame_equal(result, expected)\n\n def test_min_periods(self):\n\n # compare for min_periods\n df = self.regular\n\n # these slightly different\n expected = df.rolling(2, min_periods=1).sum()\n result = df.rolling('2s').sum()\n tm.assert_frame_equal(result, expected)\n\n expected = df.rolling(2, min_periods=1).sum()\n result = df.rolling('2s', min_periods=1).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_closed(self):\n\n # xref GH13965\n\n df = DataFrame({'A': [1] * 5},\n index=[pd.Timestamp('20130101 09:00:01'),\n pd.Timestamp('20130101 09:00:02'),\n pd.Timestamp('20130101 09:00:03'),\n pd.Timestamp('20130101 09:00:04'),\n pd.Timestamp('20130101 09:00:06')])\n\n # closed must be 'right', 'left', 'both', 'neither'\n with pytest.raises(ValueError):\n self.regular.rolling(window='2s', closed=\"blabla\")\n\n expected = df.copy()\n expected[\"A\"] = [1.0, 2, 2, 2, 1]\n result = df.rolling('2s', closed='right').sum()\n tm.assert_frame_equal(result, expected)\n\n # default should be 'right'\n result = df.rolling('2s').sum()\n tm.assert_frame_equal(result, expected)\n\n expected = df.copy()\n expected[\"A\"] = [1.0, 2, 3, 3, 2]\n result = df.rolling('2s', closed='both').sum()\n tm.assert_frame_equal(result, expected)\n\n expected = df.copy()\n expected[\"A\"] = 
[np.nan, 1.0, 2, 2, 1]\n result = df.rolling('2s', closed='left').sum()\n tm.assert_frame_equal(result, expected)\n\n expected = df.copy()\n expected[\"A\"] = [np.nan, 1.0, 1, 1, np.nan]\n result = df.rolling('2s', closed='neither').sum()\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_sum(self):\n\n df = self.ragged\n result = df.rolling(window='1s', min_periods=1).sum()\n expected = df.copy()\n expected['B'] = [0.0, 1, 2, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=1).sum()\n expected = df.copy()\n expected['B'] = [0.0, 1, 3, 3, 7]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=2).sum()\n expected = df.copy()\n expected['B'] = [np.nan, np.nan, 3, np.nan, 7]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='3s', min_periods=1).sum()\n expected = df.copy()\n expected['B'] = [0.0, 1, 3, 5, 7]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='3s').sum()\n expected = df.copy()\n expected['B'] = [0.0, 1, 3, 5, 7]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='4s', min_periods=1).sum()\n expected = df.copy()\n expected['B'] = [0.0, 1, 3, 6, 9]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='4s', min_periods=3).sum()\n expected = df.copy()\n expected['B'] = [np.nan, np.nan, 3, 6, 9]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='5s', min_periods=1).sum()\n expected = df.copy()\n expected['B'] = [0.0, 1, 3, 6, 10]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_mean(self):\n\n df = self.ragged\n result = df.rolling(window='1s', min_periods=1).mean()\n expected = df.copy()\n expected['B'] = [0.0, 1, 2, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=1).mean()\n expected = df.copy()\n expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_median(self):\n\n df = self.ragged\n result = df.rolling(window='1s', min_periods=1).median()\n expected = df.copy()\n expected['B'] = [0.0, 1, 2, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=1).median()\n expected = df.copy()\n expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_quantile(self):\n\n df = self.ragged\n result = df.rolling(window='1s', min_periods=1).quantile(0.5)\n expected = df.copy()\n expected['B'] = [0.0, 1, 2, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=1).quantile(0.5)\n expected = df.copy()\n expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_std(self):\n\n df = self.ragged\n result = df.rolling(window='1s', min_periods=1).std(ddof=0)\n expected = df.copy()\n expected['B'] = [0.0] * 5\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='1s', min_periods=1).std(ddof=1)\n expected = df.copy()\n expected['B'] = [np.nan] * 5\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='3s', min_periods=1).std(ddof=0)\n expected = df.copy()\n expected['B'] = [0.0] + [0.5] * 4\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='5s', min_periods=1).std(ddof=1)\n expected = df.copy()\n expected['B'] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_var(self):\n\n df = self.ragged\n result = 
df.rolling(window='1s', min_periods=1).var(ddof=0)\n expected = df.copy()\n expected['B'] = [0.0] * 5\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='1s', min_periods=1).var(ddof=1)\n expected = df.copy()\n expected['B'] = [np.nan] * 5\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='3s', min_periods=1).var(ddof=0)\n expected = df.copy()\n expected['B'] = [0.0] + [0.25] * 4\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='5s', min_periods=1).var(ddof=1)\n expected = df.copy()\n expected['B'] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_skew(self):\n\n df = self.ragged\n result = df.rolling(window='3s', min_periods=1).skew()\n expected = df.copy()\n expected['B'] = [np.nan] * 5\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='5s', min_periods=1).skew()\n expected = df.copy()\n expected['B'] = [np.nan] * 2 + [0.0, 0.0, 0.0]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_kurt(self):\n\n df = self.ragged\n result = df.rolling(window='3s', min_periods=1).kurt()\n expected = df.copy()\n expected['B'] = [np.nan] * 5\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='5s', min_periods=1).kurt()\n expected = df.copy()\n expected['B'] = [np.nan] * 4 + [-1.2]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_count(self):\n\n df = self.ragged\n result = df.rolling(window='1s', min_periods=1).count()\n expected = df.copy()\n expected['B'] = [1.0, 1, 1, 1, 1]\n tm.assert_frame_equal(result, expected)\n\n df = self.ragged\n result = df.rolling(window='1s').count()\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=1).count()\n expected = df.copy()\n expected['B'] = [1.0, 1, 2, 1, 2]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=2).count()\n expected = df.copy()\n expected['B'] = [np.nan, np.nan, 2, np.nan, 2]\n tm.assert_frame_equal(result, expected)\n\n def test_regular_min(self):\n\n df = DataFrame({'A': pd.date_range('20130101',\n periods=5,\n freq='s'),\n 'B': [0.0, 1, 2, 3, 4]}).set_index('A')\n result = df.rolling('1s').min()\n expected = df.copy()\n expected['B'] = [0.0, 1, 2, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({'A': pd.date_range('20130101',\n periods=5,\n freq='s'),\n 'B': [5, 4, 3, 4, 5]}).set_index('A')\n\n tm.assert_frame_equal(result, expected)\n result = df.rolling('2s').min()\n expected = df.copy()\n expected['B'] = [5.0, 4, 3, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling('5s').min()\n expected = df.copy()\n expected['B'] = [5.0, 4, 3, 3, 3]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_min(self):\n\n df = self.ragged\n\n result = df.rolling(window='1s', min_periods=1).min()\n expected = df.copy()\n expected['B'] = [0.0, 1, 2, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=1).min()\n expected = df.copy()\n expected['B'] = [0.0, 1, 1, 3, 3]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='5s', min_periods=1).min()\n expected = df.copy()\n expected['B'] = [0.0, 0, 0, 1, 1]\n tm.assert_frame_equal(result, expected)\n\n def test_perf_min(self):\n\n N = 10000\n\n dfp = DataFrame({'B': np.random.randn(N)},\n index=pd.date_range('20130101',\n periods=N,\n freq='s'))\n expected = dfp.rolling(2, min_periods=1).min()\n result = dfp.rolling('2s').min()\n assert 
((result - expected) < 0.01).all().bool()\n\n expected = dfp.rolling(200, min_periods=1).min()\n result = dfp.rolling('200s').min()\n assert ((result - expected) < 0.01).all().bool()\n\n def test_ragged_max(self):\n\n df = self.ragged\n\n result = df.rolling(window='1s', min_periods=1).max()\n expected = df.copy()\n expected['B'] = [0.0, 1, 2, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=1).max()\n expected = df.copy()\n expected['B'] = [0.0, 1, 2, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='5s', min_periods=1).max()\n expected = df.copy()\n expected['B'] = [0.0, 1, 2, 3, 4]\n tm.assert_frame_equal(result, expected)\n\n def test_ragged_apply(self):\n\n df = self.ragged\n\n f = lambda x: 1\n result = df.rolling(window='1s', min_periods=1).apply(f)\n expected = df.copy()\n expected['B'] = 1.\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='2s', min_periods=1).apply(f)\n expected = df.copy()\n expected['B'] = 1.\n tm.assert_frame_equal(result, expected)\n\n result = df.rolling(window='5s', min_periods=1).apply(f)\n expected = df.copy()\n expected['B'] = 1.\n tm.assert_frame_equal(result, expected)\n\n def test_all(self):\n\n # simple comparision of integer vs time-based windowing\n df = self.regular * 2\n er = df.rolling(window=1)\n r = df.rolling(window='1s')\n\n for f in ['sum', 'mean', 'count', 'median', 'std',\n 'var', 'kurt', 'skew', 'min', 'max']:\n\n result = getattr(r, f)()\n expected = getattr(er, f)()\n tm.assert_frame_equal(result, expected)\n\n result = r.quantile(0.5)\n expected = er.quantile(0.5)\n tm.assert_frame_equal(result, expected)\n\n result = r.apply(lambda x: 1)\n expected = er.apply(lambda x: 1)\n tm.assert_frame_equal(result, expected)\n\n def test_all2(self):\n\n # more sophisticated comparision of integer vs.\n # time-based windowing\n df = DataFrame({'B': np.arange(50)},\n index=pd.date_range('20130101',\n periods=50, freq='H')\n )\n # in-range data\n dft = df.between_time(\"09:00\", \"16:00\")\n\n r = dft.rolling(window='5H')\n\n for f in ['sum', 'mean', 'count', 'median', 'std',\n 'var', 'kurt', 'skew', 'min', 'max']:\n\n result = getattr(r, f)()\n\n # we need to roll the days separately\n # to compare with a time-based roll\n # finally groupby-apply will return a multi-index\n # so we need to drop the day\n def agg_by_day(x):\n x = x.between_time(\"09:00\", \"16:00\")\n return getattr(x.rolling(5, min_periods=1), f)()\n expected = df.groupby(df.index.day).apply(\n agg_by_day).reset_index(level=0, drop=True)\n\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_monotonic(self):\n\n # GH 15130\n # we don't need to validate monotonicity when grouping\n\n data = [\n ['David', '1/1/2015', 100], ['David', '1/5/2015', 500],\n ['David', '5/30/2015', 50], ['David', '7/25/2015', 50],\n ['Ryan', '1/4/2014', 100], ['Ryan', '1/19/2015', 500],\n ['Ryan', '3/31/2016', 50], ['Joe', '7/1/2015', 100],\n ['Joe', '9/9/2015', 500], ['Joe', '10/15/2015', 50]]\n\n df = pd.DataFrame(data=data, columns=['name', 'date', 'amount'])\n df['date'] = pd.to_datetime(df['date'])\n\n expected = df.set_index('date').groupby('name').apply(\n lambda x: x.rolling('180D')['amount'].sum())\n result = df.groupby('name').rolling('180D', on='date')['amount'].sum()\n tm.assert_series_equal(result, expected)\n\n def test_non_monotonic(self):\n # GH 13966 (similar to #15130, closed by #15175)\n\n dates = pd.date_range(start='2016-01-01 09:30:00',\n periods=20, freq='s')\n df = 
pd.DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,\n 'B': np.concatenate((dates, dates)),\n 'C': np.arange(40)})\n\n result = df.groupby('A').rolling('4s', on='B').C.mean()\n expected = df.set_index('B').groupby('A').apply(\n lambda x: x.rolling('4s')['C'].mean())\n tm.assert_series_equal(result, expected)\n\n df2 = df.sort_values('B')\n result = df2.groupby('A').rolling('4s', on='B').C.mean()\n tm.assert_series_equal(result, expected)\n\n def test_rolling_cov_offset(self):\n # GH16058\n\n idx = pd.date_range('2017-01-01', periods=24, freq='1h')\n ss = pd.Series(np.arange(len(idx)), index=idx)\n\n result = ss.rolling('2h').cov()\n expected = pd.Series([np.nan] + [0.5 for _ in range(len(idx) - 1)],\n index=idx)\n tm.assert_series_equal(result, expected)\n\n expected2 = ss.rolling(2, min_periods=1).cov()\n tm.assert_series_equal(result, expected2)\n\n result = ss.rolling('3h').cov()\n expected = pd.Series([np.nan, 0.5] +\n [1.0 for _ in range(len(idx) - 2)],\n index=idx)\n tm.assert_series_equal(result, expected)\n\n expected2 = ss.rolling(3, min_periods=1).cov()\n tm.assert_series_equal(result, expected2)\n", "\"\"\"\nTODO: implement Images2Neibs.infer_shape() methods\n\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nimport numpy\n\nimport theano\nfrom theano import Op, Apply\nimport theano.tensor as T\nfrom theano.gradient import grad_not_implemented\nfrom theano.gradient import grad_undefined\n\n\nclass Images2Neibs(Op):\n \"\"\"\n Reshapes the input as a 2D tensor where each row is an pooling\n example.\n\n Parameters\n ----------\n mode : {'valid', 'ignore_borders', 'wrap_centered'}\n - 'valid' :\n Requires an input that is a multiple of the pooling factor\n (in each direction).\n - 'ignore_borders' :\n Same as valid, but will ignore the borders if the shape(s)\n of the input is not a multiple of the pooling factor(s).\n - 'wrap_centered' :\n ?? TODO comment\n\n \"\"\"\n\n __props__ = (\"mode\",)\n\n def __init__(self, mode='valid'):\n if mode not in ['valid', 'wrap_centered', 'ignore_borders']:\n raise NotImplementedError(\"Only the mode valid, ignore_borders\"\n \" and wrap_centered have been\"\n \" implemented for the op Images2Neibs\")\n self.mode = mode\n\n def __str__(self):\n return self.__class__.__name__ + \"{%s}\" % self.mode\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if not hasattr(self, \"mode\"):\n self.mode = 'valid'\n\n def make_node(self, ten4, neib_shape, neib_step=None):\n \"\"\"\n Parameters\n ----------\n ten4 : a list of lists of images\n ten4 is of shape (list 1 dim, list 2 dim, row, col).\n neib_shape\n (r,c) where r is the height of the neighborhood in rows and c is\n the width of the neighborhood in columns.\n neib_step\n (dr,dc) where dr is the number of rows to skip between patch and dc\n is the number of columns. When None, this is the same as neib_shape\n (patch are disjoint).\n\n Returns\n -------\n matrix\n A 2D matrix, written using the following pattern::\n\n idx = 0\n for i in xrange(list 1 dim)\n for j in xrange(list 2 dim)\n for k in <image column coordinates>\n for l in <image row coordinates>\n output[idx,:]\n = flattened version of ten4[i,j,l:l+r,k:k+c]\n idx += 1\n\n .. 
note:: The op isn't necessarily implemented internally with these\n for loops, they're just the easiest way to describe the output\n pattern.\n\n \"\"\"\n ten4 = T.as_tensor_variable(ten4)\n neib_shape = T.as_tensor_variable(neib_shape)\n if neib_step is None:\n neib_step = neib_shape\n else:\n neib_step = T.as_tensor_variable(neib_step)\n\n assert ten4.ndim == 4\n assert neib_shape.ndim == 1\n assert neib_step.ndim == 1\n\n return Apply(self, [ten4, neib_shape, neib_step],\n [T.matrix(dtype=ten4.type.dtype)])\n\n def grad(self, inp, grads):\n x, neib_shape, neib_step = inp\n gz, = grads\n\n if self.mode in ['valid', 'ignore_borders']:\n if (neib_shape is neib_step or\n neib_shape == neib_step or\n # Theano Constant == do not compare the data\n # the equals function do that.\n (hasattr(neib_shape, \"equals\") and\n neib_shape.equals(neib_step))):\n return [neibs2images(gz, neib_shape, x.shape, mode=self.mode),\n grad_undefined(self, 1, neib_shape),\n grad_undefined(self, 2, neib_step)]\n\n if self.mode in ['valid']:\n # Iterate over neighborhood positions, summing contributions.\n def pos2map(pidx, pgz, prior_result, neib_shape, neib_step):\n '''\n Helper function that adds gradient contribution from a single\n neighborhood position i,j.\n pidx = Index of position within neighborhood.\n pgz = Gradient of shape (batch_size*num_channels*neibs)\n prior_result = Shape (batch_size, num_channnels, rows, cols)\n neib_shape = Number of rows, cols in a neighborhood.\n neib_step = Step sizes from image2neibs.\n '''\n nrows, ncols = neib_shape\n rstep, cstep = neib_step\n batch_size, num_channels, rows, cols = prior_result.shape\n i = pidx // ncols\n j = pidx - (i * ncols)\n # This position does not touch some img pixels in valid mode.\n result_indices = prior_result[:, :,\n i:(rows - nrows + i + 1):rstep,\n j:(cols - ncols + j + 1):cstep]\n newshape = (batch_size, num_channels) + \\\n ((rows - nrows) // rstep + 1,) + \\\n ((cols - ncols) // cstep + 1,)\n return T.inc_subtensor(result_indices, pgz.reshape(newshape))\n indices = T.arange(neib_shape[0] * neib_shape[1])\n pgzs = gz.dimshuffle((1, 0))\n result, _ = theano.scan(fn=pos2map,\n sequences=[indices, pgzs],\n outputs_info=T.zeros(x.shape),\n non_sequences=[neib_shape, neib_step])\n grad_input = result[-1]\n return [grad_input,\n grad_undefined(self, 1, neib_shape),\n grad_undefined(self, 2, neib_step)]\n\n return [grad_not_implemented(self, 0, x),\n grad_undefined(self, 1, neib_shape),\n grad_undefined(self, 2, neib_step)]\n\n def c_code_cache_version(self):\n return (7,)\n\n def perform(self, node, inp, out_):\n ten4, neib_shape, neib_step = inp\n z, = out_\n # GpuImages2Neibs should not run this perform in DebugMode\n if type(self) != Images2Neibs:\n raise theano.gof.utils.MethodNotDefined()\n\n def CEIL_INTDIV(a, b):\n if a % b:\n return (a // b) + 1\n else:\n return a // b\n\n grid_c = -1 # number of patch in height\n grid_d = -1 # number of patch in width\n assert ten4.ndim == 4\n assert neib_shape.ndim == 1\n assert neib_shape.shape[0] == 2\n assert neib_step.ndim == 1\n assert neib_step.shape[0] == 2\n c, d = neib_shape\n step_x, step_y = neib_step\n mode = self.mode\n if step_x <= 0 or step_y <= 0:\n raise ValueError(\n \"neib_step wrong step ; values <= 0. Got \" + str(neib_step))\n if c <= 0 or d <= 0:\n raise ValueError(\n \"neib_shape values <=0. 
Got \" + str(neib_shape))\n\n if mode == \"wrap_centered\":\n if (c % 2 != 1) or (d % 2 != 1):\n raise TypeError(\n \"Images2Neibs:\"\n \" in mode wrap_centered need patch with odd shapes\")\n\n if (ten4.shape[2] < c) or (ten4.shape[3] < d):\n raise TypeError(\n \"Images2Neibs: in wrap_centered mode, don't support\"\n \" image shapes smaller then the patch shapes:\"\n \" neib_shape=(%d,%d), ten4[2:]=[%d,%d]\" %\n (c, d, ten4.shape[2], ten4.shape[3]))\n grid_c = CEIL_INTDIV(ten4.shape[2], step_x)\n grid_d = CEIL_INTDIV(ten4.shape[3], step_y)\n\n elif mode == \"valid\":\n if (ten4.shape[2] < c) or (((ten4.shape[2] - c) % step_x) != 0):\n raise TypeError(\n \"neib_shape[0]=%d, neib_step[0]=%d and\"\n \" ten4.shape[2]=%d not consistent\" %\n (c, step_x, ten4.shape[2]))\n if (ten4.shape[3] < d) or (((ten4.shape[3] - d) % step_y) != 0):\n raise TypeError(\n \"neib_shape[1]=%d, neib_step[1]=%d and\"\n \" ten4.shape[3]=%d not consistent\" %\n (d, step_y, ten4.shape[3]))\n # number of patch in height\n grid_c = 1 + ((ten4.shape[2] - c) // step_x)\n # number of patch in width\n grid_d = 1 + ((ten4.shape[3] - d) // step_y)\n elif mode == \"ignore_borders\":\n # number of patch in height\n grid_c = 1 + ((ten4.shape[2] - c) // step_x)\n # number of patch in width\n grid_d = 1 + ((ten4.shape[3] - d) // step_y)\n else:\n raise TypeError(\"Images2Neibs: unknow mode '%s'\" % mode)\n\n z_dim0 = grid_c * grid_d * ten4.shape[1] * ten4.shape[0]\n z_dim1 = c * d\n z[0] = numpy.empty((z_dim0, z_dim1), dtype=node.outputs[0].dtype)\n\n nb_batch = ten4.shape[0]\n nb_stack = ten4.shape[1]\n height = ten4.shape[2]\n width = ten4.shape[3]\n\n wrap_centered_idx_shift_x = c // 2\n wrap_centered_idx_shift_y = d // 2\n for n in range(nb_batch):\n for s in range(nb_stack):\n # loop over the number of patch in height\n for a in range(grid_c):\n # loop over the number of patch in width\n for b in range(grid_d):\n z_row = b + grid_d * (a + grid_c * (s + nb_stack * n))\n for i in range(c):\n ten4_2 = i + a * step_x\n if mode == \"wrap_centered\":\n ten4_2 -= wrap_centered_idx_shift_x\n if ten4_2 < 0:\n ten4_2 += height\n elif ten4_2 >= height:\n ten4_2 -= height\n for j in range(d):\n ten4_3 = j + b * step_y\n if mode == \"wrap_centered\":\n ten4_3 -= wrap_centered_idx_shift_y\n if ten4_3 < 0:\n ten4_3 += width\n elif ten4_3 >= width:\n ten4_3 -= width\n z_col = j + d * i\n\n z[0][z_row, z_col] = ten4[n, s, ten4_2, ten4_3]\n\n def infer_shape(self, node, input_shape):\n in_shape = input_shape[0]\n c, d = node.inputs[1]\n step_x, step_y = node.inputs[2]\n if self.mode == 'wrap_centered':\n grid_c = T.ceil_intdiv(in_shape[2], step_x)\n grid_d = T.ceil_intdiv(in_shape[3], step_y)\n elif self.mode == 'valid':\n grid_c = 1 + ((in_shape[2] - c) // step_x)\n grid_d = 1 + ((in_shape[3] - d) // step_y)\n elif self.mode == 'ignore_borders':\n grid_c = 1 + ((in_shape[2] - c) // step_x)\n grid_d = 1 + ((in_shape[3] - d) // step_y)\n z_dim0 = grid_c * grid_d * in_shape[1] * in_shape[0]\n z_dim1 = c * d\n return [(z_dim0, z_dim1)]\n\n def c_code(self, node, name, inp, out, sub):\n ten4, neib_shape, neib_step = inp\n z, = out\n\n fail = sub['fail']\n mode = self.mode\n return \"\"\"\n#ifndef CEIL_INTDIV\n#define CEIL_INTDIV(a, b) ((a/b) + ((a %% b) ? 
1: 0))\n#endif\n\n int grid_c = -1; //number of patch in height\n int grid_d = -1; //number of patch in width\n {\n if (PyArray_NDIM(%(ten4)s) != 4)\n {\n PyErr_Format(PyExc_TypeError, \"ten4 wrong rank\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(neib_shape)s) != 1)\n {\n PyErr_Format(PyExc_TypeError, \"neib_shape wrong rank\");\n %(fail)s;\n }\n if ( (PyArray_DIMS(%(neib_shape)s))[0] != 2)\n {\n PyErr_Format(PyExc_TypeError, \"neib_shape wrong shape ; has to\"\n \" contain 2 elements\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(neib_step)s) != 1)\n {\n PyErr_Format(PyExc_TypeError, \"neib_step wrong rank\");\n %(fail)s;\n }\n if ( (PyArray_DIMS(%(neib_step)s))[0] != 2)\n {\n PyErr_Format(PyExc_TypeError,\n \"neib_step wrong step ; has to contain 2 elements\");\n %(fail)s;\n }\n\n // (c,d) = neib_shape\n const npy_intp c = (npy_intp) *(dtype_%(neib_shape)s*) PyArray_GETPTR1(%(neib_shape)s, 0);\n const npy_intp d = (npy_intp) *(dtype_%(neib_shape)s*) PyArray_GETPTR1(%(neib_shape)s, 1);\n // (step_x,step_y) = neib_step\n const dtype_%(neib_step)s step_x = *(dtype_%(neib_step)s*) PyArray_GETPTR1(%(neib_step)s, 0);\n const dtype_%(neib_step)s step_y = *(dtype_%(neib_step)s*) PyArray_GETPTR1(%(neib_step)s, 1);\n\n if (step_x <=0 || step_y <=0)\n {\n PyErr_Format(PyExc_ValueError,\n \"neib_step wrong step ; values <= 0. Got %%lld %%lld.\",\n (long long) step_x, (long long) step_y);\n %(fail)s;\n }\n\n if (c <=0 || d <=0)\n {\n PyErr_Format(PyExc_ValueError,\n \"neib_shape values <= 0. Got %%lld %%lld.\",\n (long long)c, (long long)d);\n %(fail)s;\n }\n\n if ( \"%(mode)s\" == \"wrap_centered\") {\n if (c%%2!=1 || d%%2!=1){\n PyErr_Format(PyExc_TypeError,\n \"Images2Neibs: in mode wrap_centered\"\n \" need patch with odd shapes\");\n %(fail)s;\n }\n if ( (PyArray_DIMS(%(ten4)s))[2] < c ||\n (PyArray_DIMS(%(ten4)s))[3] < d)\n {\n PyErr_Format(PyExc_TypeError,\n \"Images2Neibs: in wrap_centered mode, don't support image\"\n \" shapes smaller then the patch shapes:\"\n \" neib_shape=(%%ld,%%ld), ten4[2:]=[%%ld,%%ld]\",\n (long int)c, (long int)d,\n (long int)(PyArray_DIMS(%(ten4)s)[2]),\n (long int)(PyArray_DIMS(%(ten4)s)[3]));\n %(fail)s;\n }\n grid_c = CEIL_INTDIV(((PyArray_DIMS(%(ten4)s))[2]),step_x);\n grid_d = CEIL_INTDIV(((PyArray_DIMS(%(ten4)s))[3]),step_y);\n\n }else if ( \"%(mode)s\" == \"valid\") {\n if ( ((PyArray_DIMS(%(ten4)s))[2] < c) ||\n ( (((PyArray_DIMS(%(ten4)s))[2]-c) %% step_x)!=0))\n {\n PyErr_Format(PyExc_TypeError,\n \"neib_shape[0]=%%ld, neib_step[0]=%%ld and\"\n \" ten4.shape[2]=%%ld not consistent\",\n (long int)c, (long int)step_x,\n (long int)(PyArray_DIMS(%(ten4)s)[2]));\n %(fail)s;\n }\n if ( ((PyArray_DIMS(%(ten4)s))[3] < d) ||\n ( (((PyArray_DIMS(%(ten4)s))[3]-d) %% step_y)!=0))\n {\n PyErr_Format(PyExc_TypeError,\n \"neib_shape[1]=%%ld, neib_step[1]=%%ld and\"\n \" ten4.shape[3]=%%ld not consistent\",\n (long int)d, (long int)step_y,\n (long int)(PyArray_DIMS(%(ten4)s)[3]));\n %(fail)s;\n }\n //number of patch in height\n grid_c = 1+(((PyArray_DIMS(%(ten4)s))[2]-c)/step_x);\n //number of patch in width\n grid_d = 1+(((PyArray_DIMS(%(ten4)s))[3]-d)/step_y);\n }else if ( \"%(mode)s\" == \"ignore_borders\") {\n //number of patch in height\n grid_c = 1+(((PyArray_DIMS(%(ten4)s))[2]-c)/step_x);\n //number of patch in width\n grid_d = 1+(((PyArray_DIMS(%(ten4)s))[3]-d)/step_y);\n }else{\n PyErr_Format(PyExc_TypeError,\n \"Images2Neibs: unknow mode '%(mode)s'\");\n %(fail)s;\n }\n\n // new dimensions for z\n const npy_intp z_dim1 = c * d;\n const npy_intp z_dim0 = grid_c\n * 
grid_d\n * (PyArray_DIMS(%(ten4)s))[1]\n * (PyArray_DIMS(%(ten4)s))[0];\n\n if ((NULL == %(z)s)\n || ((PyArray_DIMS(%(z)s))[0] != z_dim0 )\n || ((PyArray_DIMS(%(z)s))[1] != z_dim1 )\n )\n {\n Py_XDECREF(%(z)s);\n npy_intp dims[2];\n dims[0] = z_dim0;\n dims[1] = z_dim1;\n\n %(z)s = (PyArrayObject*) PyArray_EMPTY(2,\n dims,\n PyArray_TYPE((PyArrayObject*) py_%(ten4)s),\n 0);\n\n if (!%(z)s)\n {\n PyErr_SetString(PyExc_MemoryError, \"failed to alloc z output\");\n %(fail)s;\n }\n }\n }\n\n { // NESTED SCOPE\n\n const int nb_batch = (PyArray_DIMS(%(ten4)s))[0];\n const int nb_stack = (PyArray_DIMS(%(ten4)s))[1];\n const int height = (PyArray_DIMS(%(ten4)s))[2];\n const int width = (PyArray_DIMS(%(ten4)s))[3];\n\n // (c,d) = neib_shape\n const npy_intp c = (npy_intp) *(dtype_%(neib_shape)s*) PyArray_GETPTR1(%(neib_shape)s, 0);\n const npy_intp d = (npy_intp) *(dtype_%(neib_shape)s*) PyArray_GETPTR1(%(neib_shape)s, 1);\n // (step_x,step_y) = neib_step\n const npy_intp step_x = (npy_intp) *(dtype_%(neib_step)s*) PyArray_GETPTR1(%(neib_step)s, 0);\n const npy_intp step_y = (npy_intp) *(dtype_%(neib_step)s*) PyArray_GETPTR1(%(neib_step)s, 1);\n\n const int wrap_centered_idx_shift_x = c/2;\n const int wrap_centered_idx_shift_y = d/2;\n // Oh this is messed up...\n for (int n = 0; n < nb_batch; n++) // loop over batches\n for (int s = 0; s < nb_stack; s++) // loop over stacks\n for (int a = 0; a < grid_c; a++) // loop over the number of patch in height\n for (int b = 0; b < grid_d; b++) // loop over the number of patch in width\n {\n int z_row = b + grid_d*(a + grid_c*(s + nb_stack*n));\n for (int i = 0; i < c; i++) // loop over c\n {\n int ten4_2 = i + a * step_x;\n if ( \"%(mode)s\" == \"wrap_centered\" ){\n ten4_2 -= wrap_centered_idx_shift_x;\n if ( ten4_2 < 0 ) ten4_2 += height;\n else if (ten4_2 >= height) ten4_2 -= height;\n }\n for (int j = 0; j < d; j++) // loop over d\n {\n\n int ten4_3 = j + b * step_y;\n if ( \"%(mode)s\" == \"wrap_centered\" ){\n ten4_3 -= wrap_centered_idx_shift_y;\n if ( ten4_3 < 0 ) ten4_3 += width;\n else if (ten4_3 >= width) ten4_3 -= width;\n }\n int z_col = j + d * i;\n\n dtype_%(z)s* curr_z = (dtype_%(z)s*) PyArray_GETPTR2(%(z)s, z_row, z_col);\n *curr_z = *( (dtype_%(ten4)s*) PyArray_GETPTR4(%(ten4)s, n, s, ten4_2, ten4_3));\n\n //printf(\"\\\\n(%%i,%%i,%%i,%%i) --> (%%i,%%i)\",\n // n, s, ten4_2, ten4_3, z_row, z_col);\n //printf(\"%%f \", *curr_z);\n }\n }\n }\n } // END NESTED SCOPE\n \"\"\" % locals()\n\n\ndef images2neibs(ten4, neib_shape, neib_step=None, mode='valid'):\n \"\"\"\n Function :func:`images2neibs <theano.tensor.nnet.neighbours.images2neibs>`\n allows to apply a sliding window operation to a tensor containing\n images or other two-dimensional objects.\n The sliding window operation loops over points in input data and stores\n a rectangular neighbourhood of each point.\n It is possible to assign a step of selecting patches (parameter `neib_step`).\n\n Parameters\n ----------\n ten4 : A 4d tensor-like\n A 4-dimensional tensor which represents a list of lists of images.\n It should have shape (list 1 dim, list 2 dim, row, col). 
The first\n two dimensions can be useful to store different channels and batches.\n neib_shape : A 1d tensor-like of 2 values\n A tuple containing two values: height and width of the neighbourhood.\n It should have shape (r,c) where r is the height of the neighborhood\n in rows and c is the width of the neighborhood in columns.\n neib_step : A 1d tensor-like of 2 values\n (dr,dc) where dr is the number of rows to skip between patch and dc is\n the number of columns. The parameter should be a tuple of two elements:\n number of rows and number of columns to skip each iteration.\n Basically, when the step is 1, the neighbourhood of every first element\n is taken and every possible rectangular subset is returned.\n By default it is equal to `neib_shape` in other words, the patches are\n disjoint. When the step is greater than `neib_shape`, some elements are\n omitted. When None, this is the same as neib_shape (patch are disjoint).\n mode : {'valid', 'ignore_borders', 'wrap_centered'}\n ``valid``\n Requires an input that is a multiple of the\n pooling factor (in each direction).\n ``ignore_borders``\n Same as valid, but will ignore the borders if the shape(s) of\n the input is not a multiple of the pooling factor(s).\n ``wrap_centered``\n ?? TODO comment\n\n Returns\n -------\n object\n Reshapes the input as a 2D tensor where each row is an\n pooling example. Pseudo-code of the output:\n\n .. code-block:: python\n\n idx = 0\n for i in xrange(list 1 dim):\n for j in xrange(list 2 dim):\n for k in <image column coordinates>:\n for l in <image row coordinates>:\n output[idx,:]\n = flattened version of ten4[i,j,l:l+r,k:k+c]\n idx += 1\n\n .. note:: The operation isn't necessarily implemented internally with\n these for loops, they're just the easiest way to describe the\n output pattern.\n\n Notes\n -----\n .. note::\n Currently the step size should be chosen in the way that the\n corresponding dimension :math:`i` (width or height) is equal\n to :math:`n * step\\_size_i + neib\\_shape_i` for some :math:`n`.\n\n Examples\n --------\n\n .. code-block:: python\n\n # Defining variables\n images = T.tensor4('images')\n neibs = images2neibs(images, neib_shape=(5, 5))\n\n # Constructing theano function\n window_function = theano.function([images], neibs)\n\n # Input tensor (one image 10x10)\n im_val = np.arange(100.).reshape((1, 1, 10, 10))\n\n # Function application\n neibs_val = window_function(im_val)\n\n .. note:: The underlying code will construct a 2D tensor of disjoint\n patches 5x5. The output has shape 4x25.\n\n \"\"\"\n return Images2Neibs(mode)(ten4, neib_shape, neib_step)\n\n\ndef neibs2images(neibs, neib_shape, original_shape, mode='valid'):\n \"\"\"\n Function :func:`neibs2images <theano.sandbox.neighbours.neibs2images>`\n performs the inverse operation of\n :func:`images2neibs <theano.sandbox.neigbours.neibs2images>`. 
It inputs\n the output of :func:`images2neibs <theano.sandbox.neigbours.neibs2images>`\n and reconstructs its input.\n\n Parameters\n ----------\n neibs : 2d tensor\n Like the one obtained by\n :func:`images2neibs <theano.sandbox.neigbours.neibs2images>`.\n neib_shape\n `neib_shape` that was used in\n :func:`images2neibs <theano.sandbox.neigbours.neibs2images>`.\n original_shape\n Original shape of the 4d tensor given to\n :func:`images2neibs <theano.sandbox.neigbours.neibs2images>`\n\n Returns\n -------\n object\n Reconstructs the input of\n :func:`images2neibs <theano.sandbox.neigbours.neibs2images>`,\n a 4d tensor of shape `original_shape`.\n\n Notes\n -----\n Currently, the function doesn't support tensors created with\n `neib_step` different from default value. This means that it may be\n impossible to compute the gradient of a variable gained by\n :func:`images2neibs <theano.sandbox.neigbours.neibs2images>` w.r.t.\n its inputs in this case, because it uses\n :func:`images2neibs <theano.sandbox.neigbours.neibs2images>` for\n gradient computation.\n\n Examples\n --------\n Example, which uses a tensor gained in example for\n :func:`images2neibs <theano.sandbox.neigbours.neibs2images>`:\n\n .. code-block:: python\n\n im_new = neibs2images(neibs, (5, 5), im_val.shape)\n # Theano function definition\n inv_window = theano.function([neibs], im_new)\n # Function application\n im_new_val = inv_window(neibs_val)\n\n .. note:: The code will output the initial image array.\n\n \"\"\"\n neibs = T.as_tensor_variable(neibs)\n neib_shape = T.as_tensor_variable(neib_shape)\n original_shape = T.as_tensor_variable(original_shape)\n\n new_neib_shape = T.stack([original_shape[-1] // neib_shape[1],\n neib_shape[1]])\n output_2d = images2neibs(neibs.dimshuffle('x', 'x', 0, 1),\n new_neib_shape, mode=mode)\n\n if mode == 'ignore_borders':\n # We use set_subtensor to accept original_shape we can't infer\n # the shape and still raise error when it don't have the right\n # shape.\n valid_shape = original_shape\n valid_shape = T.set_subtensor(\n valid_shape[2],\n (valid_shape[2] // neib_shape[0]) * neib_shape[0])\n valid_shape = T.set_subtensor(\n valid_shape[3],\n (valid_shape[3] // neib_shape[1]) * neib_shape[1])\n output_4d = output_2d.reshape(valid_shape, ndim=4)\n # padding the borders with zeros\n for d in [2, 3]:\n pad_shape = list(output_4d.shape)\n pad_shape[d] = original_shape[d] - valid_shape[d]\n output_4d = T.concatenate([output_4d, T.zeros(pad_shape)], axis=d)\n elif mode == 'valid':\n # TODO: we do not implement all mode with this code.\n # Add a check for the good cases.\n output_4d = output_2d.reshape(original_shape, ndim=4)\n else:\n raise NotImplementedError(\"neibs2images do not support mode=%s\" % mode)\n\n return output_4d\n" ]
[ [ "numpy.array" ], [ "pandas.Series", "numpy.linspace", "numpy.asarray", "pandas.util.testing.assert_produces_warning", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.dtype", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.assert_index_equal", "numpy.random.randn", "pandas._libs.hashtable.duplicated_object", "pandas.util.testing.assert_copy", "pandas.util.testing.round_trip_pickle", "pandas.isna", "pandas.util.testing.reset_display_options", "pandas.util.testing.makeTimeDataFrame", "pandas.util.testing.assert_numpy_array_equal", "pandas.Timestamp", "numpy.arange", "pandas.compat.text_type", "pandas.Index", "pandas.DatetimeIndex", "pandas.util.testing.assert_series_equal", "pandas.core.config.option_context", "numpy.insert", "pandas.set_option", "numpy.repeat", "numpy.zeros", "pandas.compat.long", "pandas.compat.u", "pandas.MultiIndex", "numpy.random.choice", "pandas.util.testing.assert_almost_equal", "pandas.util.testing.makeStringIndex", "pandas.MultiIndex.from_product", "numpy.random.rand", "pandas.date_range", "pandas._libs.lib.list_to_object_array", "numpy.array", "pandas.util.testing.makeCustomDataframe", "numpy.random.RandomState", "pandas.timedelta_range", "pandas.CategoricalIndex", "pandas.util.testing.assert_dict_equal", "pandas.period_range", "pandas.util.testing.assert_raises_regex", "pandas.util.testing.get_data_path", "pandas.MultiIndex.from_arrays", "pandas._libs.lib.Timestamp", "pandas.compat.lrange", "pandas.read_pickle", "numpy.empty", "pandas.compat.range" ], [ "numpy.sqrt" ], [ "pandas.util._validators.validate_bool_kwarg", "pandas.util._decorators.deprecate_kwarg", "pandas.Series", "numpy.take", "numpy.asarray", "pandas.core.dtypes.common.is_datetimelike", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.dtypes.missing.notna", "pandas.compat.iteritems", "pandas.core.common.in_ipython_frontend", "pandas.core.dtypes.common._ensure_int64", "pandas.isna", "numpy.where", "pandas.core.algorithms._get_data_algo", "pandas.util._decorators.Substitution", "pandas.core.config.get_option", "numpy.unique", "pandas.compat.numpy.function.validate_reshape", "numpy.reshape", "pandas.compat.text_type", "numpy.arange", "pandas.Index", "pandas.core.algorithms.factorize", "pandas.io.formats.format.format_array", "pandas.core.dtypes.cast.coerce_indexer_dtype", "pandas.core.dtypes.common.is_categorical_dtype", "pandas.compat.numpy.function.validate_repeat", "pandas.io.formats.terminal.get_terminal_size", "pandas.core.dtypes.common.is_list_like", "pandas.util._decorators.Appender", "pandas.core.dtypes.common.is_categorical", "pandas.compat.u", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_sequence", "pandas.compat.numpy.function.validate_argsort_with_ascending", "numpy.logical_or", "numpy.append", "pandas.core.reshape.concat.concat", "pandas.core.dtypes.dtypes.CategoricalDtype", "pandas.core.series._sanitize_array", "pandas.core.dtypes.cast.maybe_infer_to_datetimelike", "numpy.array", "pandas.core.dtypes.common.is_bool", "pandas.CategoricalIndex", "pandas.core.dtypes.common._ensure_platform_int", "numpy.array_equal", "pandas.core.algorithms.take_1d", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.common._ensure_object", "pandas.core.common.is_null_slice", "numpy.sort", "pandas.core.algorithms.unique1d", "pandas.io.formats.format.CategoricalFormatter", "numpy.bincount", "numpy.prod", "pandas._libs.lib.item_from_zerodim", "pandas.core.missing.interpolate_2d" ], [ "numpy.random.random", "pandas.Series", 
"pandas.MultiIndex", "pandas.util.testing.assert_raises_regex", "pandas.Index", "pandas.util.testing.assert_frame_equal", "numpy.random.randn", "pandas.compat.lrange", "numpy.random.randint" ], [ "pandas.to_datetime", "pandas.util.testing._incompat_bottleneck_version", "pandas.Series", "pandas.stats.moments.ewma", "pandas.offsets.Day", "numpy.sqrt", "pandas.MultiIndex.from_tuples", "numpy.dtype", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "numpy.concatenate", "pandas.util.testing.assert_index_equal", "numpy.nan_to_num", "numpy.random.randn", "numpy.mean", "numpy.var", "pandas.isna", "numpy.nanstd", "pandas.stats.moments.rolling_std", "pandas.tseries.offsets.BDay", "pandas.notna", "pandas.util.testing.assert_numpy_array_equal", "pandas.stats.moments.rolling_var", "numpy.arange", "pandas.offsets.MonthBegin", "numpy.nanvar", "pandas.util.testing.assert_series_equal", "pandas.Index", "pandas.util.testing._skip_if_no_scipy", "pandas.DatetimeIndex", "numpy.frombuffer", "numpy.std", "numpy.zeros", "scipy.stats.skew", "pandas.stats.moments.ewmstd", "pandas.concat", "numpy.isnan", "pandas.stats.moments.rolling_apply", "pandas.util.testing.assert_almost_equal", "pandas.Timedelta", "pandas.stats.moments.rolling_window", "numpy.cov", "pandas.MultiIndex.from_product", "numpy.random.rand", "pandas.date_range", "scipy.stats.kurtosis", "numpy.corrcoef", "numpy.array", "pandas.stats.moments.rolling_sum", "pandas.timedelta_range", "pandas.stats.moments.rolling_min", "pandas.util.testing.makeTimeSeries", "numpy.abs", "numpy.isfinite", "pandas.util.testing.assert_raises_regex", "pandas.stats.moments.rolling_max", "numpy.percentile", "numpy.ones", "numpy.sort", "pandas.compat.zip", "pandas.Period", "pandas.stats.moments.rolling_quantile", "pandas.Timestamp", "pandas.stats.moments.rolling_mean", "numpy.empty", "pandas.compat.range" ], [ "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.24", "0.23", "0.21", "0.20" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dhb2128/scanpy
[ "78649a991197af4685a8fe2f7a0d24064e3056bd", "78649a991197af4685a8fe2f7a0d24064e3056bd" ]
[ "scanpy/preprocessing/_normalization.py", "scanpy/readwrite.py" ]
[ "import numpy as np\nfrom scipy.sparse import issparse\nfrom sklearn.utils import sparsefuncs\nfrom .. import logging as logg\nfrom ..utils import doc_params\nfrom ._docs import doc_norm_descr, doc_quant_descr, doc_params_bulk, doc_norm_quant, doc_norm_return, doc_ex_quant, doc_ex_total\n\ndef _normalize_data(X, counts, after=None, copy=False):\n X = X.copy() if copy else X\n after = np.median(counts[counts>0]) if after is None else after\n counts += (counts == 0)\n counts /= after\n if issparse(X):\n X = sparsefuncs.inplace_row_scale(X, 1/counts)\n else:\n X /= counts[:, None]\n return X if copy else None\n\n@doc_params(quant_descr=doc_quant_descr, params_bulk=doc_params_bulk, norm_quant=doc_norm_quant,\n norm_return=doc_norm_return, ex_quant=doc_ex_quant)\ndef normalize_quantile(adata, target_sum=None, quantile=1, key_added=None,\n layers=None, layer_norm=None, inplace=True):\n \"\"\"\\\n {quant_descr}\n\n {params_bulk}\n {norm_quant}\n\n {norm_return}\n\n {ex_quant}\n \"\"\"\n if quantile < 0 or quantile > 1:\n raise ValueError('Choose quantile between 0 and 1.')\n\n X = adata.X\n gene_subset = None\n if not inplace:\n # not recarray because need to support sparse\n dat = {}\n\n if quantile < 1:\n logg.msg('normalizing by count per cell for \\\n genes that make up less than quantile * total count per cell', r=True)\n X = adata.X\n\n counts_per_cell = X.sum(1)\n counts_per_cell = np.ravel(counts_per_cell)\n\n gene_subset = (X>counts_per_cell[:, None]*quantile).sum(0)\n gene_subset = (np.ravel(gene_subset) == 0)\n else:\n logg.msg('normalizing by total count per cell', r=True)\n\n X = X if gene_subset is None else adata[:, gene_subset].X\n counts_per_cell = X.sum(1)\n # get rid of adata view\n counts_per_cell = np.ravel(counts_per_cell).copy()\n del X\n del gene_subset\n\n if key_added is not None:\n adata.obs[key_added] = counts_per_cell\n\n cell_subset = counts_per_cell>0\n if not np.all(cell_subset):\n logg.warn('Some cells have total count of genes equal to zero')\n\n if layer_norm == 'after':\n after = target_sum\n elif layer_norm == 'X':\n after = np.median(counts_per_cell[cell_subset])\n elif layer_norm is None:\n after = None\n else:\n raise ValueError('layer_norm should be \"after\", \"X\" or None')\n del cell_subset\n\n if inplace:\n if hasattr(adata.X, '__itruediv__'):\n _normalize_data(adata.X, counts_per_cell, target_sum)\n else:\n adata.X = _normalize_data(adata.X, counts_per_cell, target_sum, copy=True)\n else:\n dat['X'] = _normalize_data(adata.X, counts_per_cell, target_sum, copy=True)\n\n layers = adata.layers.keys() if layers == 'all' else layers\n if layers is not None:\n for layer in layers:\n L = adata.layers[layer]\n counts = np.ravel(L.sum(1))\n if inplace:\n if hasattr(L, '__itruediv__'):\n _normalize_data(L, counts, after)\n else:\n adata.layers[layer] = _normalize_data(L, counts, after, copy=True)\n else:\n dat[layer] = _normalize_data(L, counts, after, copy=True)\n\n logg.msg(' finished', t=True, end=': ')\n logg.msg('normalized adata.X')\n if key_added is not None:\n logg.msg('and added \\'{}\\', counts per cell before normalization (adata.obs)'\n .format(key_added))\n\n return dat if not inplace else None\n\n@doc_params(norm_descr=doc_norm_descr, params_bulk=doc_params_bulk, norm_return=doc_norm_return, ex_total=doc_ex_total)\ndef normalize_total(adata, target_sum=None, key_added=None, layers=None, layer_norm=None, inplace=True):\n \"\"\"\\\n {norm_descr}\n\n {params_bulk}\n\n {norm_return}\n\n {ex_total}\n \"\"\"\n return normalize_quantile(adata=adata, 
target_sum=target_sum,\n key_added=key_added, layers=layers,\n layer_norm=layer_norm, quantile=1, inplace=inplace)\n", "\"\"\"Reading and Writing\n\"\"\"\n\nimport sys\nimport time\nfrom pathlib import Path, PurePath\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\nimport tables\nimport anndata\nfrom anndata import AnnData, read_loom, \\\n read_csv, read_excel, read_text, read_hdf, read_mtx\nfrom anndata import read as read_h5ad\n\nfrom . import settings\nfrom . import logging as logg\n\n# .gz and .bz2 suffixes are also allowed for text formats\ntext_exts = {'csv',\n 'tsv', 'tab', 'data', 'txt'} # these four are all equivalent\navail_exts = {'anndata', 'xlsx',\n 'h5', 'h5ad', 'mtx', 'mtx.gz',\n 'soft.gz', 'loom'} | text_exts\n\"\"\"Available file formats for reading data. \"\"\"\n\n\n# --------------------------------------------------------------------------------\n# Reading and Writing data files and AnnData objects\n# --------------------------------------------------------------------------------\n\n\ndef read(filename, backed=False, sheet=None, ext=None, delimiter=None,\n first_column_names=False, backup_url=None, cache=False, **kwargs):\n \"\"\"Read file and return :class:`~anndata.AnnData` object.\n\n To speed up reading, consider passing `cache=True`, which creates an hdf5\n cache file.\n\n Parameters\n ----------\n filename : `str`\n If the filename has no file extension, it is interpreted as a key for\n generating a filename via `sc.settings.writedir + filename +\n sc.settings.file_format_data`. This is the same behavior as in\n `sc.read(filename, ...)`.\n backed : {`False`, `True`, 'r', 'r+'}, optional (default: `False`)\n Load :class:`~anndata.AnnData` in `backed` mode instead of fully\n loading it into memory (`memory` mode). Only applies to `.h5ad` files.\n `True` and 'r' are equivalent. If you want to modify backed attributes\n of the AnnData object, you need to choose 'r+'.\n sheet : `str`, optional (default: `None`)\n Name of sheet/table in hdf5 or Excel file.\n cache : `bool`, optional (default: `False`)\n If `False`, read from source, if `True`, read from fast 'h5ad' cache.\n ext : `str`, optional (default: `None`)\n Extension that indicates the file type. If `None`, uses extension of\n filename.\n delimiter : `str`, optional (default: `None`)\n Delimiter that separates data within text file. If `None`, will split at\n arbitrary number of white spaces, which is different from enforcing\n splitting at any single white space ' '.\n first_column_names : `bool`, optional (default: `False`)\n Assume the first column stores row names. This is only necessary if\n these are not strings: strings in the first column are automatically\n assumed to be row names.\n backup_url : `str`, optional (default: `None`)\n Retrieve the file from an URL if not present on disk.\n\n Returns\n -------\n adata : :class:`~anndata.AnnData`\n \"\"\"\n filename = str(filename) # allow passing pathlib.Path objects\n if is_valid_filename(filename):\n return _read(filename, backed=backed, sheet=sheet, ext=ext,\n delimiter=delimiter, first_column_names=first_column_names,\n backup_url=backup_url, cache=cache, **kwargs)\n # generate filename and read to dict\n filekey = filename\n filename = settings.writedir + filekey + '.' + settings.file_format_data\n if not Path(filename).exists():\n raise ValueError('Reading with filekey \"{}\" failed, the '\n 'inferred filename \"{}\" does not exist. 
'\n 'If you intended to provide a filename, either '\n 'use a filename ending on one of the available extensions {} '\n 'or pass the parameter `ext`.'\n .format(filekey, filename, avail_exts))\n return read_h5ad(filename, backed=backed)\n\n\ndef read_10x_h5(filename, genome=None, gex_only=True):\n \"\"\"Read 10x-Genomics-formatted hdf5 file.\n\n Parameters\n ----------\n filename : `str` | :class:`~pathlib.Path`\n Filename.\n genome : `str`, optional (default: ``None``)\n Filter expression to this genes within this genome. For legacy 10x h5\n files, this must be provided if the data contains more than one genome.\n gex_only : `bool`, optional (default: `True`)\n Only keep 'Gene Expression' data and ignore other feature types,\n e.g. 'Antibody Capture', 'CRISPR Guide Capture', or 'Custom'\n\n Returns\n -------\n adata : :class:`~anndata.AnnData`\n Annotated data matrix, where obsevations/cells are named by their\n barcode and variables/genes by gene name. The data matrix is stored in\n `adata.X`, cell names in `adata.obs_names` and gene names in\n `adata.var_names`. The gene IDs are stored in `adata.var['gene_ids']`.\n The feature types are stored in `adata.var['feature_types']`\n \"\"\"\n logg.info('reading', filename, r=True, end=' ')\n with tables.open_file(str(filename), 'r') as f:\n v3 = '/matrix' in f\n if v3:\n adata = _read_v3_10x_h5(filename)\n if genome:\n if genome not in adata.var['genome'].values:\n raise ValueError(\n \"Could not find data corresponding to genome '{genome}' in '{filename}'. \"\n \"Available genomes are: {avail}.\"\n .format(\n genome=genome, filename=filename,\n avail=list(adata.var[\"genome\"].unique()),\n )\n )\n adata = adata[:, list(map(lambda x: x == str(genome), adata.var['genome']))]\n if gex_only:\n adata = adata[:, list(map(lambda x: x == 'Gene Expression', adata.var['feature_types']))]\n return adata\n else:\n return _read_legacy_10x_h5(filename, genome=genome)\n\n\ndef _read_legacy_10x_h5(filename, genome=None):\n \"\"\"\n Read hdf5 file from Cell Ranger v2 or earlier versions.\n \"\"\"\n with tables.open_file(str(filename), 'r') as f:\n try:\n children = [x._v_name for x in f.list_nodes(f.root)]\n if not genome:\n if len(children) > 1:\n raise ValueError(\n \"'{filename}' contains more than one genome. For legacy 10x h5 \"\n \"files you must specify the genome if more than one is present. \"\n \"Available genomes are: {avail}\"\n .format(filename=filename, avail=children)\n )\n genome = children[0]\n elif genome not in children:\n raise ValueError(\n \"Could not find genome '{genome}' in '{filename}'. 
\"\n \"Available genomes are: {avail}\"\n .format(\n genome=genome, filename=str(filename),\n avail=children,\n )\n )\n dsets = {}\n for node in f.walk_nodes('/' + genome, 'Array'):\n dsets[node.name] = node.read()\n # AnnData works with csr matrices\n # 10x stores the transposed data, so we do the transposition right away\n from scipy.sparse import csr_matrix\n M, N = dsets['shape']\n data = dsets['data']\n if dsets['data'].dtype == np.dtype('int32'):\n data = dsets['data'].view('float32')\n data[:] = dsets['data']\n matrix = csr_matrix((data, dsets['indices'], dsets['indptr']),\n shape=(N, M))\n # the csc matrix is automatically the transposed csr matrix\n # as scanpy expects it, so, no need for a further transpostion\n adata = AnnData(matrix,\n {'obs_names': dsets['barcodes'].astype(str)},\n {'var_names': dsets['gene_names'].astype(str),\n 'gene_ids': dsets['genes'].astype(str)})\n logg.info(t=True)\n return adata\n except KeyError:\n raise Exception('File is missing one or more required datasets.')\n\n\ndef _read_v3_10x_h5(filename):\n \"\"\"\n Read hdf5 file from Cell Ranger v3 or later versions.\n \"\"\"\n with tables.open_file(str(filename), 'r') as f:\n try:\n dsets = {}\n for node in f.walk_nodes('/matrix', 'Array'):\n dsets[node.name] = node.read()\n from scipy.sparse import csr_matrix\n M, N = dsets['shape']\n data = dsets['data']\n if dsets['data'].dtype == np.dtype('int32'):\n data = dsets['data'].view('float32')\n data[:] = dsets['data']\n matrix = csr_matrix((data, dsets['indices'], dsets['indptr']),\n shape=(N, M))\n adata = AnnData(matrix,\n {'obs_names': dsets['barcodes'].astype(str)},\n {'var_names': dsets['name'].astype(str),\n 'gene_ids': dsets['id'].astype(str),\n 'feature_types': dsets['feature_type'].astype(str),\n 'genome': dsets['genome'].astype(str)})\n logg.info(t=True)\n return adata\n except KeyError:\n raise Exception('File is missing one or more required datasets.')\n\n\ndef read_10x_mtx(path, var_names='gene_symbols', make_unique=True, cache=False, gex_only=True):\n \"\"\"Read 10x-Genomics-formatted mtx directory.\n\n Parameters\n ----------\n path : `str`\n Path to directory for `.mtx` and `.tsv` files,\n e.g. './filtered_gene_bc_matrices/hg19/'.\n var_names : {'gene_symbols', 'gene_ids'}, optional (default: 'gene_symbols')\n The variables index.\n make_unique : `bool`, optional (default: `True`)\n Whether to make the variables index unique by appending '-1',\n '-2' etc. or not.\n cache : `bool`, optional (default: `False`)\n If `False`, read from source, if `True`, read from fast 'h5ad' cache.\n gex_only : `bool`, optional (default: `True`)\n Only keep 'Gene Expression' data and ignore other feature types,\n e.g. 
'Antibody Capture', 'CRISPR Guide Capture', or 'Custom'\n\n Returns\n -------\n An :class:`~anndata.AnnData`.\n \"\"\"\n path = Path(path)\n genefile_exists = (path / 'genes.tsv').is_file()\n read = _read_legacy_10x_mtx if genefile_exists else _read_v3_10x_mtx\n adata = read(\n str(path),\n var_names=var_names,\n make_unique=make_unique,\n cache=cache,\n )\n if genefile_exists or not gex_only:\n return adata\n else:\n gex_rows = list(map(lambda x: x == 'Gene Expression', adata.var['feature_types']))\n return adata[:, gex_rows]\n\n\ndef _read_legacy_10x_mtx(path, var_names='gene_symbols', make_unique=True, cache=False):\n \"\"\"\n Read mex from output from Cell Ranger v2 or earlier versions\n \"\"\"\n path = Path(path)\n adata = read(path / 'matrix.mtx', cache=cache).T # transpose the data\n genes = pd.read_csv(path / 'genes.tsv', header=None, sep='\\t')\n if var_names == 'gene_symbols':\n var_names = genes[1]\n if make_unique:\n var_names = anndata.utils.make_index_unique(pd.Index(var_names))\n adata.var_names = var_names\n adata.var['gene_ids'] = genes[0].values\n elif var_names == 'gene_ids':\n adata.var_names = genes[0]\n adata.var['gene_symbols'] = genes[1].values\n else:\n raise ValueError('`var_names` needs to be \\'gene_symbols\\' or \\'gene_ids\\'')\n adata.obs_names = pd.read_csv(path / 'barcodes.tsv', header=None)[0]\n return adata\n\n\ndef _read_v3_10x_mtx(path, var_names='gene_symbols', make_unique=True, cache=False):\n \"\"\"\n Read mex from output from Cell Ranger v3 or later versions\n \"\"\"\n path = Path(path)\n adata = read(path / 'matrix.mtx.gz', cache=cache).T # transpose the data\n genes = pd.read_csv(path / 'features.tsv.gz', header=None, sep='\\t')\n if var_names == 'gene_symbols':\n var_names = genes[1]\n if make_unique:\n var_names = anndata.utils.make_index_unique(pd.Index(var_names))\n adata.var_names = var_names\n adata.var['gene_ids'] = genes[0].values\n elif var_names == 'gene_ids':\n adata.var_names = genes[0]\n adata.var['gene_symbols'] = genes[1].values\n else:\n raise ValueError('`var_names` needs to be \\'gene_symbols\\' or \\'gene_ids\\'')\n adata.var['feature_types'] = genes[2].values\n adata.obs_names = pd.read_csv(path / 'barcodes.tsv.gz', header=None)[0]\n return adata\n\n\ndef write(filename, adata, ext=None, compression='gzip', compression_opts=None):\n \"\"\"Write :class:`~anndata.AnnData` objects to file.\n\n Parameters\n ----------\n filename : `str`\n If the filename has no file extension, it is interpreted as a key for\n generating a filename via `sc.settings.writedir + filename +\n sc.settings.file_format_data`. This is the same behavior as in\n :func:`~scanpy.api.read`.\n adata : :class:`~anndata.AnnData`\n Annotated data matrix.\n ext : {`None`, `'h5'`, `'csv'`, `'txt'`, `'npz'`} (default: `None`)\n File extension from wich to infer file format. 
If `None`, defaults to\n `sc.settings.file_format_data`.\n compression : {`None`, 'gzip', 'lzf'}, optional (default: `'gzip'`)\n See http://docs.h5py.org/en/latest/high/dataset.html.\n compression_opts : `int`, optional (default: `None`)\n See http://docs.h5py.org/en/latest/high/dataset.html.\n \"\"\"\n filename = str(filename) # allow passing pathlib.Path objects\n if is_valid_filename(filename):\n filename = filename\n ext_ = is_valid_filename(filename, return_ext=True)\n if ext is None:\n ext = ext_\n elif ext != ext_:\n raise ValueError('It suffices to provide the file type by '\n 'providing a proper extension to the filename.'\n 'One of \"txt\", \"csv\", \"h5\" or \"npz\".')\n else:\n key = filename\n ext = settings.file_format_data if ext is None else ext\n filename = get_filename_from_key(key, ext)\n if ext == 'csv':\n adata.write_csvs(filename)\n else:\n adata.write(filename, compression=compression,\n compression_opts=compression_opts)\n\n\n# -------------------------------------------------------------------------------\n# Reading and writing parameter files\n# -------------------------------------------------------------------------------\n\n\ndef read_params(filename, asheader=False, verbosity=0):\n \"\"\"Read parameter dictionary from text file.\n\n Assumes that parameters are specified in the format:\n par1 = value1\n par2 = value2\n\n Comments that start with '#' are allowed.\n\n Parameters\n ----------\n filename : str, Path\n Filename of data file.\n asheader : bool, optional\n Read the dictionary from the header (comment section) of a file.\n\n Returns\n -------\n params : dict\n Dictionary that stores parameters.\n \"\"\"\n filename = str(filename) # allow passing pathlib.Path objects\n from collections import OrderedDict\n params = OrderedDict([])\n for line in open(filename):\n if '=' in line:\n if not asheader or line.startswith('#'):\n line = line[1:] if line.startswith('#') else line\n key, val = line.split('=')\n key = key.strip()\n val = val.strip()\n params[key] = convert_string(val)\n return params\n\n\ndef write_params(path, *args, **dicts):\n \"\"\"Write parameters to file, so that it's readable by read_params.\n\n Uses INI file format.\n \"\"\"\n path = Path(path)\n if not path.parent.is_dir():\n path.parent.mkdir(parents=True)\n if len(args) == 1:\n d = args[0]\n with path.open('w') as f:\n for key in d:\n f.write(key + ' = ' + str(d[key]) + '\\n')\n else:\n with path.open('w') as f:\n for k, d in dicts.items():\n f.write('[' + k + ']\\n')\n for key, val in d.items():\n f.write(key + ' = ' + str(val) + '\\n')\n\n\ndef get_params_from_list(params_list):\n \"\"\"Transform params list to dictionary.\n \"\"\"\n params = {}\n for i in range(0, len(params_list)):\n if '=' not in params_list[i]:\n try:\n if not isinstance(params[key], list): params[key] = [params[key]]\n params[key] += [params_list[i]]\n except KeyError:\n raise ValueError('Pass parameters like `key1=a key2=b c d key3=...`.')\n else:\n key_val = params_list[i].split('=')\n key, val = key_val\n params[key] = convert_string(val)\n return params\n\n\n# -------------------------------------------------------------------------------\n# Reading and Writing data files\n# -------------------------------------------------------------------------------\n\n\ndef _read(filename, backed=False, sheet=None, ext=None, delimiter=None,\n first_column_names=None, backup_url=None, cache=False,\n suppress_cache_warning=False, **kwargs):\n if ext is not None and ext not in avail_exts:\n raise ValueError('Please provide 
one of the available extensions.\\n'\n + avail_exts)\n else:\n ext = is_valid_filename(filename, return_ext=True)\n is_present = check_datafile_present_and_download(filename,\n backup_url=backup_url)\n if not is_present: logg.msg('... did not find original file', filename)\n # read hdf5 files\n if ext in {'h5', 'h5ad'}:\n if sheet is None:\n return read_h5ad(filename, backed=backed)\n else:\n logg.msg('reading sheet', sheet, 'from file', filename, v=4)\n return read_hdf(filename, sheet)\n # read other file types\n path_cache = Path(settings.cachedir) / _slugify(filename).replace('.' + ext, '.h5ad') # type: Path\n if path_cache.suffix in {'.gz', '.bz2'}:\n path_cache = path_cache.with_suffix('')\n if cache and path_cache.is_file():\n logg.info('... reading from cache file', path_cache)\n adata = read_h5ad(path_cache, backed=False)\n else:\n if not is_present:\n raise FileNotFoundError('Did not find file {}.'.format(filename))\n logg.msg('reading', filename, v=4)\n if not cache and not suppress_cache_warning:\n logg.hint('This might be very slow. Consider passing `cache=True`, '\n 'which enables much faster reading from a cache file.')\n # do the actual reading\n if ext == 'xlsx' or ext == 'xls':\n if sheet is None:\n raise ValueError(\n 'Provide `sheet` parameter when reading \\'.xlsx\\' files.')\n else:\n adata = read_excel(filename, sheet)\n elif ext in {'mtx', 'mtx.gz'}:\n adata = read_mtx(filename)\n elif ext == 'csv':\n adata = read_csv(filename, first_column_names=first_column_names)\n elif ext in {'txt', 'tab', 'data', 'tsv'}:\n if ext == 'data':\n logg.msg('... assuming \\'.data\\' means tab or white-space '\n 'separated text file', v=3)\n logg.hint('change this by passing `ext` to sc.read')\n adata = read_text(filename, delimiter, first_column_names)\n elif ext == 'soft.gz':\n adata = _read_softgz(filename)\n elif ext == 'loom':\n adata = read_loom(filename=filename, **kwargs)\n else:\n raise ValueError('Unkown extension {}.'.format(ext))\n if cache:\n logg.info('... writing an', settings.file_format_data,\n 'cache file to speedup reading next time')\n if not path_cache.parent.is_dir():\n path_cache.parent.mkdir(parents=True)\n # write for faster reading when calling the next time\n adata.write(path_cache)\n return adata\n\n\ndef _slugify(path: Union[str, PurePath]) -> str:\n \"\"\"Make a path into a filename.\"\"\"\n if not isinstance(path, PurePath):\n path = PurePath(path)\n parts = list(path.parts)\n if parts[0] == '/':\n parts.pop(0)\n elif len(parts[0]) == 3 and parts[0][1:] == ':\\\\':\n parts[0] = parts[0][0] # C:\\ → C\n filename = '-'.join(parts)\n assert '/' not in filename, filename\n assert not filename[1:].startswith(':'), filename\n return filename\n\n\ndef _read_softgz(filename):\n \"\"\"Read a SOFT format data file.\n\n The SOFT format is documented here\n http://www.ncbi.nlm.nih.gov/geo/info/soft2.html.\n\n Returns\n -------\n adata\n\n Notes\n -----\n The function is based on a script by Kerby Shedden.\n http://dept.stat.lsa.umich.edu/~kshedden/Python-Workshop/gene_expression_comparison.html\n \"\"\"\n filename = str(filename) # allow passing pathlib.Path objects\n import gzip\n with gzip.open(filename, mode='rt') as file:\n # The header part of the file contains information about the\n # samples. 
Read that information first.\n samples_info = {}\n for line in file:\n if line.startswith(\"!dataset_table_begin\"):\n break\n elif line.startswith(\"!subset_description\"):\n subset_description = line.split(\"=\")[1].strip()\n elif line.startswith(\"!subset_sample_id\"):\n subset_ids = line.split(\"=\")[1].split(\",\")\n subset_ids = [x.strip() for x in subset_ids]\n for k in subset_ids:\n samples_info[k] = subset_description\n # Next line is the column headers (sample id's)\n sample_names = file.readline().strip().split(\"\\t\")\n # The column indices that contain gene expression data\n I = [i for i, x in enumerate(sample_names) if x.startswith(\"GSM\")]\n # Restrict the column headers to those that we keep\n sample_names = [sample_names[i] for i in I]\n # Get a list of sample labels\n groups = [samples_info[k] for k in sample_names]\n # Read the gene expression data as a list of lists, also get the gene\n # identifiers\n gene_names, X = [], []\n for line in file:\n # This is what signals the end of the gene expression data\n # section in the file\n if line.startswith(\"!dataset_table_end\"):\n break\n V = line.split(\"\\t\")\n # Extract the values that correspond to gene expression measures\n # and convert the strings to numbers\n x = [float(V[i]) for i in I]\n X.append(x)\n gene_names.append(V[1])\n # Convert the Python list of lists to a Numpy array and transpose to match\n # the Scanpy convention of storing samples in rows and variables in colums.\n X = np.array(X).T\n row_names = sample_names\n col_names = gene_names\n obs = np.zeros((len(row_names),), dtype=[('obs_names', 'S21'), ('groups', 'S21')])\n obs['obs_names'] = sample_names\n obs['groups'] = groups\n var = np.zeros((len(gene_names),), dtype=[('var_names', 'S21')])\n var['var_names'] = gene_names\n ddata = {'X': X, 'obs': obs, 'var': var}\n return AnnData(ddata)\n\n\n# -------------------------------------------------------------------------------\n# Type conversion\n# -------------------------------------------------------------------------------\n\n\ndef is_float(string):\n \"\"\"Check whether string is float.\n\n See also\n --------\n http://stackoverflow.com/questions/736043/checking-if-a-string-can-be-converted-to-float-in-python\n \"\"\"\n try:\n float(string)\n return True\n except ValueError:\n return False\n\n\ndef is_int(string):\n \"\"\"Check whether string is integer.\n \"\"\"\n try:\n int(string)\n return True\n except ValueError:\n return False\n\n\ndef convert_bool(string):\n \"\"\"Check whether string is boolean.\n \"\"\"\n if string == 'True':\n return True, True\n elif string == 'False':\n return True, False\n else:\n return False, False\n\n\ndef convert_string(string):\n \"\"\"Convert string to int, float or bool.\n \"\"\"\n if is_int(string):\n return int(string)\n elif is_float(string):\n return float(string)\n elif convert_bool(string)[0]:\n return convert_bool(string)[1]\n elif string == 'None':\n return None\n else:\n return string\n\n\n# -------------------------------------------------------------------------------\n# Helper functions for reading and writing\n# -------------------------------------------------------------------------------\n\n\ndef get_used_files():\n \"\"\"Get files used by processes with name scanpy.\"\"\"\n import psutil\n loop_over_scanpy_processes = (proc for proc in psutil.process_iter()\n if proc.name() == 'scanpy')\n filenames = []\n for proc in loop_over_scanpy_processes:\n try:\n flist = proc.open_files()\n for nt in flist:\n filenames.append(nt.path)\n # This catches a 
race condition where a process ends\n # before we can examine its files\n except psutil.NoSuchProcess as err:\n pass\n return set(filenames)\n\n\ndef wait_until_file_unused(filename):\n while (filename in get_used_files()):\n time.sleep(1)\n\n\ndef get_filename_from_key(key, ext=None):\n ext = settings.file_format_data if ext is None else ext\n filename = settings.writedir + key + '.' + ext\n return filename\n\n\ndef download_progress(count, blockSize, totalSize):\n percent = int(count*blockSize*100/totalSize)\n sys.stdout.write('\\r' + '... %d%%' % percent)\n sys.stdout.flush()\n\n\ndef check_datafile_present_and_download(path, backup_url=None):\n \"\"\"Check whether the file is present, otherwise download.\n \"\"\"\n path = Path(path)\n if path.is_file(): return True\n if backup_url is None: return False\n logg.info('try downloading from url\\n' + backup_url + '\\n' +\n '... this may take a while but only happens once')\n if not path.parent.is_dir():\n logg.info('creating directory', str(path.parent) + '/', 'for saving data')\n path.parent.mkdir(parents=True)\n from urllib.request import urlretrieve\n urlretrieve(backup_url, str(path), reporthook=download_progress)\n logg.info('')\n return True\n\n\ndef is_valid_filename(filename, return_ext=False):\n \"\"\"Check whether the argument is a filename.\"\"\"\n ext = Path(filename).suffixes\n\n if len(ext) > 2:\n logg.warn('Your filename has more than two extensions: {}.\\n'\n 'Only considering the two last: {}.'.format(ext, ext[-2:]))\n ext = ext[-2:]\n\n # cases for gzipped/bzipped text files\n if len(ext) == 2 and ext[0][1:] in text_exts and ext[1][1:] in ('gz', 'bz2'):\n return ext[0][1:] if return_ext else True\n elif ext and ext[-1][1:] in avail_exts:\n return ext[-1][1:] if return_ext else True\n elif ''.join(ext) == '.soft.gz':\n return 'soft.gz' if return_ext else True\n elif ''.join(ext) == '.mtx.gz':\n return 'mtx.gz' if return_ext else True\n else:\n if return_ext:\n raise ValueError('\"{}\" does not end on a valid extension.\\n'\n 'Please, provide one of the available extensions.\\n{}\\n'\n 'Text files with .gz and .bz2 extensions are also supported.'\n .format(filename, avail_exts))\n else:\n return False\n" ]
[ [ "scipy.sparse.issparse", "sklearn.utils.sparsefuncs.inplace_row_scale", "numpy.median", "numpy.all", "numpy.ravel" ], [ "pandas.read_csv", "pandas.Index", "numpy.dtype", "scipy.sparse.csr_matrix", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
vahndi/probability
[ "6ddf88e6f3d947c96b879e426030f60eb5cb2d59", "6ddf88e6f3d947c96b879e426030f60eb5cb2d59", "6ddf88e6f3d947c96b879e426030f60eb5cb2d59" ]
[ "tests/test_calculations/base_test.py", "examples/notebooks/tb/make_data.py", "tests/test_discrete/test_discrete.py" ]
[ "from unittest.case import TestCase\n\nfrom pandas import Series, DataFrame\n\nfrom probability.distributions import Beta, Dirichlet\n\n\nclass BaseTest(TestCase):\n\n def setUp(self) -> None:\n\n self.b1 = Beta(700, 300)\n self.b2 = Beta(600, 400)\n self.b3 = Beta(500, 500)\n self.d1 = Dirichlet([500, 300, 200])\n self.d2 = Dirichlet({'x': 100, 'y': 200, 'z': 300})\n self.b1__mul__b2 = self.b1 * self.b2\n self.b3__mul__b1__mul__b2 = self.b3 * self.b1__mul__b2\n self.b1__mul__comp__b1 = self.b1 * (1 - self.b1)\n self.b_series = Series({\n 'b1': self.b1, 'b2': self.b2, 'b3': self.b3\n })\n self.b_frame = DataFrame({\n 'c1': {'r1': self.b1, 'r2': self.b2},\n 'c2': {'r1': self.b2, 'r2': self.b3}\n })\n self.float_series = Series({'$100': 0.8, '$200': 0.6})\n", "from pandas import DataFrame\n\n\ndef make_cookies_observations() -> DataFrame:\n\n return DataFrame({\n 'bowl': ['bowl 1'] * 40 + ['bowl 2'] * 40,\n 'flavor': (\n ['vanilla'] * 30 + ['chocolate'] * 10 +\n ['vanilla'] * 20 + ['chocolate'] * 20\n )\n })\n", "from unittest.case import TestCase\n\nfrom pandas import Series, DataFrame\n\nfrom probability.discrete import Conditional\nfrom probability.discrete.discrete import Discrete\n\n\nclass TestDiscrete(TestCase):\n\n def setUp(self) -> None:\n\n self.education = Discrete.from_counts(\n data={\n ('Male', 'Never finished high school'): 112,\n ('Male', 'High school'): 231,\n ('Male', 'College'): 595,\n ('Male', 'Graduate school'): 242,\n ('Female', 'Never finished high school'): 136,\n ('Female', 'High school'): 189,\n ('Female', 'College'): 763,\n ('Female', 'Graduate school'): 172,\n },\n variables=['gender', 'highest_education']\n )\n self.education__total = 112 + 231 + 595 + 242 + 136 + 189 + 763 + 172\n self.total__high_school = 231 + 189\n\n self.coin_dist = Discrete.from_probs(\n data={\n ('H', 'H', 1, 1): 0.25,\n ('H', 'T', 1, 0): 0.25,\n ('T', 'H', 1, 0): 0.25,\n ('T', 'T', 0, 1): 0.25\n },\n variables=['coin_1', 'coin_2', 'x', 'y']\n )\n\n def test_p(self):\n self.assertAlmostEqual(\n self.total__high_school / self.education__total,\n self.education.p(highest_education='High school'),\n 5\n )\n\n def test_p_or(self):\n total__high_school__or__female = 231 + 136 + 189 + 763 + 172\n self.assertAlmostEqual(\n total__high_school__or__female / self.education__total,\n self.education.p_or(highest_education='High school',\n gender='Female'),\n 5\n )\n\n def test_p_given(self):\n\n total__female = 136 + 189 + 763 + 172\n self.assertAlmostEqual(\n 189 / total__female,\n self.education.given(gender='Female').p(\n highest_education='High school'),\n 5\n )\n self.assertAlmostEqual(\n 189 / self.total__high_school,\n self.education.given(highest_education='High school').p(\n gender='Female'),\n 5\n )\n\n def test_from_counts__1_var__vars_on_index(self):\n\n counts = Series({\n 'a': 1,\n 'b': 2,\n 'c': 3\n })\n counts.index.name = 'abc'\n discrete = Discrete.from_counts(counts)\n self.assertEqual(['abc'], discrete.variables)\n self.assertEqual({'abc': ['a', 'b', 'c']},\n discrete.states)\n self.assertEqual(1 / 6, discrete.p(abc='a'))\n self.assertEqual(2 / 6, discrete.p(abc='b'))\n self.assertEqual(3 / 6, discrete.p(abc='c'))\n\n def test_from_counts__1_var__vars_as_arg(self):\n\n counts = Series({\n 'a': 1,\n 'b': 2,\n 'c': 3\n })\n discrete = Discrete.from_counts(counts, variables='abc')\n self.assertEqual(['abc'], discrete.variables)\n self.assertEqual({'abc': ['a', 'b', 'c']}, discrete.states)\n self.assertEqual(1 / 6, discrete.p(abc='a'))\n self.assertEqual(2 / 6, 
discrete.p(abc='b'))\n self.assertEqual(3 / 6, discrete.p(abc='c'))\n\n def test_from_counts__1_var__states_as_arg(self):\n\n counts = Series({\n 'a': 1,\n 'b': 2,\n 'c': 3\n })\n counts.index.name = 'abc'\n discrete = Discrete.from_counts(\n counts, states=['a', 'b', 'c', 'd']\n )\n self.assertEqual(['abc'], discrete.variables)\n self.assertEqual({'abc': ['a', 'b', 'c', 'd']},\n discrete.states)\n self.assertEqual(1 / 6, discrete.p(abc='a'))\n self.assertEqual(2 / 6, discrete.p(abc='b'))\n self.assertEqual(3 / 6, discrete.p(abc='c'))\n self.assertEqual(0, discrete.p(abc='d'))\n\n def test_from_counts__2_vars__vars_on_index(self):\n\n counts = Series({\n ('a', 'b'): 1,\n ('c', 'd'): 2,\n ('e', 'f'): 3\n })\n counts.index.names = ['ace', 'bdf']\n discrete = Discrete.from_counts(counts)\n self.assertEqual(['ace', 'bdf'], discrete.variables)\n self.assertEqual({'ace': ['a', 'c', 'e'], 'bdf': ['b', 'd', 'f']},\n discrete.states)\n self.assertEqual(1 / 6, discrete.p(ace='a', bdf='b'))\n self.assertEqual(2 / 6, discrete.p(ace='c', bdf='d'))\n self.assertEqual(3 / 6, discrete.p(ace='e', bdf='f'))\n\n def test_from_counts__2_vars__vars_as_arg(self):\n\n counts = Series({\n ('a', 'b'): 1,\n ('c', 'd'): 2,\n ('e', 'f'): 3\n })\n discrete = Discrete.from_counts(counts, variables=['ace', 'bdf'])\n self.assertEqual(['ace', 'bdf'], discrete.variables)\n self.assertEqual({'ace': ['a', 'c', 'e'], 'bdf': ['b', 'd', 'f']},\n discrete.states)\n self.assertEqual(1 / 6, discrete.p(ace='a', bdf='b'))\n self.assertEqual(2 / 6, discrete.p(ace='c', bdf='d'))\n self.assertEqual(3 / 6, discrete.p(ace='e', bdf='f'))\n\n def test_from_counts__2_vars__states_as_arg(self):\n\n counts = Series({\n ('a', 'b'): 1,\n ('c', 'd'): 2,\n ('e', 'f'): 3\n })\n counts.index.names = ['ace', 'bdf']\n states = {'ace': ['a', 'c', 'e', 'g'],\n 'bdf': ['b', 'd', 'f', 'h']}\n discrete = Discrete.from_counts(\n counts,\n states=states\n )\n self.assertEqual(['ace', 'bdf'], discrete.variables)\n self.assertEqual(states, discrete.states)\n self.assertEqual(1 / 6, discrete.p(ace='a', bdf='b'))\n self.assertEqual(2 / 6, discrete.p(ace='c', bdf='d'))\n self.assertEqual(3 / 6, discrete.p(ace='e', bdf='f'))\n\n def test_from_observations__1_var(self):\n\n observations = DataFrame({\n 'ace': ['a', 'c', 'c', 'e', 'e', 'e'],\n })\n discrete = Discrete.from_observations(observations)\n self.assertEqual(['ace'], discrete.variables)\n self.assertEqual({'ace': ['a', 'c', 'e']}, discrete.states)\n self.assertEqual(1 / 6, discrete.p(ace='a'))\n self.assertEqual(2 / 6, discrete.p(ace='c'))\n self.assertEqual(3 / 6, discrete.p(ace='e'))\n\n def test_from_observations__1_var__replace_vars(self):\n\n observations = DataFrame({\n 'ace': ['a', 'c', 'c', 'e', 'e', 'e'],\n })\n discrete = Discrete.from_observations(\n observations, variables='ACE'\n )\n self.assertEqual(['ACE'], discrete.variables)\n self.assertEqual({'ACE': ['a', 'c', 'e']}, discrete.states)\n self.assertEqual(1 / 6, discrete.p(ACE='a'))\n self.assertEqual(2 / 6, discrete.p(ACE='c'))\n self.assertEqual(3 / 6, discrete.p(ACE='e'))\n\n def test_from_observations__1_var__extra_states(self):\n\n observations = DataFrame({\n 'ace': ['a', 'c', 'c', 'e', 'e', 'e'],\n })\n discrete = Discrete.from_observations(\n observations, states=['a', 'c', 'e', 'g']\n )\n self.assertEqual(['ace'], discrete.variables)\n self.assertEqual({'ace': ['a', 'c', 'e', 'g']}, discrete.states)\n self.assertEqual(1 / 6, discrete.p(ace='a'))\n self.assertEqual(2 / 6, discrete.p(ace='c'))\n self.assertEqual(3 / 6, 
discrete.p(ace='e'))\n self.assertEqual(0, discrete.p(ace='g'))\n\n def test_from_observations__2_vars(self):\n\n observations = DataFrame({\n 'ace': ['a', 'c', 'c', 'e', 'e', 'e'],\n 'bdf': ['b', 'd', 'd', 'f', 'f', 'f']\n })\n discrete = Discrete.from_observations(observations)\n self.assertEqual(['ace', 'bdf'], discrete.variables)\n self.assertEqual({'ace': ['a', 'c', 'e'],\n 'bdf': ['b', 'd', 'f']},\n discrete.states)\n self.assertEqual(1 / 6, discrete.p(ace='a', bdf='b'))\n self.assertEqual(2 / 6, discrete.p(ace='c', bdf='d'))\n self.assertEqual(3 / 6, discrete.p(ace='e', bdf='f'))\n\n def test_from_observations__2_vars__replace_vars(self):\n\n observations = DataFrame({\n 'ace': ['a', 'c', 'c', 'e', 'e', 'e'],\n 'bdf': ['b', 'd', 'd', 'f', 'f', 'f']\n })\n discrete = Discrete.from_observations(\n observations, variables=['ACE', 'BDF']\n )\n self.assertEqual(['ACE', 'BDF'], discrete.variables)\n self.assertEqual({'ACE': ['a', 'c', 'e'],\n 'BDF': ['b', 'd', 'f']},\n discrete.states)\n self.assertEqual(1 / 6, discrete.p(ACE='a', BDF='b'))\n self.assertEqual(2 / 6, discrete.p(ACE='c', BDF='d'))\n self.assertEqual(3 / 6, discrete.p(ACE='e', BDF='f'))\n\n def test_from_observations__2_vars__extra_states(self):\n\n observations = DataFrame({\n 'ace': ['a', 'c', 'c', 'e', 'e', 'e'],\n 'bdf': ['b', 'd', 'd', 'f', 'f', 'f']\n })\n states = {\n 'ace': ['a', 'c', 'e', 'g'],\n 'bdf': ['b', 'd', 'f', 'h']\n }\n discrete = Discrete.from_observations(\n observations, states=states\n )\n self.assertEqual(['ace', 'bdf'], discrete.variables)\n self.assertEqual(states, discrete.states)\n self.assertEqual(1 / 6, discrete.p(ace='a', bdf='b'))\n self.assertEqual(2 / 6, discrete.p(ace='c', bdf='d'))\n self.assertEqual(3 / 6, discrete.p(ace='e', bdf='f'))\n\n def test_from_probs__with_dict(self):\n\n bowl = Discrete.from_probs({\n 'bowl 1': 0.5, 'bowl 2': 0.5},\n variables=['bowl']\n )\n self.assertIsInstance(bowl, Discrete)\n mix_1994 = Discrete.from_probs({\n 'brown': 0.3, 'yellow': 0.2, 'red': 0.2,\n 'green': 0.1, 'orange': 0.1, 'tan': 0.1\n }, variables='color')\n self.assertIsInstance(mix_1994, Discrete)\n\n def test_given_all_variables(self):\n\n expected = Discrete.binary(0, 'A_xor_B').data\n xor = Conditional.binary_from_probs(\n data={\n (0, 0): 0,\n (0, 1): 1,\n (1, 0): 1,\n (1, 1): 0,\n },\n joint_variable='A_xor_B',\n conditional_variables=['A', 'B']\n )\n actual = xor.given(A=1, B=1).data\n self.assertTrue(expected.equals(actual))\n\n def test_given_one_variable(self):\n\n expected = Conditional.binary_from_probs(\n data={\n 0: 0,\n 1: 1,\n },\n joint_variable='A_xor_B',\n conditional_variables='B'\n ).data\n xor = Conditional.binary_from_probs(\n data={\n (0, 0): 0,\n (0, 1): 1,\n (1, 0): 1,\n (1, 1): 0,\n },\n joint_variable='A_xor_B',\n conditional_variables=['A', 'B']\n )\n actual = xor.given(A=0).data\n self.assertTrue(expected.equals(actual))\n\n def test_mode_1d_categorical(self):\n\n counts = Series({\n 'a': 1, 'c': 2, 'e': 3\n })\n discrete = Discrete.from_counts(counts, variables='x')\n self.assertEqual('e', discrete.mode())\n\n def test_mode_1d_numeric(self):\n\n discrete = Discrete.from_probs(\n data={0: 0.7, 1000: 0.2, 2000: 0.1},\n variables='a'\n )\n self.assertEqual(0, discrete.mode())\n\n def test_mode_nd_categorical(self):\n\n counts = Series({\n ('a', 'b'): 1,\n ('c', 'd'): 2,\n ('e', 'f'): 3\n })\n discrete = Discrete.from_counts(counts, variables=['ace', 'bdf'])\n expected = DataFrame([{\n 'ace': 'e',\n 'bdf': 'f'\n }])\n actual = discrete.mode()\n 
self.assertTrue(expected.equals(actual))\n\n def test_mode_1d_multi_value(self):\n\n counts = Series({\n 'a': 2,\n 'b': 2,\n 'c': 1\n })\n discrete = Discrete.from_counts(counts, variables='x')\n self.assertListEqual(['a', 'b'], discrete.mode())\n\n def test_mode_nd_multi_value(self):\n\n counts = Series({\n ('a', 'b'): 1,\n ('c', 'd'): 3,\n ('e', 'f'): 3\n })\n expected = DataFrame(\n {'ace': ['c', 'e'],\n 'bdf': ['d', 'f']},\n )\n discrete = Discrete.from_counts(counts, variables=['ace', 'bdf'])\n actual = discrete.mode()\n self.assertTrue(expected.equals(actual))\n\n def test_mean_numeric(self):\n\n discrete = Discrete.from_probs(\n data={0: 0.7, 1000: 0.2, 2000: 0.1},\n variables='a'\n )\n self.assertAlmostEqual(\n 0 * 0.7 + 1000 * 0.2 + 2000 * 0.1,\n discrete.mean()\n )\n\n def test_mean_categorical(self):\n\n counts = Series({\n 'a': 1, 'c': 2, 'e': 3\n })\n discrete = Discrete.from_counts(counts, variables='x')\n self.assertRaises(TypeError, discrete.mean)\n\n def test_min_numeric(self):\n\n discrete = Discrete.from_probs(\n data={0: 0.7, 1000: 0.2, 2000: 0.1},\n variables='a'\n )\n self.assertAlmostEqual(0, discrete.min())\n discrete_2 = Discrete.from_probs(\n data={0: 0.0, 1000: 0.2, 2000: 0.8},\n variables='a'\n )\n self.assertAlmostEqual(1000, discrete_2.min())\n\n def test_min_categorical(self):\n\n discrete = Discrete.from_probs(\n data={'a': 0.7, 'b': 0.2, 'c': 0.1},\n variables='x'\n )\n self.assertRaises(TypeError, discrete.min)\n\n def test_max_numeric(self):\n\n discrete = Discrete.from_probs(\n data={0: 0.7, 1000: 0.2, 2000: 0.1},\n variables='a'\n )\n self.assertAlmostEqual(2000, discrete.max())\n discrete_2 = Discrete.from_probs(\n data={0: 0.7, 1000: 0.3, 2000: 0},\n variables='a'\n )\n self.assertAlmostEqual(1000, discrete_2.max())\n\n def test_max_categorical(self):\n\n discrete = Discrete.from_probs(\n data={'a': 0.7, 'b': 0.2, 'c': 0.1},\n variables='x'\n )\n self.assertRaises(TypeError, discrete.max)\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ], [ "pandas.DataFrame" ], [ "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
trainorpj/probability
[ "944272707d352b12b91c419082fb3ec34b83b494", "944272707d352b12b91c419082fb3ec34b83b494", "944272707d352b12b91c419082fb3ec34b83b494", "944272707d352b12b91c419082fb3ec34b83b494" ]
[ "tensorflow_probability/python/bijectors/affine_scalar_test.py", "tensorflow_probability/python/positive_semidefinite_kernels/polynomial_test.py", "tensorflow_probability/python/bijectors/correlation_cholesky_test.py", "tensorflow_probability/python/bijectors/weibull_test.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Affine Scalar Tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow_probability.python.internal import test_case\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass _AffineScalarBijectorTest(object):\n \"\"\"Tests correctness of the Y = scale @ x + shift transformation.\"\"\"\n\n def testProperties(self):\n # scale corresponds to 1.\n bijector = tfb.AffineScalar(shift=-1.)\n self.assertStartsWith(bijector.name, \"affine_scalar\")\n\n def testTinyScale(self):\n log_scale = tf.cast(-2000., self.dtype)\n x = tf.cast(1., self.dtype)\n scale = tf.exp(log_scale)\n fldj_linear = tfb.AffineScalar(scale=scale).forward_log_det_jacobian(\n x, event_ndims=0)\n fldj_log = tfb.AffineScalar(log_scale=log_scale).forward_log_det_jacobian(\n x, event_ndims=0)\n fldj_linear_, fldj_log_ = self.evaluate([fldj_linear, fldj_log])\n # Using the linear scale will saturate to 0, and produce bad log-det\n # Jacobians.\n self.assertNotEqual(fldj_linear_, fldj_log_)\n self.assertAllClose(-2000., fldj_log_)\n\n def testNoBatchScalar(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value, dtype=self.dtype)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n bijector = tfb.AffineScalar(shift=self.dtype(-1.), scale=self.dtype(2.))\n x = self.dtype([1., 2, 3]) # Three scalar samples (no batches).\n self.assertAllClose([1., 3, 5], run(bijector.forward, x))\n self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))\n self.assertAllClose(\n -np.log(2.),\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def testOneBatchScalarViaIdentityUserProvidesShiftOnly(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value, dtype=self.dtype)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n # Batched shift\n bijector = tfb.AffineScalar(shift=self.dtype([1.]))\n x = self.dtype([1.]) # One sample from one batches.\n self.assertAllClose([2.], run(bijector.forward, x))\n self.assertAllClose([0.], run(bijector.inverse, x))\n self.assertAllClose(\n 0.,\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def 
testOneBatchScalarViaIdentityUserProvidesScaleOnly(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n # Batched scale\n bijector = tfb.AffineScalar(scale=self.dtype([2.]))\n x = self.dtype([1.]) # One sample from one batches.\n self.assertAllClose([2.], run(bijector.forward, x))\n self.assertAllClose([0.5], run(bijector.inverse, x))\n self.assertAllClose(\n [np.log(0.5)],\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def testTwoBatchScalarIdentityViaIdentity(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value, dtype=self.dtype)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n # Batch of 2 shifts\n bijector = tfb.AffineScalar(shift=self.dtype([1., -1]))\n x = self.dtype([1., 1]) # One sample from each of two batches.\n self.assertAllClose([2., 0], run(bijector.forward, x))\n self.assertAllClose([0., 2], run(bijector.inverse, x))\n self.assertAllClose(\n 0.,\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def testTwoBatchScalarIdentityViaScale(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value, dtype=self.dtype)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n # Batch of 2 scales and 2 shifts\n bijector = tfb.AffineScalar(\n shift=self.dtype([1., -1]),\n scale=self.dtype([2., 1]))\n x = self.dtype([1., 1]) # One sample from each of two batches.\n self.assertAllClose([3., 0], run(bijector.forward, x))\n self.assertAllClose([0., 2], run(bijector.inverse, x))\n self.assertAllClose(\n [-np.log(2), 0.],\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def testScalarCongruency(self):\n bijector = tfb.AffineScalar(shift=self.dtype(3.6), scale=self.dtype(0.42))\n bijector_test_util.assert_scalar_congruency(\n bijector,\n lower_x=self.dtype(-2.),\n upper_x=self.dtype(2.),\n eval_func=self.evaluate)\n\n def testScalarCongruencyLogScale(self):\n bijector = tfb.AffineScalar(\n shift=self.dtype(3.6), log_scale=self.dtype(np.log(0.42)))\n bijector_test_util.assert_scalar_congruency(\n bijector,\n lower_x=self.dtype(-2.),\n upper_x=self.dtype(2.),\n eval_func=self.evaluate)\n\n def testVariableGradients(self):\n b = tfb.AffineScalar(\n shift=tf.Variable(1.),\n scale=tf.Variable(2.))\n\n with tf.GradientTape() as tape:\n y = b.forward(.1)\n self.assertAllNotNone(tape.gradient(y, [b.shift, b.scale]))\n\n def testImmutableScaleAssertion(self):\n with self.assertRaisesOpError(\"Argument `scale` must be non-zero\"):\n b = tfb.AffineScalar(scale=0., validate_args=True)\n _ = self.evaluate(b.forward(1.))\n\n def testVariableScaleAssertion(self):\n v = tf.Variable(0.)\n self.evaluate(v.initializer)\n with self.assertRaisesOpError(\"Argument `scale` must be non-zero\"):\n b = tfb.AffineScalar(scale=v, validate_args=True)\n _ = self.evaluate(b.forward(1.))\n\n def testModifiedVariableScaleAssertion(self):\n v = tf.Variable(1.)\n self.evaluate(v.initializer)\n b = tfb.AffineScalar(scale=v, validate_args=True)\n with 
self.assertRaisesOpError(\"Argument `scale` must be non-zero\"):\n with tf.control_dependencies([v.assign(0.)]):\n _ = self.evaluate(b.forward(1.))\n\n\nclass AffineScalarBijectorTestFloat32(test_case.TestCase,\n _AffineScalarBijectorTest):\n dtype = np.float32\n\n\nclass AffineScalarBijectorTestFloat64(test_case.TestCase,\n _AffineScalarBijectorTest):\n dtype = np.float64\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for Polynomial and Linear.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom absl.testing import parameterized\nimport numpy as np\n\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability import positive_semidefinite_kernels as tfpk\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PolynomialTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Test the Polynomial kernel.\"\"\"\n\n def test_mismatched_float_types_are_bad(self):\n with self.assertRaises(TypeError):\n tfpk.Polynomial(\n bias_variance=np.float32(1.),\n slope_variance=np.float64(1.),\n shift=0.,\n exponent=1.\n )\n\n def testFloat32Fallback(self):\n # Should be OK (float32 fallback).\n self.polynomial = tfpk.Polynomial(\n bias_variance=0,\n slope_variance=1,\n shift=0,\n exponent=1)\n # Should be OK.\n tfpk.Polynomial(\n bias_variance=np.float32(1.),\n slope_variance=1.,\n shift=0.,\n exponent=1.)\n\n def testValidateArgsNonPositiveAreBad(self):\n with self.assertRaisesOpError('Condition x > 0 did not hold'):\n k = tfpk.Polynomial(\n bias_variance=-1.,\n validate_args=True)\n self.evaluate(k.apply([1.], [1.]))\n with self.assertRaisesOpError('Condition x > 0 did not hold'):\n k = tfpk.Polynomial(\n slope_variance=-1.,\n validate_args=True)\n self.evaluate(k.apply([1.], [1.]))\n with self.assertRaisesOpError('Condition x > 0 did not hold'):\n k = tfpk.Polynomial(\n exponent=-1.,\n validate_args=True)\n self.evaluate(k.apply([1.], [1.]))\n\n def testShifttNonPositiveIsOk(self):\n # No exception expected\n k = tfpk.Polynomial(\n shift=-1.,\n validate_args=True)\n self.evaluate(k.apply([1.], [1.]))\n\n def testValidateArgsNoneIsOk(self):\n # No exception expected\n k = tfpk.Polynomial(\n bias_variance=None,\n slope_variance=None,\n shift=None,\n exponent=None,\n validate_args=True)\n self.evaluate(k.apply([[1.]], [[1.]]))\n\n def testNoneShapes(self):\n k = tfpk.Polynomial(\n bias_variance=np.reshape(np.arange(12.), [2, 3, 2]))\n self.assertEqual([2, 3, 2], k.batch_shape.as_list())\n\n @parameterized.named_parameters(\n dict(\n testcase_name='Shape [] kernel',\n bias_variance=2.,\n slope_variance=2.,\n shift=2.,\n exponent=2.,\n shape=[]),\n dict(\n testcase_name='Shape [1] kernel',\n bias_variance=[2.],\n 
slope_variance=[2.],\n shift=[2.],\n exponent=[2.],\n shape=[1]),\n dict(\n testcase_name='Shape [2] kernel',\n bias_variance=[1., 2.],\n slope_variance=[1., 2.],\n shift=[1., 2.],\n exponent=[1., 2.],\n shape=[2]),\n dict(\n testcase_name='Shape [2, 1] kernel',\n bias_variance=[[1.], [2.]],\n slope_variance=[[1.], [2.]],\n shift=[[1.], [2.]],\n exponent=[[1.], [2.]],\n shape=[2, 1]),\n dict(\n testcase_name='Shape [2, 1] broadcast kernel',\n bias_variance=None,\n slope_variance=2.,\n shift=[2.],\n exponent=[[1.], [2.]],\n shape=[2, 1]))\n def testBatchShape(self, bias_variance, slope_variance,\n shift, exponent, shape):\n k = tfpk.Polynomial(\n bias_variance=bias_variance,\n slope_variance=slope_variance,\n shift=shift,\n exponent=exponent,\n validate_args=True)\n self.assertAllEqual(shape, k.batch_shape.as_list())\n self.assertAllEqual(shape, self.evaluate(k.batch_shape_tensor()))\n\n def testFloat32(self):\n # No exception expected\n k = tfpk.Polynomial(\n bias_variance=0.,\n slope_variance=1.,\n shift=0.,\n exponent=1.,\n feature_ndims=1)\n x = np.ones([5, 3], np.float32)\n y = np.ones([5, 3], np.float32)\n k.apply(x, y)\n\n def testFloat64(self):\n # No exception expected\n k = tfpk.Polynomial(\n bias_variance=np.float64(0.),\n slope_variance=np.float64(1.),\n shift=np.float64(0.),\n exponent=np.float64(1.),\n feature_ndims=1)\n x = np.ones([5, 3], np.float64)\n y = np.ones([5, 3], np.float64)\n k.apply(x, y)\n\n @parameterized.named_parameters(\n dict(\n testcase_name='1 feature dimension',\n feature_ndims=1,\n x_shape=(5, 3),\n y_shape=(5, 3),\n apply_shape=(5,),\n ),\n dict(\n testcase_name='2 feature dimension',\n feature_ndims=2,\n x_shape=(5, 3, 2),\n y_shape=(5, 3, 2),\n apply_shape=(5,),\n ))\n def testShapesAreCorrectApply(self, feature_ndims,\n x_shape, y_shape, apply_shape):\n k = tfpk.Polynomial(\n bias_variance=0.,\n slope_variance=1.,\n shift=0.,\n exponent=1.,\n feature_ndims=feature_ndims)\n x = np.ones(x_shape, np.float32)\n y = np.ones(y_shape, np.float32)\n self.assertAllEqual(\n apply_shape, k.apply(x, y).shape)\n\n @parameterized.named_parameters(\n dict(\n testcase_name='1 feature dimension, 1 batch dimension',\n feature_ndims=1,\n x_shape=(5, 3),\n y_shape=(4, 3),\n matrix_shape=(5, 4),\n ),\n dict(\n testcase_name='1 feature dimension, 2 batch dimensions',\n feature_ndims=1,\n x_shape=(10, 5, 3),\n y_shape=(10, 4, 3),\n matrix_shape=(10, 5, 4),\n ),\n dict(\n testcase_name='2 feature dimensions, 1 batch dimension',\n feature_ndims=2,\n x_shape=(5, 3, 2),\n y_shape=(4, 3, 2),\n matrix_shape=(5, 4),\n ))\n def testShapesAreCorrectMatrix(self, feature_ndims,\n x_shape, y_shape, matrix_shape):\n k = tfpk.Polynomial(\n bias_variance=0.,\n slope_variance=1.,\n shift=0.,\n exponent=1.,\n feature_ndims=feature_ndims)\n x = np.ones(x_shape, np.float32)\n y = np.ones(y_shape, np.float32)\n self.assertAllEqual(matrix_shape, k.matrix(x, y).shape)\n\n def testShapesAreCorrectBroadcast(self):\n k = tfpk.Polynomial(\n bias_variance=np.ones([2, 1, 1], np.float32),\n slope_variance=np.ones([1, 3, 1], np.float32))\n self.assertAllEqual(\n [2, 3, 2, 4, 5],\n #`--' | `--'\n # | | `- matrix shape\n # | `- from input batch shapes\n # `- from broadcasting kernel params\n k.matrix(\n np.ones([2, 4, 3], np.float32),\n np.ones([2, 5, 3], np.float32)\n ).shape)\n\n def testValuesAreCorrect(self):\n bias_variance = 1.5\n slope_variance = 0.5\n shift = 1.\n exponent = 2\n k = tfpk.Polynomial(\n bias_variance=bias_variance,\n slope_variance=slope_variance,\n shift=shift,\n 
exponent=exponent\n )\n x = np.random.uniform(-1, 1, size=[5, 3]).astype(np.float32)\n y = np.random.uniform(-1, 1, size=[4, 3]).astype(np.float32)\n self.assertAllClose(\n (bias_variance ** 2 + slope_variance ** 2 *\n ((x - shift).dot((y - shift).T)) ** exponent),\n self.evaluate(k.matrix(x, y))\n )\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass LinearTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Test the Linear kernel.\"\"\"\n\n def testIsPolynomial(self):\n # Linear kernel is subclass of Polynomial kernel\n self.assertIsInstance(tfpk.Linear(), tfpk.Polynomial)\n\n def testValuesAreCorrect(self):\n k = tfpk.Linear()\n x = np.random.uniform(-1, 1, size=[5, 3]).astype(np.float32)\n y = np.random.uniform(-1, 1, size=[4, 3]).astype(np.float32)\n self.assertAllClose(x.dot(y.T), self.evaluate(k.matrix(x, y)))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for CorrelationCholesky bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\n# Dependency imports\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow_probability.python.distributions import lkj\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass CorrelationCholeskyBijectorTest(parameterized.TestCase, tf.test.TestCase):\n \"\"\"Tests the correctness of the CorrelationCholesky bijector.\"\"\"\n\n def testBijector(self):\n x = np.float32(np.array([7., -5., 5., 1., 2., -2.]))\n y = np.float32(\n np.array([[1., 0., 0., 0.], [0.707107, 0.707107, 0., 0.],\n [-0.666667, 0.666667, 0.333333, 0.], [0.5, -0.5, 0.7, 0.1]]))\n\n b = tfb.CorrelationCholesky()\n\n y_ = self.evaluate(b.forward(x))\n self.assertAllClose(y, y_, atol=1e-5, rtol=1e-5)\n\n x_ = self.evaluate(b.inverse(y))\n self.assertAllClose(x, x_, atol=1e-5, rtol=1e-5)\n\n expected_fldj = -0.5 * np.sum([2, 3, 4] * np.log([2, 9, 100]))\n\n fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))\n self.assertAllClose(expected_fldj, fldj)\n\n ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))\n self.assertAllClose(-expected_fldj, ildj)\n\n def testBijectorBatch(self):\n x = np.float32([[7., -5., 5., 1., 2., -2.], [1., 3., -5., 1., -4., 8.]])\n y = np.float32([\n [[1., 0., 0., 0.], [0.707107, 0.707107, 0., 0.],\n [-0.666667, 0.666667, 0.333333, 0.], [0.5, -0.5, 0.7, 0.1]],\n [[1., 0., 0., 0.], [0.707107, 0.707107, 0., 0.],\n [0.888889, -0.444444, 0.111111, 0.],\n [-0.833333, 0.5, 0.166667, 
0.166667]],\n ])\n\n b = tfb.CorrelationCholesky()\n\n y_ = self.evaluate(b.forward(x))\n self.assertAllClose(y, y_, atol=1e-5, rtol=1e-5)\n\n x_ = self.evaluate(b.inverse(y))\n self.assertAllClose(x, x_, atol=1e-5, rtol=1e-5)\n\n expected_fldj = -0.5 * np.sum(\n [2, 3, 4] * np.log([[2, 9, 100], [2, 81, 36]]), axis=-1)\n\n fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))\n self.assertAllClose(expected_fldj, fldj)\n\n ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))\n self.assertAllClose(-expected_fldj, ildj)\n\n def testShape(self):\n x_shape = tf.TensorShape([5, 4, 6])\n y_shape = tf.TensorShape([5, 4, 4, 4])\n\n b = tfb.CorrelationCholesky(validate_args=True)\n\n x = tf.ones(shape=x_shape, dtype=tf.float32)\n y_ = b.forward(x)\n self.assertAllEqual(\n tensorshape_util.as_list(y_.shape), tensorshape_util.as_list(y_shape))\n x_ = b.inverse(y_)\n self.assertAllEqual(\n tensorshape_util.as_list(x_.shape), tensorshape_util.as_list(x_shape))\n\n y_shape_ = b.forward_event_shape(x_shape)\n self.assertAllEqual(\n tensorshape_util.as_list(y_shape_), tensorshape_util.as_list(y_shape))\n x_shape_ = b.inverse_event_shape(y_shape)\n self.assertAllEqual(\n tensorshape_util.as_list(x_shape_), tensorshape_util.as_list(x_shape))\n\n y_shape_tensor = self.evaluate(\n b.forward_event_shape_tensor(tensorshape_util.as_list(x_shape)))\n self.assertAllEqual(y_shape_tensor, tensorshape_util.as_list(y_shape))\n x_shape_tensor = self.evaluate(\n b.inverse_event_shape_tensor(tensorshape_util.as_list(y_shape)))\n self.assertAllEqual(x_shape_tensor, tensorshape_util.as_list(x_shape))\n\n def testShapeError(self):\n\n b = tfb.FillTriangular(validate_args=True)\n\n x_shape_bad = tf.TensorShape([5, 4, 7])\n with self.assertRaisesRegexp(ValueError, \"is not a triangular number\"):\n b.forward_event_shape(x_shape_bad)\n with self.assertRaisesOpError(\"is not a triangular number\"):\n self.evaluate(\n b.forward_event_shape_tensor(tensorshape_util.as_list(x_shape_bad)))\n\n y_shape_bad = tf.TensorShape([5, 4, 4, 3])\n with self.assertRaisesRegexp(ValueError, \"Matrix must be square\"):\n b.inverse_event_shape(y_shape_bad)\n with self.assertRaisesOpError(\"Matrix must be square\"):\n self.evaluate(\n b.inverse_event_shape_tensor(tensorshape_util.as_list(y_shape_bad)))\n\n def testTheoreticalFldj(self):\n bijector = tfb.CorrelationCholesky()\n x = np.linspace(-50, 50, num=30).reshape(5, 6).astype(np.float64)\n y = self.evaluate(bijector.forward(x))\n bijector_test_util.assert_bijective_and_finite(\n bijector,\n x,\n y,\n eval_func=self.evaluate,\n event_ndims=1,\n inverse_event_ndims=2,\n rtol=1e-5)\n fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)\n fldj_theoretical = bijector_test_util.get_fldj_theoretical(\n bijector,\n x,\n event_ndims=1,\n inverse_event_ndims=2,\n output_to_unconstrained=tfb.Invert(tfb.FillTriangular()))\n self.assertAllClose(\n self.evaluate(fldj_theoretical),\n self.evaluate(fldj),\n atol=1e-5,\n rtol=1e-5)\n\n def testBijectorWithVariables(self):\n x_ = np.array([1.], dtype=np.float32)\n y_ = np.array([[1., 0.], [0.707107, 0.707107]], dtype=np.float32)\n\n x = tf.Variable(x_, dtype=tf.float32)\n y = tf.Variable(y_, dtype=tf.float32)\n forward_event_ndims = tf.Variable(1, dtype=tf.int32)\n inverse_event_ndims = tf.Variable(2, dtype=tf.int32)\n self.evaluate([\n v.initializer for v in (x, y, forward_event_ndims, inverse_event_ndims)\n ])\n\n bijector = tfb.CorrelationCholesky()\n self.assertAllClose(\n y_, self.evaluate(bijector.forward(x)), atol=1e-5, 
rtol=1e-5)\n self.assertAllClose(\n x_, self.evaluate(bijector.inverse(y)), atol=1e-5, rtol=1e-5)\n\n fldj = bijector.forward_log_det_jacobian(x, event_ndims=forward_event_ndims)\n self.assertAllClose(-np.log(2), self.evaluate(fldj))\n\n ildj = bijector.inverse_log_det_jacobian(y, event_ndims=inverse_event_ndims)\n self.assertAllClose(np.log(2), ildj)\n\n @parameterized.parameters(itertools.product([2, 3, 4, 5, 6, 7], [1., 2., 3.]))\n def testWithLKJSamples(self, dimension, concentration):\n bijector = tfb.CorrelationCholesky()\n lkj_dist = lkj.LKJ(\n dimension=dimension,\n concentration=np.float64(concentration),\n input_output_cholesky=True)\n batch_size = 10\n y = self.evaluate(lkj_dist.sample([batch_size]))\n x = self.evaluate(bijector.inverse(y))\n\n bijector_test_util.assert_bijective_and_finite(\n bijector,\n x,\n y,\n eval_func=self.evaluate,\n event_ndims=1,\n inverse_event_ndims=2,\n rtol=1e-5)\n\n fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)\n fldj_theoretical = bijector_test_util.get_fldj_theoretical(\n bijector,\n x,\n event_ndims=1,\n inverse_event_ndims=2,\n output_to_unconstrained=tfb.Invert(tfb.FillTriangular()))\n self.assertAllClose(\n self.evaluate(fldj_theoretical),\n self.evaluate(fldj),\n atol=1e-5,\n rtol=1e-5)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for Bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nfrom scipy import stats\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import bijectors as tfb\n\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass WeibullBijectorTest(tf.test.TestCase):\n \"\"\"Tests correctness of the weibull bijector.\"\"\"\n\n def testBijector(self):\n scale = 5.\n concentration = 0.3\n bijector = tfb.Weibull(\n scale=scale, concentration=concentration, validate_args=True)\n self.assertStartsWith(bijector.name, 'weibull')\n x = np.array([[[0.], [1.], [14.], [20.], [100.]]], dtype=np.float32)\n # Weibull distribution\n weibull_dist = stats.frechet_r(c=concentration, scale=scale)\n y = weibull_dist.cdf(x).astype(np.float32)\n self.assertAllClose(y, self.evaluate(bijector.forward(x)))\n self.assertAllClose(x, self.evaluate(bijector.inverse(y)))\n self.assertAllClose(\n weibull_dist.logpdf(x),\n self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=0)))\n self.assertAllClose(\n self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=0)),\n self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=0)),\n rtol=1e-4,\n atol=0.)\n\n def 
testBijectorConcentration1LogDetJacobianFiniteAtZero(self):\n # When concentration = 1., forward_log_det_jacobian should be finite at\n # zero.\n scale = np.logspace(0.1, 10., num=20).astype(np.float32)\n bijector = tfb.Weibull(scale, concentration=1.)\n fldj = self.evaluate(bijector.forward_log_det_jacobian(0., event_ndims=0))\n self.assertAllEqual(np.ones_like(fldj, dtype=np.bool), np.isfinite(fldj))\n\n def testScalarCongruency(self):\n bijector_test_util.assert_scalar_congruency(\n tfb.Weibull(scale=20., concentration=0.3),\n lower_x=1.,\n upper_x=100.,\n eval_func=self.evaluate,\n rtol=0.02)\n\n def testBijectiveAndFinite(self):\n bijector = tfb.Weibull(scale=20., concentration=2., validate_args=True)\n x = np.linspace(1., 8., num=10).astype(np.float32)\n y = np.linspace(\n -np.expm1(-1 / 400.),\n -np.expm1(-16), num=10).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n bijector, x, y, eval_func=self.evaluate, event_ndims=0, rtol=1e-3)\n\n def testAsserts(self):\n with self.assertRaisesOpError('Argument `scale` must be positive.'):\n b = tfb.Weibull(\n concentration=1., scale=-1., validate_args=True)\n self.evaluate(b.forward(-3.))\n with self.assertRaisesOpError('Argument `concentration` must be positive.'):\n b = tfb.Weibull(\n concentration=-1., scale=1., validate_args=True)\n self.evaluate(b.forward(-3.))\n\n def testVariableAsserts(self):\n concentration = tf.Variable(1.)\n scale = tf.Variable(1.)\n b = tfb.Weibull(\n concentration=concentration, scale=scale, validate_args=True)\n self.evaluate([concentration.initializer, scale.initializer])\n with self.assertRaisesOpError('Argument `scale` must be positive.'):\n with tf.control_dependencies([scale.assign(-1.)]):\n self.evaluate(b.forward(-3.))\n with self.assertRaisesOpError('Argument `concentration` must be positive.'):\n with tf.control_dependencies([\n scale.assign(1.), concentration.assign(-1.)]):\n self.evaluate(b.forward(-3.))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.exp", "tensorflow.compat.v2.Variable", "numpy.log", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.GradientTape", "tensorflow.compat.v2.cast", "numpy.array", "tensorflow.compat.v1.placeholder_with_default" ], [ "tensorflow.compat.v2.test.main", "numpy.arange", "numpy.ones", "numpy.float64", "numpy.float32", "numpy.random.uniform" ], [ "tensorflow.compat.v2.Variable", "numpy.log", "tensorflow.compat.v2.test.main", "numpy.linspace", "tensorflow.compat.v2.ones", "numpy.float64", "numpy.float32", "tensorflow.compat.v2.TensorShape", "numpy.array" ], [ "tensorflow.compat.v2.Variable", "numpy.ones_like", "scipy.stats.frechet_r", "tensorflow.compat.v2.test.main", "numpy.isfinite", "numpy.logspace", "numpy.linspace", "numpy.expm1", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yuanqingsunny/recommenders-addons
[ "7fe0e213ff59fe3528e7c1877a3885cc7ca355d4" ]
[ "tensorflow_recommenders_addons/dynamic_embedding/python/ops/dynamic_feature_filter.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Filter of variables\"\"\"\n\nfrom tensorflow_recommenders_addons import dynamic_embedding\nfrom tensorflow_recommenders_addons.dynamic_embedding.python.ops import dynamic_embedding_variable\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_logging_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"dynamic_embedding.FilterPolicy\")\nclass FilterPolicy(object):\n \"\"\"\n FilterPolicy records the status of variable, while provides\n interfaces for continuously updating with the status with training\n progress, and filtering the eligible sparse tensor keys for training.\n The FilterPolicy is an abstract class, which can be inherited\n for customization.\n\n FilterPolicy holds `create_status`, `update`, `filter` and 'restrict' methods:\n create_status: creating the representation of the status.\n update: updating the status in iteration. The update operation usually\n runs before the training operation.\n filter: filtering sparse tensor keys for training according to\n the status. It's used to prohibit some feature keys from training.\n restrict: restricting the status table size to prevent the over growth of\n memory usage.\n \"\"\"\n\n def __init__(self, var):\n \"\"\"\n Construct the FilterPolicy from variable.\n\n Args:\n var: dynamic_ebmedding.Variable.\n \"\"\"\n if not isinstance(var, dynamic_embedding.Variable):\n raise TypeError(\"parameter var type should be\" \\\n \"dynamic_embedding.Variable.\")\n\n self.var = var\n self.threshold = 0\n self.create_status()\n\n def create_status(self, **kwargs):\n \"\"\"\n Create status for recording the variable.\n \"\"\"\n raise NotImplementedError\n\n def update(self, **kwargs):\n \"\"\"\n Update the status. 
The returned update operation is\n usually used with training operation, to keep the status\n following change of variables.\n\n Returns:\n An operation to update the status.\n \"\"\"\n raise NotImplementedError\n\n def filter(self, **kwargs):\n \"\"\"\n filter the feature keys following the direction by records\n of status.\n\n Returns:\n A list of feature keys that filter for training.\n \"\"\"\n raise NotImplementedError\n\n def restrict(self, **kwargs):\n \"\"\"\n Restrict the status table size to prevent out-of-memory\n when status table size is growing.\n\n Returns:\n An operation to restrict the status.\n \"\"\"\n raise NotImplementedError\n\n\n@tf_export(\"dynamic_embedding.FrequencyFilterPolicy\")\nclass FrequencyFilterPolicy(FilterPolicy):\n \"\"\"\n A status inherts from FilterPolicy, providing\n updating and filtering for variable by frequency rule.\n\n When call filter method, the class will filter values on\n ids by following eliminated-below-threshold rule for every ids\n in record. And when call update method, the record of every\n ids will be increased by 1.\n \"\"\"\n\n def __init__(self, var, **kwargs):\n default_count = kwargs.get('default_value', 0)\n self.default_count = constant_op.constant([default_count, 0], dtypes.int32)\n super(FrequencyFilterPolicy, self).__init__(var)\n\n def create_status(self, **kwargs):\n \"\"\"\n Create relative frequency status variables.\n \"\"\"\n scope = variable_scope.get_variable_scope()\n if scope.name:\n scope_name = scope.name + '/frequency_status_for_filter'\n else:\n scope_name = 'frequency_status_for_filter'\n\n with ops.name_scope(scope_name, \"frequency_status_for_filter\",\n []) as unique_scope:\n full_name = unique_scope + '/' + self.var.name\n self.freq_var = dynamic_embedding.get_variable(\n key_dtype=self.var.key_dtype,\n value_dtype=dtypes.int32,\n dim=2,\n name=full_name,\n devices=self.var.devices,\n partitioner=self.var.partition_fn,\n initializer=self.default_count,\n trainable=False,\n init_size=self.var.init_size,\n checkpoint=self.var.checkpoint,\n # checkpoint_path=self.var.checkpoint_path\n )\n\n def update(self, input_tensor=None, **kwargs):\n \"\"\"\n Update the frequency status. 
The corresponding frequency\n records will be increased by 1.\n\n Args:\n **kwargs: keyword arguments, including\n input_tensor: SparseTensor or dense tensor.\n Feature keys need to count.\n\n Returns:\n An operation for updating the frequency status.\n \"\"\"\n maxint32 = 2147483647\n update_ops = []\n\n if input_tensor is None:\n raise KeyError(\"update method expects parameter `input_tensor`.\")\n elif isinstance(input_tensor, ops.Tensor):\n values = input_tensor\n elif isinstance(input_tensor, sparse_tensor.SparseTensor):\n values = input_tensor.values\n else:\n raise TypeError(\"input_tensor must be \" \\\n \"either a SparseTensor or dense Tensor.\")\n\n values, _ = array_ops.unique(values)\n status_values = array_ops.reshape(values, (-1,))\n partition_index = \\\n self.var.partition_fn(status_values, self.var.shard_num)\n partitioned_values_list, partitioned_indices_list = \\\n dynamic_embedding_variable.make_partition(status_values,\n partition_index,\n self.var.shard_num)\n\n for idx, dev in enumerate(self.freq_var.devices):\n with ops.device(dev):\n feature_status = \\\n self.freq_var.tables[idx].lookup(\n partitioned_values_list[idx],\n dynamic_default_values=self.default_count,\n )\n feature_counts = array_ops.slice(feature_status, [0, 0], [-1, 1])\n feature_tstps = array_ops.slice(feature_status, [0, 1], [-1, 1])\n feature_tstps = array_ops.tile(\n array_ops.reshape(gen_logging_ops.timestamp(), [1]),\n array_ops.reshape(array_ops.size(feature_counts), (-1,)),\n )\n feature_tstps = math_ops.cast(feature_tstps, dtypes.int32)\n feature_tstps = array_ops.reshape(feature_tstps, (-1, 1))\n\n condition = math_ops.less(feature_counts, maxint32)\n feature_counts = array_ops.where(condition, feature_counts + 1,\n feature_counts)\n\n feature_status = array_ops.concat([feature_counts, feature_tstps], 1)\n\n mht_update = \\\n self.freq_var.tables[idx].insert(\n partitioned_values_list[idx],\n feature_status,\n )\n update_ops.append(mht_update)\n\n return control_flow_ops.group(update_ops)\n\n def filter(self, input_tensor=None, **kwargs):\n \"\"\"\n Filter feature keys below the threshold before training.\n Prevent unpopular feature keys from affecting training.\n\n Args:\n **kwargs: keyword arguments, including\n input_tensor: SparseTensor or DenseTensor.\n Feature keys need to filter.\n frequency_threshold: int. 
Filtering feature keys whose frequency values\n are less than the threshold.\n\n Returns:\n Tensor that are filtered for training.\n \"\"\"\n\n if input_tensor is None:\n raise KeyError(\"filter method expects parameter `input_tensor`.\")\n elif isinstance(input_tensor, ops.Tensor):\n input_type = \"DenseTensor\"\n values = input_tensor\n elif isinstance(input_tensor, sparse_tensor.SparseTensor):\n input_type = \"SparseTensor\"\n values = input_tensor.values\n indices = input_tensor.indices\n else:\n raise TypeError(\"input_tensor must be \" \\\n \"either a SparseTensor or dense Tensor.\")\n\n if 'frequency_threshold' in kwargs:\n frequency_threshold = kwargs['frequency_threshold']\n else:\n raise KeyError(\"filter method expects parameter `frequency_threshold`.\")\n if not isinstance(frequency_threshold, int):\n raise TypeError(\"frequency_threshold must be an integer.\")\n if frequency_threshold < 0:\n raise ValueError(\"frequency_threshold must be greater or equal to zero.\")\n\n unique_values, value_idx = array_ops.unique(values)\n status_values = array_ops.reshape(unique_values, (-1,))\n partition_index = \\\n self.var.partition_fn(status_values, self.var.shard_num)\n partitioned_values_list, partitioned_indices_list = \\\n dynamic_embedding_variable.make_partition(status_values,\n partition_index,\n self.var.shard_num)\n\n mask = []\n for idx, dev in enumerate(self.freq_var.devices):\n with ops.device(dev):\n feature_status = \\\n self.freq_var.tables[idx].lookup(\n partitioned_values_list[idx],\n dynamic_default_values=self.default_count,\n )\n\n feature_counts = array_ops.slice(feature_status, [0, 0], [-1, 1])\n sub_fv = array_ops.reshape(feature_counts, (-1,))\n partitioned_mask = math_ops.greater_equal(sub_fv, frequency_threshold)\n mask.append(partitioned_mask)\n\n total_mask = dynamic_embedding_variable._stitch(mask,\n partitioned_indices_list)\n total_mask = array_ops.gather(total_mask, value_idx)\n total_mask.set_shape([None])\n filter_values = array_ops.boolean_mask(values, total_mask)\n if input_type == \"DenseTensor\":\n filter_tensor = filter_values\n elif input_type == \"SparseTensor\":\n filter_indices = array_ops.boolean_mask(indices, total_mask)\n filter_tensor = sparse_tensor.SparseTensor(\n indices=filter_indices,\n values=filter_values,\n dense_shape=input_tensor.dense_shape)\n\n return filter_tensor\n\n def restrict(self, **kwargs):\n \"\"\"\n Restrict the status table size, eliminate the oldest\n feature keys, if the size of variable grow too large for\n threshold.\n\n Args:\n **kwargs: Keyword arguments, including\n threshold: int. The threshold for feature number\n in variable. 
When restrict method is called, the table\n size will be reduced to 'threshold'.\n factor: int,float,tf.int32,tf.int64,tf.float32.\n If the table size is greater than threshold * factor,\n restricting wiil be triggered.\n\n Returns:\n An operation to restrict the size of variable.\n \"\"\"\n try:\n self.threshold = kwargs['threshold']\n except:\n raise KeyError(\"restrict method expects parameter `threshold`.\")\n if not isinstance(self.threshold, int):\n raise TypeError(\"threshold must be an integer.\")\n if self.threshold < 0:\n raise ValueError(\"threshold must be greater or equal to zero.\")\n\n factor = kwargs.get('factor', 1.0)\n if isinstance(factor, ops.Tensor):\n if factor.dtype not in (dtypes.int32, dtypes.int64, dtypes.float32):\n raise TypeError(\n 'factor expects int, float, tf.int32, tf.int64, or tf.float32')\n factor = math_ops.cast(factor, dtype=dtypes.float32)\n if not isinstance(factor, (int, float)):\n raise TypeError(\n 'factor expects int, float, tf.int32, tf.int64, or tf.float32')\n\n cond_size = math_ops.cast(self.threshold, dtype=dtypes.float32) * factor\n cond_size = math_ops.cast(cond_size, dtype=dtypes.int64)\n condition = math_ops.greater(self.freq_var.size(), cond_size)\n restrict_status_ops = list()\n no_ops = list()\n\n for idx, dev in enumerate(self.freq_var.devices):\n with ops.device(dev):\n sub_tk, sub_tv = self.freq_var.tables[idx].export()\n sharded_threshold = int(self.threshold / self.freq_var.shard_num)\n\n sub_tv = array_ops.slice(sub_tv, [0, 1], [-1, 1])\n sub_tv = array_ops.reshape(sub_tv, (-1,))\n first_dim = array_ops.shape(sub_tv)[0]\n\n k_on_top = math_ops.cast(first_dim - sharded_threshold,\n dtype=dtypes.int32)\n k_on_top = math_ops.maximum(k_on_top, 0)\n _, removed_keys_ids = nn_ops.top_k(-sub_tv, k_on_top, sorted=False)\n removed_keys = array_ops.gather(sub_tk, removed_keys_ids)\n restrict_status_ops.append(\n self.freq_var.tables[idx].remove(removed_keys))\n no_ops.append(control_flow_ops.no_op())\n restrict_op = control_flow_ops.cond(condition, lambda: restrict_status_ops,\n lambda: no_ops)\n\n return restrict_op\n\n\n@tf_export(\"dynamic_embedding.ProbabilityFilterPolicy\")\nclass ProbabilityFilterPolicy(FilterPolicy):\n \"\"\"\n A status inherts from FilterPolicy, providing\n updating and filtering for variable by probability rule.\n\n When call filter method, the class will filter values on\n ids by following probability rule for new ids (no recorded\n in the table). 
And when call update method, new ids will\n be stored in the table.\n \"\"\"\n\n def __init__(self, var, **kwargs):\n self.default_tstp = constant_op.constant(0, dtypes.int32)\n super(ProbabilityFilterPolicy, self).__init__(var)\n\n def create_status(self, **kwargs):\n \"\"\"\n Create relative probability status variables.\n \"\"\"\n scope = variable_scope.get_variable_scope()\n if scope.name:\n scope_name = scope.name + '/probability_status_for_filter'\n else:\n scope_name = 'probability_status_for_filter'\n\n with ops.name_scope(scope_name, \"probability_status_for_filter\",\n []) as unique_scope:\n full_name = unique_scope + '/' + self.var.name\n self.tstp_var = dynamic_embedding.get_variable(\n key_dtype=self.var.key_dtype,\n value_dtype=dtypes.int32,\n dim=1,\n name=full_name,\n devices=self.var.devices,\n partitioner=self.var.partition_fn,\n initializer=self.default_tstp,\n trainable=False,\n init_size=self.var.init_size,\n checkpoint=self.var.checkpoint,\n # checkpoint_path=self.var.checkpoint_path\n )\n\n def update(self, input_tensor=None, **kwargs):\n \"\"\"\n Update the probability status table. The filter ids will be\n stored in the table and record timestamp.\n\n Args:\n **kwargs: keyword arguments, including\n input_tensor: SparseTensor or dense tensor.\n Feature keys need to count.\n\n Returns:\n An operation for updating the frequency status.\n \"\"\"\n update_ops = []\n\n if input_tensor is None:\n raise KeyError(\"update method expects parameter `input_tensor`.\")\n elif isinstance(input_tensor, ops.Tensor):\n values = input_tensor\n elif isinstance(input_tensor, sparse_tensor.SparseTensor):\n values = input_tensor.values\n else:\n raise TypeError(\"input_tensor must be \" \\\n \"either a SparseTensor or dense Tensor.\")\n\n values, _ = array_ops.unique(values)\n status_values = array_ops.reshape(values, (-1,))\n partition_index = \\\n self.var.partition_fn(status_values, self.var.shard_num)\n partitioned_values_list, partitioned_indices_list = \\\n dynamic_embedding_variable.make_partition(status_values,\n partition_index,\n self.var.shard_num)\n\n for idx, dev in enumerate(self.tstp_var.devices):\n with ops.device(dev):\n value_size = array_ops.size(partitioned_values_list[idx])\n feature_tstps = array_ops.tile(\n array_ops.reshape(gen_logging_ops.timestamp(), [1]),\n array_ops.reshape(value_size, (-1,)),\n )\n feature_tstps = math_ops.cast(feature_tstps, dtypes.int32)\n feature_status = array_ops.reshape(feature_tstps, (-1, 1))\n\n mht_update = \\\n self.tstp_var.tables[idx].insert(\n partitioned_values_list[idx],\n feature_status,\n )\n update_ops.append(mht_update)\n\n return control_flow_ops.group(update_ops)\n\n def filter(self, input_tensor=None, **kwargs):\n \"\"\"\n Filter new feature keys by probability before training.\n Prevent unpopular features from affecting training.\n\n Args:\n **kwargs: keyword arguments, including\n input_tensor: SparseTensor or DenseTensor.\n Feature keys need to filter.\n probability: float. 
Filtering new feature keys by\n probability, and permitting old keys.\n\n Returns:\n Tensor that are filtered for training.\n \"\"\"\n\n if input_tensor is None:\n raise KeyError(\"filter method expects parameter `input_tensor`.\")\n elif isinstance(input_tensor, ops.Tensor):\n input_type = \"DenseTensor\"\n values = input_tensor\n elif isinstance(input_tensor, sparse_tensor.SparseTensor):\n input_type = \"SparseTensor\"\n values = input_tensor.values\n indices = input_tensor.indices\n else:\n raise TypeError(\"input_tensor must be \" \\\n \"either a SparseTensor or dense Tensor.\")\n\n if 'probability' in kwargs:\n probability = kwargs['probability']\n else:\n raise KeyError(\"filter method expects parameter `probability`.\")\n if not isinstance(probability, float):\n raise TypeError(\"probability must be a float.\")\n if probability < 0.0 or probability > 1.0:\n raise ValueError(\"probability value must be in [0.0, 1.0].\")\n\n unique_values, value_idx = array_ops.unique(values)\n status_values = array_ops.reshape(unique_values, (-1,))\n partition_index = \\\n self.var.partition_fn(status_values, self.var.shard_num)\n partitioned_values_list, partitioned_indices_list = \\\n dynamic_embedding_variable.make_partition(status_values,\n partition_index,\n self.var.shard_num)\n\n fv_list = []\n for idx, dev in enumerate(self.tstp_var.devices):\n with ops.device(dev):\n feature_status = \\\n self.tstp_var.tables[idx].lookup(\n partitioned_values_list[idx],\n dynamic_default_values=self.default_tstp,\n )\n\n sub_fv = array_ops.reshape(feature_status, (-1,))\n fv_list.append(sub_fv)\n\n total_fv = dynamic_embedding_variable._stitch(fv_list,\n partitioned_indices_list)\n total_fv = array_ops.gather(total_fv, value_idx)\n\n value_size = array_ops.size(values)\n old_prob = array_ops.ones(value_size)\n new_prob = array_ops.fill([value_size], probability)\n random_prob = random_ops.random_uniform([value_size], maxval=1.0)\n\n condition = math_ops.greater(total_fv, self.default_tstp)\n total_prob = array_ops.where(condition, old_prob, new_prob)\n\n total_mask = math_ops.greater_equal(total_prob, random_prob)\n filter_values = array_ops.boolean_mask(values, total_mask)\n\n if input_type == \"DenseTensor\":\n filter_tensor = filter_values\n elif input_type == \"SparseTensor\":\n filter_indices = array_ops.boolean_mask(indices, total_mask)\n filter_tensor = sparse_tensor.SparseTensor(\n indices=filter_indices,\n values=filter_values,\n dense_shape=input_tensor.dense_shape)\n\n return filter_tensor\n\n def restrict(self, **kwargs):\n \"\"\"\n Restrict the status table size, eliminate the oldest\n feature keys, if the size of variable grow too large for\n threshold.\n\n Args:\n **kwargs: Keyword arguments, including\n threshold: int. The threshold for feature number\n in variable. 
When restrict method is called, the table\n size will be reduced to 'threshold'.\n factor: int,float,tf.int32,tf.int64,tf.float32.\n If the table size is greater than threshold * factor,\n restricting wiil be triggered.\n\n Returns:\n An operation to restrict the size of variable.\n \"\"\"\n try:\n self.threshold = kwargs['threshold']\n except:\n raise KeyError(\"restrict method expects parameter `threshold`.\")\n if not isinstance(self.threshold, int):\n raise TypeError(\"threshold must be an integer.\")\n if self.threshold < 0:\n raise ValueError(\"threshold must be greater or equal to zero.\")\n\n factor = kwargs.get('factor', 1.0)\n if isinstance(factor, ops.Tensor):\n if factor.dtype not in (dtypes.int32, dtypes.int64, dtypes.float32):\n raise TypeError(\n 'factor expects int, float, tf.int32, tf.int64, or tf.float32')\n factor = math_ops.cast(factor, dtype=dtypes.float32)\n if not isinstance(factor, (int, float)):\n raise TypeError(\n 'factor expects int, float, tf.int32, tf.int64, or tf.float32')\n\n cond_size = math_ops.cast(self.threshold, dtype=dtypes.float32) * factor\n cond_size = math_ops.cast(cond_size, dtype=dtypes.int64)\n condition = math_ops.greater(self.tstp_var.size(), cond_size)\n restrict_status_ops = list()\n no_ops = list()\n\n for idx, dev in enumerate(self.tstp_var.devices):\n with ops.device(dev):\n sub_tk, sub_tv = self.tstp_var.tables[idx].export()\n sharded_threshold = int(self.threshold / self.tstp_var.shard_num)\n\n sub_tv = array_ops.reshape(sub_tv, (-1,))\n first_dim = array_ops.shape(sub_tv)[0]\n\n k_on_top = math_ops.cast(first_dim - sharded_threshold,\n dtype=dtypes.int32)\n k_on_top = math_ops.maximum(k_on_top, 0)\n _, removed_keys_ids = nn_ops.top_k(-sub_tv, k_on_top, sorted=False)\n removed_keys = array_ops.gather(sub_tk, removed_keys_ids)\n restrict_status_ops.append(\n self.tstp_var.tables[idx].remove(removed_keys))\n no_ops.append(control_flow_ops.no_op())\n restrict_op = control_flow_ops.cond(condition, lambda: restrict_status_ops,\n lambda: no_ops)\n\n return restrict_op\n\n\n@tf_export(\"dynamic_embedding.FeatureFilter\")\nclass FeatureFilter(object):\n \"\"\"\n A feature_filter for constraining the variables sparse feature number,\n with keeping recording and eliminating the obsolete feature keys.\n Notice: FrequencyFilterPolicy running order: update->filter->train\n 1.update feature keys frequency\n 2.filter feature keys by frequency\n 3.train with filtering feature keys\n ProbabilityFilterPolicy running order: filter->update->train\n 1.filter feature keys by probability\n 2.update with filtering feature keys\n 3.trian with filtering feature keys\n # Example:\n\n ```python\n # Get a FeatureFilter.\n feature_filter = tf.dynamic_embedding.FeatureFilter(\n var_list=var_list,\n policy=FrequencyFilterPolicy,\n )\n\n # Call update to get an operation to update policy status,\n # record feature keys status.\n # There is no need to call update in inference.\n update_op = feature_filter.update(input_tensor_list=tensor_list)\n\n # Call filter to get qualified feature keys for training.\n # There is no need to call filter in inference.\n threshold = 10\n filter_tensor_list = feature_filter.filter(frequency_threshold=threshold,\n input_tensor_list=tensor_list)\n use_filter = mode != PREDICT and\n math_ops.equal(math_ops.mod(global_step, 100), 0)\n cur_tensor_list = tf.cond(use_filter,\n lambda:filter_tensor_list,\n lambda:tensor_list)\n \n # Call restrict to get an operation to restrict policy status,\n # limit the status table size.\n # There is no need 
to call restrict in inference.\n restrict_op = feature_filter.restrict(threshold=size)\n \n # Training with filtering keys\n # Call the minimize to the loss with optimizer.\n test_var, _ = tf.dynamic_embedding.embedding_lookup_sparse(\n embeddings,\n cur_tensor_list[idx],\n sp_weights=None,\n combiner=\"sum\",\n return_trainable=True)\n pred = math_ops.matmul(test_var, x)\n loss = pred * pred\n\n with tf.control_dependencies(update_op):\n train_op = opt.minimize(loss)\n\n with tf.Session() as sess:\n ...\n\n for step in range(num_iter):\n ...\n #Traning with filter keys\n #Need close 'update', 'filter' and 'restrict' in inference\n sess.run(train_op)\n if step % 1000 == 0:\n sess.run(restrict_op)\n ...\n\n ...\n ```\n\n \"\"\"\n\n def __init__(self,\n var_list=None,\n default_value_list=None,\n policy=FrequencyFilterPolicy):\n \"\"\"\n Creates a `FeatureFilter` object. Each variable in var_list\n of the same FeatureFilter instance share the same policy.\n\n Args:\n var_list: A list of `tf.dynamic_embedding.Variable` objects.\n default_value_list: A list of 'int' for default_value initializing.\n Some policies may use this for initializing status.\n policy: A FilterPolicy class to specify the rules for\n recoding, updating, and filtering the variable status in var_list.\n \"\"\"\n if not issubclass(policy, FilterPolicy):\n raise TypeError(\"policy must be subclass of\" \\\n \"FilterPolicy object.\")\n\n if var_list in [None, []]:\n raise ValueError(\"var_list must have a variable at least.\")\n if default_value_list is not None:\n if len(default_value_list) != len(var_list):\n raise ValueError(\"default_value_list length\" \\\n \"must be equal to var_list.\")\n else:\n default_value_list = len(var_list) * [0]\n\n self.var_list = var_list\n self.policy_list = []\n\n for idx, var in enumerate(self.var_list):\n self.policy_list.append(policy(var,\n default_value=default_value_list[idx]))\n\n def update(self, input_tensor_list=None, **kwargs):\n \"\"\"\n Update the status for every variable in var_list.\n Each variable processes different sparse tensor keys.\n\n Args:\n input_tensor_list: A list of `Tensor` objects.\n For each variable, a sparse tensor should be passed to\n the FilterPolicy to update method according to the index.\n **kwargs: Optional keyword arguments to be passed to\n the FilterPolicy update method.\n\n Returns:\n A list of operations to update the status for every variable.\n \"\"\"\n update_ops = []\n\n if input_tensor_list is None:\n raise KeyError(\"update method expects parameter\" \\\n \"`input_tensor_list`.\")\n elif not isinstance(input_tensor_list, list):\n raise TypeError(\"input_tensor_list must be a list.\")\n elif len(input_tensor_list) != len(self.var_list):\n raise ValueError(\"input_tensor_list length\" \\\n \"must be equal to var_list length.\")\n\n for idx, policy in enumerate(self.policy_list):\n update_ops.append(\n policy.update(input_tensor=input_tensor_list[idx], **kwargs))\n\n return update_ops\n\n def filter(self, input_tensor_list=None, **kwargs):\n \"\"\"\n Filter keys for every variable in var_list.\n Each variable processes different sparse tensor keys.\n\n Args:\n input_tensor_list: A list of `Tensor` objects.\n For each variable, a sparse tensor should be passed\n the FilterPolicy to update method according to the index.\n **kwargs: Optional keyword arguments to be passed to\n the FilterPolicy filter method.\n\n Returns:\n Tensor list that filter for training\n \"\"\"\n filter_list = []\n\n if input_tensor_list is None:\n raise KeyError(\"update 
method expects parameter\" \\\n \"`input_tensor_list`.\")\n elif not isinstance(input_tensor_list, list):\n raise TypeError(\"input_tensor_list must be a list.\")\n elif len(input_tensor_list) != len(self.var_list):\n raise ValueError(\"input_tensor_list length\" \\\n \"must be equal to var_list length.\")\n\n for idx, policy in enumerate(self.policy_list):\n filter_list.append(\n policy.filter(input_tensor=input_tensor_list[idx], **kwargs))\n\n return filter_list\n\n def restrict(self, **kwargs):\n \"\"\"\n Restrict the variables for every variable in var_list.\n\n Args:\n **kwargs: Optional keyword arguments passed to the\n method policy.restrict(**kwargs). For example,\n in the `restrict` method of `FilterFrequencyPolicy`\n has parameters `threshold` and `factor`.\n\n Returns:\n A list of operation to restrict variables.\n \"\"\"\n restrict_op = []\n for policy in self.policy_list:\n restrict_op.append(policy.restrict(**kwargs))\n return restrict_op\n" ]
[ [ "tensorflow.python.ops.math_ops.greater_equal", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.math_ops.greater", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.array_ops.fill", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.array_ops.where", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.ops.array_ops.size", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.array_ops.unique", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.gen_logging_ops.timestamp", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.boolean_mask", "tensorflow.python.ops.nn_ops.top_k", "tensorflow.python.ops.math_ops.maximum", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.7" ] } ]
sstsai-adl/d2go
[ "6cff773797b14698043589afe57ea67cd76286f9" ]
[ "tests/modeling/test_optimizer.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\nimport random\nimport unittest\n\nimport d2go.runner.default_runner as default_runner\nimport torch\nfrom d2go.optimizer import (\n build_optimizer_mapper,\n)\nfrom d2go.utils.testing import helper\n\n\nclass TestArch(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 4, kernel_size=5, stride=1, padding=1)\n self.bn = torch.nn.BatchNorm2d(4)\n self.relu = torch.nn.ReLU(inplace=True)\n self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))\n self.linear = torch.nn.Linear(4, 1)\n\n def forward(self, x):\n ret = self.conv(x)\n ret = self.bn(ret)\n ret = self.relu(ret)\n ret = self.avgpool(ret)\n ret = torch.transpose(ret, 1, 3)\n ret = self.linear(ret)\n return ret\n\n\ndef _test_each_optimizer(cfg):\n print(\"Solver: \" + str(cfg.SOLVER.OPTIMIZER))\n\n model = TestArch()\n criterion = torch.nn.BCEWithLogitsLoss()\n optimizer = build_optimizer_mapper(cfg, model)\n optimizer.zero_grad()\n\n random.seed(20210912)\n for _ in range(2500):\n target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1))\n x = torch.add(torch.rand(1, 3, 16, 16), 2 * target)\n y_pred = model(x)\n loss = criterion(y_pred, target)\n loss.backward()\n optimizer.step()\n\n n_correct = 0\n for _ in range(200):\n target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1))\n x = torch.add(torch.rand(1, 3, 16, 16), 2 * target)\n y_pred = torch.round(torch.sigmoid(model(x)))\n if y_pred == target:\n n_correct += 1\n\n print(\"Correct prediction rate {0}.\".format(n_correct / 200))\n\n\ndef _check_param_group(self, group, num_params=None, **kwargs):\n if num_params is not None:\n self.assertEqual(len(group[\"params\"]), num_params)\n for key, val in kwargs.items():\n self.assertEqual(group[key], val)\n\n\ndef get_optimizer_cfg(\n lr,\n weight_decay=None,\n weight_decay_norm=None,\n weight_decay_bias=None,\n lr_mult=None,\n):\n runner = default_runner.Detectron2GoRunner()\n cfg = runner.get_default_cfg()\n if lr is not None:\n cfg.SOLVER.BASE_LR = lr\n if weight_decay is not None:\n cfg.SOLVER.WEIGHT_DECAY = weight_decay\n if weight_decay_norm is not None:\n cfg.SOLVER.WEIGHT_DECAY_NORM = weight_decay_norm\n if weight_decay_bias is not None:\n cfg.SOLVER.WEIGHT_DECAY_BIAS = weight_decay_bias\n if lr_mult is not None:\n cfg.SOLVER.LR_MULTIPLIER_OVERWRITE = [lr_mult]\n return cfg\n\n\nclass TestOptimizer(unittest.TestCase):\n def test_create_optimizer_default(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv(x))\n\n model = Model()\n cfg = get_optimizer_cfg(\n lr=1.0, weight_decay=1.0, weight_decay_norm=1.0, weight_decay_bias=1.0\n )\n optimizer = build_optimizer_mapper(cfg, model)\n self.assertEqual(len(optimizer.param_groups), 1)\n _check_param_group(\n self, optimizer.param_groups[0], num_params=4, weight_decay=1.0, lr=1.0\n )\n\n def test_create_optimizer_lr(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = torch.nn.Conv2d(3, 3, 1)\n self.conv2 = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv2(self.conv1(x)))\n\n model = Model()\n cfg = get_optimizer_cfg(\n lr=1.0,\n lr_mult={\"conv1\": 3.0, \"conv2\": 3.0},\n weight_decay=2.0,\n weight_decay_norm=2.0,\n )\n optimizer = build_optimizer_mapper(cfg, model)\n\n 
self.assertEqual(len(optimizer.param_groups), 2)\n\n _check_param_group(self, optimizer.param_groups[0], num_params=4, lr=3.0)\n _check_param_group(self, optimizer.param_groups[1], num_params=2, lr=1.0)\n\n def test_create_optimizer_weight_decay_norm(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv(x))\n\n model = Model()\n cfg = get_optimizer_cfg(\n lr=1.0, weight_decay=1.0, weight_decay_norm=2.0, weight_decay_bias=1.0\n )\n optimizer = build_optimizer_mapper(cfg, model)\n\n self.assertEqual(len(optimizer.param_groups), 2)\n\n _check_param_group(\n self, optimizer.param_groups[0], num_params=2, lr=1.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[1], num_params=2, lr=1.0, weight_decay=2.0\n )\n\n def test_all_optimizers(self):\n runner = default_runner.Detectron2GoRunner()\n cfg = runner.get_default_cfg()\n multipliers = [None, [{\"conv\": 0.1}]]\n\n for optimizer_name in [\"SGD\", \"AdamW\", \"SGD_MT\", \"AdamW_MT\"]:\n for mult in multipliers:\n cfg.SOLVER.BASE_LR = 0.01\n cfg.SOLVER.OPTIMIZER = optimizer_name\n cfg.SOLVER.MULTIPLIERS = mult\n _test_each_optimizer(cfg)\n\n def test_full_model_grad_clipping(self):\n runner = default_runner.Detectron2GoRunner()\n cfg = runner.get_default_cfg()\n\n for optimizer_name in [\"SGD\", \"AdamW\", \"SGD_MT\", \"AdamW_MT\"]:\n cfg.SOLVER.BASE_LR = 0.02\n cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 0.2\n cfg.SOLVER.CLIP_GRADIENTS.ENABLED = True\n cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = \"full_model\"\n cfg.SOLVER.OPTIMIZER = optimizer_name\n _test_each_optimizer(cfg)\n\n def test_create_optimizer_custom(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv(x))\n\n def get_optimizer_param_groups(self, _opts):\n ret = [\n {\n \"params\": [self.conv.weight],\n \"lr\": 10.0,\n }\n ]\n return ret\n\n model = Model()\n cfg = get_optimizer_cfg(lr=1.0, weight_decay=1.0, weight_decay_norm=0.0)\n optimizer = build_optimizer_mapper(cfg, model)\n\n self.assertEqual(len(optimizer.param_groups), 3)\n\n _check_param_group(\n self, optimizer.param_groups[0], num_params=1, lr=10.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[1], num_params=1, lr=1.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[2], num_params=2, lr=1.0, weight_decay=0.0\n )\n\n @helper.enable_ddp_env\n def test_create_optimizer_custom_ddp(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv(x))\n\n def get_optimizer_param_groups(self, _opts):\n ret = [\n {\n \"params\": [self.conv.weight],\n \"lr\": 10.0,\n }\n ]\n return ret\n\n model = Model()\n model = torch.nn.parallel.DistributedDataParallel(model)\n cfg = get_optimizer_cfg(lr=1.0, weight_decay=1.0, weight_decay_norm=0.0)\n optimizer = build_optimizer_mapper(cfg, model)\n\n self.assertEqual(len(optimizer.param_groups), 3)\n\n _check_param_group(\n self, optimizer.param_groups[0], num_params=1, lr=10.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[1], num_params=1, lr=1.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[2], num_params=2, lr=1.0, 
weight_decay=0.0\n )\n" ]
[ [ "torch.transpose", "torch.empty", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.BCEWithLogitsLoss", "torch.nn.AdaptiveAvgPool2d", "torch.rand", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.parallel.DistributedDataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
oliviernocent/AEROLAB
[ "4fd1077c5799b6c6a6b885e7baccf16925d3a36e" ]
[ "scripts/python/exposure.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nThis script computes the max mean mass concentration of several pollutants\nfrom a CSV file containing the following columns:\n - 'DateTime' : ISO 8601 date and time \n - 'Timestamp': seconds elapsed since 01/01/1970\n - 'PM10 (µg/m3)' (optional)\n - 'PM2.5 (µg/m3)' (optional)\n - 'PM1 (µg/m3)' (optional)\n - 'NO2 (µg/m3)' (optional)\n - 'CO (mg/m3)' (optional)\n - 'O3 (µg/m3)' (optional)\n\nUSAGE:\n\n./exposure.py [csv_file]\n\nIf no csv_file is provided, the script opens a file dialog box.\n'''\n\n__author__ = \"Olivier Nocent and Quentin Martinet\"\n__copyright__ = \"Copyright 2021, Université de Reims Champagne Ardenne\"\n__license__ = \"MIT\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Olivier Nocent\"\n__email__ = \"[email protected]\"\n__status__ = \"Experimental\"\n\nfrom os import path\nimport sys\nimport easygui\nimport glob\nimport pandas as pd\nfrom aerolab_utils import *\n\nif len(sys.argv) == 1:\n filename = easygui.fileopenbox(\n title='Exposure estimation', msg='Choose a CSV file', filetypes=[['*.csv', 'CSV files']])\nelse:\n filename = sys.argv[1]\n if not path.exists(filename):\n print('\\nERROR:', filename, 'does not exist!\\n\\n')\n exit(0)\n\ndf = pd.read_csv(filename)\n\npollutants = ['PM10 (µg/m3)', 'PM2.5 (µg/m3)', 'PM1 (µg/m3)', 'NO2 (µg/m3)',\n 'CO (mg/m3)', 'O3 (µg/m3)']\n\nthreshold = {\n 'PM10 (µg/m3)': 45,\n 'PM2.5 (µg/m3)': 15,\n 'PM1 (µg/m3)': 15,\n 'NO2 (µg/m3)': 25,\n 'CO (mg/m3)': 4,\n 'O3 (µg/m3)': 100\n}\n\nmax_value = {}\nmax_index = {}\nfor pollutant in pollutants:\n max_value[pollutant] = 0\n max_index[pollutant] = 0\n\ni, end = 0, df['Timestamp'].iloc[-1] - 24 * 3600\nwhile df.loc[i, 'Timestamp'] < end:\n start = df.loc[i, 'Timestamp']\n df_24h = df[(df['Timestamp'] >= start) & (\n df['Timestamp'] < start + 24 * 3600)]\n\n for pollutant in pollutants:\n if pollutant in df.columns:\n mean_value = df_24h[pollutant].median()\n if mean_value > max_value[pollutant]:\n max_value[pollutant] = mean_value\n max_index[pollutant] = i\n\n i += 1\n\nif 'O3 (µg/m3)' in df.columns:\n i, end = 0, df['Timestamp'].iloc[-1] - 8 * 3600\n while df.loc[i, 'Timestamp'] < end:\n start = df.loc[i, 'Timestamp']\n df_8h = df[(df['Timestamp'] >= start) & (\n df['Timestamp'] < start + 8 * 3600)]\n\n mean_value = df_24h['O3 (µg/m3)'].median()\n if mean_value > max_value['O3 (µg/m3)']:\n max_value['O3 (µg/m3)'] = mean_value\n max_index['O3 (µg/m3)'] = i\n\n i += 1\n\nprint('\\nMaximum mean mass concentration during 24h:\\n')\nif 'PM10 (µg/m3)' in df.columns:\n print(f\"PM10 : {max_value['PM10 (µg/m3)']: >6.2f} µg/m3\\t\\t(45 µg/m3) at {df['DateTime'][max_index['PM10 (µg/m3)']]}\")\nif 'PM2.5 (µg/m3)' in df.columns:\n print(f\"PM2.5 : {max_value['PM2.5 (µg/m3)']: >6.2f} µg/m3\\t\\t(15 µg/m3) at {df['DateTime'][max_index['PM2.5 (µg/m3)']]}\")\nif 'PM1 (µg/m3)' in df.columns:\n print(f\"PM1 :' {max_value['PM1 (µg/m3)']: >6.2f} µg/m3\\t\\t( ? 
µg/m3) at {df['DateTime'][max_index['PM1 (µg/m3)']]}\")\nif 'NO2 (µg/m3)' in df.columns:\n print(f\"NO2 : {max_value['NO2 (µg/m3)']: >6.2f} µg/m3\\t\\t(25 µg/m3) at {df['DateTime'][max_index['NO2 (µg/m3)']]}\")\nif 'CO (mg/m3)' in df.columns:\n print(f\"CO : {max_value['CO (mg/m3)']: >6.2f} mg/m3\\t\\t( 4 mg/m3) at {df['DateTime'][max_index['CO (mg/m3)']]}\")\nif 'O3 (µg/m3)' in df.columns:\n print('\\nMaximum mean mass concentration during 8h:\\n')\n print(f\"O3 : {max_value['O3 (µg/m3)']: >6.2f} µg/m3\\t\\t(100 µg/m3) at {df['DateTime'][max_index['O3 (µg/m3)']]}\")\n\nperiod = {\n 'PM10 (µg/m3)': 0,\n 'PM2.5 (µg/m3)': 0,\n 'PM1 (µg/m3)': 0,\n 'NO2 (µg/m3)': 0,\n 'CO (mg/m3)': 0,\n 'O3 (µg/m3)': 0\n}\n\nfor i in range(1,len(df.index)):\n for pollutant in pollutants:\n if pollutant in df.columns and df[pollutant][i] > threshold[pollutant]:\n period[pollutant] += df['Timestamp'][i] - df['Timestamp'][i-1]\n\ntotal = df['Timestamp'][len(df.index)-1] - df['Timestamp'][0]\n\nprint(f'\\nTotal time above thresholds during {format_duration(total)}:\\n')\nfor pollutant in pollutants:\n if pollutant in df.columns:\n print(f'{pollutant} : {format_duration(period[pollutant])}')" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Vinicius-Tanigawa/Undergraduate-Research-Project
[ "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9", "e92372f07882484b127d7affe305eeec2238b8a9" ]
[ "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Aerodynamics/Common/Fidelity_Zero/Lift/generate_wing_wake_grid.py", "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Networks/Solar.py", "Cessna_208/Electric/mission_electric_Cessna_208.py", "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Static_Stability/Approximations/datcom.py", "SUAVE/SUAVE-2.5.0/regression/scripts/motor/motor_test.py", "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Aerodynamics/Lifting_Line/Lifting_Line.py", "SUAVE/SUAVE-2.5.0/regression/scripts/VTOL/test_Stopped_Rotor.py", "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Networks/Battery_Propeller.py", "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Flight_Dynamics/Static_Stability/Approximations/Supporting_Functions/extend_to_ref_area.py", "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Analyses/Mission/Segments/Conditions/State.py", "SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Performance/electric_V_h_diagram.py", "SUAVE/SUAVE-2.5.0/templates/Example_Attribute.py", "References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Mission/Segments/Conditions/State.py", "SUAVE/SUAVE-2.5.0/regression/scripts/electric_performance/electric_V_h_diagram.py" ]
[ "## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Lift\n# generate_wing_wake_grid.py\n# \n# Created: April 2021, R. Erhard\n# Modified: \n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\nimport numpy as np\nimport pylab as plt\nfrom SUAVE.Core import Data\n\n\ndef generate_wing_wake_grid(geometry, H, L, hf, x_plane, Nzo=20, Nzf=35, Nyo=20, plot_grid=False):\n \"\"\" Generates the grid points for evaluating the viscous wing wake in a downstream plane.\n Uses smaller grid near the wing to better capture boundary layer.\n \n Inputs: \n geometry SUAVE vehicle data object\n H Height of full grid, normalized by wing span\n L Length of grid, normalized by wing span\n hf Height of finer grid portion\n x_plane Spanwise location of grid plane\n \n Nzo Number of vertical grid points outside finer region\n Nzf Number of vertical grid points inside finer region\n Nyo Number of horizontal grid points outside of wing span\n \"\"\"\n # unpack\n span = geometry.wings.main_wing.spans.projected\n half_span = span/2\n VD = geometry.vortex_distribution\n breaks = VD.chordwise_breaks\n \n # grid bounds\n z_bot = -H*half_span\n z_top = H*half_span\n Nzo_half = int(Nzo/2)\n Nyo_half = int(Nyo/2)\n \n # generate vertical grid point locations\n z_outer_bot = np.linspace(z_bot, -hf, Nzo_half)\n z_outer_top = np.linspace(hf, z_top, Nzo_half)\n \n # use finer concentration of grid points near the wing\n z_inner_bot = -hf*(np.flipud((1-np.cos(np.linspace(1e-6,1,Nzf)*np.pi/2))))\n z_inner_top = hf*(1-np.cos(np.linspace(0,1,Nzf)*np.pi/2))\n zlocs = np.concatenate([z_outer_bot, z_inner_bot, z_inner_top, z_outer_top])\n\n # generate spanwise grid point locations: placed between vortex lines to avoid discontinuities\n ypts = VD.YC[breaks]\n y_semispan = ypts[0:int(len(ypts)/2)]\n \n if L>=1.:\n # add grid points outside wingtip\n y_outerspan = np.linspace(1.01,L,Nyo_half)*half_span\n y_semispan = np.append(y_semispan, y_outerspan)\n else:\n # trim spanwise points to region of interest\n y_in = y_semispan<(L*half_span)\n y_semispan = y_semispan[y_in]\n \n ylocs = np.concatenate([np.flipud(-y_semispan),y_semispan])\n \n # declare new control points\n cp_YC = np.repeat(ylocs,len(zlocs)) \n cp_ZC = np.tile(zlocs,len(ylocs))\n cp_XC = np.ones_like(cp_YC)*x_plane \n \n grid_points = Data()\n grid_points.XC = cp_XC\n grid_points.YC = cp_YC\n grid_points.ZC = cp_ZC\n grid_points.yline = ylocs\n grid_points.zline = zlocs\n \n if plot_grid:\n yL = -span/2\n yR = span/2\n \n wing_y = np.array([yL, yR])\n wing_z = np.array([0,0])\n \n # plot the grid points\n fig = plt.figure()\n axes = fig.add_subplot(1,1,1)\n axes.plot(cp_YC,cp_ZC,'k.')\n \n # plot the wing projection\n axes.plot(wing_y,wing_z, 'r')\n \n axes.set_xlabel('y [m]')\n axes.set_ylabel(\"z [m]\")\n axes.set_title(\"New Grid Points\")\n \n plot_prop=True\n if plot_prop:\n for net in list(geometry.networks.keys()):\n for prop in list(geometry.networks[net].propellers.keys()):\n R = geometry.networks[net].propellers[prop].tip_radius\n origin = geometry.networks[net].propellers[prop].origin\n Na = geometry.networks[net].propellers[prop].number_azimuthal_stations\n \n psi = np.linspace(0,2*np.pi,Na+1)[:-1]\n ycoords = origin[0][1] + R*np.cos(psi)\n zcoords = origin[0][2] + R*np.sin(psi)\n axes.plot(ycoords,zcoords,'r')\n \n return grid_points", "## @ingroup Components-Energy-Networks\n# Solar.py\n# \n# Created: Jun 2014, E. Botero\n# Modified: Feb 2016, T. 
MacDonald \n# Mar 2020, M. Clarke\n# Jul 2021, E. Botero\n# Aug 2021, M. Clarke\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\n# package imports\nimport numpy as np\nfrom .Network import Network\nfrom SUAVE.Components.Physical_Component import Container\nfrom SUAVE.Methods.Power.Battery.pack_battery_conditions import pack_battery_conditions\nfrom SUAVE.Methods.Power.Battery.append_initial_battery_conditions import append_initial_battery_conditions\n\nfrom SUAVE.Core import Data , Units\n\n# ----------------------------------------------------------------------\n# Network\n# ----------------------------------------------------------------------\n\n## @ingroup Components-Energy-Networks\nclass Solar(Network):\n \"\"\" A solar powered system with batteries and maximum power point tracking.\n \n This network adds an extra unknowns to the mission, the torque matching between motor and propeller.\n \n Assumptions:\n None\n \n Source:\n None\n \"\"\" \n def __defaults__(self):\n \"\"\" This sets the default values for the network to function.\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n None\n \n Outputs:\n None\n \n Properties Used:\n N/A\n \"\"\" \n self.solar_flux = None\n self.solar_panel = None\n self.motors = Container()\n self.propellers = Container()\n self.esc = None\n self.avionics = None\n self.payload = None\n self.solar_logic = None\n self.battery = None \n self.engine_length = None\n self.number_of_engines = None\n self.tag = 'Solar'\n self.use_surrogate = False\n self.generative_design_minimum = 0\n self.identical_propellers = True\n \n # manage process with a driver function\n def evaluate_thrust(self,state):\n \"\"\" Calculate thrust given the current state of the vehicle\n \n Assumptions:\n Caps the throttle at 110% and linearly interpolates thrust off that\n \n Source:\n N/A\n \n Inputs:\n state [state()]\n \n Outputs:\n results.thrust_force_vector [newtons]\n results.vehicle_mass_rate [kg/s]\n conditions.propulsion:\n solar_flux [watts/m^2] \n rpm [radians/sec]\n current [amps]\n battery_power_draw [watts]\n battery_energy [joules]\n motor_torque [N-M]\n propeller_torque [N-M]\n \n Properties Used:\n Defaulted values\n \"\"\" \n \n # unpack\n conditions = state.conditions\n numerics = state.numerics\n solar_flux = self.solar_flux\n solar_panel = self.solar_panel\n motors = self.motors\n propellers = self.propellers\n esc = self.esc\n avionics = self.avionics\n payload = self.payload\n solar_logic = self.solar_logic\n battery = self.battery\n num_engines = self.number_of_engines\n \n # Unpack conditions\n a = conditions.freestream.speed_of_sound \n \n # Set battery energy\n battery.current_energy = conditions.propulsion.battery_energy\n battery.pack_temperature = conditions.propulsion.battery_pack_temperature\n battery.cell_charge_throughput = conditions.propulsion.battery_cell_charge_throughput \n battery.age = conditions.propulsion.battery_cycle_day \n battery.R_growth_factor = conditions.propulsion.battery_resistance_growth_factor\n battery.E_growth_factor = conditions.propulsion.battery_capacity_fade_factor \n battery.max_energy = conditions.propulsion.battery_max_aged_energy \n \n # step 1\n solar_flux.solar_radiation(conditions)\n \n # link\n solar_panel.inputs.flux = solar_flux.outputs.flux\n \n # step 2\n solar_panel.power()\n \n # link\n solar_logic.inputs.powerin = solar_panel.outputs.power\n \n # step 3\n solar_logic.voltage()\n \n # 
link\n esc.inputs.voltagein = solar_logic.outputs.system_voltage\n \n # Step 4\n esc.voltageout(conditions)\n \n # How many evaluations to do\n if self.identical_propellers:\n n_evals = 1\n factor = num_engines*1\n else:\n n_evals = int(num_engines)\n factor = 1.\n \n # Setup numbers for iteration\n total_motor_current = 0.\n total_thrust = 0. * state.ones_row(3)\n total_power = 0.\n \n # Iterate over motor/props\n for ii in range(n_evals):\n \n # Unpack the motor and props\n motor_key = list(motors.keys())[ii]\n prop_key = list(propellers.keys())[ii]\n motor = self.motors[motor_key]\n prop = self.propellers[prop_key] \n \n # link\n motor.inputs.voltage = esc.outputs.voltageout\n motor.inputs.propeller_CP = np.atleast_2d(conditions.propulsion.propeller_power_coefficient[:,ii]).T\n \n # step 5\n motor.omega(conditions)\n \n # link\n prop.inputs.omega = motor.outputs.omega\n \n # step 6\n F, Q, P, Cplast , outputs , etap = prop.spin(conditions)\n \n # Check to see if magic thrust is needed, the ESC caps throttle at 1.1 already\n eta = conditions.propulsion.throttle[:,0,None]\n P[eta>1.0] = P[eta>1.0]*eta[eta>1.0]\n F[eta[:,0]>1.0,:] = F[eta[:,0]>1.0,:]*eta[eta[:,0]>1.0,:]\n \n # Run the motor for current\n _ , etam = motor.current(conditions) \n \n # Conditions specific to this instantation of motor and propellers\n R = prop.tip_radius\n rpm = motor.outputs.omega / Units.rpm\n F_mag = np.atleast_2d(np.linalg.norm(F, axis=1)).T\n total_thrust = total_thrust + F * factor\n total_power = total_power + P * factor\n total_motor_current = total_motor_current + factor*motor.outputs.current\n\n # Pack specific outputs\n conditions.propulsion.propeller_motor_efficiency[:,ii] = etam[:,0] \n conditions.propulsion.propeller_motor_torque[:,ii] = motor.outputs.torque[:,0]\n conditions.propulsion.propeller_torque[:,ii] = Q[:,0]\n conditions.propulsion.propeller_thrust[:,ii] = np.linalg.norm(total_thrust ,axis = 1) \n conditions.propulsion.propeller_rpm[:,ii] = rpm[:,0]\n conditions.propulsion.propeller_tip_mach[:,ii] = (R*rpm[:,0]*Units.rpm)/a[:,0]\n conditions.propulsion.disc_loading[:,ii] = (F_mag[:,0])/(np.pi*(R**2)) # N/m^2 \n conditions.propulsion.power_loading[:,ii] = (F_mag[:,0])/(P[:,0]) # N/W \n conditions.propulsion.propeller_efficiency[:,ii] = etap[:,0] \n conditions.noise.sources.propellers[prop.tag] = outputs\n \n # Run the avionics\n avionics.power()\n \n # link\n solar_logic.inputs.pavionics = avionics.outputs.power\n \n # Run the payload\n payload.power()\n \n # link\n solar_logic.inputs.ppayload = payload.outputs.power\n \n # link\n esc.inputs.currentout = total_motor_current\n \n # Run the esc\n esc.currentin(conditions)\n \n # link\n solar_logic.inputs.currentesc = esc.outputs.currentin\n solar_logic.inputs.volts_motor = esc.outputs.voltageout \n solar_logic.logic(conditions,numerics)\n \n # link\n battery.inputs = solar_logic.outputs\n battery.energy_calc(numerics)\n \n # Calculate avionics and payload power\n avionics_payload_power = avionics.outputs.power + payload.outputs.power\n \n # Pack the conditions for outputs \n battery.inputs.current = solar_logic.inputs.currentesc\n conditions.propulsion.solar_flux = solar_flux.outputs.flux \n pack_battery_conditions(conditions,battery,avionics_payload_power,P) \n\n # Create the outputs\n results = Data()\n results.thrust_force_vector = total_thrust\n results.vehicle_mass_rate = state.ones_row(1)*0.0\n\n return results\n \n \n def unpack_unknowns(self,segment):\n \"\"\" This is an extra set of unknowns which are unpacked from the mission 
solver and send to the network.\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n state.unknowns.propeller_power_coefficient [None]\n \n Outputs:\n state.conditions.propulsion.propeller_power_coefficient [None]\n \n Properties Used:\n N/A\n \"\"\" \n \n # Here we are going to unpack the unknowns (Cp) provided for this network\n segment.state.conditions.propulsion.propeller_power_coefficient = segment.state.unknowns.propeller_power_coefficient\n\n return\n \n def residuals(self,segment):\n \"\"\" This packs the residuals to be send to the mission solver.\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n state.conditions.propulsion:\n motor_torque [N-m]\n propeller_torque [N-m]\n \n Outputs:\n None\n \n Properties Used:\n None\n \"\"\" \n \n # Here we are going to pack the residuals from the network\n \n # Unpack\n q_motor = segment.state.conditions.propulsion.propeller_motor_torque\n q_prop = segment.state.conditions.propulsion.propeller_torque\n \n # Return the residuals\n segment.state.residuals.network[:,0:] = q_motor - q_prop\n \n return\n \n \n \n def add_unknowns_and_residuals_to_segment(self, segment, initial_power_coefficient = 0.005):\n \"\"\" This function sets up the information that the mission needs to run a mission segment using this network\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n segment\n initial_voltage [v]\n initial_power_coefficient [float]s\n \n Outputs:\n segment.state.unknowns.propeller_power_coefficient\n segment.state.conditions.propulsion.propeller_motor_torque\n segment.state.conditions.propulsion.propeller_torque \n \n Properties Used:\n N/A\n \"\"\" \n \n # unpack the ones function\n ones_row = segment.state.ones_row\n \n # Count how many unknowns and residuals based on p\n n_props = len(self.propellers)\n n_motors = len(self.motors)\n n_eng = self.number_of_engines\n \n if n_props!=n_motors!=n_eng:\n print('The number of propellers is not the same as the number of motors')\n \n # Now check if the propellers are all identical, in this case they have the same of residuals and unknowns\n if self.identical_propellers:\n n_props = 1\n \n # number of residuals, props plus the battery voltage\n n_res = n_props \n\n # Assign initial segment conditions to segment if missing\n battery = self.battery\n append_initial_battery_conditions(segment,battery) \n \n # Setup the residuals\n segment.state.residuals.network = 0. * ones_row(n_res)\n \n # Setup the unknowns\n segment.state.unknowns.propeller_power_coefficient = initial_power_coefficient * ones_row(n_props)\n \n # Setup the conditions\n segment.state.conditions.propulsion.propeller_motor_efficiency = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_motor_torque = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_torque = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_thrust = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_rpm = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.disc_loading = 0. * ones_row(n_props) \n segment.state.conditions.propulsion.power_loading = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_tip_mach = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_efficiency = 0. 
* ones_row(n_props) \n \n # Ensure the mission knows how to pack and unpack the unknowns and residuals\n segment.process.iterate.unknowns.network = self.unpack_unknowns\n segment.process.iterate.residuals.network = self.residuals \n\n return segment \n \n __call__ = evaluate_thrust\n", "# mission_Caravan.py\n# \n# Created: Ago 2018, M. Gallani\n# Modified: Jun 2021, V. Tanigawa\n\n\n#----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\nimport SUAVE\nimport matplotlib.pyplot as plt \nimport numpy as np \nimport pylab as plt \n\nfrom SUAVE.Core import Units\nfrom SUAVE.Plots.Performance.Mission_Plots import *\nfrom SUAVE.Core import Data, Container\nfrom SUAVE.Components.Energy.Networks.Battery_Propeller import Battery_Propeller\nfrom SUAVE.Methods.Power.Battery.Sizing import initialize_from_mass\nfrom electric_Cessna_208 import vehicle_setup, configs_setup\nfrom SUAVE.Methods.Performance import payload_range\nfrom SUAVE.Input_Output.Results import print_parasite_drag, \\\n print_compress_drag, \\\n print_engine_data, \\\n print_mission_breakdown\n\n\n# ----------------------------------------------------------------------\n# Main\n# ----------------------------------------------------------------------\n\ndef main():\n \"\"\"This function gets the vehicle configuration, analysis settings, and then runs the mission.\n Once the mission is complete, the results are plotted.\"\"\"\n\n battery_chemistry = ['NMC','LFP']\n \n # Extract vehicle configurations and the analysis settings that go with them\n configs, analyses = full_setup(battery_chemistry)\n\n # Size each of the configurations according to a given set of geometry relations\n # simple_sizing(configs)\n\n # Perform operations needed to make the configurations and analyses usable in the mission\n configs.finalize()\n analyses.finalize()\n\n # Determine the vehicle weight breakdown (independent of mission fuel usage)\n weights = analyses.configs.base.weights\n breakdown = weights.evaluate() \n\n # Perform a mission analysis\n mission = analyses.missions.base\n results = mission.evaluate()\n\n # Plot all mission results, including items such as altitude profile and L/D\n plot_mission(results)\n\n return\n# ----------------------------------------------------------------------\n# Analysis Setup\n# ----------------------------------------------------------------------\n\ndef full_setup(battery_chemistry):\n \"\"\"This function gets the baseline vehicle and creates modifications for different \n configurations, as well as the mission and analyses to go with those configurations.\"\"\"\n\n # Collect baseline vehicle data and changes when using different configuration settings\n vehicle = vehicle_setup()\n\n# Modify Battery \n net = vehicle.networks.battery_propeller\n bat = net.battery \n if battery_chemistry == 'NMC': \n bat = SUAVE.Components.Energy.Storages.Batteries.Constant_Mass.Lithium_Ion_LiNiMnCoO2_18650() \n elif battery_chemistry == 'LFP': \n bat = SUAVE.Components.Energy.Storages.Batteries.Constant_Mass.Lithium_Ion_LiFePO4_18650() \n \n bat.mass_properties.mass = 500. * Units.kg \n bat.max_voltage = 500. \n initialize_from_mass(bat)\n \n # Assume a battery pack module shape. 
This step is optional but\n # required for thermal analysis of the pack\n number_of_modules = 10\n bat.module_config.total = int(np.ceil(bat.pack_config.total/number_of_modules))\n bat.module_config.normal_count = int(np.ceil(bat.module_config.total/bat.pack_config.series))\n bat.module_config.parallel_count = int(np.ceil(bat.module_config.total/bat.pack_config.parallel))\n net.battery = bat \n \n net.battery = bat\n net.voltage = bat.max_voltage \n\n configs = configs_setup(vehicle)\n\n # Get the analyses to be used when different configurations are evaluated\n configs_analyses = analyses_setup(configs)\n\n # Create the mission that will be flown\n mission = mission_setup(configs_analyses, vehicle)\n missions_analyses = missions_setup(mission)\n\n # Add the analyses to the proper containers\n analyses = SUAVE.Analyses.Analysis.Container()\n analyses.configs = configs_analyses\n analyses.missions = missions_analyses\n\n return configs, analyses\n\n# ----------------------------------------------------------------------\n# Define the Vehicle Analyses\n# ----------------------------------------------------------------------\n\ndef analyses_setup(configs):\n \"\"\"Set up analyses for each of the different configurations.\"\"\"\n\n analyses = SUAVE.Analyses.Analysis.Container()\n\n # Build a base analysis for each configuration. Here the base analysis is always used, but\n # this can be modified if desired for other cases.\n for tag,config in configs.items():\n analysis = base_analysis(config)\n analyses[tag] = analysis\n\n return analyses\n\ndef base_analysis(vehicle):\n \"\"\"This is the baseline set of analyses to be used with this vehicle. Of these, the most\n commonly changed are the weights and aerodynamics methods.\"\"\"\n\n # ------------------------------------------------------------------\n # Initialize the Analyses\n # ------------------------------------------------------------------ \n analyses = SUAVE.Analyses.Vehicle()\n\n # ------------------------------------------------------------------\n # Weights\n weights = SUAVE.Analyses.Weights.Weights_Transport()\n weights.vehicle = vehicle\n weights.settings.empty_weight_increment = 0.\n analyses.append(weights)\n\n # ------------------------------------------------------------------\n # Basic Geometry Relations\n sizing = SUAVE.Analyses.Sizing.Sizing()\n sizing.features.vehicle = vehicle\n analyses.append(sizing)\n\n # ------------------------------------------------------------------\n # Aerodynamics Analysis\n aerodynamics = SUAVE.Analyses.Aerodynamics.Fidelity_Zero()\n aerodynamics.geometry = vehicle\n aerodynamics.settings.drag_coefficient_increment = 0.0133\n aerodynamics.settings.oswald_efficiency_factor = 0.7860 ## Oswald for the case considering thrust effect on fuselage drag\n analyses.append(aerodynamics)\n\n # ------------------------------------------------------------------\n # Stability Analysis\n stability = SUAVE.Analyses.Stability.Fidelity_Zero()\n stability.geometry = vehicle\n analyses.append(stability)\n\n # ------------------------------------------------------------------\n # Energy\n energy= SUAVE.Analyses.Energy.Energy()\n energy.network = vehicle.networks\n analyses.append(energy)\n\n # ------------------------------------------------------------------\n # Planet Analysis\n planet = SUAVE.Analyses.Planets.Planet()\n analyses.append(planet)\n\n # ------------------------------------------------------------------\n # Atmosphere Analysis\n atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()\n 
atmosphere.features.planet = planet.features\n analyses.append(atmosphere) \n\n return analyses \n\n\n# ----------------------------------------------------------------------\n# Define the Configurations\n# ---------------------------------------------------------------------\n\ndef configs_setup(vehicle):\n \"\"\"This function sets up vehicle configurations for use in different parts of the mission.\n Here, this is mostly in terms of high lift settings.\"\"\"\n \n # ------------------------------------------------------------------\n # Initialize Configurations\n # ------------------------------------------------------------------\n configs = SUAVE.Components.Configs.Config.Container()\n\n base_config = SUAVE.Components.Configs.Config(vehicle)\n base_config.tag = 'base'\n configs.append(base_config)\n\n # ------------------------------------------------------------------\n # Cruise Configuration\n # ------------------------------------------------------------------\n config = SUAVE.Components.Configs.Config(base_config)\n config.tag = 'cruise'\n configs.append(config)\n\n # ------------------------------------------------------------------\n # Takeoff Configuration\n # ------------------------------------------------------------------\n config = SUAVE.Components.Configs.Config(base_config)\n config.tag = 'takeoff'\n config.wings['main_wing'].control_surfaces.flap.deflection = 15. * Units.deg\n # A max lift coefficient factor of 1 is the default, but it is highlighted here as an option\n config.max_lift_coefficient_factor = 1.\n\n configs.append(config)\n \n # ------------------------------------------------------------------\n # Cutback Configuration\n # ------------------------------------------------------------------\n config = SUAVE.Components.Configs.Config(base_config)\n config.tag = 'cutback'\n config.wings['main_wing'].control_surfaces.flap.deflection = 20. * Units.deg\n config.max_lift_coefficient_factor = 1.\n\n configs.append(config) \n\n # ------------------------------------------------------------------\n # Landing Configuration\n # ------------------------------------------------------------------\n\n config = SUAVE.Components.Configs.Config(base_config)\n config.tag = 'landing'\n\n config.wings['main_wing'].control_surfaces.flap.deflection = 20. * Units.deg\n config.max_lift_coefficient_factor = 1. \n\n configs.append(config)\n\n # ------------------------------------------------------------------\n # Short Field Takeoff Configuration\n # ------------------------------------------------------------------ \n\n config = SUAVE.Components.Configs.Config(base_config)\n config.tag = 'short_field_takeoff'\n \n config.wings['main_wing'].control_surfaces.flap.deflection = 20. * Units.deg\n config.max_lift_coefficient_factor = 1. \n \n configs.append(config)\n\n return configs\n\n# def simple_sizing(configs):\n# \"\"\"This function applies a few basic geometric sizing relations and modifies the landing\n# configuration.\"\"\"\n\n# base = configs.base\n# # Update the baseline data structure to prepare for changes\n# base.pull_base()\n\n# # Revise the zero fuel weight. This will only affect the base configuration. 
To do all\n# # configurations, this should be specified in the top level vehicle definition.\n# base.mass_properties.max_zero_fuel = 0.9 * base.mass_properties.max_takeoff \n\n# # Estimate wing areas\n# for wing in base.wings:\n# wing.areas.wetted = 2.0 * wing.areas.reference\n# wing.areas.exposed = 0.8 * wing.areas.wetted\n# wing.areas.affected = 0.6 * wing.areas.wetted\n\n# # Store how the changes compare to the baseline configuration\n# base.store_diff()\n\n# # ------------------------------------------------------------------\n# # Landing Configuration\n# # ------------------------------------------------------------------\n# landing = configs.landing\n\n# # Make sure base data is current\n# landing.pull_base()\n\n# # Add a landing weight parameter. This is used in field length estimation and in\n# # initially the landing mission segment type.\n# landing.mass_properties.landing = 0.85 * base.mass_properties.takeoff\n\n# # Store how the changes compare to the baseline configuration\n# landing.store_diff()\n\n# return\n\n\n# ----------------------------------------------------------------------\n# Define the Mission\n# ----------------------------------------------------------------------\ndef mission_setup(analyses,vehicle):\n\n # ------------------------------------------------------------------\n # Initialize the Mission\n # ------------------------------------------------------------------\n\n mission = SUAVE.Analyses.Mission.Sequential_Segments()\n mission.tag = 'the_mission'\n\n # atmospheric model\n atmosphere = SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976()\n planet = SUAVE.Attributes.Planets.Earth()\n\n #airport\n airport = SUAVE.Attributes.Airports.Airport()\n airport.altitude = 0.0 * Units.ft\n airport.delta_isa = 0.0\n airport.atmosphere = SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976()\n\n mission.airport = airport \n\n # unpack Segments module\n Segments = SUAVE.Analyses.Mission.Segments\n\n # base segment\n base_segment = Segments.Segment()\n ones_row = base_segment.state.ones_row\n base_segment.process.initialize.initialize_battery = SUAVE.Methods.Missions.Segments.Common.Energy.initialize_battery \n base_segment.process.finalize.post_process.update_battery_state_of_health = SUAVE.Methods.Missions.Segments.Common.Energy.update_battery_state_of_health\n base_segment.process.iterate.conditions.planet_position = SUAVE.Methods.skip\n base_segment.state.numerics.number_control_points = 4\n base_segment.battery_age_in_days = 1 # optional but added for regression\n base_segment.temperature_deviation = 1 # Kelvin # optional but added for regression\n # base_segment.process.iterate.unknowns.network = SUAVE.Methods.skip\n # base_segment.process.iterate.residuals.network = SUAVE.Methods.skip\n # base_segment.state.unknowns.propeller_power_coefficient = 0. * ones_row(1) \n # base_segment.state.unknowns.battery_voltage_under_load = vehicle.networks.battery_propeller.battery.max_voltage * ones_row(1) \n # base_segment.state.residuals.network = 0. 
* ones_row(2) \n\n # bat = vehicle.networks.battery_cell.battery\n\n\n\n # ------------------------------------------------------------------\n # Climb Segment: Constant Speed Constant Rate \n # ------------------------------------------------------------------\n\n # segment = Segments.Climb.Constant_Speed_Constant_Rate(base_segment)\n # segment.tag = \"climb\"\n \n # segment.analyses.extend( analyses.takeoff )\n\n # segment.battery_energy = vehicle.networks.battery_propeller.battery.max_energy * 0.89\n \n # segment.altitude_start = 0.0 * Units.ft\n # segment.altitude_end = 9000.0 * Units.ft\n # segment.air_speed = 140 * Units['kts'] \n # segment.climb_rate = 800 * Units.ft / Units.min\n \n # # add to misison\n # mission.append_segment(segment)\n\n # ------------------------------------------------------------------ \n # Cruise Segment: Constant Speed Constant Altitude\n # ------------------------------------------------------------------ \n\n segment = Segments.Cruise.Constant_Speed_Constant_Altitude(base_segment)\n segment.tag = \"cruise\"\n\n segment.analyses.extend( analyses.base )\n\n # segment.battery_energy = vehicle.networks.battery_propeller.battery.max_energy * 0.89\n\n segment.altitude = 6000. * Units.ft \n segment.air_speed = 170 * Units['kts'] \n segment.distance = 200 * Units.nmi\n\n segment = vehicle.networks.battery_propeller.add_unknowns_and_residuals_to_segment(segment)\n\n # add to mission\n mission.append_segment(segment)\n\n\n# ------------------------------------------------------------------\n# Descent Segment: Constant Speed Constant Rate \n# ------------------------------------------------------------------\n\n # segment = Segments.Descent.Constant_Speed_Constant_Rate(base_segment)\n # segment.tag = \"descent_1\"\n \n # segment.analyses.extend( analyses.cruise )\n \n # segment.altitude_start = 9000. * Units.ft \n # segment.altitude_end = 0. * Units.ft \n # segment.air_speed = 170 * Units.kts\n # segment.descent_rate = 500. * Units.ft / Units.min\n \n # # add to mission\n # mission.append_segment(segment)\n\n return mission\n\ndef missions_setup(base_mission):\n\n # the mission container\n missions = SUAVE.Analyses.Mission.Mission.Container()\n \n # ------------------------------------------------------------------\n # Base Mission\n # ------------------------------------------------------------------\n \n missions.base = base_mission\n \n # done!\n return missions \n\n# ----------------------------------------------------------------------\n# Plot Results\n# ----------------------------------------------------------------------\ndef plot_mission(results, line_style = 'bo-'):\n\n plot_flight_conditions(results, line_style) \n\n plot_aerodynamic_coefficients(results, line_style) \n\n plot_aerodynamic_forces(results, line_style)\n\n plot_stability_coefficients(results, line_style) \n\n plot_drag_components(results, line_style)\n\n plot_aircraft_velocities(results, line_style)\n\n plot_altitude_sfc_weight(results, line_style) \n\n plot_battery_pack_conditions(results, line_style)\n\n plot_battery_cell_conditions(results, line_style)\n\n plot_battery_degradation(results, line_style)\n\n plot_lift_cruise_network(results, line_style)\n \n plot_propeller_conditions(results, line_style) \n \n plot_eMotor_Prop_efficiencies(results, line_style)\n \n plot_disc_power_loading(results, line_style) \n\n \n return \n\nif __name__ == '__main__':\n main()\n \n plt.show()", "## @ingroup Methods-Flight_Dynamics-Static_Stability-Approximations\n# datcom.py\n#\n# Created: Feb 2014, T. 
Momose\n# Modified: Jul 2014, A. Wendorff\n\n# NOTES/QUESTIONS\n# - May need to bring in Roskam Figure 8.47 data (Airplane Design Part VI) for supersonic\n# - IS THIS ACTUALLY VALID FOR SUPERSONIC? ROSKAM FRAMES IT AS A SUBSONIC METHOD\n# - For now, this uses an assumed 2D lift coefficient slope of 6.13rad^-1, from Roskam.\n# Should possibly later change it to take a Cl_alpha input based on VPM, etc.\n# - Possibly add an effective aspect ratio variable to the wing object for winglet effects, etc.\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\nimport numpy as np\nfrom SUAVE.Methods.Flight_Dynamics.Static_Stability.Approximations.Supporting_Functions.convert_sweep import convert_sweep\n\n# ----------------------------------------------------------------------\n# Method\n# ----------------------------------------------------------------------\n\n## @ingroup Methods-Flight_Dynamics-Static_Stability-Approximations\ndef datcom(wing,mach):\n \"\"\" This method uses the DATCOM formula to compute dCL/dalpha without \n correlations for downwash of lifting surfaces further ahead on the \n aircraft or upwash resulting from the position of the wing on the body.\n\n CAUTION: The method presented here is applicable for subsonic speeds.\n May be inaccurate for transonic or supersonic flight. A correction factor\n for supersonic flight is included, but may not be completely accurate.\n\n Assumptions:\n Mach number should not be transonic\n \n Source:\n None\n \n Inputs:\n wing - a data dictionary with the fields:\n effective_apsect_ratio - wing aspect ratio [dimensionless]. If \n this variable is not inlcuded in the input, the method will look\n for a variable named 'aspect_ratio'.\n sweep_le - wing leading-edge sweep angle [radians]\n taper - wing taper ratio [dimensionless]\n mach - flight Mach number [dimensionless]. Should be a numpy array\n with one or more elements.\n\n Outputs:\n cL_alpha - The derivative of 3D lift coefficient with respect to AoA\n\n Properties Used:\n N/A\n \"\"\" \n \n #Unpack inputs\n if 'effective_aspect_ratio' in wing:\n ar = wing.effective_aspect_ratio\n elif 'extended' in wing:\n if 'aspect_ratio' in wing.extended:\n ar = wing.extended.aspect_ratio\n else:\n ar = wing.aspect_ratio\n else:\n ar = wing.aspect_ratio \n \n #Compute relevent parameters\n cL_alpha = []\n half_chord_sweep = convert_sweep(wing,0.25,0.5) #Assumes original sweep is that of LE\n \n #Compute k correction factor for Mach number \n #First, compute corrected 2D section lift curve slope (C_la) for the given Mach number\n cla = 6.13 #Section C_la at M = 0; Roskam Airplane Design Part VI, Table 8.1 \n \n cL_alpha = np.ones_like(mach)\n Beta = np.ones_like(mach)\n k = np.ones_like(mach)\n cla_M = np.ones_like(mach)\n \n Beta[mach<1.] = (1.0-mach[mach<1.]**2.0)**0.5\n Beta[mach>1.] = (mach[mach>1.]**2.0-1.0)**0.5\n cla_M[mach<1.] = cla/Beta[mach<1.]\n cla_M[mach>1.] = 4.0/Beta[mach>1.]\n k = cla_M/(2.0*np.pi/Beta)\n \n #Compute aerodynamic surface 3D lift curve slope using the DATCOM formula\n cL_alpha =(2.0*np.pi*ar/(2.0+((ar**2.0*(Beta*Beta)/(k*k))*(1.0+(np.tan(half_chord_sweep))**2.0/(Beta*Beta))+4.0)**0.5))\n \n return cL_alpha\n", "# motor_test.py\n# \n# Created: M. Clarke, Feb 2020 \n# Mar 2020, M. Clarke\n# Sep 2020, M. 
Clarke \n\n#----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\nimport SUAVE\nfrom SUAVE.Core import Units\n\nfrom SUAVE.Core import (\nData, Container,\n)\nfrom SUAVE.Methods.Propulsion.electric_motor_sizing import size_from_mass , size_optimal_motor\nfrom SUAVE.Methods.Propulsion import propeller_design\nimport numpy as np\nimport copy, time\n\ndef main():\n '''This script checks the functions in in Motor.py used to compute motor torques \n and output voltage and currents'''\n # Propeller \n prop = SUAVE.Components.Energy.Converters.Propeller()\n prop.number_of_blades = 2.0 \n prop.freestream_velocity = 50.0\n prop.angular_velocity = 209.43951023931953\n prop.tip_radius = 1.5\n prop.hub_radius = 0.05\n prop.design_Cl = 0.7 \n prop.design_altitude = 0.0 * Units.km\n prop.design_thrust = 2271.2220451593753 \n\n prop.airfoil_geometry = ['../Vehicles/Airfoils/NACA_4412.txt'] \n prop.airfoil_polars = [['../Vehicles/Airfoils/Polars/NACA_4412_polar_Re_50000.txt' ,\n '../Vehicles/Airfoils/Polars/NACA_4412_polar_Re_100000.txt' ,\n '../Vehicles/Airfoils/Polars/NACA_4412_polar_Re_200000.txt' ,\n '../Vehicles/Airfoils/Polars/NACA_4412_polar_Re_500000.txt' ,\n '../Vehicles/Airfoils/Polars/NACA_4412_polar_Re_1000000.txt' ]]\n\n prop.airfoil_polar_stations = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] \n prop = propeller_design(prop) \n \n # Motor\n #------------------------------------------------------------------\n # Design Motors\n #------------------------------------------------------------------\n # Propeller (Thrust) motor\n motor = SUAVE.Components.Energy.Converters.Motor()\n motor.mass_properties.mass = 9. * Units.kg \n motor.efficiency = 0.935\n motor.gear_ratio = 1. \n motor.gearbox_efficiency = 1. # Gear box efficiency \n motor.no_load_current = 2.0 \n motor.propeller_radius = prop.tip_radius\n motor.nominal_voltage = 400\n motor = size_optimal_motor(motor,prop) \n \n # Propeller (Thrust) motor\n motor_low_fid = SUAVE.Components.Energy.Converters.Motor_Lo_Fid()\n motor_low_fid.motor_efficiency = 0.98\n motor_low_fid.rated_power = 1000\n motor_low_fid.rated_voltage = 200\n motor_low_fid.mass_properties.mass = 9. 
* Units.kg \n size_from_mass(motor_low_fid)\n \n # Find the operating conditions\n atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()\n atmosphere_conditions = atmosphere.compute_values(prop.design_altitude) \n V = prop.freestream_velocity\n conditions = Data()\n conditions.freestream = Data()\n conditions.propulsion = Data()\n conditions.frames = Data()\n conditions.frames.body = Data()\n conditions.frames.inertial = Data()\n conditions.freestream.update(atmosphere_conditions)\n conditions.freestream.dynamic_viscosity = atmosphere_conditions.dynamic_viscosity\n conditions.freestream.velocity = np.array([[V,0,0]])\n conditions.propulsion.throttle = np.array([[1.0]])\n conditions.frames.body.transform_to_inertial = np.array([np.eye(3)]) \n conditions.propulsion.propeller_power_coefficient = np.array([[0.02]]) \n \n #------------------------------------\n # Motor Omega Function \n #------------------------------------\n # create copy of motor to test functions \n motor_1 = motor \n \n # Define function specific inputs \n voltage_1 = 400\n motor_1.inputs.voltage = np.array([[voltage_1]]) \n motor_1.inputs.propeller_CP = conditions.propulsion.propeller_power_coefficient\n \n # Run Motor Omega Function \n omega_1 = motor_1.omega(conditions) \n torque_1 = motor_1.outputs.torque[0][0] \n \n #------------------------------------\n # Motor Current Function \n #------------------------------------\n # create copy of motor to test functions \n motor_2 = motor \n \n # Define function specific inputs \n motor_2.inputs.voltage = np.array([[voltage_1]])\n motor_2.outputs.omega = np.array([[prop.angular_velocity]])\n \n # Run Motor Current Function \n i, etam = motor_2.current(conditions) \n current_2 = i[0][0]\n \n #------------------------------------\n # Motor Torque Function \n #------------------------------------ \n # create copy of motor to test functions \n motor_3 = motor \n \n # Define function specific inputs \n motor_3.inputs.voltage = np.array([[voltage_1]]) \n motor_3.inputs.omega = np.array([[prop.angular_velocity]]) \n \n # Run Motor Torque Function \n motor_3.torque(conditions)\n torque_3 = motor_3.outputs.torque[0][0] \n \n #------------------------------------\n # Motor Voltage-Current Function \n #------------------------------------ \n # create copy of motor to test functions \n motor_4 = motor \n \n # Define function specific inputs \n motor_4.inputs.torque = np.array([[torque_1]])\n \n # Run Motor Voltage-Current Function \n motor_4.voltage_current(conditions) \n voltage_4 = motor_4.outputs.voltage[0][0]\n current_4 = motor_4.outputs.current[0][0]\n \n #------------------------------------\n # Low Fidelity Motor \n #------------------------------------ \n motor_low_fid.inputs.voltage = np.array([[voltage_1]]) \n p , i = motor_low_fid.power_lo(conditions)\n power_out = p[0][0]\n current = i[0][0] \n \n # Truth values\n omega_1_truth = 163.57739949\n torque_1_truth = 642.2133839748203\n current_2_truth = 280.91758304681844 \n torque_3_truth = 394.3371015401603\n voltage_4_truth = 464.840414954647\n current_4_truth = 456.2423326614951\n power_out_truth = 1960.0\n \n error = Data()\n error.omega_test = np.max(np.abs(omega_1_truth - omega_1[0][0] ))\n error.torque_test_1 = np.max(np.abs(torque_1_truth - torque_1 ))\n error.current_test_1 = np.max(np.abs(current_2_truth - current_2))\n error.torque_test_2 = np.max(np.abs(torque_3_truth - torque_3 ))\n error.voltage_test = np.max(np.abs(voltage_4_truth - voltage_4))\n error.current_test_2 = np.max(np.abs(current_4_truth - 
current_4))\n error.power_out_test = np.max(np.abs(power_out_truth - power_out)) \n \n print('Errors:')\n print(error)\n \n for k,v in list(error.items()):\n assert(np.abs(v)<1e-6)\n \n return\n\n# ---------------------------------------------------------------------- \n# Call Main\n# ---------------------------------------------------------------------- \n\nif __name__ == '__main__':\n main()", "## @ingroup Methods-Aerodynamics-Lifting_line\n# Lifting_Line.py\n# \n# Created: Aug 2017, E. Botero\n# Modified: \n# \n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\nimport numpy as np\n\n# ----------------------------------------------------------------------\n# The Function\n# ----------------------------------------------------------------------\n\n## @ingroup Methods-Aerodynamics-Lifting_line\ndef lifting_line(conditions,settings,geometry):\n \"\"\"\n\n Assumptions:\n subsonic and unswept\n\n Source:\n Traub, L. W., Botero, E., Waghela, R., Callahan, R., & Watson, A. (2015). Effect of Taper Ratio at Low Reynolds Number. Journal of Aircraft.\n \n Inputs:\n wing.\n spans.projected [m]\n chords.root [m]\n chords.tip [m]\n chords.mean_aerodynamic [m]\n twists.root [radians]\n twists.tip [radians]\n aspect_ratio [Unitless]\n areas.reference [m^2]\n vertical [Boolean]\n\n settings.number_of_stations [int]\n conditions.aerodynamics.angle_of_attack [radians]\n\n Outputs:\n CL [Unitless]\n CD [Unitless]\n\n Properties Used:\n N/A\n \"\"\" \n \n # Unpack first round:\n wing = geometry\n orientation = wing.vertical \n \n # Don't bother doing the calculation if it is a vertical tail\n if orientation == True:\n CL = 0.0\n CD = 0.0\n return CL, CD\n else:\n pass\n \n # Unpack fo'real\n b = wing.spans.projected\n S = wing.areas.reference\n AR = wing.aspect_ratio\n MAC = wing.chords.mean_aerodynamic\n taper = wing.taper\n tip_twist = wing.twists.root\n root_twist = wing.twists.tip \n root_chord = wing.chords.root\n tip_chord = wing.chords.tip \n r = settings.number_of_stations # Number of divisions\n alpha = conditions.aerodynamics.angle_of_attack\n \n # Make sure alpha is 2D\n alpha = np.atleast_2d(alpha)\n \n repeats = np.size(alpha)\n \n # Need to set to something\n cla = 2 * np.pi # 2-D lift curve slope\n azl = 0. # 2-D \n\n # Start doing calculations\n N = r-1 # number of spanwise divisions\n n = np.linspace(1,N,N) # vectorize\n thetan = n*np.pi/r # angular stations\n yn = -b*np.cos(thetan)/2. 
# y locations based on the angular spacing\n etan = np.abs(2.*yn/b) # normalized coordinates\n etam = np.pi*np.sin(thetan)/(2*r) # Useful mulitplier\n \n # Project the spanwise y locations into the chords\n segment_keys = wing.Segments.keys()\n n_segments = len(segment_keys)\n # If spanwise stations are setup\n if n_segments>0:\n c = np.ones_like(etan) * wing.chords.root\n ageo = np.ones_like(etan) * wing.twists.root \n for i_seg in range(n_segments):\n \n # Figure out where the segment starts\n X1 = wing.Segments[segment_keys[i_seg]].percent_span_location\n L1 = wing.Segments[segment_keys[i_seg]].root_chord_percent\n T1 = wing.Segments[segment_keys[i_seg]].twist \n\n if i_seg == n_segments-1 and X1 == 1.0:\n X2 = 1.0\n L2 = wing.chords.tip/wing.chords.root\n T2 = wing.twists.tip\n else:\n X2 = wing.Segments[segment_keys[i_seg+1]].percent_span_location\n L2 = wing.Segments[segment_keys[i_seg+1]].root_chord_percent\n T2 = wing.Segments[segment_keys[i_seg+1]].twist\n \n \n bools = np.logical_and(etan>X1,etan<X2)\n \n c[bools] = (L1 + (etan[bools]-X1)*(L2-L1)/(X2-X1)) * root_chord\n ageo[bools] = (T1 + (etan[bools]-X1)*(T2-T1)/(X2-X1))\n \n\n # Spanwise stations are not setup\n else:\n # Use the taper ratio to determine the chord distribution\n # Use the geometric twist applied to the ends to \n \n # Find the chords and twist profile\n c = root_chord+root_chord*(taper-1.)*etan\n ageo = (tip_twist-root_twist)*etan+root_twist\n\n k = c*cla/(4.*b) # Grouped term \n\n \n n_trans = np.atleast_2d(n).T\n \n # Right hand side matrix\n RHS = (np.sin(n_trans*thetan)*(np.sin(thetan)+n_trans*k))\n \n # Expand out for all the angles of attack\n RHS2 = np.tile(RHS.T, (repeats,1,1))\n\n # Left hand side vector \n LHS = k*np.sin(thetan)*(alpha+ageo-azl)\n \n # The Fourier Coefficients\n A = np.linalg.solve(RHS2,LHS)\n \n # The 3-D Coefficient of lift\n CL = A[:,0]*np.pi*AR\n \n # Find the sectional coefficients of lift\n Cl = b*np.cumsum(4*A*np.sin(n*thetan),axis=1)/c\n \n # induced alpha\n alpha_i = np.cumsum(n*A*np.sin(n*A)/np.sin(thetan),axis=1)\n \n # Sectional vortex drag\n Cdv = Cl*alpha_i\n \n # Total vortex drag\n CDv = np.sum(Cdv*AR*etam,axis=1)\n \n #############\n # Profile drag of a 2-D section\n # This is currently stubbed out. If the 2-D sectional data is known it can be added to get viscous drag\n Cdn = 0.00\n #############\n \n # Find the profile drag\n CDp = np.sum(Cdn*c*etam)/MAC\n \n CD = CDv + CDp\n \n return CL, CD", "# test_Stopped_Rotor.py\n#\n# Created: Feb 2020, M. Clarke\n# Sep 2020, M. Clarke\n# Jul 2021, R. 
Erhard\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\nimport SUAVE\nfrom SUAVE.Core import Units , Data\nfrom SUAVE.Plots.Performance.Mission_Plots import *\nfrom SUAVE.Plots.Geometry import *\nimport sys\nimport numpy as np\n\nsys.path.append('../Vehicles')\n# the analysis functions\n\nfrom Stopped_Rotor import vehicle_setup, configs_setup\n\n# ----------------------------------------------------------------------\n# Main\n# ----------------------------------------------------------------------\n\ndef main():\n\n # ------------------------------------------------------------------------------------------------------------------\n # Stopped-Rotor\n # ------------------------------------------------------------------------------------------------------------------\n # build the vehicle, configs, and analyses\n configs, analyses = full_setup()\n configs.finalize()\n analyses.finalize()\n\n # Print weight properties of vehicle\n weights = configs.base.weight_breakdown\n print(weights)\n print(configs.base.mass_properties.center_of_gravity) \n \n # check weights\n empty_r = 1013.0718119599941\n structural_r = 330.4958877631757\n total_r = 1213.0718119599942\n lift_rotors_r = 16.445392185186808\n propellers_r = 3.2944573008378044\n prop_motors_r = 2.0\n rot_motors_r = 36.0\n\n weights_error = Data()\n weights_error.empty = abs(empty_r - weights.empty)/empty_r\n weights_error.structural = abs(structural_r - weights.structural)/structural_r\n weights_error.total = abs(total_r - weights.total)/total_r\n weights_error.lift_rotors = abs(lift_rotors_r - weights.lift_rotors)/lift_rotors_r\n weights_error.propellers = abs(propellers_r - weights.propellers)/propellers_r\n weights_error.propellers = abs(prop_motors_r - weights.propeller_motors)/prop_motors_r\n weights_error.propellers = abs(rot_motors_r - weights.lift_rotor_motors)/rot_motors_r\n\n for k, v in weights_error.items():\n assert (np.abs(v) < 1E-6)\n\n # evaluate mission\n mission = analyses.missions.base\n results = mission.evaluate()\n\n # plot results\n plot_mission(results,configs.base)\n\n # save, load and plot old results\n #save_stopped_rotor_results(results)\n old_results = load_stopped_rotor_results()\n plot_mission(old_results,configs.base, 'k-')\n\n # RPM of rotor check during hover\n RPM = results.segments.climb_1.conditions.propulsion.lift_rotor_rpm[0][0]\n RPM_true = 2383.999687566465\n print(RPM)\n diff_RPM = np.abs(RPM - RPM_true)\n print('RPM difference')\n print(diff_RPM)\n assert np.abs((RPM - RPM_true)/RPM_true) < 1e-3\n\n # Battery Energy Check During Transition\n battery_energy_hover_to_transition = results.segments.transition_1.conditions.propulsion.battery_energy[:,0]\n battery_energy_hover_to_transition_true = np.array([3.37412525e+08, 3.36777016e+08, 3.35686588e+08])\n \n print(battery_energy_hover_to_transition)\n diff_battery_energy_hover_to_transition = np.abs(battery_energy_hover_to_transition - battery_energy_hover_to_transition_true)\n print('battery_energy_hover_to_transition difference')\n print(diff_battery_energy_hover_to_transition)\n assert all(np.abs((battery_energy_hover_to_transition - battery_energy_hover_to_transition_true)/battery_energy_hover_to_transition) < 1e-3)\n\n # lift Coefficient Check During Cruise\n lift_coefficient = results.segments.departure_terminal_procedures.conditions.aerodynamics.lift_coefficient[0][0]\n lift_coefficient_true = 0.8043927973520482\n 
print(lift_coefficient)\n diff_CL = np.abs(lift_coefficient - lift_coefficient_true)\n print('CL difference')\n print(diff_CL)\n assert np.abs((lift_coefficient - lift_coefficient_true)/lift_coefficient_true) < 1e-3\n\n return\n\n# ----------------------------------------------------------------------\n# Analysis Setup\n# ----------------------------------------------------------------------\ndef full_setup():\n\n # vehicle data\n vehicle = vehicle_setup()\n configs = configs_setup(vehicle)\n plot_vehicle(vehicle,plot_control_points = False)\n\n # vehicle analyses\n configs_analyses = analyses_setup(configs)\n\n # mission analyses\n mission = mission_setup(configs_analyses,vehicle)\n missions_analyses = missions_setup(mission)\n\n analyses = SUAVE.Analyses.Analysis.Container()\n analyses.configs = configs_analyses\n analyses.missions = missions_analyses\n\n return configs, analyses\n\n# ----------------------------------------------------------------------\n# Define the Vehicle Analyses\n# ----------------------------------------------------------------------\n\ndef analyses_setup(configs):\n\n analyses = SUAVE.Analyses.Analysis.Container()\n\n # build a base analysis for each config\n for tag,config in configs.items():\n analysis = base_analysis(config)\n analyses[tag] = analysis\n\n return analyses\n\ndef base_analysis(vehicle):\n\n # ------------------------------------------------------------------\n # Initialize the Analyses\n # ------------------------------------------------------------------\n analyses = SUAVE.Analyses.Vehicle()\n\n # ------------------------------------------------------------------\n # Basic Geometry Relations\n sizing = SUAVE.Analyses.Sizing.Sizing()\n sizing.features.vehicle = vehicle\n analyses.append(sizing)\n\n # ------------------------------------------------------------------\n # Weights\n weights = SUAVE.Analyses.Weights.Weights_eVTOL()\n weights.vehicle = vehicle\n analyses.append(weights)\n\n # ------------------------------------------------------------------\n # Aerodynamics Analysis\n aerodynamics = SUAVE.Analyses.Aerodynamics.Fidelity_Zero()\n aerodynamics.geometry = vehicle\n aerodynamics.settings.drag_coefficient_increment = 0.4*vehicle.excrescence_area_spin / vehicle.reference_area\n analyses.append(aerodynamics)\n\n # ------------------------------------------------------------------\n # Energy\n energy= SUAVE.Analyses.Energy.Energy()\n energy.network = vehicle.networks\n analyses.append(energy)\n\n\n # ------------------------------------------------------------------\n # Noise Analysis\n noise = SUAVE.Analyses.Noise.Fidelity_One()\n noise.geometry = vehicle\n analyses.append(noise)\n\n # ------------------------------------------------------------------\n # Planet Analysis\n planet = SUAVE.Analyses.Planets.Planet()\n analyses.append(planet)\n\n # ------------------------------------------------------------------\n # Atmosphere Analysis\n atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()\n atmosphere.features.planet = planet.features\n analyses.append(atmosphere)\n\n return analyses\n\n\ndef mission_setup(analyses,vehicle):\n\n # ------------------------------------------------------------------\n # Initialize the Mission\n # ------------------------------------------------------------------\n mission = SUAVE.Analyses.Mission.Sequential_Segments()\n mission.tag = 'the_mission'\n\n # airport\n airport = SUAVE.Attributes.Airports.Airport()\n airport.altitude = 0.0 * Units.ft\n airport.delta_isa = 0.0\n airport.atmosphere = 
SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976()\n\n mission.airport = airport\n\n # unpack Segments module\n Segments = SUAVE.Analyses.Mission.Segments\n\n # base segment\n base_segment = Segments.Segment()\n base_segment.state.numerics.number_control_points = 3 \n base_segment.process.initialize.initialize_battery = SUAVE.Methods.Missions.Segments.Common.Energy.initialize_battery\n base_segment.process.iterate.conditions.planet_position = SUAVE.Methods.skip\n\n # VSTALL Calculation\n m = vehicle.mass_properties.max_takeoff\n g = 9.81\n S = vehicle.reference_area\n atmo = SUAVE.Analyses.Atmospheric.US_Standard_1976()\n rho = atmo.compute_values(1000.*Units.feet,0.).density\n CLmax = 1.2\n Vstall = float(np.sqrt(2.*m*g/(rho*S*CLmax)))\n\n\n # ------------------------------------------------------------------\n # First Climb Segment: Constant Speed, Constant Rate\n # ------------------------------------------------------------------\n segment = Segments.Hover.Climb(base_segment)\n segment.tag = \"climb_1\"\n segment.analyses.extend( analyses.base )\n segment.altitude_start = 0.0 * Units.ft\n segment.altitude_end = 40. * Units.ft\n segment.climb_rate = 500. * Units['ft/min']\n segment.battery_energy = vehicle.networks.lift_cruise.battery.max_energy\n segment.process.iterate.unknowns.mission = SUAVE.Methods.skip\n segment.process.iterate.conditions.stability = SUAVE.Methods.skip\n segment.process.finalize.post_process.stability = SUAVE.Methods.skip \n segment = vehicle.networks.lift_cruise.add_lift_unknowns_and_residuals_to_segment(segment,\\\n initial_lift_rotor_power_coefficient = 0.01,\n initial_throttle_lift = 0.9)\n # add to misison\n mission.append_segment(segment)\n\n # ------------------------------------------------------------------\n # First Cruise Segment: Transition\n # ------------------------------------------------------------------\n segment = Segments.Transition.Constant_Acceleration_Constant_Pitchrate_Constant_Altitude(base_segment)\n segment.tag = \"transition_1\"\n segment.analyses.extend( analyses.base )\n\n segment.altitude = 40. * Units.ft\n segment.air_speed_start = 500. * Units['ft/min']\n segment.air_speed_end = 0.8 * Vstall\n segment.acceleration = 9.8/5\n segment.pitch_initial = 0.0 * Units.degrees\n segment.pitch_final = 5. * Units.degrees\n ones_row = segment.state.ones_row\n segment.state.unknowns.throttle = 1. * ones_row(1)\n segment.process.iterate.unknowns.mission = SUAVE.Methods.skip\n segment.process.iterate.conditions.stability = SUAVE.Methods.skip\n segment.process.finalize.post_process.stability = SUAVE.Methods.skip\n segment = vehicle.networks.lift_cruise.add_transition_unknowns_and_residuals_to_segment(segment,\n initial_prop_power_coefficient = 0.2,\n initial_lift_rotor_power_coefficient = 0.01,\n initial_throttle_lift = 0.9,)\n\n # add to misison\n mission.append_segment(segment)\n\n # ------------------------------------------------------------------\n # First Cruise Segment: Transition\n # ------------------------------------------------------------------\n segment = Segments.Transition.Constant_Acceleration_Constant_Angle_Linear_Climb(base_segment)\n segment.tag = \"transition_2\"\n segment.analyses.extend( analyses.base )\n segment.altitude_start = 40.0 * Units.ft\n segment.altitude_end = 50.0 * Units.ft\n segment.air_speed = 0.8 * Vstall\n segment.climb_angle = 1 * Units.degrees\n segment.acceleration = 0.5 * Units['m/s/s']\n segment.pitch_initial = 5. * Units.degrees\n segment.pitch_final = 7. 
* Units.degrees\n segment.state.unknowns.throttle = 0.95 * ones_row(1)\n segment.process.iterate.unknowns.mission = SUAVE.Methods.skip\n segment.process.iterate.conditions.stability = SUAVE.Methods.skip\n segment.process.finalize.post_process.stability = SUAVE.Methods.skip\n segment = vehicle.networks.lift_cruise.add_transition_unknowns_and_residuals_to_segment(segment,\n initial_prop_power_coefficient = 0.2,\n initial_lift_rotor_power_coefficient = 0.01,\n initial_throttle_lift = 0.9,)\n\n # add to misison\n mission.append_segment(segment)\n\n\n # ------------------------------------------------------------------\n # Second Climb Segment: Constant Speed, Constant Rate\n # ------------------------------------------------------------------\n segment = Segments.Climb.Constant_Speed_Constant_Rate(base_segment)\n segment.tag = \"climb_2\"\n segment.analyses.extend( analyses.base )\n segment.air_speed = 1.1*Vstall\n segment.altitude_start = 50.0 * Units.ft\n segment.altitude_end = 300. * Units.ft\n segment.climb_rate = 500. * Units['ft/min'] \n segment.state.unknowns.throttle = 0.80 * ones_row(1)\n segment = vehicle.networks.lift_cruise.add_cruise_unknowns_and_residuals_to_segment(segment)\n\n # add to misison\n mission.append_segment(segment)\n\n # ------------------------------------------------------------------\n # Second Cruise Segment: Constant Speed, Constant Altitude\n # ------------------------------------------------------------------\n segment = Segments.Cruise.Constant_Speed_Constant_Altitude_Loiter(base_segment)\n segment.tag = \"departure_terminal_procedures\"\n segment.analyses.extend( analyses.base )\n segment.altitude = 300.0 * Units.ft\n segment.time = 60. * Units.second\n segment.air_speed = 1.2*Vstall\n segment.state.unknowns.throttle = 0.80 * ones_row(1)\n segment = vehicle.networks.lift_cruise.add_cruise_unknowns_and_residuals_to_segment(segment,\\\n initial_prop_power_coefficient = 0.16)\n\n # add to misison\n mission.append_segment(segment)\n \n # ------------------------------------------------------------------\n # Third Climb Segment: Constant Acceleration, Constant Rate\n # ------------------------------------------------------------------ \n segment = Segments.Climb.Linear_Speed_Constant_Rate(base_segment)\n segment.tag = \"climb_2\" \n segment.analyses.extend( analyses.base) \n segment.altitude_start = 300.0 * Units.ft \n segment.altitude_end = 1000. * Units.ft\n segment.climb_rate = 500. * Units['ft/min']\n segment.air_speed_start = 1.2*Vstall\n segment.air_speed_end = 110. * Units['mph'] \n segment.state.unknowns.throttle = 0.90 * ones_row(1)\n segment = vehicle.networks.lift_cruise.add_cruise_unknowns_and_residuals_to_segment(segment) \n mission.append_segment(segment) \n \n # ------------------------------------------------------------------ \n # Cruise Segment: constant speed, constant altitude\n # ------------------------------------------------------------------ \n segment = Segments.Cruise.Constant_Speed_Constant_Altitude(base_segment)\n segment.tag = \"cruise\" \n segment.analyses.extend( analyses.base ) \n segment.altitude = 1000.0 * Units.ft\n segment.air_speed = 110. * Units['mph']\n segment.distance = 50. 
* Units.miles \n segment.state.unknowns.throttle = 0.60 * ones_row(1) \n segment = vehicle.networks.lift_cruise.add_cruise_unknowns_and_residuals_to_segment(segment) \n mission.append_segment(segment) \n\n \n\n return mission\n\ndef missions_setup(base_mission):\n\n # the mission container\n missions = SUAVE.Analyses.Mission.Mission.Container()\n\n # ------------------------------------------------------------------\n # Base Mission\n # ------------------------------------------------------------------\n\n missions.base = base_mission\n\n\n # done!\n return missions \n\n\n# ----------------------------------------------------------------------\n# Plot Results\n# ----------------------------------------------------------------------\ndef plot_mission(results,vec_configs,line_style='bo-'):\n\n # Plot Flight Conditions\n plot_flight_conditions(results, line_style)\n\n # Plot Aerodynamic Coefficients\n plot_aerodynamic_coefficients(results, line_style)\n\n # Plot Aircraft Flight Speed\n plot_aircraft_velocities(results, line_style)\n\n # Plot Aircraft Electronics\n plot_battery_pack_conditions(results, line_style)\n\n # Plot Electric Motor and Propeller Efficiencies of Lift Cruise Network\n plot_lift_cruise_network(results, line_style)\n\n return\n\ndef load_stopped_rotor_results():\n return SUAVE.Input_Output.SUAVE.load('results_stopped_rotor.res')\n\ndef save_stopped_rotor_results(results):\n\n for segment in results.segments.values():\n del segment.conditions.noise\n\n SUAVE.Input_Output.SUAVE.archive(results,'results_stopped_rotor.res')\n return\n\n\nif __name__ == '__main__':\n main()\n plt.show(block=True)\n", "## @ingroup Components-Energy-Networks\n# Battery_Propeller.py\n# \n# Created: Jul 2015, E. Botero\n# Modified: Feb 2016, T. MacDonald\n# Mar 2020, M. Clarke \n# Apr 2021, M. Clarke\n# Jul 2021, E. Botero\n# Jul 2021, R. Erhard\n# Aug 2021, M. Clarke\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\n# package imports\nimport SUAVE\nimport numpy as np\nfrom .Network import Network\nfrom SUAVE.Analyses.Mission.Segments.Conditions import Residuals\nfrom SUAVE.Components.Physical_Component import Container \nfrom SUAVE.Methods.Power.Battery.pack_battery_conditions import pack_battery_conditions\nfrom SUAVE.Methods.Power.Battery.append_initial_battery_conditions import append_initial_battery_conditions\nfrom SUAVE.Core import Data , Units \n\n# ----------------------------------------------------------------------\n# Network\n# ----------------------------------------------------------------------\n\n## @ingroup Components-Energy-Networks\nclass Battery_Propeller(Network):\n \"\"\" This is a simple network with a battery powering a propeller through\n an electric motor\n \n This network adds 2 extra unknowns to the mission. 
The first is\n a voltage, to calculate the thevenin voltage drop in the pack.\n The second is torque matching between motor and propeller.\n \n Assumptions:\n The y axis rotation is used for rotating the propeller about the Y-axis for tilt rotors and tiltwings\n \n Source:\n None\n \"\"\" \n def __defaults__(self):\n \"\"\" This sets the default values for the network to function.\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n None\n \n Outputs:\n None\n \n Properties Used:\n N/A\n \"\"\" \n self.propeller_motors = Container()\n self.lift_rotor_motors = Container()\n self.propellers = Container()\n self.lift_rotors = Container()\n self.esc = None\n self.avionics = None\n self.payload = None\n self.battery = None\n self.nacelle_diameter = None\n self.engine_length = None\n self.number_of_propeller_engines = None\n self.number_of_lift_rotor_engines = None\n self.voltage = None\n self.tag = 'Battery_Propeller'\n self.use_surrogate = False\n self.pitch_command = 0.0\n self.generative_design_minimum = 0\n self.pitch_command = 0\n self.identical_propellers = True\n self.identical_lift_rotors = True\n self.thrust_angle = 0. \n \n # manage process with a driver function\n def evaluate_thrust(self,state):\n \"\"\" Calculate thrust given the current state of the vehicle\n \n Assumptions:\n Caps the throttle at 110% and linearly interpolates thrust off that\n \n Source:\n N/A\n \n Inputs:\n state [state()]\n \n Outputs:\n results.thrust_force_vector [newtons]\n results.vehicle_mass_rate [kg/s]\n conditions.propulsion:\n rpm [radians/sec]\n current [amps]\n battery_power_draw [watts]\n battery_energy [joules]\n battery_voltage_open_circuit [V]\n battery_voltage_under_load [V]\n motor_torque [N-M]\n propeller_torque [N-M]\n \n Properties Used:\n Defaulted values\n \"\"\" \n \n # unpack \n conditions = state.conditions\n numerics = state.numerics\n esc = self.esc\n avionics = self.avionics\n payload = self.payload\n battery = self.battery \n \n if self.number_of_lift_rotor_engines != None: \n num_engines = self.number_of_lift_rotor_engines\n identical_flag = self.identical_lift_rotors\n motors = self.lift_rotor_motors\n props = self.lift_rotors\n else:\n num_engines = self.number_of_propeller_engines \n identical_flag = self.identical_propellers\n motors = self.propeller_motors\n props = self.propellers \n \n # Set battery energy\n battery.current_energy = conditions.propulsion.battery_energy\n battery.pack_temperature = conditions.propulsion.battery_pack_temperature\n battery.cell_charge_throughput = conditions.propulsion.battery_cell_charge_throughput \n battery.age = conditions.propulsion.battery_cycle_day \n discharge_flag = conditions.propulsion.battery_discharge_flag \n battery.R_growth_factor = conditions.propulsion.battery_resistance_growth_factor\n battery.E_growth_factor = conditions.propulsion.battery_capacity_fade_factor \n battery.max_energy = conditions.propulsion.battery_max_aged_energy \n n_series = battery.pack_config.series \n n_parallel = battery.pack_config.parallel\n \n # update ambient temperature based on altitude\n battery.ambient_temperature = conditions.freestream.temperature \n battery.cooling_fluid.thermal_conductivity = conditions.freestream.thermal_conductivity\n battery.cooling_fluid.kinematic_viscosity = conditions.freestream.kinematic_viscosity\n battery.cooling_fluid.prandtl_number = conditions.freestream.prandtl_number\n battery.cooling_fluid.density = conditions.freestream.density \n battery.ambient_pressure = conditions.freestream.pressure \n a = 
conditions.freestream.speed_of_sound \n \n # Predict voltage based on battery \n volts = battery.compute_voltage(state) \n \n # --------------------------------------------------------------------------------\n # Run Motor, Avionics and Systems (Discharge Model)\n # -------------------------------------------------------------------------------- \n if discharge_flag: \n # Step 1 battery power\n esc.inputs.voltagein = volts\n \n # Step 2\n esc.voltageout(conditions)\n \n # How many evaluations to do\n if identical_flag:\n n_evals = 1\n factor = num_engines*1\n else:\n n_evals = int(num_engines)\n factor = 1.\n \n # Setup numbers for iteration\n total_motor_current = 0.\n total_thrust = 0. * state.ones_row(3)\n total_power = 0.\n \n # Iterate over motor/props\n for ii in range(n_evals):\n \n # Unpack the motor and props\n motor_key = list(motors.keys())[ii]\n prop_key = list(props.keys())[ii]\n \n \n if self.number_of_propeller_engines != None: \n motor = self.propeller_motors[motor_key]\n prop = self.propellers[prop_key]\n else: \n motor = self.lift_rotor_motors[motor_key]\n prop = self.lift_rotors[prop_key]\n \n # link \n motor.inputs.voltage = esc.outputs.voltageout\n motor.inputs.propeller_CP = np.atleast_2d(conditions.propulsion.propeller_power_coefficient[:,ii]).T\n \n # step 3\n motor.omega(conditions)\n \n # link\n prop.inputs.omega = motor.outputs.omega\n prop.inputs.pitch_command = self.pitch_command\n prop.inputs.y_axis_rotation = self.thrust_angle\n \n # step 4\n F, Q, P, Cp, outputs, etap = prop.spin(conditions)\n \n # Check to see if magic thrust is needed, the ESC caps throttle at 1.1 already\n eta = conditions.propulsion.throttle[:,0,None]\n P[eta>1.0] = P[eta>1.0]*eta[eta>1.0]\n F[eta[:,0]>1.0,:] = F[eta[:,0]>1.0,:]*eta[eta[:,0]>1.0,:]\n \n # Run the motor for current\n _ , etam = motor.current(conditions)\n \n # Conditions specific to this instantation of motor and propellers\n R = prop.tip_radius\n rpm = motor.outputs.omega / Units.rpm\n F_mag = np.atleast_2d(np.linalg.norm(F, axis=1)).T\n total_thrust = total_thrust + F * factor\n total_power = total_power + P * factor\n total_motor_current = total_motor_current + factor*motor.outputs.current\n \n # Pack specific outputs\n conditions.propulsion.propeller_motor_efficiency[:,ii] = etam[:,0] \n conditions.propulsion.propeller_motor_torque[:,ii] = motor.outputs.torque[:,0]\n conditions.propulsion.propeller_torque[:,ii] = Q[:,0]\n conditions.propulsion.propeller_thrust[:,ii] = np.linalg.norm(total_thrust ,axis = 1) \n conditions.propulsion.propeller_rpm[:,ii] = rpm[:,0]\n conditions.propulsion.propeller_tip_mach[:,ii] = (R*rpm[:,0]*Units.rpm)/a[:,0]\n conditions.propulsion.disc_loading[:,ii] = (F_mag[:,0])/(np.pi*(R**2)) # N/m^2 \n conditions.propulsion.power_loading[:,ii] = (F_mag[:,0])/(P[:,0]) # N/W \n conditions.propulsion.propeller_efficiency[:,ii] = etap[:,0] \n \n if self.number_of_propeller_engines != None: \n conditions.noise.sources.propellers[prop.tag] = outputs\n else: \n conditions.noise.sources.lift_rotors[prop.tag] = outputs\n \n # Run the avionics\n avionics.power()\n \n # Run the payload\n payload.power()\n \n # link\n esc.inputs.currentout = total_motor_current\n \n # Run the esc\n esc.currentin(conditions) \n \n # Calculate avionics and payload power\n avionics_payload_power = avionics.outputs.power + payload.outputs.power\n \n # Calculate avionics and payload current\n avionics_payload_current = avionics_payload_power/self.voltage \n \n # link\n battery.inputs.current = esc.outputs.currentin + 
avionics_payload_current\n battery.inputs.power_in = -(esc.outputs.voltageout *esc.outputs.currentin + avionics_payload_power)\n battery.energy_calc(numerics,discharge_flag) \n \n # --------------------------------------------------------------------------------\n # Run Charge Model \n # -------------------------------------------------------------------------------- \n else: \n # link \n battery.inputs.current = -battery.cell.charging_current*n_parallel * np.ones_like(volts)\n battery.inputs.voltage = battery.cell.charging_voltage*n_series * np.ones_like(volts)\n battery.inputs.power_in = -battery.inputs.current * battery.inputs.voltage \n battery.energy_calc(numerics,discharge_flag) \n \n avionics_payload_power = np.zeros((len(volts),1)) \n total_thrust = np.zeros((len(volts),3)) \n P = battery.inputs.power_in\n \n # Pack the conditions for outputs\n pack_battery_conditions(conditions,battery,avionics_payload_power,P) \n \n # Create the outputs\n results = Data()\n results.thrust_force_vector = total_thrust\n results.vehicle_mass_rate = state.ones_row(1)*0.0 \n \n return results\n \n def unpack_unknowns(self,segment):\n \"\"\" This is an extra set of unknowns which are unpacked from the mission solver and send to the network.\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n state.unknowns.propeller_power_coefficient [None] \n unknowns specific to the battery cell \n \n Outputs:\n state.conditions.propulsion.propeller_power_coefficient [None] \n conditions specific to the battery cell\n \n Properties Used:\n N/A\n \"\"\" \n \n # unpack the ones function\n ones_row = segment.state.ones_row\n \n # Here we are going to unpack the unknowns (Cp) provided for this network\n ss = segment.state \n if segment.battery_discharge:\n ss.conditions.propulsion.propeller_power_coefficient = ss.unknowns.propeller_power_coefficient \n else: \n ss.conditions.propulsion.propeller_power_coefficient = 0. * ones_row(1)\n \n battery = self.battery \n battery.append_battery_unknowns(segment) \n \n return \n\n \n \n def residuals(self,segment):\n \"\"\" This packs the residuals to be sent to the mission solver.\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n state.conditions.propulsion:\n motor_torque [N-m]\n propeller_torque [N-m] \n unknowns specific to the battery cell \n \n Outputs:\n residuals specific to battery cell and network\n \n Properties Used: \n N/A\n \"\"\" \n \n network = self\n battery = self.battery \n battery.append_battery_residuals(segment,network) \n \n if segment.battery_discharge: \n q_motor = segment.state.conditions.propulsion.propeller_motor_torque\n q_prop = segment.state.conditions.propulsion.propeller_torque \n segment.state.residuals.network.propellers = q_motor - q_prop\n \n return \n\n def add_unknowns_and_residuals_to_segment(self, segment, initial_voltage = None, initial_power_coefficient = 0.02,\n initial_battery_cell_temperature = 283. 
, initial_battery_state_of_charge = 0.5,\n initial_battery_cell_current = 5.):\n \"\"\" This function sets up the information that the mission needs to run a mission segment using this network\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n segment\n initial_voltage [v]\n initial_power_coefficient [float]s\n \n Outputs:\n segment.state.unknowns.battery_voltage_under_load\n segment.state.unknowns.propeller_power_coefficient\n segment.state.conditions.propulsion.propeller_motor_torque\n segment.state.conditions.propulsion.propeller_torque \n \n Properties Used:\n N/A\n \"\"\" \n\n if self.number_of_lift_rotor_engines != None: \n n_eng = int(self.number_of_lift_rotor_engines)\n identical_flag = self.identical_lift_rotors\n n_props = len(self.lift_rotors)\n n_motors = len(self.lift_rotor_motors)\n else:\n n_eng = int(self.number_of_propeller_engines)\n identical_flag = self.identical_propellers\n n_props = len(self.propellers)\n n_motors = len(self.propeller_motors)\n \n # unpack the ones function\n ones_row = segment.state.ones_row\n \n # unpack the initial values if the user doesn't specify\n if initial_voltage==None:\n initial_voltage = self.battery.max_voltage\n \n # Count how many unknowns and residuals based on p) \n \n if n_props!=n_motors!=n_eng:\n print('The number of propellers is not the same as the number of motors')\n \n # Now check if the propellers are all identical, in this case they have the same of residuals and unknowns\n if identical_flag:\n n_props = 1 \n\n # Assign initial segment conditions to segment if missing\n battery = self.battery\n append_initial_battery_conditions(segment,battery) \n \n # add unknowns and residuals specific to battery cell \n segment.state.residuals.network = Residuals() \n battery.append_battery_unknowns_and_residuals_to_segment(segment,initial_voltage,\n initial_battery_cell_temperature , initial_battery_state_of_charge,\n initial_battery_cell_current) \n\n if segment.battery_discharge: \n segment.state.unknowns.propeller_power_coefficient = initial_power_coefficient * ones_row(n_props) \n \n # Setup the conditions\n segment.state.conditions.propulsion.propeller_motor_efficiency = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_motor_torque = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_torque = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_thrust = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_rpm = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.disc_loading = 0. * ones_row(n_props) \n segment.state.conditions.propulsion.power_loading = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_tip_mach = 0. * ones_row(n_props)\n segment.state.conditions.propulsion.propeller_efficiency = 0. * ones_row(n_props) \n \n # Ensure the mission knows how to pack and unpack the unknowns and residuals\n segment.process.iterate.unknowns.network = self.unpack_unknowns\n segment.process.iterate.residuals.network = self.residuals \n\n return segment\n \n __call__ = evaluate_thrust\n\n\n", "## @ingroup Methods-Flight_Dynamics-Static_Stability-Approximations-Supporting_Functions\n# extend_to_ref_area.py\n#\n# Created: Mar 2014, T. Momose\n# Modified: Jan 2016, E. 
Botero\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\n#SUAVE Imports \nimport numpy as np\nfrom SUAVE.Core import Data\n\n# ----------------------------------------------------------------------\n# Method\n# ----------------------------------------------------------------------\n\n## @ingroup Methods-Flight_Dynamics-Static_Stability-Approximations-Supporting_Functions\ndef extend_to_ref_area(surface):\n \"\"\" This method takes inputs describing the exposed portion of a trapezoidal\n aerodynamic surface and calculates the dimensions of a corresponding\n aerodynamic surface that extends all the way to the fuselage centerline.\n Particularly used to get the vertical tail reference area for lateral\n stability calculations when the dimensions of the exposed tail are known.\n\n\n Assumptions:\n Assumes a simple trapezoidal half-wing shape.\n\n Source:\n Unknown\n \n Inputs:\n surface - a SUAVE Wing object with the fields:\n spans.projected - projected span (height for a vertical tail) of\n the exposed surface [meters]\n sweep - leading edge sweep of the aerodynamic surface [radians]\n chords.root - chord length at the junction between the tail and\n the fuselage [meters]\n chords.tip - chord length at the tip of the aerodynamic surface [meters] \n symmetric - Is the wing symmetric across the fuselage centerline?\n exposed_root_chord_offset - the displacement from the fuselage\n centerline to the exposed area's physical root chordline [meters]\n\n Outputs:\n ref_surface - a data dictionary with the fields:\n spans.projected - The span/height measured from the fuselage centerline [meters] \n area.reference - The area of the extended trapezoidal surface [meters**2] \n aspect_ratio - The aspect ratio of the extended surface [meters] \n chords.root - The chord of the extended trapezoidal surface\n where it meets the fuselage centerline [meters]\n root_LE_change - The change in the leading edge position of the\n surface compared to the smaller surface that only extended to the\n fuselage surface. This value is negative for sweptback surfaces [meters]\n \n Properties Used:\n N/A \n \"\"\"\n # Unpack inputs\n symm = surface.symmetric\n try:\n b1 = surface.spans.exposed * 0.5 * (2 - symm)\n except AttributeError:\n b1 = surface.spans.projected * 0.5 * (2 - symm)\n c_t = surface.chords.tip\n c_r1 = surface.chords.root\n Lambda = surface.sweeps.quarter_chord\n dh_center = surface.exposed_root_chord_offset\n\n #Compute reference area dimensions\n b = b1+dh_center\n c_root = c_t + (b/b1)*(c_r1-c_t)\n S = 0.5*b*(c_root+c_t)\n dx_LE = -dh_center*np.tan(Lambda)\n AR = b**2/S\n\n ref_surface = surface\n surface.extended = Data()\n surface.extended.spans = Data()\n surface.extended.areas = Data()\n surface.extended.chords = Data()\n ref_surface.extended.origin = np.array(surface.origin) * 1.\n ref_surface.extended.spans.projected = b * (1 + symm)\n ref_surface.extended.areas.reference = S * (1 + symm)\n ref_surface.extended.aspect_ratio = AR * (1 + symm)\n ref_surface.extended.chords.root = c_root\n ref_surface.extended.root_LE_change = dx_LE\n ref_surface.extended.origin[0] = ref_surface.origin[0] + dx_LE\n\n return ref_surface\n", "## @ingroup Analyses-Mission-Segments-Conditions\n# State.py\n#\n# Created: \n# Modified: Feb 2016, Andrew Wendorff\n# Jan 2020, M. 
Clarke\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\n# python imports\nimport numpy as np\n\n# SUAVE imports\nfrom .Conditions import Conditions\nfrom .Unknowns import Unknowns\nfrom .Residuals import Residuals\nfrom .Numerics import Numerics\n\nimport SUAVE\nfrom SUAVE.Core.Arrays import array_type\nfrom SUAVE.Core import DataOrdered\n\n# ----------------------------------------------------------------------\n# State\n# ----------------------------------------------------------------------\n\n## @ingroup Analyses-Mission-Segments-Conditions\nclass State(Conditions):\n \"\"\" Creates the State data structure for storing daata that solved in a mission\n \n Assumptions:\n None\n \n Source:\n None\n \"\"\" \n \n \n def __defaults__(self):\n \"\"\" This sets the default values.\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n None\n \n Outputs:\n None\n \n Properties Used:\n None\n \"\"\" \n \n self.tag = 'state'\n self.unknowns = Unknowns()\n self.conditions = Conditions()\n self.residuals = Residuals()\n self.numerics = Numerics()\n self.initials = Conditions()\n \n def expand_rows(self,rows,override=False):\n \"\"\" Makes a 1-D array the right size. Often used after a mission is initialized to size out the vectors to the\n right size. Will not overwrite an array if it already exists, unless override is True.\n \n Assumptions:\n Doesn't expand initials or numerics\n \n Source:\n N/A\n \n Inputs:\n rows [int]\n \n Outputs:\n None\n \n Properties Used:\n None\n \"\"\" \n \n # store\n self._size = rows\n \n for k,v in self.items(): \n try:\n rank = v.ndim\n except:\n rank = 0 \n # don't expand initials or numerics\n if k in ('initials','numerics'):\n continue\n \n # recursion\n elif isinstance(v,Conditions):\n v.expand_rows(rows,override=override)\n # need arrays here\n elif rank == 2:\n self[k] = np.resize(v,[rows,v.shape[1]])\n #: if type\n #: for each key,value \n \n \n## @ingroup Analyses-Mission-Segments-Conditions \nclass Container(State):\n def __defaults__(self):\n \"\"\" This sets the default values.\n \n Assumptions:\n Puts the segments in the right order\n \n Source:\n N/A\n \n Inputs:\n None\n \n Outputs:\n None\n \n Properties Used:\n None\n \"\"\" \n self.segments = DataOrdered()\n \n def merged(self):\n \"\"\" Combines the states of multiple segments\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n None\n \n Outputs:\n state_out [State()]\n \n Properties Used:\n None\n \"\"\" \n \n state_out = State()\n \n for i,(tag,sub_state) in enumerate(self.segments.items()):\n for key in ['unknowns','conditions','residuals']:\n if i == 0:\n state_out[key].update(sub_state[key])\n else:\n state_out[key] = state_out[key].do_recursive(append_array,sub_state[key])\n \n return state_out\n \nState.Container = Container\n\n## @ingroup Analyses-Mission-Segments-Conditions\ndef append_array(A,B=None):\n \"\"\" A stacking operation used by merged to put together data structures\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n A [array]\n B [array]\n\n Outputs:\n array\n\n Properties Used:\n None\n \"\"\" \n if isinstance(A,array_type) and isinstance(B,array_type):\n return np.vstack([A,B])\n else:\n return None", "## @ingroup Methods-Performance\n# electric_V_h_diagram.py\n#\n# Created: Jan 2021, J. 
Smart\n# Modified:\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport SUAVE\nfrom SUAVE.Core import Units, Data\n\nfrom SUAVE.Methods.Performance.propeller_single_point import propeller_single_point\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#------------------------------------------------------------------------------\n# Flight Envelope Function\n#------------------------------------------------------------------------------\n\n## @ingroup Methods-Performance\ndef electric_V_h_diagram(vehicle,\n analyses,\n CL_max,\n delta_isa = 0.,\n grid_points = 20.,\n altitude_ceiling = 2e4 * Units.ft,\n max_speed = 130 * Units['m/s'],\n test_omega = 800. * Units.rpm,\n display_plot = True,\n climb_rate_contours = [0.]\n ):\n \"\"\"electric_V_h_diagram(vehicle,\n analyses,\n delta_isa = 0.,\n grid_points = 20.,\n altitude_ceiling = 2e4 * Units.ft,\n max_speed = 130 * Units['m/s'],\n test_omega = 800. * Units.rpm,\n display_plot = True,\n climb_rate_contours = [0.]\n ):\n\n Calculates and optionally displays climb rate and contours thereof over\n a specified airspeed and altitude range. Climb rate determination ref.\n Raymer, \"Aircraft Design: A Conceptual Approach\"\n\n Sources:\n D. Raymer, \"Aircraft Design: A Conceptual Approach\"\n\n Assumptions:\n\n Assumes use of Battery Propeller Energy Network\n\n Inputs:\n\n vehicle SUAVE Vehicle Structure\n .mass_properties\n .takeoff [kg]\n analyses SUAVE Analyses Structure\n .atmosphere\n .planet\n .sea_level_gravity [m/s^2]\n delta_isa ISA Temperature Offset [deg. K/C]\n grid_points Num. Test Points per Dim. [Int]\n altitude_ceiling Maximum Test Altitude [User Set]\n max_speed Maximum Test Speed [User Set]\n test_omega Maximum Power Prop Speed [User Set]\n display_plot Flag for Plot Generation [Boolean]\n climb_rate_contours Climb Rates to Display [ft/min]\n\n Outputs:\n\n climb_rate Climb Rates at Test Points [ft/min]\n \"\"\"\n\n # Unpack Inputs\n\n g = analyses.atmosphere.planet.sea_level_gravity\n W = vehicle.mass_properties.takeoff * g\n S = vehicle.reference_area\n\n # Single Point Mission for Drag Determination\n\n def mini_mission(altitude, speed):\n\n mission = SUAVE.Analyses.Mission.Sequential_Segments()\n mission.tag = 'the_mission'\n\n segment = SUAVE.Analyses.Mission.Segments.Single_Point.Set_Speed_Set_Altitude_No_Propulsion()\n segment.tag = 'single_point'\n segment.analyses.extend(analyses)\n segment.altitude = altitude\n segment.air_speed = speed\n\n mission.append_segment(segment)\n\n return mission\n\n # Specify Altitude and Speed Sample Points\n\n alt_range = np.linspace(0., altitude_ceiling, num=grid_points, endpoint=True)\n speed_range = np.linspace(0., max_speed, num = grid_points, endpoint=False)\n\n # Initialize Climb Rate Grid\n\n climb_rate = np.zeros((grid_points, grid_points))\n\n # Loop Through Altitude and Speed Gridpoints\n\n for alt_idx in range(grid_points):\n\n altitude = alt_range[alt_idx]\n atmo_data = analyses.atmosphere.compute_values(altitude, delta_isa)\n rho = atmo_data.density\n Vs = np.sqrt(2*W/(rho*S*CL_max)) # Determine Vehicle Stall Speed\n\n for speed_idx in range(grid_points):\n\n V = speed_range[speed_idx]\n\n if V > Vs: # Only Bother Calculating if Vehicle is Above Stall Speed\n\n # Determine Vehicle Drag at Altitude and Speed\n\n mission = mini_mission(altitude, V)\n results = mission.evaluate()\n\n D = 
-results.segments.single_point.conditions.frames.wind.drag_force_vector[0][0]\n\n # Determine Propeller Power at Altitude and Speed\n\n P = propeller_single_point(vehicle.networks.battery_propeller,\n analyses,\n pitch=0.,\n omega=test_omega,\n altitude=altitude,\n delta_isa=0.,\n speed=V).power\n\n # Check if Propeller Power Exceeds Max Battery Power, Switch to Max Battery Power if So\n\n P = np.min([P, vehicle.networks.battery_propeller.battery.max_power])\n\n # Determine Climb Rate (ref. Raymer)\n\n cr = 1/W * (P - D*V)\n\n # If Climb Rate is Negative, Replace with 0 for Easy Contour-Finding\n\n climb_rate[speed_idx, alt_idx] = np.max([0., cr])\n\n\n climb_rate = climb_rate / Units['ft/min']\n\n if display_plot:\n\n # Get Speed and Altitude to Agree with Climb Rate Dimensions\n\n speed_space, alt_space = np.meshgrid(speed_range, alt_range)\n speed_space = np.transpose(speed_space)\n alt_space = np.transpose(alt_space) / Units.ft\n\n # Make Contour Plot of Climb Rates\n\n CS = plt.contour(speed_space, alt_space, climb_rate, levels = climb_rate_contours)\n plt.xlabel('Airspeed (m/s)')\n plt.ylabel('Altitude (ft)')\n plt.title('Climb Rate (ft/min)')\n plt.clabel(CS)\n\n plt.show()\n\n return climb_rate", "## @ingroup templates\n# Example_Attribute.py\n# \n# Created: Jan 2015, J. Dawson\n# Modified:\n\n## style note --\n## since this is an Attribute class, it is Camel_Case_With_Underscore()\n## it should not have any major analysis methods, \n## only data manipulation methods\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n## remove any unnecessary imports\n\n# suave imports\n## these should start with SUAVE, unless importing locally\nfrom SUAVE.Core import (\n Data, Container\n)\n\n# python imports\nimport os, sys, shutil\nfrom warnings import warn\n\n# package imports\nimport numpy as np\nimport scipy as sp\n# import pylab as plt\n\n\n# ----------------------------------------------------------------------\n# Attribute\n# ----------------------------------------------------------------------\n\n## @ingroup templates\nclass Example_Attribute(Data):\n \"\"\"<Description>\n \n Assumptions:\n <any assumptions>\n \n Source:\n <source>\n \"\"\"\n \n def __defaults__(self):\n \"\"\"<Description>\n \n Assumptions:\n <any assumptions>\n \n Source:\n <source>\n \n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n \n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n \n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n \"\"\" \n # default attributes, \n self.area = None # [units]\n self.taper = None # [units]\n \n \n def __check__(self):\n \"\"\"<Description>\n \n Assumptions:\n <any assumptions>\n \n Source:\n <source>\n \n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n \n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n \n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n \"\"\" \n # called after initialized data\n # use to check the data's fields, and modify as needed\n # will not recieve any inputs other than self\n \n # for example\n if self.taper == 10:\n self.area = 20\n \n def do_this(input1,input2=None):\n \"\"\"<Description>\n \n Assumptions:\n <any assumptions>\n \n Source:\n <source>\n \n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n \n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n \n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n \"\"\" \n \n # unpack inputs\n var1 = input1.var1\n 
var2 = inputs.var2\n \n # setup\n var3 = var1 * var2\n \n # process\n magic = np.log(var3)\n \n # packup outputs\n output = Data()\n output.magic = magic\n output.var3 = var3\n \n return output\n \n \n# ----------------------------------------------------------------------\n# Attribute Container\n# ----------------------------------------------------------------------\n## @ingroup templates\nclass Container(Container):\n \"\"\"<Description>\n \n Assumptions:\n <any assumptions>\n \n Source:\n <source>\n \"\"\"\n pass\n\n# add to attribute\nExample_Attribute.Container = Container \n \n \n# ----------------------------------------------------------------------\n# Helper Functions\n# ----------------------------------------------------------------------\n# these will not be available in the SUAVE namespace\n## @ingroup templates\ndef helper_function(input1,inputs2=None):\n \"\"\"<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n N/A\n \"\"\" \n \n # unpack inputs\n var1 = input1.var1\n var2 = inputs.var2\n \n # setup\n var3 = var1 * var2\n \n # process\n magic = np.log(var3)\n \n # packup outputs\n output = Data()\n output.magic = magic\n output.var3 = var3\n \n return output\n \n \n# ----------------------------------------------------------------------\n# Unit Tests\n# ----------------------------------------------------------------------\n# this will run from command line, put simple tests for your code here\nif __name__ == '__main__': \n raise RuntimeError('test failed, not implemented')\n\n\n\n\n\n", "# State.py\n#\n# Created: \n# Modified: Feb 2016, Andrew Wendorff\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\n# python imports\nimport numpy as np\n\n# SUAVE imports\nfrom Conditions import Conditions\nfrom Unknowns import Unknowns\nfrom Residuals import Residuals\nfrom Numerics import Numerics\n\nimport SUAVE\nfrom SUAVE.Core.Arrays import array_type\nfrom SUAVE.Core import DataOrdered\n\n# ----------------------------------------------------------------------\n# State\n# ----------------------------------------------------------------------\n\nclass State(Conditions):\n \n def __defaults__(self):\n \n self.unknowns = Unknowns()\n \n self.conditions = Conditions()\n \n self.residuals = Residuals()\n \n self.numerics = Numerics()\n \n self.initials = Conditions()\n \n \n def expand_rows(self,rows):\n \n # store\n self._size = rows\n \n for k,v in self.iteritems():\n \n # don't expand initials or numerics\n if k in ('initials','numerics'):\n continue\n \n # recursion\n elif isinstance(v,Conditions):\n v.expand_rows(rows)\n # need arrays here\n elif np.rank(v) == 2:\n self[k] = np.resize(v,[rows,v.shape[1]])\n #: if type\n #: for each key,value \n \n \n \nclass Container(State):\n def __defaults__(self):\n self.segments = DataOrdered()\n \n def merged(self):\n \n state_out = State()\n \n for i,(tag,sub_state) in enumerate(self.segments.items()):\n for key in ['unknowns','conditions','residuals']:\n if i == 0:\n state_out[key].update(sub_state[key])\n else:\n state_out[key] = state_out[key].do_recursive(append_array,sub_state[key])\n \n return state_out\n \nState.Container = Container\n\n\ndef append_array(A,B=None):\n if isinstance(A,array_type) and isinstance(B,array_type):\n return np.vstack([A,B])\n else:\n return 
None", "# electric_V_h_diagram.py\n#\n# Created: Jan 2021, J. Smart\n# Modified:\n\n#-------------------------------------------------------------------------------\n# Imports\n#_______________________________________________________________________________\n\nimport SUAVE\n\nfrom SUAVE.Core import Units, Data\nfrom SUAVE.Methods.Performance.electric_V_h_diagram import electric_V_h_diagram\n\nimport numpy as np\n\nimport sys\nsys.path.append('../Vehicles')\n\nfrom X57_Maxwell_Mod2 import vehicle_setup\n\n#-------------------------------------------------------------------------------\n# Test Function\n#-------------------------------------------------------------------------------\n\ndef main():\n\n vehicle = vehicle_setup()\n\n analyses = SUAVE.Analyses.Vehicle()\n\n sizing = SUAVE.Analyses.Sizing.Sizing()\n sizing.features.vehicle = vehicle\n analyses.append(sizing)\n\n weights = SUAVE.Analyses.Weights.Weights_Transport()\n weights.vehicle = vehicle\n analyses.append(weights)\n\n aerodynamics = SUAVE.Analyses.Aerodynamics.AERODAS()\n aerodynamics.geometry = vehicle\n aerodynamics.settings.drag_coefficient_increment = 0.0000\n analyses.append(aerodynamics)\n\n stability = SUAVE.Analyses.Stability.Fidelity_Zero()\n stability.geometry = vehicle\n analyses.append(stability)\n\n energy = SUAVE.Analyses.Energy.Energy()\n energy.network = vehicle.networks\n analyses.append(energy)\n\n planet = SUAVE.Analyses.Planets.Planet()\n analyses.append(planet)\n\n atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()\n atmosphere.features.planet = planet.features\n analyses.append(atmosphere)\n\n analyses.finalize()\n\n climb_rate = electric_V_h_diagram(vehicle,\n analyses,\n CL_max=1.4,\n delta_isa=0.,\n grid_points=5,\n altitude_ceiling= 2e4 * Units.ft,\n max_speed=130 * Units['m/s'],\n test_omega= 1000 * Units.rpm,\n display_plot=True)\n\n climb_rate_r = [[ 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. ],\n [719.7179757 , 582.17272069, 452.68248026, 329.12481042, 212.19190451],\n [ 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 0. , 0. ]]\n\n assert (np.all(np.nan_to_num(np.abs(climb_rate-climb_rate_r)/climb_rate_r) < 1e-6)), \"Electric V_h Diagram Regression Failed\"\n\n return\n\nif __name__ == '__main__':\n main()\n\n print('Electric V_h Diagram Regression Passed.')" ]
[ [ "numpy.ones_like", "numpy.linspace", "numpy.flipud", "numpy.cos", "numpy.sin", "numpy.concatenate", "numpy.append", "numpy.array" ], [ "numpy.atleast_2d", "numpy.linalg.norm" ], [ "numpy.ceil" ], [ "numpy.tan", "numpy.ones_like" ], [ "numpy.eye", "numpy.array", "numpy.abs" ], [ "numpy.linalg.solve", "numpy.ones_like", "numpy.abs", "numpy.linspace", "numpy.tile", "numpy.cos", "numpy.sin", "numpy.atleast_2d", "numpy.size", "numpy.logical_and", "numpy.sum" ], [ "numpy.sqrt", "numpy.array", "numpy.abs" ], [ "numpy.atleast_2d", "numpy.ones_like", "numpy.linalg.norm" ], [ "numpy.tan", "numpy.array" ], [ "numpy.resize", "numpy.vstack" ], [ "matplotlib.pyplot.clabel", "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.title", "numpy.min", "numpy.max", "matplotlib.pyplot.contour", "numpy.transpose", "matplotlib.pyplot.xlabel", "numpy.meshgrid", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.log" ], [ "numpy.rank", "numpy.resize", "numpy.vstack" ], [ "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pitchdarkdata/InfluxDays2021_Demo
[ "d5625566cefd983203983e158f3325cfb2c16029" ]
[ "API/gerrit_api.py" ]
[ "\"\"\"\nThis Module interacts with Gerrit and retrieves Data from Gerrit\n\"\"\"\n\nimport os\nimport json\nimport logging\nimport argparse\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom json.decoder import JSONDecodeError\nfrom urllib.parse import urlunsplit, urlencode\nfrom typing import Tuple, Union\ntry:\n from requests import __version__, Session, adapters, exceptions, urllib3, status_codes\n logging.debug(f'Available request module of version {__version__}')\nexcept ImportError:\n logging.error('Please install requests module. Use pip install requests.')\n\nclass GerritApi:\n \"\"\"\n *Class name :* GerritHandler\n\n *Description :* Class to retrieve data from Gerrit\n \"\"\"\n GET_ALL_REPO_URI = \"/projects/?d\"\n GET_ALL_CHANGES_URI = \"/changes/?q=repo:{repo_name}\"\n GET_ALL_ACTIVE_USERS_URI = \"/accounts/?q=is:active\"\n GET_COMMITS_BY_AGE = \"/changes/?q=-age:\"\n GET_COMMITS_USING_AFTER = \"/changes/?q=after:\"\n\n def __init__(self, gerrit_server: str, username: str=None, password: str=None):\n \"\"\"\n *Method description :* Initializing values for Gerrit operations from OVF\n\n :param username: Username to login to Gerrit\n :type username: String\n :param password: Password to login to Gerrit\n :type password: String\n :param url: Gerrit URL to get commit details\n :type url: String\n \"\"\"\n self.gerrit_username = username\n self.gerrit_password = password\n self.gerrit_url = f\"https://{gerrit_server}\"\n logging.debug(f\"GerritDetails:: {self.gerrit_url}, {self.gerrit_username}, {self.gerrit_password}\")\n if username and password:\n self.rest_engine = RestEngine(auth=(self.gerrit_username, self.gerrit_password))\n else:\n self.rest_engine = RestEngine()\n\n def get_all_projects(self) -> dict:\n \"\"\"\n Method to get all repositories\n\n :returns: :class:`repo_details`: All repo details\n :rtype: :class:`repo_details`: Dict\n \"\"\"\n all_repo_details = {}\n get_all_repo_url = f\"{self.gerrit_url}{GerritApi.GET_ALL_REPO_URI}\"\n all_repo_resp = self.decode_response(self.rest_engine.rest_request(get_all_repo_url))\n for key, value in all_repo_resp.items():\n all_repo_details[key] = {\"id\": value.get(\"id\"), \"description\": value.get(\"description\"),\n \"state\": value.get(\"state\")}\n logging.info(f\"List of All repositories : {all_repo_details} {len(all_repo_details)}\")\n return all_repo_details\n\n def get_all_active_projects(self) -> list:\n \"\"\"\n Method to get all active repositories\n\n :returns: :class:`active_repo_list`: List of active repositories\n :rtype: :class:`active_repo_list`: List\n \"\"\"\n active_repo_list = []\n all_repo_details = self.get_all_projects()\n for key, value in all_repo_details.items():\n if value[\"state\"] == \"ACTIVE\":\n active_repo_list.append(key)\n logging.info(f\"List of active repositories : {active_repo_list} {len(active_repo_list)}\")\n return active_repo_list\n\n def get_active_user_accounts(self) -> list:\n \"\"\"\n *Method description :* Method to get active user accounts in server\n\n :returns: :class:`all_users_details`: List of commit changes as dict\n :rtype: :class:`all_users_details`: list\n \"\"\"\n all_users_details = []\n all_users_list, mocker_response = [], []\n all_users_url = f\"{self.gerrit_url}{GerritApi.GET_ALL_ACTIVE_USERS_URI}&S=0\"\n response = self.decode_response(self.rest_engine.rest_request(all_users_url))\n all_users_list.extend(response)\n mocker_response = self.no_limit_mocker(response, mocker_response,\n 
url_to_be_used=f\"{self.gerrit_url}{GerritApi.GET_ALL_ACTIVE_USERS_URI}\")\n if all_users_list:\n all_users_list.extend(mocker_response)\n logging.info(f\"Number Of Active User Accounts in Gerrit: {len(all_users_list)}\")\n for each_user in all_users_list:\n user_id = each_user.get(\"_account_id\")\n user_details_url = f\"{self.gerrit_url}/accounts/{user_id}/detail\"\n detailed_response = self.decode_response(self.rest_engine.rest_request(user_details_url))\n all_users_details.append(detailed_response)\n logging.info(f\"Active User Account Details in Gerrit: {all_users_details}\")\n return all_users_details\n\n def get_commit_details_in_given_period(self, start=None, duration=\"24Hours\", stop=datetime.utcnow()):\n all_commits_list, mocker_response = [], []\n if not start:\n start = self.get_start_time(duration, stop)\n commits_url = f\"{self.gerrit_url}{GerritApi.GET_COMMITS_USING_AFTER}\\\"{start}\\\"&S=0\"\n print(commits_url)\n response = self.decode_response(self.rest_engine.rest_request(commits_url))\n all_commits_list.extend(response)\n mocker_response = self.no_limit_mocker(response, mocker_response,\n url_to_be_used=f\"{self.gerrit_url}{GerritApi.GET_COMMITS_USING_AFTER}\\\"{start}\\\"\")\n if mocker_response:\n all_commits_list.extend(mocker_response)\n for each_commit in all_commits_list:\n owner_account_url = f\"{self.gerrit_url}/accounts/{each_commit.get('owner').get('_account_id')}/detail\"\n each_commit[\"owner\"] = self.decode_response(self.rest_engine.rest_request(owner_account_url)).get(\"name\")\n if each_commit.get(\"submitter\"):\n submitter_id = each_commit.get('submitter').get('_account_id')\n submit_account_url = f\"{self.gerrit_url}/accounts/{submitter_id}/detail\"\n each_commit[\"submitter\"] = self.decode_response(self.rest_engine.rest_request(\n submit_account_url)).get(\"name\")\n print(f\"Total commits from {start} is: {len(all_commits_list)}\")\n return all_commits_list\n\n @staticmethod\n def get_start_time(duration, stop):\n if \"minutes\" in str(duration).lower():\n min_delta = int(str(duration).lower().strip(\"minutes\"))\n start = stop - timedelta(minutes=min_delta)\n if \"hours\" in str(duration).lower():\n hour_delta = int(str(duration).lower().strip(\"hours\"))\n start = stop - timedelta(hours=hour_delta)\n elif \"days\" in str(duration).lower():\n day_delta = int(str(duration).lower().strip(\"days\"))\n start = stop - timedelta(days=day_delta)\n elif \"months\" in str(duration).lower():\n month_delta = int(str(duration).lower().strip(\"months\"))\n start = stop - timedelta(months=month_delta)\n return start\n\n @staticmethod\n def decode_response(response: str) -> dict:\n \"\"\"\n *Method description :* Method to decode rest response with Gerrit Magic Prefix\n\n :param response: Raw REST Response Content\n :type response: String\n :raises: :class:`ValueError`: Invaid Response Json Content\n :returns: :class:`resp_dict`: Dictionary of the given Response content\n :rtype: :class:`resp_dict`: Dictionary\n \"\"\"\n output = response[1]\n # prefix that comes with the json responses.\n gerrit_magic_json_prefix = \")]}'\\n\"\n if str(response[0]) == '200' and isinstance(response[1], str):\n if response[1].startswith(gerrit_magic_json_prefix):\n output = response[1][len(gerrit_magic_json_prefix):]\n try:\n output = json.loads(output)\n except ValueError:\n logging.error(f\"Invalid Json in response {output}\")\n else:\n logging.error(f'Rest Call Failed with the status code {response[0]} and response {response[1]}')\n return output\n\n def 
no_limit_mocker(self, response: str, mocker_response: list, url_to_be_used: str,\n def_limit: int =0) -> list:\n \"\"\"\n *Method description :* Method to mock no_limit option in Gerrit Server\n\n :param response: Previous GET Call Response\n :type response: String\n :param mocker_response: Mocker response list on which no_limit responses are accumulated\n :type mocker_response: list\n :param url_to_be_used: URL to be used for REST Call in no_limits mocker block\n :type url_to_be_used: String\n :param def_limit: Number Of Commits Limit for GET call\n :type def_limit: Integer\n :returns: :class:`mocker_response`: Get REST Response in List\n :rtype: :class:`mocker_response`: List\n \"\"\"\n if \"_more_\" in str(response):\n def_limit = def_limit + 500\n start_limit = def_limit - 500 + 1\n logging.info(f\"Fetching {start_limit} - {def_limit} Records. Please Wait...\")\n new_url = f\"{url_to_be_used}&S={str(def_limit)}&n=500\"\n int_response = self.decode_response(self.rest_engine.rest_request(new_url))\n mocker_response.extend(int_response)\n self.no_limit_mocker(int_response, mocker_response, url_to_be_used, def_limit)\n else:\n def_limit = def_limit + 500\n new_url = f\"{url_to_be_used}&S={str(def_limit)}&n=500\"\n int_response = self.decode_response(self.rest_engine.rest_request(new_url))\n mocker_response.extend(int_response)\n return mocker_response\n\nclass RestEngine:\n \"\"\"\n Class to perform rest operations like PUT, PATCH, POST, GET\n DELETE, HEAD, OPTIONS.\n \"\"\"\n def __init__(self, **session_args: str):\n \"\"\"\n *Method description :* Initialization method.\n\n 1. Initialize a http session with the session parameters passed by user\n 2. Default authentication is set to (username, password) as (admin, admin).\n And a header with json content type is added.\n 3. 
These session level parameters are overwritten when the same are provided\n at the method level.\n\n :param session_args: Rest arguments that can be set at the session level.\n Supported: 'headers', 'cookies', 'auth', 'proxies', 'hooks',\n 'params', 'verify', 'cert', 'stream', 'trust_env', 'max_redirects'\n :type session_args: dict\n \"\"\"\n self.http_session = Session()\n self.http_session.auth = session_args.get('auth')\n self.http_session.headers.update(session_args.get('headers', {}))\n #as verify is set to False,requests in this session will accept any TLS certificate\n #will ignore SSL certificate verification\n self.http_session.verify = session_args.get('verify', False)\n #Retries to establish a http secure connection.\n https_adapter = adapters.HTTPAdapter(max_retries=3)\n self.http_session.mount('https://', https_adapter)\n #To set other session parameters supported by requests\n self.http_session.params = session_args.get('params')\n self.http_session.proxies = session_args.get('proxies')\n self.http_session.cert = session_args.get('cert')\n self.http_session.hooks = session_args.get('hooks')\n self.http_session.stream = session_args.get('stream')\n self.http_session.max_redirects = session_args.get('max_redirects')\n self.http_session.cookies.update(session_args.get('cookies', {}))\n self.http_session.trust_env = session_args.get('trust_env')\n\n @staticmethod\n def build_api_url(netloc: str, scheme: str =\"https\", path: str =\"\", query: Union[str, dict]=\"\",\n fragments: str =\"\") -> str:\n \"\"\"Generates complete url from the inputs provided by the user.\n URL format : scheme://netloc/path?query#fragments\n\n #query str: page=12\n eg : https://docs.python.com/tutorial/index.html?page=12#datatypes\n\n #query dict: {page:12, type:tuple)\n eg : https://docs.python.com/tutorial/index.html?page=12&type=tuple#datatypes\n\n :param netloc: Network location part. Domain name should be given as input.\n (eg): example.com, 168.0.0.1:8080, jenkins.com:8443\n :type netloc: str\n :param scheme: URL scheme specifier. Can be either http or https, defaults to \"https\"\n :type scheme: str, optional\n :param path: Hierarchical path. Additional path to be added to the netloc, defaults to \"\"\n :type path: str, optional\n :param query: query string needed to be added. It will be added after the \"?\" symbol.\n Can be given directly as string or dict with multiple key value pairs. if multiple key\n value pairs are given then query string will be concatenated with \"&\" symbol, defaults to \"\"\n :type query: str or dict, optional\n :param fragments: Additional piece of information to be added to the url. 
This will be added\n after the \"#\" symbol, defaults to \"\"\n :type fragments: str, optional\n :return: complete api url\n :rtype: str\n \"\"\"\n query_str = urlencode(query) if isinstance(query, dict) else query\n api_url = urlunsplit((scheme, netloc, path, query_str, fragments))\n logging.debug(f\"Api url formed --> {api_url}\")\n return api_url\n\n def rest_request(self, uri: str, operation: str ='GET', **func_args: str) -> Tuple[int, str, dict]:\n \"\"\"\n *Method description :* Common rest request method be called for performing the rest operations.\n\n :param uri: rest uri\n :type uri: str\n :param operation: rest operation, could be GET, POST, PATCH, DELETE, PUT, HEAD, OPTIONS.\n :type operation: str\n :param func_args: Rest arguments such as 'auth', 'cookies', 'data', 'files',\n 'headers', 'hooks', 'json', 'params', 'timeout', 'allow_redirects', 'proxies',\n 'hooks', 'stream', 'verify', 'cert' that can be set at the method request level.\n Overrides the session arguments.\n :type func_args: dict\n :returns: :class:`response_code`: Response code of the rest request call performed\n :class:`response`: Response received from the rest request call\n :class:'response_headers`: Headers in response\n :rtype: :class:`response_code`: int\n :class:`response`: dict/str\n :class:`response_headers`: dict\n \"\"\"\n response_code, response, response_headers = None, None, None\n #suppress Insecure certificate warning\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n try:\n rest_response = self.http_session.request(operation.upper(), uri, **func_args)\n logging.debug(f'Request uri : {rest_response.request.url}')\n logging.debug(f'Request method : {rest_response.request.method}')\n logging.debug(f'Request headers : {rest_response.request.headers}')\n logging.debug(f'Request data : {rest_response.request.body}')\n response_code, response, response_headers = rest_response.status_code, rest_response.content, rest_response.headers\n #Uncomment the below line if status code has to raise an exception/error\n #rest_response.raise_for_status()\n if response:\n try:\n response = rest_response.json()\n except JSONDecodeError:\n #default utf-8 encoding is done.\n response = rest_response.text\n except exceptions.InvalidURL:\n logging.error(f'The uri {uri} passed for this {operation.upper()} method is invalid')\n except exceptions.HTTPError:\n logging.error(f'The {operation.upper()} method failed with the status code {response_code}' \\\n f' and status message would be any of {status_codes._codes[response_code]}.')\n except exceptions.SSLError:\n logging.error('SSL Certificate verification failed.')\n except exceptions.ConnectionError:\n logging.error(f'Failed to establish a connection with {uri}')\n except exceptions.InvalidHeader:\n logging.error(f'Invalid header exception. Request headers added : {rest_response.request.headers}')\n except exceptions.TooManyRedirects:\n logging.error('The URL redirects has crossed the maximum limit of 30.')\n except exceptions.Timeout:\n logging.error(f'{operation.upper()} request timed out. Can be either Connection or Read timeout.')\n except exceptions.RequestException:\n logging.error('Exception occurred while handling request. 
Please check if the input passed are correct.')\n except TypeError:\n logging.error('Please re-check if the input arguments passed are valid.')\n logging.debug(f'Rest Response : {response}')\n logging.debug(f'Rest Response status code : {response_code}')\n logging.debug(f'Rest Response headers : {response_headers}')\n if response_code:\n logging.debug(f'Possible status message for {response_code} : {status_codes._codes[response_code]}')\n return response_code, response, response_headers\n\nclass Common:\n \"\"\"\n Class to perform rest operations like PUT, PATCH, POST, GET\n DELETE, HEAD, OPTIONS.\n \"\"\"\n\n @staticmethod\n def convert_json_to_dict(json_file: str) -> Union[dict, None]:\n \"\"\"Converts the input json file into dictionary\n\n :param json_file: Name of the json file to be converted\n :type json_file: str\n :return: Converted dictionary\n :rtype: dict or None\n \"\"\"\n try:\n assert os.path.exists(json_file)\n with open(json_file, 'r') as file_obj:\n data_dict = json.load(file_obj)\n return data_dict\n except AssertionError:\n logging.error(f'Json file {json_file} doesnot exists')\n except json.decoder.JSONDecodeError as decode_err:\n logging.error(f'unable to parse {json_file}. Kindly validate the json file. Error occured: {decode_err}')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--servername\", type=str, help=\"Gerrit Server Name/IP\")\n parser.add_argument(\"-u\", \"--user\", type=str, help=\"Gerrit Login Username\", default=None)\n parser.add_argument(\"-p\", \"--password\", type=str, help=\"Gerrit Login Password\", default=None)\n parser.add_argument(\"-d\", \"--duration\", type=str, help=\"Duration for which gerrit changes to be fetched\\n\\\n Supported are Minutes, Hours, Days, Months. Examples: 120Minutes, 48Hours, 2Days, 1Month \\n\\\n Default : 24Hours\", default=\"24Hours\")\n args = parser.parse_args()\n if args.servername and args.duration:\n obj = GerritApi(f\"{args.servername}\")\n commits_list = obj.get_commit_details_in_given_period(duration=args.duration)\n print(f\"Gerrit commits for given {args.duration} is: {len(commits_list)}\\n\")\n print(\"Gerrit Commits Details are saved in new_commits.csv file\")\n cl_df = pd.DataFrame(commits_list)\n cl_df.to_csv('new_commits.csv')\n else:\n print(\"Please pass Gerrit server name with -s and duration with -d argument !!!\")\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
cenyk1230/cogdl
[ "fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce", "fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce", "fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce", "fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce", "fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce", "fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce" ]
[ "cogdl/models/nn/lightgcn.py", "cogdl/oag/bert_model.py", "cogdl/tasks/oag_zero_shot_infer.py", "cogdl/models/emb/hin2vec.py", "scripts/display_data.py", "cogdl/models/nn/graphsaint.py" ]
[ "\"\"\"\nfrom from https://github.com/huangtinglin/MixGCF\n\nCreated on October 1, 2020\n\n@author: Tinglin Huang ([email protected])\n\"\"\"\nimport torch\nimport torch.nn as nn\n\nfrom cogdl.models import BaseModel, register_model\n\n\nclass GraphConv(nn.Module):\n \"\"\"\n Graph Convolutional Network\n \"\"\"\n\n def __init__(self, n_hops, n_users, interact_mat, edge_dropout_rate=0.5, mess_dropout_rate=0.1):\n super(GraphConv, self).__init__()\n\n self.interact_mat = interact_mat\n self.n_users = n_users\n self.n_hops = n_hops\n self.edge_dropout_rate = edge_dropout_rate\n self.mess_dropout_rate = mess_dropout_rate\n\n self.dropout = nn.Dropout(p=mess_dropout_rate) # mess dropout\n\n def to(self, *args, **kwargs):\n self = super().to(*args, **kwargs)\n self.interact_mat = self.interact_mat.to(*args, **kwargs)\n return self\n\n def _sparse_dropout(self, x, rate=0.5):\n noise_shape = x._nnz()\n\n random_tensor = rate\n random_tensor += torch.rand(noise_shape).to(x.device)\n dropout_mask = torch.floor(random_tensor).type(torch.bool)\n i = x._indices()\n v = x._values()\n\n i = i[:, dropout_mask]\n v = v[dropout_mask]\n\n out = torch.sparse.FloatTensor(i, v, x.shape).to(x.device)\n return out * (1.0 / (1 - rate))\n\n def forward(self, user_embed, item_embed, mess_dropout=True, edge_dropout=True):\n # user_embed: [n_users, channel]\n # item_embed: [n_items, channel]\n\n # all_embed: [n_users+n_items, channel]\n all_embed = torch.cat([user_embed, item_embed], dim=0)\n agg_embed = all_embed\n embs = [all_embed]\n\n for hop in range(self.n_hops):\n interact_mat = (\n self._sparse_dropout(self.interact_mat, self.edge_dropout_rate) if edge_dropout else self.interact_mat\n )\n\n agg_embed = torch.sparse.mm(interact_mat, agg_embed)\n if mess_dropout:\n agg_embed = self.dropout(agg_embed)\n # agg_embed = F.normalize(agg_embed)\n embs.append(agg_embed)\n embs = torch.stack(embs, dim=1) # [n_entity, n_hops+1, emb_size]\n return embs[: self.n_users, :], embs[self.n_users :, :]\n\n\n@register_model(\"lightgcn\")\nclass LightGCN(BaseModel):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--dim', type=int, default=64, help='embedding size')\n parser.add_argument('--l2', type=float, default=1e-4, help='l2 regularization weight, 1e-5 for NGCF')\n parser.add_argument(\"--mess_dropout\", type=bool, default=False, help=\"consider mess dropout or not\")\n parser.add_argument(\"--mess_dropout_rate\", type=float, default=0.1, help=\"ratio of mess dropout\")\n parser.add_argument(\"--edge_dropout\", type=bool, default=False, help=\"consider edge dropout or not\")\n parser.add_argument(\"--edge_dropout_rate\", type=float, default=0.1, help=\"ratio of edge sampling\")\n parser.add_argument(\"--ns\", type=str, default='mixgcf', help=\"rns,mixgcf\")\n parser.add_argument(\"--K\", type=int, default=1, help=\"number of negative in K-pair loss\")\n parser.add_argument(\"--n_negs\", type=int, default=64, help=\"number of candidate negative\")\n parser.add_argument(\"--pool\", type=str, default='mean', help=\"[concat, mean, sum, final]\")\n parser.add_argument(\"--context_hops\", type=int, default=3, help=\"hop\")\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.n_users,\n args.n_items,\n args.l2,\n args.dim,\n args.context_hops,\n args.mess_dropout,\n args.mess_dropout_rate,\n args.edge_dropout,\n args.edge_dropout_rate,\n args.pool,\n args.n_negs,\n args.ns,\n args.K,\n args.adj_mat,\n )\n\n 
def __init__(\n self,\n n_users,\n n_items,\n l2,\n dim,\n context_hops,\n mess_dropout,\n mess_dropout_rate,\n edge_dropout,\n edge_dropout_rate,\n pool,\n n_negs,\n ns,\n K,\n adj_mat,\n ):\n super(LightGCN, self).__init__()\n\n self.n_users = n_users\n self.n_items = n_items\n self.adj_mat = adj_mat\n\n self.decay = l2\n self.emb_size = dim\n self.context_hops = context_hops\n self.mess_dropout = mess_dropout\n self.mess_dropout_rate = mess_dropout_rate\n self.edge_dropout = edge_dropout\n self.edge_dropout_rate = edge_dropout_rate\n self.pool = pool\n self.n_negs = n_negs\n self.ns = ns\n self.K = K\n\n self._init_weight()\n self.user_embed = nn.Parameter(self.user_embed)\n self.item_embed = nn.Parameter(self.item_embed)\n\n self.gcn = self._init_model()\n\n def _init_weight(self):\n initializer = nn.init.xavier_uniform_\n self.user_embed = initializer(torch.empty(self.n_users, self.emb_size))\n self.item_embed = initializer(torch.empty(self.n_items, self.emb_size))\n\n # [n_users+n_items, n_users+n_items]\n self.sparse_norm_adj = self._convert_sp_mat_to_sp_tensor(self.adj_mat)\n\n def _init_model(self):\n return GraphConv(\n n_hops=self.context_hops,\n n_users=self.n_users,\n interact_mat=self.sparse_norm_adj,\n edge_dropout_rate=self.edge_dropout_rate,\n mess_dropout_rate=self.mess_dropout_rate,\n )\n\n def _convert_sp_mat_to_sp_tensor(self, X):\n coo = X.tocoo()\n i = torch.LongTensor([coo.row, coo.col])\n v = torch.from_numpy(coo.data).float()\n return torch.sparse.FloatTensor(i, v, coo.shape)\n\n def to(self, *args, **kwargs):\n self = super().to(*args, **kwargs)\n # self.sparse_norm_adj = self.sparse_norm_adj.to(*args, **kwargs)\n self.gcn.to(*args, **kwargs)\n return self\n\n def forward(self, batch=None):\n user = batch[\"users\"]\n pos_item = batch[\"pos_items\"]\n neg_item = batch[\"neg_items\"] # [batch_size, n_negs * K]\n\n # user_gcn_emb: [n_users, channel]\n # item_gcn_emb: [n_users, channel]\n user_gcn_emb, item_gcn_emb = self.gcn(\n self.user_embed, self.item_embed, edge_dropout=self.edge_dropout, mess_dropout=self.mess_dropout\n )\n\n if self.ns == \"rns\": # n_negs = 1\n neg_gcn_embs = item_gcn_emb[neg_item[:, : self.K]]\n else:\n neg_gcn_embs = []\n for k in range(self.K):\n neg_gcn_embs.append(\n self.negative_sampling(\n user_gcn_emb, item_gcn_emb, user, neg_item[:, k * self.n_negs : (k + 1) * self.n_negs], pos_item\n )\n )\n neg_gcn_embs = torch.stack(neg_gcn_embs, dim=1)\n\n return self.create_bpr_loss(user_gcn_emb[user], item_gcn_emb[pos_item], neg_gcn_embs)\n\n def negative_sampling(self, user_gcn_emb, item_gcn_emb, user, neg_candidates, pos_item):\n batch_size = user.shape[0]\n s_e, p_e = user_gcn_emb[user], item_gcn_emb[pos_item] # [batch_size, n_hops+1, channel]\n if self.pool != \"concat\":\n s_e = self.pooling(s_e).unsqueeze(dim=1)\n\n \"\"\"positive mixing\"\"\"\n seed = torch.rand(batch_size, 1, p_e.shape[1], 1).to(p_e.device) # (0, 1)\n n_e = item_gcn_emb[neg_candidates] # [batch_size, n_negs, n_hops, channel]\n n_e_ = seed * p_e.unsqueeze(dim=1) + (1 - seed) * n_e # mixing\n\n \"\"\"hop mixing\"\"\"\n scores = (s_e.unsqueeze(dim=1) * n_e_).sum(dim=-1) # [batch_size, n_negs, n_hops+1]\n indices = torch.max(scores, dim=1)[1].detach()\n neg_items_emb_ = n_e_.permute([0, 2, 1, 3]) # [batch_size, n_hops+1, n_negs, channel]\n # [batch_size, n_hops+1, channel]\n return neg_items_emb_[[[i] for i in range(batch_size)], range(neg_items_emb_.shape[1]), indices, :]\n\n def pooling(self, embeddings):\n # [-1, n_hops, channel]\n if self.pool == \"mean\":\n return 
embeddings.mean(dim=1)\n elif self.pool == \"sum\":\n return embeddings.sum(dim=1)\n elif self.pool == \"concat\":\n return embeddings.view(embeddings.shape[0], -1)\n else: # final\n return embeddings[:, -1, :]\n\n def generate(self, split=True):\n user_gcn_emb, item_gcn_emb = self.gcn(self.user_embed, self.item_embed, edge_dropout=False, mess_dropout=False)\n user_gcn_emb, item_gcn_emb = self.pooling(user_gcn_emb), self.pooling(item_gcn_emb)\n if split:\n return user_gcn_emb, item_gcn_emb\n else:\n return torch.cat([user_gcn_emb, item_gcn_emb], dim=0)\n\n def rating(self, u_g_embeddings=None, i_g_embeddings=None):\n return torch.matmul(u_g_embeddings, i_g_embeddings.t())\n\n def create_bpr_loss(self, user_gcn_emb, pos_gcn_embs, neg_gcn_embs):\n # user_gcn_emb: [batch_size, n_hops+1, channel]\n # pos_gcn_embs: [batch_size, n_hops+1, channel]\n # neg_gcn_embs: [batch_size, K, n_hops+1, channel]\n\n batch_size = user_gcn_emb.shape[0]\n\n u_e = self.pooling(user_gcn_emb)\n pos_e = self.pooling(pos_gcn_embs)\n neg_e = self.pooling(neg_gcn_embs.view(-1, neg_gcn_embs.shape[2], neg_gcn_embs.shape[3])).view(\n batch_size, self.K, -1\n )\n\n pos_scores = torch.sum(torch.mul(u_e, pos_e), axis=1)\n neg_scores = torch.sum(torch.mul(u_e.unsqueeze(dim=1), neg_e), axis=-1) # [batch_size, K]\n\n mf_loss = torch.mean(torch.log(1 + torch.exp(neg_scores - pos_scores.unsqueeze(dim=1)).sum(dim=1)))\n\n # cul regularizer\n regularize = (\n torch.norm(user_gcn_emb[:, 0, :]) ** 2\n + torch.norm(pos_gcn_embs[:, 0, :]) ** 2\n + torch.norm(neg_gcn_embs[:, :, 0, :]) ** 2\n ) / 2 # take hop=0\n emb_loss = self.decay * regularize / batch_size\n\n return mf_loss + emb_loss, mf_loss, emb_loss\n", "# Modified by CogDL Team\n#\n# DeepSpeed note, code taken from commit 3d59216cec89a363649b4fe3d15295ba936ced0f\n# https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/modeling.py\n\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport json\nimport logging\nimport math\n\nimport torch\nfrom torch import nn\nfrom torch.utils import checkpoint\n\nfrom torch.nn import Module\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef f_gelu(x):\n pdtype = x.dtype\n x = x.float()\n y = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n return y.to(pdtype)\n\n\[email protected]\ndef bias_gelu(bias, y):\n x = bias + y\n return x * 0.5 * (1.0 + torch.erf(x / 1.41421))\n\n\[email protected]\ndef bias_tanh(bias, y):\n x = bias + y\n return torch.tanh(x)\n\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return f_gelu(x)\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\n\n\nclass LinearActivation(Module):\n r\"\"\"Fused Linear and activation Module.\"\"\"\n __constants__ = [\"bias\"]\n\n def __init__(self, in_features, out_features, act=\"gelu\", bias=True):\n super(LinearActivation, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.fused_gelu = False\n self.fused_tanh = False\n if isinstance(act, str):\n if bias and act == \"gelu\":\n self.fused_gelu = True\n elif bias and act == \"tanh\":\n self.fused_tanh = True\n else:\n self.act_fn = ACT2FN[act]\n else:\n self.act_fn = act\n self.weight = Parameter(torch.Tensor(out_features, in_features))\n if bias:\n self.bias = Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter(\"bias\", None)\n self.reset_parameters()\n\n def reset_parameters(self):\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, input):\n if self.fused_gelu:\n return bias_gelu(self.bias, F.linear(input, self.weight, None))\n elif self.fused_tanh:\n return bias_tanh(self.bias, F.linear(input, self.weight, None))\n else:\n return self.act_fn(F.linear(input, self.weight, self.bias))\n\n def extra_repr(self):\n return \"in_features={}, out_features={}, bias={}\".format(\n self.in_features, self.out_features, self.bias is not None\n )\n\n\nclass BertConfig(object):\n \"\"\"Configuration class to store the configuration of a `BertModel`.\"\"\"\n\n def __init__(self):\n pass\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = 
cls()\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\nclass BertLayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\"\"\"\n super(BertLayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n pdtype = x.dtype\n x = x.float()\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x.to(pdtype) + self.bias\n\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.softmax = nn.Softmax(dim=-1)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def transpose_key_for_scores(self, x):\n new_x_shape = x.size()[:-1] + 
(self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 3, 1)\n\n def forward(self, hidden_states, attention_mask):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_key_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer)\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = self.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n return context_layer\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.dense.bert_output_layer = True\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n\n def forward(self, input_tensor, attention_mask):\n self_output = self.self(input_tensor, attention_mask)\n attention_output = self.output(self_output, input_tensor)\n return attention_output\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense_act = LinearActivation(config.hidden_size, config.intermediate_size, act=config.hidden_act)\n\n def forward(self, hidden_states):\n hidden_states = self.dense_act(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.dense.bert_output_layer = True\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.PreAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.PostAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask):\n input_layer_norm = self.PreAttentionLayerNorm(hidden_states)\n attention_output = 
self.attention(input_layer_norm, attention_mask)\n\n intermediate_input = hidden_states + attention_output\n\n intermediate_layer_norm = self.PostAttentionLayerNorm(intermediate_input)\n intermediate_output = self.intermediate(intermediate_layer_norm)\n layer_output = self.output(intermediate_output)\n\n return layer_output + intermediate_input\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super(BertEncoder, self).__init__()\n\n # Added later to make it similar to GPT-2\n self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n\n layer = BertLayer(config)\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])\n\n def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False):\n all_encoder_layers = []\n\n def custom(start, end):\n def custom_forward(*inputs):\n layers = self.layer[start:end]\n x_ = inputs[0]\n for layer in layers:\n x_ = layer(x_, inputs[1])\n return x_\n\n return custom_forward\n\n if checkpoint_activations:\n l = 0 # noqa E741\n num_layers = len(self.layer)\n chunk_length = math.ceil(math.sqrt(num_layers))\n while l < num_layers:\n hidden_states = checkpoint.checkpoint(custom(l, l + chunk_length), hidden_states, attention_mask * 1)\n l += chunk_length # noqa E741\n # decoder layers\n else:\n for i, layer_module in enumerate(self.layer):\n hidden_states = layer_module(hidden_states, attention_mask)\n\n if output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n\n if not output_all_encoded_layers or checkpoint_activations:\n hidden_states = self.FinalLayerNorm(hidden_states)\n all_encoder_layers.append(hidden_states)\n return all_encoder_layers\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super(BertPooler, self).__init__()\n self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=\"tanh\")\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense_act(first_token_tensor)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super(BertPredictionHeadTransform, self).__init__()\n self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n\n def forward(self, hidden_states):\n hidden_states = self.dense_act(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertLMPredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False)\n self.decoder.weight = bert_model_embedding_weights\n self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))\n\n def forward(self, hidden_states, masked_token_indexes):\n hidden_states = self.transform(hidden_states)\n\n if masked_token_indexes is not None:\n hidden_states = torch.index_select(hidden_states.view(-1, hidden_states.shape[-1]), 0, masked_token_indexes)\n\n hidden_states = self.decoder(hidden_states) + self.bias\n return hidden_states\n\n\nclass 
BertPreTrainingHeads(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertPreTrainingHeads, self).__init__()\n self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output, masked_token_indexes=None):\n prediction_scores = self.predictions(sequence_output, masked_token_indexes)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BertPreTrainedModel(nn.Module):\n \"\"\"An abstract class to handle weights initialization and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n\n def __init__(self, config, *inputs, **kwargs):\n super(BertPreTrainedModel, self).__init__()\n if not isinstance(config, BertConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `BertConfig`. \"\n \"To create a model from a Google pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n )\n )\n self.config = config\n\n def init_bert_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n num_layers = self.config.num_hidden_layers\n std = self.config.initializer_range\n if hasattr(module, \"bert_output_layer\"):\n # \"Accounting for accumulation on the residual path\"\n # print(\"Accounting for accumulation on the residual path\")\n std = self.config.initializer_range / math.sqrt(2.0 * num_layers)\n module.weight.data.normal_(mean=0.0, std=std)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nclass BertModel(BertPreTrainedModel):\n \"\"\"BERT model (\"Bidirectional Embedding Representations from a Transformer\").\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.\n\n Outputs: Tuple of (encoded_layers, pooled_output)\n `encoded_layers`: controled by `output_all_encoded_layers` argument:\n - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end\n of each attention block (i.e. 
12 full sequences for BERT-base, 24 for BERT-large), each\n encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],\n - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding\n to the last attention block of shape [batch_size, sequence_length, hidden_size],\n `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a\n classifier pretrained on top of the hidden state associated to the first character of the\n input (`CLS`) to train on the Next-Sentence task (see BERT's paper).\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = modeling.BertModel(config=config)\n all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n\n def __init__(self, config):\n super(BertModel, self).__init__(config)\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n self.apply(self.init_bert_weights)\n logger.info(\"Init BERT pretrain model\")\n\n def forward(\n self,\n input_ids,\n token_type_ids=None,\n attention_mask=None,\n output_all_encoded_layers=True,\n checkpoint_activations=False,\n ):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n embedding_output = self.embeddings(input_ids, token_type_ids)\n encoded_layers = self.encoder(\n embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers,\n checkpoint_activations=checkpoint_activations,\n )\n sequence_output = encoded_layers[-1]\n pooled_output = self.pooler(sequence_output)\n\n if not output_all_encoded_layers:\n encoded_layers = encoded_layers[-1]\n return encoded_layers, pooled_output\n\n\nclass BertForPreTrainingPreLN(BertPreTrainedModel):\n \"\"\"BERT model with pre-training heads.\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n \"\"\"\n\n def __init__(self, config):\n super(BertForPreTrainingPreLN, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertPreTrainingHeads(config, 
self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n", "import json\nimport argparse\nimport os\nfrom tqdm import tqdm\nimport time\nimport torch\nimport numpy as np\nfrom cogdl.oag.oagbert import OAGBertPretrainingModel, oagbert\nimport multiprocessing\nfrom multiprocessing import Manager\nfrom cogdl.oag.utils import MultiProcessTqdm\nfrom cogdl.datasets import build_dataset\nfrom collections import Counter\nfrom . import BaseTask, register_task\n\n# python scripts/train.py --task oag_zero_shot_infer --model oagbert --dataset l0fos\n\n\ndef get_span_decode_prob(\n model,\n tokenizer,\n title=\"\",\n abstract=\"\",\n venue=\"\",\n authors=[],\n concepts=[],\n affiliations=[],\n span_type=\"\",\n span=\"\",\n debug=False,\n max_seq_length=512,\n device=None,\n wprop=False,\n wabs=False,\n testing=False,\n):\n token_type_str_lookup = [\"TEXT\", \"AUTHOR\", \"VENUE\", \"AFF\", \"FOS\"]\n input_ids = []\n input_masks = []\n token_type_ids = []\n masked_lm_labels = []\n position_ids = []\n position_ids_second = []\n num_spans = 0\n masked_positions = []\n\n def add_span(token_type_id, token_ids, is_mask=False):\n nonlocal num_spans\n if len(token_ids) == 0:\n return\n length = len(token_ids)\n input_ids.extend(token_ids if not is_mask else [tokenizer.mask_token_id] * length)\n input_masks.extend([1] * length)\n token_type_ids.extend([token_type_id] * length)\n masked_lm_labels.extend([-1] * length if not is_mask else [tokenizer.cls_token_id] * length)\n position_ids.extend([num_spans] * length)\n position_ids_second.extend(list(range(length)))\n if is_mask:\n masked_positions.extend([len(input_ids) - length + i for i in range(span_length)])\n num_spans += 1\n\n def _encode(text):\n return tokenizer(text, add_special_tokens=False)[\"input_ids\"] if len(text) > 0 else []\n\n span_token_ids = _encode(span)\n span_length = len(span_token_ids)\n span_token_type_id = token_type_str_lookup.index(span_type)\n if span_token_type_id < 0:\n print(\"unexpected span type: %s\" % span_type)\n return\n\n prompt_text = \"\"\n if wprop:\n if span_type == \"FOS\":\n prompt_text = \"Field of Study:\"\n elif span_type == \"VENUE\":\n prompt_text = \"Journal or Venue:\"\n elif span_type == \"AFF\":\n prompt_text = \"Affiliations:\"\n else:\n raise NotImplementedError\n prompt_token_ids = _encode(prompt_text)\n\n add_span(0, (_encode(title) + _encode(abstract if wabs else \"\") + prompt_token_ids)[: max_seq_length - span_length])\n add_span(2, _encode(venue)[: max_seq_length - len(input_ids) - span_length])\n for author in authors:\n add_span(1, _encode(author)[: max_seq_length - len(input_ids) - span_length])\n for concept in concepts:\n add_span(4, _encode(concept)[: max_seq_length - len(input_ids) - span_length])\n for affiliation in affiliations:\n add_span(3, _encode(affiliation)[: max_seq_length - len(input_ids) - span_length])\n\n add_span(span_token_type_id, span_token_ids, is_mask=True)\n\n logprobs = 0.0\n logproblist = []\n for i in range(span_length):\n if testing and i % 10 != 0:\n continue\n # scibert deleted\n batch = [None] + [\n torch.LongTensor(t[:max_seq_length]).unsqueeze(0).to(device or \"cpu\")\n for t in [input_ids, input_masks, token_type_ids, masked_lm_labels, position_ids, position_ids_second]\n ]\n sequence_output, pooled_output = model.bert.forward(\n input_ids=batch[1],\n token_type_ids=batch[3],\n attention_mask=batch[2],\n output_all_encoded_layers=False,\n checkpoint_activations=False,\n position_ids=batch[5],\n position_ids_second=batch[6],\n )\n 
masked_token_indexes = torch.nonzero((batch[4] + 1).view(-1)).view(-1)\n prediction_scores, _ = model.cls(sequence_output, pooled_output, masked_token_indexes)\n prediction_scores = torch.nn.functional.log_softmax(prediction_scores, dim=1) # L x Vocab\n token_log_probs = prediction_scores[torch.arange(len(span_token_ids)), span_token_ids]\n\n # not force forward\n logprob, pos = token_log_probs.max(dim=0)\n\n logprobs += logprob.item()\n logproblist.append(logprob.item())\n real_pos = masked_positions[pos]\n input_ids[real_pos] = span_token_ids[pos]\n masked_lm_labels[real_pos] = -1\n masked_positions.pop(pos)\n span_token_ids.pop(pos)\n\n return np.exp(logprobs), logproblist\n\n\n@register_task(\"oag_zero_shot_infer\")\nclass zero_shot_inference(BaseTask):\n def add_args(parser: argparse.ArgumentParser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n parser.add_argument(\"--cuda\", type=int, nargs=\"+\", default=[-1])\n parser.add_argument(\"--wprop\", action=\"store_true\", dest=\"wprop\", default=False)\n parser.add_argument(\"--wabs\", action=\"store_true\", dest=\"wabs\", default=False)\n parser.add_argument(\"--token_type\", type=str, default=\"FOS\")\n parser.add_argument(\"--testing\", action=\"store_true\", dest=\"testing\", default=False)\n\n def __init__(self, args):\n super(zero_shot_inference, self).__init__(args)\n\n self.dataset = build_dataset(args)\n self.sample = self.dataset.get_data()\n self.input_dir = self.dataset.processed_dir\n self.output_dir = \"saved/zero_shot_infer/\"\n\n self.tokenizer, self.model = oagbert(\"oagbert-v2\", True)\n\n self.cudalist = args.cuda\n self.model_name = args.model\n self.wprop = args.wprop # prompt\n self.wabs = args.wabs # with abstract\n self.token_type = args.token_type\n\n self.testing = args.testing\n\n os.makedirs(self.output_dir, exist_ok=True)\n for filename in os.listdir(self.output_dir):\n os.remove(\"%s/%s\" % (self.output_dir, filename))\n\n def process_file(self, device, filename, pbar):\n pbar.reset(1, name=\"preparing...\")\n self.model.eval()\n self.model.to(device)\n\n output_file = self.output_dir + \"/\" + filename\n candidates = self.dataset.get_candidates()\n\n pbar.reset(len(self.sample[filename]), name=filename)\n pbar.set_description(\"[%s]\" % (filename))\n fout = open(output_file, \"a\")\n i = 0\n for paper in self.sample[filename]:\n pbar.update(1)\n i = i + 1\n if self.testing and i % 50 != 0:\n continue\n title = paper[\"title\"]\n abstract = \"\".join(paper[\"abstracts\"])\n obj, probs, problists = {}, {}, {}\n for candidate in candidates:\n prob, problist = get_span_decode_prob(\n model=self.model,\n tokenizer=self.tokenizer,\n title=title,\n abstract=abstract,\n span_type=self.token_type,\n span=candidate,\n device=device,\n debug=False,\n wprop=self.wprop,\n wabs=self.wabs,\n testing=self.testing,\n )\n probs[candidate] = prob\n problists[candidate] = problist\n obj[\"probs\"] = list(sorted(probs.items(), key=lambda x: -x[1]))\n obj[\"pred\"] = list(sorted(probs.items(), key=lambda x: -x[1]))[0][0]\n obj[\"logprobs\"] = problists\n fout.write(\"%s\\n\" % json.dumps(obj, ensure_ascii=False))\n\n fout.close()\n pbar.close()\n\n def train(self):\n with Manager() as manager:\n lock = manager.Lock()\n positions = manager.dict()\n\n summary_pbar = MultiProcessTqdm(lock, positions, update_interval=1)\n if -1 not in self.cudalist:\n processnum = 4 * len(self.cudalist)\n else:\n processnum = 12\n pool = multiprocessing.get_context(\"spawn\").Pool(processnum)\n results = []\n idx = 0\n\n for filename 
in self.sample.keys():\n if self.testing and idx % 3 != 0:\n continue\n cuda_num = len(self.cudalist)\n cuda_idx = self.cudalist[idx % cuda_num]\n device = torch.device(\"cuda:%d\" % cuda_idx if cuda_idx >= 0 else \"cpu\")\n\n pbar = MultiProcessTqdm(lock, positions, update_interval=1)\n r = pool.apply_async(self.process_file, (device, filename, pbar))\n results.append((r, filename))\n idx += 1\n\n if self.testing:\n cuda_num = len(self.cudalist)\n cuda_idx = self.cudalist[idx % cuda_num]\n device = torch.device(\"cuda:%d\" % cuda_idx if cuda_idx >= 0 else \"cpu\")\n\n pbar = MultiProcessTqdm(lock, positions, update_interval=1)\n for filename in self.sample.keys():\n self.process_file(device, filename, pbar)\n break\n\n summary_pbar.reset(total=len(results), name=\"Total\")\n finished = set()\n while len(finished) < len(results):\n for r, filename in results:\n if filename not in finished:\n if r.ready():\n r.get()\n finished.add(filename)\n summary_pbar.update(1)\n time.sleep(1)\n pool.close()\n return self.analysis_result()\n\n def analysis_result(self):\n concepts = self.dataset.get_candidates()\n concepts.sort()\n\n result = {}\n T, F = 0, 0\n for filename in os.listdir(self.output_dir):\n if not filename.endswith(\".jsonl\"):\n continue\n\n fos = filename.split(\".\")[0]\n t, f = 0, 0\n cnter = Counter()\n for row in open(\"%s/%s\" % (self.output_dir, filename)):\n try:\n probs = json.loads(row.strip())[\"probs\"]\n pred = [\n k for k, v in sorted([(k, v) for k, v in probs if k in concepts], key=lambda tup: -tup[1])[:2]\n ]\n\n except Exception as e:\n print(\"Err:%s\" % e)\n print(\"Row:%s\" % row)\n correct = pred[0] == fos\n t += correct\n f += not correct\n cnter[pred[0]] += 1\n T += t\n F += f\n os.remove(\"%s/%s\" % (self.output_dir, filename))\n result[\"Accuracy\"] = T * 100 / (T + F)\n return result\n", "import hashlib\nimport networkx as nx\nimport numpy as np\nimport random\nfrom .. 
import BaseModel, register_model\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom tqdm import tqdm\n\n\nclass Hin2vec_layer(nn.Module):\n def __init__(self, num_node, num_relation, hidden_size, device):\n super(Hin2vec_layer, self).__init__()\n\n self.num_node = num_node\n\n self.Wx = Parameter(torch.randn(num_node, hidden_size))\n self.Wr = Parameter(torch.randn(num_relation, hidden_size))\n\n self.device = device\n\n self.X = F.one_hot(torch.arange(num_node), num_node).float().to(self.device)\n self.R = F.one_hot(torch.arange(num_relation), num_relation).float().to(self.device)\n self.criterion = nn.CrossEntropyLoss()\n\n def regulartion(self, embr):\n clamp_embr = torch.clamp(embr, -6.0, 6.0)\n sigmod1 = torch.sigmoid(clamp_embr)\n # return sigmod1\n re_embr = torch.mul(sigmod1, 1 - sigmod1)\n return re_embr\n\n def forward(self, x, y, r, l): # noqa E741\n x_one, y_one, r_one = (\n torch.index_select(self.X, 0, x),\n torch.index_select(self.X, 0, y),\n torch.index_select(self.R, 0, r),\n )\n self.embx, self.emby, self.embr = torch.mm(x_one, self.Wx), torch.mm(y_one, self.Wx), torch.mm(r_one, self.Wr)\n self.re_embr = self.regulartion(self.embr)\n self.preds = torch.unsqueeze(\n torch.sigmoid(torch.sum(torch.mul(torch.mul(self.embx, self.emby), self.re_embr), 1)), 1\n )\n self.logits = torch.cat((self.preds, 1 - self.preds), 1)\n return self.logits, self.criterion(self.logits, l)\n\n def get_emb(\n self,\n ):\n x = F.one_hot(torch.arange(0, self.num_node), num_classes=self.num_node).float().to(self.device)\n return torch.mm(x, self.Wx)\n\n\nclass RWgraph:\n def __init__(self, nx_G, node_type=None):\n self.G = nx_G\n self.node_type = node_type\n\n def _walk(self, start_node, walk_length):\n # Simulate a random walk starting from start node.\n walk = [start_node]\n while len(walk) < walk_length:\n cur = walk[-1]\n cur_nbrs = list(self.G.neighbors(cur))\n if len(cur_nbrs) == 0:\n break\n k = int(np.floor(np.random.rand() * len(cur_nbrs)))\n walk.append(cur_nbrs[k])\n return walk\n\n def _simulate_walks(self, walk_length, num_walks):\n # Repeatedly simulate random walks from each node.\n walks = []\n nodes = list(self.G.nodes())\n print(\"node number:\", len(nodes))\n for walk_iter in range(num_walks):\n print(str(walk_iter + 1), \"/\", str(num_walks))\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self._walk(node, walk_length))\n return walks\n\n def data_preparation(self, walks, hop, negative):\n # data preparation via process walks and negative sampling\n node_type = self.node_type\n num_node_type = len(set(node_type))\n type2list = [[] for _ in range(num_node_type)]\n for node, nt in enumerate(node_type):\n type2list[nt].append(node)\n print(\"number of type2list\", num_node_type)\n relation = dict()\n pairs = []\n for walk in walks:\n for i in range(len(walk) - hop):\n for j in range(1, hop + 1):\n x, y = walk[i], walk[i + j]\n # tx, ty = node_type[x], node_type[y]\n if x == y:\n continue\n meta_str = \"-\".join([str(node_type[a]) for a in walk[i : i + j + 1]])\n if meta_str not in relation:\n relation[meta_str] = len(meta_str)\n pairs.append([x, y, relation[meta_str], 1])\n for k in range(negative):\n if random.random() > 0.5:\n fx = random.choice(type2list[node_type[x]])\n while fx == x:\n fx = random.choice(type2list[node_type[x]])\n pairs.append([fx, y, relation[meta_str], 0])\n else:\n fy = random.choice(type2list[node_type[y]])\n while fy == y:\n fy = random.choice(type2list[node_type[y]])\n 
pairs.append([x, fy, relation[meta_str], 0])\n print(\"number of relation\", len(relation))\n return np.asarray(pairs), relation\n\n\n@register_model(\"hin2vec\")\nclass Hin2vec(BaseModel):\n r\"\"\"The Hin2vec model from the `\"HIN2Vec: Explore Meta-paths in Heterogeneous Information Networks for Representation Learning\"\n <https://dl.acm.org/doi/10.1145/3132847.3132953>`_ paper.\n\n Args:\n hidden_size (int) : The dimension of node representation.\n walk_length (int) : The walk length.\n walk_num (int) : The number of walks to sample for each node.\n batch_size (int) : The batch size of training in Hin2vec.\n hop (int) : The number of hop to construct training samples in Hin2vec.\n negative (int) : The number of nagative samples for each meta2path pair.\n epochs (int) : The number of training iteration.\n lr (float) : The initial learning rate of SGD.\n cpu (bool) : Use CPU or GPU to train hin2vec.\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n # fmt: off\n parser.add_argument(\"--hidden-size\", type=int, default=128)\n parser.add_argument(\"--walk-length\", type=int, default=80, help=\"Length of walk per source. Default is 80.\")\n parser.add_argument(\"--walk-num\", type=int, default=40, help=\"Number of walks per source. Default is 40.\")\n parser.add_argument(\"--batch-size\", type=int, default=1000, help=\"Batch size in SGD training process. Default is 1000.\")\n parser.add_argument(\"--hop\", type=int, default=2)\n parser.add_argument(\"--negative\", type=int, default=5)\n parser.add_argument(\"--epochs\", type=int, default=1)\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.hidden_size,\n args.walk_length,\n args.walk_num,\n args.batch_size,\n args.hop,\n args.negative,\n args.epochs,\n args.lr,\n args.cpu,\n )\n\n def __init__(self, hidden_dim, walk_length, walk_num, batch_size, hop, negative, epochs, lr, cpu=True):\n super(Hin2vec, self).__init__()\n self.hidden_dim = hidden_dim\n self.walk_length = walk_length\n self.walk_num = walk_num\n self.batch_size = batch_size\n self.hop = hop\n self.negative = negative\n self.epochs = epochs\n self.lr = lr\n\n def train(self, G, node_type):\n self.num_node = G.number_of_nodes()\n rw = RWgraph(G, node_type)\n walks = rw._simulate_walks(self.walk_length, self.walk_num)\n pairs, relation = rw.data_preparation(walks, self.hop, self.negative)\n\n self.num_relation = len(relation)\n model = Hin2vec_layer(self.num_node, self.num_relation, self.hidden_dim, self.device)\n self.model = model.to(self.device)\n\n num_batch = int(len(pairs) / self.batch_size)\n print_num_batch = 100\n print(\"number of batch\", num_batch)\n\n opt = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n epoch_iter = tqdm(range(self.epochs))\n for epoch in epoch_iter:\n loss_n, pred, label = [], [], []\n for i in range(num_batch):\n batch_pairs = torch.from_numpy(pairs[i * self.batch_size : (i + 1) * self.batch_size])\n batch_pairs = batch_pairs.to(self.device)\n batch_pairs = batch_pairs.T\n x, y, r, l = batch_pairs[0], batch_pairs[1], batch_pairs[2], batch_pairs[3] # noqa E741\n opt.zero_grad()\n logits, loss = self.model.forward(x, y, r, l)\n\n loss_n.append(loss.item())\n label.append(l)\n pred.extend(logits)\n if i % print_num_batch == 0 and i != 0:\n label = torch.cat(label).to(self.device)\n pred = torch.stack(pred, dim=0)\n pred = pred.max(1)[1]\n acc = pred.eq(label).sum().item() / len(label)\n epoch_iter.set_description(\n f\"Epoch: {i:03d}, Loss: {sum(loss_n)/print_num_batch:.5f}, Acc: {acc:.5f}\"\n )\n 
loss_n, pred, label = [], [], []\n\n loss.backward()\n opt.step()\n\n embedding = self.model.get_emb()\n return embedding.cpu().detach().numpy()\n", "import os.path as osp\nimport random\n\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nfrom cogdl import options\nfrom cogdl.datasets import build_dataset_from_name\nfrom grave import plot_network, use_attributes\nfrom tabulate import tabulate\n\n\ndef plot_graph(args):\n if not isinstance(args.dataset, list):\n args.dataset = [args.dataset]\n\n for name in args.dataset:\n dataset = build_dataset_from_name(name)\n data = dataset[0]\n\n depth = args.depth\n pic_file = osp.join(args.save_dir, f\"display_{name}.png\")\n\n col_names = [\n \"Dataset\",\n \"#nodes\",\n \"#edges\",\n \"#features\",\n \"#classes\",\n \"#labeled data\",\n ]\n tab_data = [\n [\n name,\n data.x.shape[0],\n data.edge_index.shape[1],\n data.x.shape[1],\n len(set(data.y.numpy())),\n sum(data.train_mask.numpy()),\n ]\n ]\n print(tabulate(tab_data, headers=col_names, tablefmt=\"psql\"))\n\n G = nx.Graph()\n G.add_edges_from([tuple(data.edge_index[:, i].numpy()) for i in range(data.edge_index.shape[1])])\n\n s = random.choice(list(G.nodes()))\n q = [s]\n node_set = set([s])\n node_index = {s: 0}\n max_index = 1\n for _ in range(depth):\n nq = []\n for x in q:\n for key in G[x].keys():\n if key not in node_set:\n nq.append(key)\n node_set.add(key)\n node_index[key] = node_index[x] + 1\n if len(nq) > 0:\n max_index += 1\n q = nq\n\n cmap = cm.rainbow(np.linspace(0.0, 1.0, max_index))\n\n for node, index in node_index.items():\n G.nodes[node][\"color\"] = cmap[index]\n G.nodes[node][\"size\"] = (max_index - index) * 50\n\n fig, ax = plt.subplots()\n plot_network(G.subgraph(list(node_set)), node_style=use_attributes())\n plt.savefig(pic_file)\n print(f\"Sampled ego network saved to {pic_file} .\")\n\n\nif __name__ == \"__main__\":\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--seed', '-s', type=int, default=0, help='random seed')\n parser.add_argument('--depth', '-d', type=int, default=3, help='neighborhood depth')\n parser.add_argument('--name', '-n', type=str, default='Cora', help='dataset name')\n parser.add_argument('--file', '-f', type=str, default='graph.jpg', help='saved file name')\n args = parser.parse_args()\n \"\"\"\n parser = options.get_display_data_parser()\n args = parser.parse_args()\n\n if isinstance(args.seed, list):\n args.seed = args.seed[0]\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n\n plot_graph(args)\n", "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom .. 
import BaseModel, register_model\nfrom cogdl.layers import SAINTLayer\nfrom cogdl.trainers.sampled_trainer import SAINTTrainer\n\n\ndef parse_arch(architecture, aggr, act, bias, hidden_size, num_features):\n num_layers = len(architecture.split(\"-\"))\n # set default values, then update by arch_gcn\n bias_layer = [bias] * num_layers\n act_layer = [act] * num_layers\n aggr_layer = [aggr] * num_layers\n dims_layer = [hidden_size] * num_layers\n order_layer = [int(order) for order in architecture.split(\"-\")]\n return [num_features] + dims_layer, order_layer, act_layer, bias_layer, aggr_layer\n\n\n@register_model(\"graphsaint\")\nclass GraphSAINT(BaseModel):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument(\"--num-features\", type=int)\n parser.add_argument(\"--hidden-size\", type=int, default=128)\n parser.add_argument(\"--architecture\", type=str, default=\"1-1-0\")\n parser.add_argument(\"--aggr\", type=str, default=\"concat\")\n parser.add_argument(\"--act\", type=str, default=\"relu\")\n parser.add_argument(\"--bias\", type=str, default=\"norm\")\n parser.add_argument(\"--dropout\", type=float, default=0.1)\n parser.add_argument(\"--weight-decay\", type=int, default=0)\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.num_features,\n args.num_classes,\n args.architecture,\n args.aggr,\n args.act,\n args.bias,\n args.weight_decay,\n args.dropout,\n args.hidden_size,\n )\n\n def __init__(self, num_features, num_classes, architecture, aggr, act, bias, weight_decay, dropout, hidden_size):\n \"\"\"\n Build the multi-layer GNN architecture.\n\n Inputs:\n num_classes int, number of classes a node can belong to\n arch_gcn dict, config for each GNN layer\n train_params dict, training hyperparameters (e.g., learning rate)\n feat_full np array of shape N x f, where N is the total num of\n nodes and f is the dimension for input node feature\n label_full np array, for single-class classification, the shape\n is N x 1 and for multi-class classification, the\n shape is N x c (where c = num_classes)\n cpu_eval bool, if True, will put the model on CPU.\n\n Outputs:\n None\n \"\"\"\n super(GraphSAINT, self).__init__()\n self.aggregator_cls = SAINTLayer\n self.mulhead = 1\n self.weight_decay = weight_decay\n self.dropout = dropout\n self.sigmoid_loss = True\n self.num_classes = num_classes\n self.num_layers = len(architecture.split(\"-\"))\n _dims, self.order_layer, self.act_layer, self.bias_layer, self.aggr_layer = parse_arch(\n architecture, aggr, act, bias, hidden_size, num_features\n )\n # get layer index for each conv layer, useful for jk net last layer aggregation\n self.set_idx_conv()\n self.set_dims(_dims)\n\n self.loss = 0\n self.opt_op = None\n\n # build the model below\n self.num_params = 0\n self.aggregators, num_param = self.get_aggregators()\n self.num_params += num_param\n self.conv_layers = nn.ModuleList(self.aggregators)\n self.classifier = SAINTLayer(\n self.dims_feat[-1], self.num_classes, act=\"I\", order=0, dropout=self.dropout, bias=\"bias\"\n )\n self.num_params += self.classifier.num_param\n\n def set_dims(self, dims):\n \"\"\"\n Set the feature dimension / weight dimension for each GNN or MLP layer.\n We will use the dimensions set here to initialize PyTorch layers.\n\n Inputs:\n dims list, length of node feature for each hidden layer\n\n Outputs:\n None\n \"\"\"\n self.dims_feat = [dims[0]] + [\n ((self.aggr_layer[layer] == \"concat\") * 
self.order_layer[layer] + 1) * dims[layer + 1]\n for layer in range(len(dims) - 1)\n ]\n self.dims_weight = [(self.dims_feat[layer], dims[layer + 1]) for layer in range(len(dims) - 1)]\n\n def set_idx_conv(self):\n \"\"\"\n Set the index of GNN layers for the full neural net. For example, if\n the full NN is having 1-0-1-0 arch (1-hop graph conv, followed by 0-hop\n MLP, ...). Then the layer indices will be 0, 2.\n \"\"\"\n idx_conv = np.where(np.array(self.order_layer) >= 1)[0]\n idx_conv = list(idx_conv[1:] - 1)\n idx_conv.append(len(self.order_layer) - 1)\n _o_arr = np.array(self.order_layer)[idx_conv]\n if np.prod(np.ediff1d(_o_arr)) == 0:\n self.idx_conv = idx_conv\n else:\n self.idx_conv = list(np.where(np.array(self.order_layer) == 1)[0])\n\n def forward(self, graph):\n x = graph.x\n for layer in self.conv_layers:\n x = layer(graph, x)\n emb_subg_norm = F.normalize(x, p=2, dim=1)\n pred_subg = self.classifier(None, emb_subg_norm)\n return pred_subg\n\n def _loss(self, preds, labels, norm_loss):\n \"\"\"\n The predictor performs sigmoid (for multi-class) or softmax (for single-class)\n \"\"\"\n if self.sigmoid_loss:\n norm_loss = norm_loss.unsqueeze(1)\n return torch.nn.BCEWithLogitsLoss(weight=norm_loss, reduction=\"sum\")(preds, labels)\n else:\n _ls = torch.nn.CrossEntropyLoss(reduction=\"none\")(preds, labels)\n return (norm_loss * _ls).sum()\n\n def get_aggregators(self):\n \"\"\"\n Return a list of aggregator instances. to be used in self.build()\n \"\"\"\n num_param = 0\n aggregators = []\n for layer in range(self.num_layers):\n aggr = self.aggregator_cls(\n *self.dims_weight[layer],\n dropout=self.dropout,\n act=self.act_layer[layer],\n order=self.order_layer[layer],\n aggr=self.aggr_layer[layer],\n bias=self.bias_layer[layer],\n mulhead=self.mulhead,\n )\n num_param += aggr.num_param\n aggregators.append(aggr)\n return aggregators, num_param\n\n def predict(self, data):\n return self.forward(data)\n\n @staticmethod\n def get_trainer(task, args):\n return SAINTTrainer\n" ]
[ [ "torch.LongTensor", "torch.nn.Dropout", "torch.nn.Parameter", "torch.empty", "torch.floor", "torch.cat", "torch.max", "torch.norm", "torch.sparse.mm", "torch.from_numpy", "torch.mul", "torch.rand", "torch.stack", "torch.sparse.FloatTensor" ], [ "torch.nn.Softmax", "torch.sigmoid", "torch.nn.Dropout", "torch.nn.init.uniform_", "torch.ones", "torch.Tensor", "torch.zeros", "torch.sqrt", "torch.zeros_like", "torch.nn.Embedding", "torch.tanh", "torch.nn.Linear", "torch.matmul", "torch.erf", "torch.arange", "torch.nn.init._calculate_fan_in_and_fan_out", "torch.nn.functional.linear", "torch.ones_like" ], [ "torch.device", "torch.LongTensor", "numpy.exp", "torch.nn.functional.log_softmax" ], [ "torch.sigmoid", "torch.nn.CrossEntropyLoss", "torch.mm", "torch.cat", "numpy.asarray", "torch.randn", "torch.from_numpy", "torch.mul", "numpy.random.rand", "torch.arange", "torch.stack", "torch.clamp", "torch.index_select" ], [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "numpy.random.seed", "numpy.linspace" ], [ "torch.nn.functional.normalize", "torch.nn.CrossEntropyLoss", "numpy.ediff1d", "torch.nn.ModuleList", "torch.nn.BCEWithLogitsLoss", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
raven-computing/pydf
[ "70b14ad11aa629da6d1abb993a2a4c567db73ca9" ]
[ "raven/struct/dataframe/_dataframeutils.py" ]
[ "# Copyright (C) 2021 Raven Computing\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nProvides internal utility functions for DataFrame operations.\n\"\"\"\n\nimport numpy as np\n\nimport raven.struct.dataframe.core as dataframe\nimport raven.struct.dataframe.column as column\nimport raven.struct.dataframe.bytecolumn as bytecolumn\nimport raven.struct.dataframe.shortcolumn as shortcolumn\nimport raven.struct.dataframe.intcolumn as intcolumn\nimport raven.struct.dataframe.longcolumn as longcolumn\nimport raven.struct.dataframe.floatcolumn as floatcolumn\nimport raven.struct.dataframe.doublecolumn as doublecolumn\nimport raven.struct.dataframe.stringcolumn as stringcolumn\nimport raven.struct.dataframe.charcolumn as charcolumn\nimport raven.struct.dataframe.booleancolumn as booleancolumn\nimport raven.struct.dataframe.binarycolumn as binarycolumn\nimport raven.struct.dataframe._columnutils as columnutils\n\n# pylint: disable=C0103, R1702, R1705, R0911, R0912, R0914, R0915, W0212\n\ndef copy_of(df):\n \"\"\"Creates and returns a copy of the given DataFrame\n\n Args:\n df: The DataFrame instance to copy\n\n Returns:\n A copy of the specified DataFrame or None if the argument is None\n \"\"\"\n if df is None:\n return None\n\n df.flush()\n columns = [col.clone() for col in df._internal_columns()]\n copy = None\n if df.is_nullable():\n copy = dataframe.NullableDataFrame(columns)\n else:\n copy = dataframe.DefaultDataFrame(columns)\n\n return copy\n\ndef like(df):\n \"\"\"Creates and returns a DataFrame which has the same column structure\n and Column names as the specified DataFrame instance but is otherwise empty\n\n Args:\n df: The DataFrame from which to copy the Column structure\n\n Returns:\n A DataFrame with the same Column structure and names as the\n specified DataFrame, or None if the specified DataFrame is None\n \"\"\"\n if df is None:\n return None\n\n col = df.columns()\n if col == 0:\n return (dataframe.NullableDataFrame()\n if df.is_nullable()\n else dataframe.DefaultDataFrame())\n\n cols = [None] * col\n for i in range(col):\n cols[i] = column.Column.of_type(df.get_column(i).type_code())\n\n result = (dataframe.NullableDataFrame(cols)\n if df.is_nullable()\n else dataframe.DefaultDataFrame(cols))\n\n if df.has_column_names():\n result.set_column_names(df.get_column_names())\n\n return result\n\ndef is_numeric_fp(col):\n \"\"\"Indicates whether the specified Column has a type name\n of float or double.\n\n Args:\n col: The Column to check\n\n Returns:\n A bool which indicates whether the specified Column is\n a FloatColumn, NullableFloatColumn, DoubleColumn,\n NullableDoubleColumn\n \"\"\"\n return columnutils.is_numeric_fp(col)\n\ndef merge(*dataframes):\n \"\"\"Merges all given DataFrame instances into one DataFrame.\n\n All DataFames are merged by columns. All DataFrames must have an\n equal number of rows but may be of any type. All columns are added to\n the returned DataFrame in the order of the arguments passed to this\n method. 
Only passing one DataFrame to this method will simply\n return that instance.\n\n Columns with duplicate names are included in the returned DataFrame\n and a postfix is added to each duplicate column name.\n All columns of the returned DataFrame are backed by their origin,\n which means that changes to the original DataFrame are reflected in\n the merged DataFrame and vice versa. This does not apply, however,\n if columns need to be converted to a nullable type. For example, if\n one DataFrame argument is nullable, then all columns from non-nullable\n DataFrame arguments are converted to their corresponding\n nullable equivalent.\n\n If columns should be independent from their origin, then simply pass\n a clone (copy) of each DataFrame argument to this method.\n\n Example:\n merged = DataFrame.merge(DataFrame.copy(df1), DataFrame.copy(df2))\n\n Args:\n dataframes: The DataFrames to be merged\n\n Returns:\n A DataFrame composed of all columns of the given DataFrames\n \"\"\"\n if dataframes is None or len(dataframes) == 0:\n raise dataframe.DataFrameException(\"Arg must not be None or empty\")\n\n if len(dataframes) == 1:\n return dataframes[0]\n\n rows = dataframes[0].rows()\n cols = 0\n has_nullable = False\n has_names = False\n for i, df in enumerate(dataframes):\n if df is None:\n raise dataframe.DataFrameException(\n \"DataFrame argument must not be None\")\n\n cols += df.columns()\n if df.rows() != rows:\n raise dataframe.DataFrameException(\n (\"Size missmatch for DataFrame argument at index {}. \"\n \"Expected {} rows but found {}\")\n .format(i, rows, df.rows()))\n\n if df.is_nullable():\n has_nullable = True\n\n if df.has_column_names():\n has_names = True\n\n for _, df in enumerate(dataframes):\n df.flush()\n\n names = None\n if has_names:\n names = [None] * cols\n for i in range(cols):\n names[i] = str(i)\n\n k = 0\n for i, df in enumerate(dataframes):\n for j in range(df.columns()):\n c = df.get_column(j)\n if c.get_name():\n names[k] = c.get_name()\n k += 1\n\n for i in range(cols):\n k = 0\n already_set = False\n n = names[i]\n for j in range(cols):\n if i != j:\n if n == names[j]:\n if not already_set:\n names[i] = names[i] + \"_\" + str(k)\n k += 1\n already_set = True\n\n names[j] = names[j] + \"_\" + str(k)\n k += 1\n\n columns = [None] * cols\n k = 0\n for i, df in enumerate(dataframes):\n for j in range(df.columns()):\n if has_nullable:\n columns[k] = df.get_column(j).as_nullable()\n k += 1\n else:\n columns[k] = df.get_column(j)\n k += 1\n\n merged = None\n if has_nullable:\n merged = dataframe.NullableDataFrame(columns)\n else:\n merged = dataframe.DefaultDataFrame(columns)\n\n if has_names:\n merged.set_column_names(names)\n\n return merged\n\ndef convert(df, target_type):\n \"\"\"Converts the given DataFrame from a DefaultDataFrame to a NullableDataFrame\n or vice versa.\n\n Converting a DefaultDataFrame to a NullableDataFrame will not change\n any internal values, except that now you can add/insert null values to it.\n Converting a NullableDataFrame to a DefaultDataFrame will convert all None\n occurrences to the primitive defaults according to the Column they are located.\n\n Args:\n df: The DataFrame instance to convert. 
Must not be None\n target_type: The type to convert the given DataFrame to.\n May be 'default' or 'nullable'\n\n Returns:\n A DataFrame converted from the type of the argument passed to this method\n to the type specified\n \"\"\"\n if df is None or target_type is None:\n raise ValueError(\"Arg must not be null\")\n\n if not isinstance(target_type, str):\n raise ValueError(\"Target type argument must be specified as a string\")\n\n target_type = target_type.lower()\n if target_type not in (\"defaultdataframe\", \"default\", \"nullabledataframe\", \"nullable\"):\n raise ValueError(\"Unable to convert to '\" + str(target_type)\n + \"'. Must be either 'default' or 'nullable'\")\n\n if target_type == \"defaultdataframe\":\n target_type = \"default\"\n elif target_type == \"nullabledataframe\":\n target_type = \"nullable\"\n\n source_type = \"nullable\" if df.is_nullable() else \"default\"\n if target_type == source_type:\n return copy_of(df)\n\n rows = df.rows()\n converted = None\n # convert from Nullable to Default\n if target_type == \"default\":\n converted = dataframe.DefaultDataFrame()\n for col in df:\n tc = col.type_code()\n if tc == bytecolumn.NullableByteColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.int8)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(bytecolumn.ByteColumn(col.get_name(), vals))\n elif tc == shortcolumn.NullableShortColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.int16)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(shortcolumn.ShortColumn(col.get_name(), vals))\n elif tc == intcolumn.NullableIntColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.int32)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(intcolumn.IntColumn(col.get_name(), vals))\n elif tc == longcolumn.NullableLongColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.int64)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(longcolumn.LongColumn(col.get_name(), vals))\n elif tc == stringcolumn.NullableStringColumn.TYPE_CODE:\n vals = np.array([None] * rows, dtype=np.object)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = (stringcolumn.StringColumn.DEFAULT_VALUE\n if val is None or val == \"\"\n else val)\n\n converted.add_column(stringcolumn.StringColumn(col.get_name(), vals))\n elif tc == floatcolumn.NullableFloatColumn.TYPE_CODE:\n vals = np.array([0.0] * rows, dtype=np.float32)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0.0 if val is None else val\n\n converted.add_column(floatcolumn.FloatColumn(col.get_name(), vals))\n elif tc == doublecolumn.NullableDoubleColumn.TYPE_CODE:\n vals = np.array([0.0] * rows, dtype=np.float64)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(doublecolumn.DoubleColumn(col.get_name(), vals))\n elif tc == charcolumn.NullableCharColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.uint8)\n default_val = ord(charcolumn.CharColumn.DEFAULT_VALUE)\n for i in range(rows):\n val = col._values[i]\n vals[i] = default_val if val is None else val\n\n converted.add_column(charcolumn.CharColumn(col.get_name(), vals))\n elif tc == booleancolumn.NullableBooleanColumn.TYPE_CODE:\n vals = np.array([False] * rows, dtype=np.bool)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = False if val is None else 
val\n\n converted.add_column(booleancolumn.BooleanColumn(col.get_name(), vals))\n elif tc == binarycolumn.NullableBinaryColumn.TYPE_CODE:\n vals = np.array([None] * rows, dtype=np.object)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = bytearray.fromhex(\"00\") if val is None else val\n\n converted.add_column(binarycolumn.BinaryColumn(col.get_name(), vals))\n else: # undefined type\n raise dataframe.DataFrameException(\n (\"Unable to convert dataframe. Unrecognized \"\n \"column type {}\".format(type(col))))\n\n else: # convert from Default to Nullable\n converted = dataframe.NullableDataFrame()\n for col in df:\n tc = col.type_code()\n vals = np.array([None] * rows, dtype=np.object)\n for i in range(rows):\n vals[i] = col.get_value(i)\n\n if tc == bytecolumn.ByteColumn.TYPE_CODE:\n converted.add_column(bytecolumn.NullableByteColumn(col.get_name(), vals))\n elif tc == shortcolumn.ShortColumn.TYPE_CODE:\n converted.add_column(shortcolumn.NullableShortColumn(col.get_name(), vals))\n elif tc == intcolumn.IntColumn.TYPE_CODE:\n converted.add_column(intcolumn.NullableIntColumn(col.get_name(), vals))\n elif tc == longcolumn.LongColumn.TYPE_CODE:\n converted.add_column(longcolumn.NullableLongColumn(col.get_name(), vals))\n elif tc == stringcolumn.StringColumn.TYPE_CODE:\n converted.add_column(stringcolumn.NullableStringColumn(col.get_name(), vals))\n elif tc == floatcolumn.FloatColumn.TYPE_CODE:\n converted.add_column(floatcolumn.NullableFloatColumn(col.get_name(), vals))\n elif tc == doublecolumn.DoubleColumn.TYPE_CODE:\n converted.add_column(doublecolumn.NullableDoubleColumn(col.get_name(), vals))\n elif tc == charcolumn.CharColumn.TYPE_CODE:\n converted.add_column(charcolumn.NullableCharColumn(col.get_name(), vals))\n elif tc == booleancolumn.BooleanColumn.TYPE_CODE:\n converted.add_column(booleancolumn.NullableBooleanColumn(col.get_name(), vals))\n elif tc == binarycolumn.BinaryColumn.TYPE_CODE:\n converted.add_column(binarycolumn.NullableBinaryColumn(col.get_name(), vals))\n else: # undefined type\n raise dataframe.DataFrameException(\n (\"Unable to convert dataframe. Unrecognized \"\n \"column type {}\".format(type(col))))\n\n return converted\n\ndef column_from_typename(typename):\n \"\"\"Constructs and returns a Column from the specified typename.\n\n The returned Column instance is a default (non-nullable) Column.\n\n Args:\n typename: The type name of the Column to return, as a str\n\n Returns:\n A Column instance from the specified type name,\n or None if the argument is not a valid type name\n \"\"\"\n return columnutils.column_from_typename(typename)\n\ndef join(df1, col1, df2, col2):\n \"\"\"Combines all rows from the specified DataFrames which have matching\n values in their columns with the corresponding specified name.\n\n Both DataFrames must have a column with the corresponding specified name\n and an identical element type. All columns in both DataFrame instances must\n be labeled by the time this method is called. The specified DataFrames may be\n of any types.\n\n All Columns in the second DataFrame argument that are also existent in\n the first DataFrame argument are excluded in the result DataFrame returned\n by this method. Therefore, in the case of duplicate Columns, the returned\n DataFrame only contains the corresponding Column from the first DataFrame.\n\n Args:\n df1: The first DataFrame to join. Must not be None\n col1: The name of the Column in the first DataFrame argument\n to match values for. Must be a str\n df2: The second DataFrame to join. 
Must not be None\n col2: The name of the Column in the second DataFrame argument\n to match values for. Must be a str\n\n Returns:\n A DataFrame with joined rows from both specified DataFrames\n that have matching values in the Columns with the specified names\n \"\"\"\n if df1 is None or df2 is None:\n raise dataframe.DataFrameException(\"DataFrame argument must not be None\")\n\n if df1 is df2:\n raise dataframe.DataFrameException(\"Join operation is self-referential\")\n\n if not col1:\n raise dataframe.DataFrameException(\n \"First column name argument must not be None or empty\")\n\n if not col2:\n raise dataframe.DataFrameException(\n \"Second column name argument must not be None or empty\")\n\n if not df1.has_column_names():\n raise dataframe.DataFrameException(\"DataFrame must has column labels\")\n\n if not df2.has_column_names():\n raise dataframe.DataFrameException(\"DataFrame argument must have column labels\")\n\n if not df2.has_column(col2):\n raise dataframe.DataFrameException(\n \"Invalid column name for DataFrame argument: '{}'\".format(col2))\n\n if df1.get_column(col1).type_name() != df2.get_column(col2).type_name():\n raise dataframe.DataFrameException(\n (\"Column '{}' in DataFrame argument has \"\n \"a different type. \"\n \"Expected {} but found {}\").format(\n df2.get_column(col2).get_name(),\n df1.get_column(col1).type_name(),\n df2.get_column(col2).type_name()))\n\n # create a set holding the names of all columns from df2\n # that should be bypassed in the result because they already exist in df1\n duplicates = set()\n names = df2.get_column_names()\n for _, n in enumerate(names):\n if df1.has_column(n):\n duplicates.add(n)\n\n # add the specified column name to make sure\n # it is not included in the below computations\n duplicates.add(col2)\n df1.flush()\n df2.flush()\n # find the elements common to both DataFrames\n intersec = df1.get_columns(col1).intersection_rows(df2.get_columns(col2))\n use_nullable = df1.is_nullable() or df2.is_nullable()\n result = (dataframe.NullableDataFrame() if use_nullable\n else dataframe.DefaultDataFrame())\n\n # add all columns from df1\n for i in range(df1.columns()):\n c = column.Column.of_type(df1.get_column(i).type_code())\n result.add_column(col=c.as_nullable() if use_nullable else c,\n name=df1.get_column(i).get_name())\n\n # add all columns from df2 as long as they are not already in df1\n for i in range(df2.columns()):\n col = df2.get_column(i)\n # if the column is in the collection, then it\n # is either 'col2' or another duplicate, so it is skipped\n if not col.get_name() in duplicates:\n c = column.Column.of_type(col.type_code())\n result.add_column(col=c.as_nullable() if use_nullable else c,\n name=col.get_name())\n\n # iterate over all common elements and add all rows to\n # the result from both DataFrames that match the common\n # element in their respective key column\n for i in range(intersec.rows()):\n filter_key = str(intersec.get_column(0).get_value(i))\n filter1 = df1.filter(col1, filter_key)\n filter2 = df2.filter(col2, filter_key)\n # remove 'col2' and any column already existent in df1\n for name in duplicates:\n filter2.remove_column(name)\n\n length_col1 = df1.columns()\n length_col2 = df2.columns() - len(duplicates)\n # reuse the row list\n length_row = length_col1 + length_col2\n row = [None] * length_row\n for j in range(filter1.rows()):\n for k in range(filter2.rows()):\n for l in range(length_col1):\n row[l] = filter1.get_column(l).get_value(j)\n\n for l in range(length_col2):\n row[length_col1 + l] 
= filter2.get_column(l).get_value(k)\n\n result.add_row(row)\n\n result.flush()\n return result\n\ndef _group_operation(df, col, operation):\n \"\"\"Performs a group_by operation for the specified DataFrame and Column.\n\n Operation codes:\n * 1 = Minimum\n * 2 = Maximum\n * 3 = Average\n * 4 = Sum\n\n Args:\n df: The DataFrame to use for the group operation\n col: The Column to use for the group operation\n operation: The operation code to use\n\n Returns:\n A DataFrame representing the result of the group operation\n \"\"\"\n if df._internal_next() == -1 or col < 0 or col >= df.columns():\n raise dataframe.DataFrameException(\"Invalid column index: {}\".format(col))\n\n c = df.get_column(col)\n n_numeric = 0\n for i in range(df.columns()):\n c_i = df.get_column(i)\n if not c_i._name:\n raise dataframe.DataFrameException(\n \"All columns must be labeled for group operations\")\n\n if c_i is not c and c_i.is_numeric():\n n_numeric += 1\n\n uniques = df.unique(col)\n n_uniques = len(uniques)\n contains_null = df.contains(col, \"None\") if df.is_nullable() else False\n col_length = n_uniques + 1 if contains_null else n_uniques\n cols = [None] * (n_numeric + 1)\n col_names = [None] * (n_numeric + 1)\n cols[0] = column.Column.of_type(c.type_code(), col_length)\n col_names[0] = c._name\n n_numeric = 1\n for i in range(df.columns()):\n c_i = df.get_column(i)\n if c_i is not c and c_i.is_numeric():\n if operation in (3, 4): # average or sum op\n cols[n_numeric] = (doublecolumn.NullableDoubleColumn(values=col_length)\n if df.is_nullable()\n else doublecolumn.DoubleColumn(values=col_length))\n\n else:\n cols[n_numeric] = column.Column.of_type(c_i.type_code(), col_length)\n\n col_names[n_numeric] = c_i._name\n n_numeric += 1\n\n result = (dataframe.NullableDataFrame(cols)\n if df.is_nullable()\n else dataframe.DefaultDataFrame(cols))\n\n result.set_column_names(col_names)\n\n length = len(cols)\n index = 0\n for elem in uniques:\n row = [None] * length\n row[0] = elem\n filtered = df.filter(c._name, str(elem))\n for i in range(1, length, 1):\n value = 0.0\n if operation == 1:\n value = filtered.minimum(col_names[i])\n elif operation == 2:\n value = filtered.maximum(col_names[i])\n elif operation == 3:\n value = filtered.average(col_names[i])\n elif operation == 4:\n value = filtered.sum(col_names[i])\n else:\n raise dataframe.DataFrameException(\n \"Unknown group operation: {}\".format(operation))\n\n row[i] = _cast_to_numeric_type(cols[i], value)\n\n result.set_row(index, row)\n index += 1\n\n if contains_null:\n row = [None] * length\n row[0] = None\n filtered = df.filter(c._name, \"None\")\n for i in range(1, length, 1):\n value = 0.0\n if operation == 1:\n value = filtered.minimum(col_names[i])\n elif operation == 2:\n value = filtered.maximum(col_names[i])\n elif operation == 3:\n value = filtered.average(col_names[i])\n elif operation == 4:\n value = filtered.sum(col_names[i])\n else:\n raise dataframe.DataFrameException(\n \"Unknown group operation: {}\".format(operation))\n\n row[i] = _cast_to_numeric_type(cols[i], value)\n\n result.set_row(index, row)\n index += 1\n\n return result\n\ndef _cast_to_numeric_type(col, value):\n \"\"\"Casts the specified double to the corresponding Number\n type of the specified Column.\n\n Args:\n col: The Column which specifies the numeric type\n value: The float value to cast\n\n Returns:\n A number which has the concrete type used\n by the specified Column\n \"\"\"\n c = col.type_code()\n if col.is_nullable():\n if c == 
doublecolumn.NullableDoubleColumn.TYPE_CODE:\n return float(value)\n elif c == floatcolumn.NullableFloatColumn.TYPE_CODE:\n return float(value)\n elif c == bytecolumn.NullableByteColumn.TYPE_CODE:\n return int(value) if not np.isnan(value) else None\n elif c == shortcolumn.NullableShortColumn.TYPE_CODE:\n return int(value) if not np.isnan(value) else None\n elif c == intcolumn.NullableIntColumn.TYPE_CODE:\n return int(value) if not np.isnan(value) else None\n elif c == longcolumn.NullableLongColumn.TYPE_CODE:\n return int(value) if not np.isnan(value) else None\n else:\n raise dataframe.DataFrameException(\"Unrecognized column type\")\n else:\n if c == doublecolumn.DoubleColumn.TYPE_CODE:\n return float(value)\n elif c == floatcolumn.FloatColumn.TYPE_CODE:\n return float(value)\n elif c == bytecolumn.ByteColumn.TYPE_CODE:\n return int(value)\n elif c == shortcolumn.ShortColumn.TYPE_CODE:\n return int(value)\n elif c == intcolumn.IntColumn.TYPE_CODE:\n return int(value)\n elif c == longcolumn.LongColumn.TYPE_CODE:\n return int(value)\n else:\n raise dataframe.DataFrameException(\"Unrecognized column type\")\n\ndef getitem_impl(arg, position):\n \"\"\"Implementation of the __getitem__() function\n\n Args:\n arg: The DataFrame instance on which the function was called upon\n position: The position argument passed to the function\n\n Returns:\n The value at the specified position\n \"\"\"\n if isinstance(position, tuple):\n if len(position) > 2:\n raise dataframe.DataFrameException(\n (\"Invalid position argument. Too many \"\n \"positions specified: {}\").format(len(position)))\n\n cols = position[0]\n rows = position[1]\n if isinstance(cols, (int, str)):\n # check for negative column indices\n if isinstance(cols, int) and cols < 0:\n if abs(cols) > arg.columns():\n raise dataframe.DataFrameException(\n \"Invalid column index: {}\".format(cols))\n\n cols = cols % arg.columns()\n\n if rows is None:\n # implements df[x, :] and df[\"x\", :]\n return arg.get_columns(cols=cols)\n elif isinstance(rows, int):\n # implements df[x, y] and df[\"x\", y]\n if rows < 0:\n if abs(rows) > arg.rows():\n raise dataframe.DataFrameException(\n \"Invalid row index: {}\".format(rows))\n\n rows = rows % arg.rows()\n\n return arg.get_column(cols).get_value(rows)\n elif isinstance(rows, str):\n # implements df[x, \"y_regex\"] and df[\"x\", \"y_regex\"]\n return arg.filter(cols, rows)\n elif isinstance(rows, tuple):\n # implements df[x, (y0, y1, ..., yn)]\n # and df[\"x\", (y0, y1, ..., yn)]\n col_selected = arg.get_column(cols)\n col = column.Column.like(col_selected, length=len(rows))\n df = (dataframe.NullableDataFrame(col)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(col))\n\n for i, row_index in enumerate(rows):\n col[i] = col_selected[row_index]\n\n return df\n\n elif isinstance(rows, slice):\n # implements df[x, y0:y1:y2]\n # and df[\"x\", y0:y1:y2]\n start = rows.start\n stop = rows.stop\n step = rows.step\n col_selected = arg.get_column(cols)\n # numpy returns an array view when slicing\n # so we have to copy the array explicitly\n # to get an independent instance\n col_values = col_selected._values[start:stop:step].copy()\n col = column.Column.like(col_selected, length=0)\n col._values = col_values\n return (dataframe.NullableDataFrame(col)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(col))\n\n elif isinstance(cols, (tuple, slice)):\n # prefetch the selected columns as a DataFrame\n if isinstance(cols, tuple):\n cols_selected = arg.get_columns(cols=cols)\n else: # is slice\n 
cols_selected = arg._internal_columns()[cols]\n cols_selected = (dataframe.NullableDataFrame(cols_selected)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(cols_selected))\n\n if rows is None:\n # implements df[(x0, x1, ..., xn), ]\n # and df[x0:x1:x2, ]\n return cols_selected\n elif isinstance(rows, int):\n # implements df[(x0, x1, ..., xn), y]\n # and df[x0:x1:x2, y]\n if rows < 0:\n if abs(rows) > arg.rows():\n raise dataframe.DataFrameException(\n \"Invalid row index: {}\".format(rows))\n\n rows = rows % arg.rows()\n\n return cols_selected.get_row(rows)\n\n elif isinstance(rows, tuple):\n # implements df[(x0, x1, ..., xn), (y0, y1, ..., ym)]\n # and df[x0:x1:x2, (y0, y1, ..., ym)]\n cols = [column.Column.like(col, length=len(rows))\n for col in cols_selected._internal_columns()]\n\n df = (dataframe.NullableDataFrame(cols)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(cols))\n\n for i, row_index in enumerate(rows):\n df.set_row(i, cols_selected.get_row(rows[i]))\n\n return df\n\n elif isinstance(rows, slice):\n # implements df[(x0, x1, ..., xn), y0:y1:y2]\n # and df[x0:x1:x2, y0:y1:y2]\n start = rows.start\n stop = rows.stop\n step = rows.step\n cols = [None] * cols_selected.columns()\n for i, col in enumerate(cols_selected._internal_columns()):\n col_values = col._values[start:stop:step].copy()\n col_sliced = column.Column.like(col, length=col_values.shape[0])\n col_sliced._values = col_values\n cols[i] = col_sliced\n\n return (dataframe.NullableDataFrame(cols)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(cols))\n\n elif isinstance(rows, str):\n raise dataframe.DataFrameException(\n (\"Invalid column position type. A filter operation \"\n \"must only specify a single column \"\n \"but found {}\").format(type(cols)))\n\n else:\n # invalid type for column position arg\n raise dataframe.DataFrameException(\n (\"Invalid column position type. \"\n \"Expected int or str but found {}\").format(type(cols)))\n\n elif isinstance(position, int):\n # implements df[x]\n if position < 0:\n if abs(position) > arg.columns():\n raise dataframe.DataFrameException(\n \"Invalid column index: {}\".format(position))\n\n position = position % arg.columns()\n\n return arg.get_column(position)\n elif isinstance(position, str):\n # implements df[\"x\"]\n return arg.get_column(position)\n else:\n # invalid type for entire position arg\n raise dataframe.DataFrameException(\n (\"Invalid position type. \"\n \"Expected int or str but \"\n \"found {}\").format(type(position)))\n\n # make pylint happy about missing return statement\n raise dataframe.DataFrameException(\"Implementation error\")\n\ndef setitem_impl(arg, position, value):\n \"\"\"Implementation of the __setitem__() function.\n\n Args:\n arg: The DataFrame instance on which the function was called upon\n position: The position argument passed to the function\n value: The value argument passed to the function\n \"\"\"\n if isinstance(position, tuple):\n if len(position) > 2:\n raise dataframe.DataFrameException(\n (\"Invalid position argument. 
Too many \"\n \"positions specified: {}\").format(len(position)))\n\n cols = position[0]\n rows = position[1]\n if isinstance(cols, (int, str)):\n # check for negative column indices\n if isinstance(cols, int) and cols < 0:\n if abs(cols) > arg.columns():\n raise dataframe.DataFrameException(\n \"Invalid column index: {}\".format(cols))\n\n cols = cols % arg.columns()\n\n if rows is None:\n # implements df[x, :] = Column\n # and df[\"x\", :] = Column\n arg.set_column(cols, value)\n elif isinstance(rows, int):\n # implements df[x, y] = v\n # and df[\"x\", y] = v\n if rows < 0:\n if abs(rows) > arg.rows():\n raise dataframe.DataFrameException(\n \"Invalid row index: {}\".format(rows))\n\n rows = rows % arg.rows()\n\n arg.get_column(cols).set_value(rows, value)\n elif isinstance(rows, str):\n # implements df[x, \"y_regex\"] = v | func | lamda\n # and df[\"x\", \"y_regex\"] = v | func | lamda\n arg.replace(cols, rows, replacement=value)\n elif isinstance(rows, tuple):\n # implements df[x, (y0, y1, ..., yn)] = (v0, v1, ..., vn)\n # and df[\"x\", (y0, y1, ..., yn)] = (v0, v1, ..., vn)\n col = arg.get_column(cols)\n if isinstance(value, (list, tuple)):\n if len(rows) != len(value):\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified \"\n \"list/tuple has a size of {} but the row position \"\n \"argument has a size of {}\")\n .format(len(value), len(rows)))\n\n for i, index in enumerate(rows):\n col.set_value(index, value[i])\n\n else:\n # implements df[x, (y0, y1, ..., yn)] = v\n # and df[\"x\", (y0, y1, ..., yn)] = v\n for index in rows:\n col.set_value(index, value)\n\n elif isinstance(rows, slice):\n rows = rows.indices(arg.rows())\n start = rows[0]\n stop = rows[1]\n step = rows[2]\n col = arg.get_column(cols)\n if isinstance(value, (list, tuple)):\n # implements df[x, y0:y1:y2] = (v0, v1, ..., vn)\n # and df[\"x\", y0:y1:y2] = (v0, v1, ..., vn)\n if ((stop - start) // step) != len(value):\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified \"\n \"list/tuple has a size of {} but the row position \"\n \"argument has a size of {}\")\n .format(len(value), (stop - start) // step))\n\n i = 0\n for index in range(start, stop, step):\n col.set_value(index, value[i])\n i += 1\n\n else:\n # implements df[x, y0:y1:y2] = v\n # and df[\"x\", y0:y1:y2] = v\n for index in range(start, stop, step):\n col.set_value(index, value)\n\n else:\n # invalid type for row position arg\n raise dataframe.DataFrameException(\n (\"Invalid row position type. 
\"\n \"Expected int or str but found {}\").format(type(rows)))\n\n elif isinstance(cols, (tuple, slice)):\n # prefetch the selected columns as a DataFrame\n if isinstance(cols, tuple):\n cols_selected = arg.get_columns(cols=cols)\n else: # is slice\n cols_selected = (dataframe.NullableDataFrame(arg._internal_columns()[cols])\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(arg._internal_columns()[cols]))\n\n if isinstance(rows, int):\n if rows < 0:\n if abs(rows) > arg.rows():\n raise dataframe.DataFrameException(\n \"Invalid row index: {}\".format(rows))\n\n rows = rows % arg.rows()\n\n if isinstance(value, (tuple, list)):\n # implements df[(x0, x1, ..., xn), y] = [v0, v1, ..., vn]\n # and df[x0:x1:x2, y] = [v0, v1, ..., vn]\n cols_selected.set_row(rows, value)\n else:\n # implements df[(x0, x1, ..., xn), y] = v\n # and df[x0:x1:x2, y] = v\n cols_selected.set_row(rows, [value] * cols_selected.columns())\n\n elif isinstance(rows, tuple):\n if isinstance(value, (list, tuple)):\n # implements df[(x0, x1, ..., xn), (y0, y1, ..., ym)] = [[ ], [ ], ..., [ ]]\n # and df[x0:x1:x2, (y0, y1, ..., ym)] = [[ ], [ ], ..., [ ]]\n if len(value) == 0:\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified list/tuple \"\n \"of row values is empty\"))\n\n if isinstance(value[0], (list, tuple)):\n if len(rows) != len(value):\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified list/tuple \"\n \"has a size of {} but the row position argument \"\n \"has a size of {}\").format(len(value), len(rows)))\n\n for i, index in enumerate(rows):\n cols_selected.set_row(index, value[i])\n else:\n for index in rows:\n cols_selected.set_row(index, value)\n\n elif isinstance(value, dataframe.DataFrame):\n # implements df[(x0, x1, ..., xn), (y0, y1, ..., ym)] = vDataFrame\n # and df[x0:x1:x2, (y0, y1, ..., ym)] = vDataFrame\n if len(rows) != value.rows():\n rmsg1 = \"rows\" if value.rows() != 1 else \"row\"\n rmsg2 = \"rows\" if len(rows) != 1 else \"row\"\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified \"\n \"DataFrame has {} {} but the row position \"\n \"argument specified {} {}\")\n .format(value.rows(), rmsg1, len(rows), rmsg2))\n\n for i, index in enumerate(rows):\n cols_selected.set_row(index, value.get_row(i))\n\n else:\n # implements df[(x0, x1, ..., xn), (y0, y1, ..., ym)] = v\n # and df[x0:x1:x2, (y0, y1, ..., ym)] = v\n value = [value] * cols_selected.columns()\n for index in rows:\n cols_selected.set_row(index, value)\n\n elif isinstance(rows, slice):\n rows = rows.indices(cols_selected.rows())\n start = rows[0]\n stop = rows[1]\n step = rows[2]\n if isinstance(value, (list, tuple)):\n # implements df[(x0, x1, ..., xn), y0:y1:y2] = [ .. ]\n # and df[x0:x1:x2, y0:y1:y2] = [ .. ]\n for index in range(start, stop, step):\n cols_selected.set_row(index, value)\n\n elif isinstance(value, dataframe.DataFrame):\n # implements df[(x0, x1, ..., xn), y0:y1:y2] = vDataFrame\n # and df[x0:x1:x2, y0:y1:y2] = vDataFrame\n i = 0\n for index in range(start, stop, step):\n cols_selected.set_row(index, value.get_row(i))\n i += 1\n\n else:\n # implements df[(x0, x1, ..., xn), y0:y1:y2] = v\n # and df[x0:x1:x2, y0:y1:y2] = v\n value = [value] * cols_selected.columns()\n for index in range(start, stop, step):\n cols_selected.set_row(index, value)\n\n elif isinstance(rows, str):\n raise dataframe.DataFrameException(\n (\"Invalid column position type. 
A replacement operation \"\n \"must only specify a single column \"\n \"but found {}\").format(type(cols)))\n\n else:\n # invalid type for row position arg\n raise dataframe.DataFrameException(\n (\"Invalid row position type. \"\n \"Expected int or str but found {}\").format(type(rows)))\n\n else:\n # invalid type for column position arg\n raise dataframe.DataFrameException(\n (\"Invalid column position type. \"\n \"Expected int or str but found {}\").format(type(cols)))\n\n elif isinstance(position, int):\n # check for negative column indices\n if position < 0:\n if abs(position) > arg.columns():\n raise dataframe.DataFrameException(\n \"Invalid column index: {}\".format(position))\n\n position = position % arg.columns()\n\n # implements df[x] = Column\n if position == arg.columns():\n arg.add_column(value)\n else:\n arg.set_column(position, value)\n elif isinstance(position, str):\n # and df[\"x\"] = Column\n arg.set_column(position, value)\n else:\n # invalid type for entire position arg\n raise dataframe.DataFrameException(\n (\"Invalid position type. \"\n \"Expected int or str but \"\n \"found {}\").format(type(position)))\n" ]
[ [ "numpy.isnan", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amitsou/cloud_services
[ "22a2381227ecab8d1626e3dfa961821954188327" ]
[ "Functions/sensing_utils.py" ]
[ "import os\nimport cv2\nimport sys\nimport json\nimport time\nimport codecs\nimport argparse\nimport logging\nimport numpy as np\nimport warnings\n\ntry:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\",category=FutureWarning)\n from keras.models import load_model\n from keras.preprocessing import image\n from keras import layers\n from keras import models\n from keras import regularizers\n from keras import layers\n from keras.preprocessing.image import ImageDataGenerator\nexcept Exception as ex:\n sys.exit('Error import Keras library')\n\ntry:\n import paho.mqtt.client as mqtt\nexcept Exception as ex:\n sys.exit('Paho library is not present')\n\n\n\ndef upload_image(out_topic):\n \"\"\"Upload an image into the Synaisthisi Platform\n \"\"\"\n try:\n while True:\n\n img_dir = get_img_dir()\n data_out = parse_img(img_dir)\n client.publish(out_topic, data_out)\n\n except KeyboardInterrupt:\n client.loop_stop()\n\n\ndef get_img_dir():\n \"\"\"Get the image path\n\n Returns:\n [str]: [The absolute path to the image file]\n \"\"\"\n img_dir = '' #enter your image path here\n img_name = ''#enter image name here\n img_dir = os.path.join(img_dir,img_name)\n return img_dir\n\n\ndef parse_img(img_dir):\n \"\"\"Open, preprocess and convert an image into json format\n\n Args:\n img_dir (str): The image absolute path\n\n Returns:\n [str]: The json object to be published to the platform\n \"\"\"\n img = cv2.imread(img_dir)\n height, width, channels = img.shape\n img = image.load_img(img_dir, target_size=(height,width))\n img = np.array(image)\n img = img.ravel()\n\n compressed_obj = [img.tolist(), height, width, channels]\n json_obj = json.dumps(compressed_obj)\n return json_obj\n\n\ndef s_type_service_args():\n \"\"\"\n Provide the S-type service args\n \"\"\"\n parser = argparse.ArgumentParser(description='Collect arguments')\n parser.add_argument(\"--username\", metavar='username(text)', help=\"Please provide username\")\n parser.add_argument(\"--p\", metavar='password(text)', help=\"Please provide password\")\n parser.add_argument(\"--output_topics\", nargs='*', metavar='Output_topic',help='MQTT Broker Output Topics')\n\n #Developer should take care to parse as many input/output topics created in web app\n args = parser.parse_args()\n username = args.username\n user_pass = args.p\n out_topics = args.output_topics\n\n print(\"Output Topics: {0}\".format(out_topics))\n out_topic = out_topics[0]\n return username, user_pass, out_topic" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
svaiter/sparse-ho
[ "8c04ca533e44ecd128dc26b6830a556babf8416f", "8c04ca533e44ecd128dc26b6830a556babf8416f" ]
[ "examples/plot_held_out_lasso.py", "sparse_ho/datasets/real.py" ]
[ "\"\"\"\n============================\nLasso with held-out test set\n============================\n\nThis example shows how to perform hyperparameter optimization\nfor a Lasso using a held-out validation set.\n\n\"\"\"\n\n# Authors: Quentin Bertrand <[email protected]>\n# Quentin Klopfenstein <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import linear_model\n\nfrom sparse_ho.models import Lasso\nfrom sparse_ho.criterion import HeldOutMSE\nfrom sparse_ho.forward import Forward\nfrom sparse_ho.implicit_forward import ImplicitForward\nfrom sparse_ho.utils import Monitor\nfrom sparse_ho.ho import grad_search\nfrom sparse_ho.grid_search import grid_search\nfrom sklearn.datasets import make_regression\n\nfrom sklearn.model_selection import train_test_split\n\n\nfrom sparse_ho.datasets import get_data\n\nprint(__doc__)\n\ndataset = 'rcv1'\n# dataset = 'simu'\n\nif dataset == 'rcv1':\n X_train, X_val, X_test, y_train, y_val, y_test = get_data('rcv1_train')\nelse:\n X, y = make_regression(n_samples=1000, n_features=1000, noise=40)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.5)\n\nn_samples, n_features = X_train.shape\n\nprint(\"Starting path computation...\")\nn_samples = len(y_train)\nalpha_max = np.max(np.abs(X_train.T.dot(y_train))) / X_train.shape[0]\nlog_alpha0 = np.log(alpha_max / 10)\n\nn_alphas = 10\np_alphas = np.geomspace(1, 0.0001, n_alphas)\nalphas = alpha_max * p_alphas\nlog_alphas = np.log(alphas)\n\ntol = 1e-7\nmax_iter = 1e5\n\n##############################################################################\n# Grid-search with scikit-learn\n# -----------------------------\n\nestimator = linear_model.Lasso(\n fit_intercept=False, max_iter=1000, warm_start=True)\n\nprint('scikit-learn started')\n\nt0 = time.time()\nmodel = Lasso(X_train, y_train, estimator=estimator)\ncriterion = HeldOutMSE(X_val, y_val, model, X_test=X_test, y_test=y_test)\nalgo = Forward(criterion)\nmonitor_grid_sk = Monitor()\ngrid_search(\n algo, criterion, None, None, monitor_grid_sk, log_alphas=log_alphas,\n tol=tol)\nobjs = np.array(monitor_grid_sk.objs)\nt_sk = time.time() - t0\n\nprint('scikit-learn finished')\n\n\n##############################################################################\n# Grad-search with sparse-ho\n# --------------------------\n\nprint('sparse-ho started')\n\nt0 = time.time()\nmodel = Lasso(X_train, y_train, estimator=estimator)\ncriterion = HeldOutMSE(X_val, y_val, model, X_test=X_test, y_test=y_test)\nalgo = ImplicitForward(criterion)\nmonitor_grad = Monitor()\ngrad_search(\n algo, criterion, np.log(alpha_max / 10), monitor_grad, n_outer=10, tol=tol)\n\nt_grad_search = time.time() - t0\n\nprint('sparse-ho finished')\n\n##############################################################################\n# Plot results\n# ------------\n\np_alphas_grad = np.exp(np.array(monitor_grad.log_alphas)) / alpha_max\n\nobjs_grad = np.array(monitor_grad.objs)\n\nprint('sparse-ho finished')\nprint(\"Time to compute CV for scikit-learn: %.2f\" % t_sk)\nprint(\"Time to compute CV for sparse-ho: %.2f\" % t_grad_search)\n\nprint('Minimum objective grid-search %.5f' % objs.min())\nprint('Minimum objective grad-search %.5f' % objs_grad.min())\n\n\ncurrent_palette = sns.color_palette(\"colorblind\")\n\nfig = plt.figure(figsize=(5, 3))\nplt.semilogx(\n p_alphas, objs, 
color=current_palette[0])\nplt.semilogx(\n p_alphas, objs, 'bo', label='0-order method (grid-search)',\n color=current_palette[1])\nplt.semilogx(\n p_alphas_grad, objs_grad, 'bX', label='1-st order method',\n color=current_palette[2])\nplt.xlabel(r\"$\\lambda / \\lambda_{\\max}$\")\nplt.ylabel(\n r\"$\\|y^{\\rm{val}} - X^{\\rm{val}} \\hat \\beta^{(\\lambda)} \\|^2$\")\nplt.tick_params(width=5)\nplt.legend()\nplt.tight_layout()\nplt.show(block=False)\n", "\"\"\"File to download and load real data from libsvm, using libsvmdata.\n\"\"\"\n\nfrom libsvmdata import fetch_libsvm\nfrom sklearn.model_selection import train_test_split\n\n\ndef get_data(dataset_name, csr=False):\n X, y = fetch_libsvm(dataset_name)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.33, random_state=42)\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.5, random_state=42)\n # be careful train_test_split returns crs matrices\n\n if csr:\n X_train = X_train.tocsr()\n X_val = X_val.tocsr()\n X_test = X_test.tocsr()\n else:\n X_train = X_train.tocsc()\n X_val = X_val.tocsc()\n X_test = X_test.tocsc()\n\n print(\"Finished loading data: %s ...\" % dataset_name)\n\n return X_train, X_val, X_test, y_train, y_val, y_test\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.log", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.semilogx", "sklearn.model_selection.train_test_split", "sklearn.linear_model.Lasso", "numpy.geomspace", "sklearn.datasets.make_regression", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kindsenior/nngen
[ "cba265b1a140f2aef7208926703782b6dac9e8be", "cba265b1a140f2aef7208926703782b6dac9e8be" ]
[ "tests/matrix_conv2d/matrix_conv2d.py", "nngen/verify/leaky_relu.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport functools\nimport math\nimport numpy as np\n\n# the next line can be removed after installation\nsys.path.insert(0, os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__)))))\n\nimport nngen as ng\n\nfrom veriloggen import *\nimport veriloggen.thread as vthread\nimport veriloggen.types.axi as axi\n\n\ndef run(act_shape=(1, 7, 7, 7), weight_shape=(3, 3, 3, 7),\n bias_shape=None, scale_shape=None,\n act_dtype=ng.int32, weight_dtype=ng.int32,\n bias_dtype=ng.int32, scale_dtype=ng.int32,\n out_dtype=ng.int32,\n stride=(1, 1, 1, 1),\n rshift_mul=None, rshift_sum=None, rshift_out=0,\n act_func=None,\n par_ich=1, par_och=1, par_col=1, par_row=1,\n concur_och=None, stationary='filter',\n input_ram_size=None, filter_ram_size=None,\n bias_ram_size=None, scale_ram_size=None,\n out_ram_size=None,\n axi_datawidth=32, silent=False,\n filename=None, simtype='iverilog', outputfile=None):\n\n # create target hardware\n act = ng.placeholder(act_dtype, shape=act_shape, name='act')\n weight = ng.variable(weight_dtype, shape=weight_shape, name='weight')\n\n if bias_shape is not None:\n bias = ng.variable(bias_dtype, bias_shape, name='bias')\n else:\n bias = None\n\n if scale_shape is not None:\n scale = ng.variable(scale_dtype, scale_shape, name='scale')\n else:\n scale = None\n\n out = ng.conv2d(act, weight, stride,\n bias, scale,\n rshift_mul, rshift_sum, rshift_out,\n act_func, 'SAME',\n out_dtype, ng.int32, ng.int32,\n 'conv2d',\n par_ich, par_och, par_col, par_row,\n concur_och,\n stationary,\n input_ram_size, filter_ram_size,\n bias_ram_size, scale_ram_size,\n None, None, None,\n out_ram_size)\n\n targ = ng.to_veriloggen([out], 'matrix_conv2d', silent=silent,\n config={'maxi_datawidth': axi_datawidth})\n\n # verification data\n if act_dtype.width > 4:\n vact = np.arange(act.length, dtype=np.int64).reshape(act.shape) % [11]\n else:\n vact = np.arange(act.length, dtype=np.int64).reshape(act.shape) % [5]\n\n vweight = np.arange(weight.length,\n dtype=np.int64).reshape(weight.shape) % [7] - [3]\n\n if bias is not None:\n vbias = np.arange(bias.length,\n dtype=np.int64).reshape(bias.shape) % [4]\n else:\n vbias = None\n\n if scale is not None:\n vscale = np.arange(scale.length,\n dtype=np.int64).reshape(scale.shape) % [6]\n else:\n vscale = None\n\n eval_outs = ng.eval([out], act=vact, weight=vweight, bias=vbias, scale=vscale)\n vout = eval_outs[0]\n\n # to memory image\n size_max = int(math.ceil(max(act.memory_size, weight.memory_size,\n bias.memory_size if bias is not None else 0,\n scale.memory_size if scale is not None else 0,\n out.memory_size) / 4096)) * 4096\n check_addr = max(act.addr, weight.addr,\n bias.addr if bias is not None else -1,\n scale.addr if scale is not None else -1,\n out.addr) + size_max\n size_check = size_max\n tmp_addr = check_addr + size_check\n\n memimg_datawidth = 32\n mem = np.zeros([1024 * 1024 * 8 // (memimg_datawidth // 8)], dtype=np.int64)\n mem = mem + [100]\n\n axi.set_memory(mem, vact, memimg_datawidth,\n act_dtype.width, act.addr,\n max(int(math.ceil(axi_datawidth / act_dtype.width)), par_ich))\n\n axi.set_memory(mem, vweight, memimg_datawidth,\n weight_dtype.width, weight.addr,\n max(int(math.ceil(axi_datawidth / weight_dtype.width)), par_ich))\n\n if bias is not None:\n axi.set_memory(mem, vbias, memimg_datawidth,\n bias_dtype.width, bias.addr,\n max(int(math.ceil(axi_datawidth / bias_dtype.width)), par_och))\n\n if scale is not 
None:\n axi.set_memory(mem, vscale, memimg_datawidth,\n scale_dtype.width, scale.addr,\n max(int(math.ceil(axi_datawidth / scale_dtype.width)), par_och))\n\n axi.set_memory(mem, vout, memimg_datawidth,\n out_dtype.width, check_addr,\n max(int(math.ceil(axi_datawidth / out_dtype.width)), par_och))\n\n # test controller\n m = Module('test')\n params = m.copy_params(targ)\n ports = m.copy_sim_ports(targ)\n clk = ports['CLK']\n resetn = ports['RESETN']\n rst = m.Wire('RST')\n rst.assign(Not(resetn))\n\n # AXI memory model\n if outputfile is None:\n outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'\n\n memimg_name = 'memimg_' + outputfile\n\n memory = axi.AxiMemoryModel(m, 'memory', clk, rst,\n datawidth=axi_datawidth,\n memimg=mem, memimg_name=memimg_name,\n memimg_datawidth=memimg_datawidth)\n memory.connect(ports, 'maxi')\n\n # AXI-Slave controller\n _saxi = vthread.AXIMLite(m, '_saxi', clk, rst, noio=True)\n _saxi.connect(ports, 'saxi')\n\n # timer\n time_counter = m.Reg('time_counter', 32, initval=0)\n seq = Seq(m, 'seq', clk, rst)\n seq(\n time_counter.inc()\n )\n\n def ctrl():\n for i in range(100):\n pass\n\n ng.sim.set_global_addrs(_saxi, tmp_addr)\n\n start_time = time_counter.value\n ng.sim.start(_saxi)\n\n print('# start')\n\n ng.sim.wait(_saxi)\n end_time = time_counter.value\n\n print('# end')\n print('# execution cycles: %d' % (end_time - start_time))\n\n # verify\n ok = True\n for bat in range(out.shape[0]):\n for y in range(out.shape[1]):\n for x in range(out.shape[2]):\n for ch in range(out.shape[3]):\n orig = memory.read_word(\n bat * out.aligned_shape[1] * out.aligned_shape[2] * out.aligned_shape[3] +\n y * out.aligned_shape[2] * out.aligned_shape[3] +\n x * out.aligned_shape[3] + ch,\n out.addr, out_dtype.width)\n check = memory.read_word(\n bat * out.aligned_shape[1] * out.aligned_shape[2] * out.aligned_shape[3] +\n y * out.aligned_shape[2] * out.aligned_shape[3] +\n x * out.aligned_shape[3] + ch,\n check_addr, out_dtype.width)\n\n if vthread.verilog.NotEql(orig, check):\n print('NG (', bat, y, x, ch,\n ') orig: ', orig, ' check: ', check)\n ok = False\n # else:\n # print('OK (', bat, y, x, ch,\n # ') orig: ', orig, ' check: ', check)\n\n if ok:\n print('# verify: PASSED')\n else:\n print('# verify: FAILED')\n\n vthread.finish()\n\n th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)\n fsm = th.start()\n\n uut = m.Instance(targ, 'uut',\n params=m.connect_params(targ),\n ports=m.connect_ports(targ))\n\n # simulation.setup_waveform(m, uut)\n simulation.setup_clock(m, clk, hperiod=5)\n init = simulation.setup_reset(m, resetn, m.make_reset(), period=100, polarity='low')\n\n init.add(\n Delay(10000000),\n Systask('finish'),\n )\n\n # output source code\n if filename is not None:\n m.to_verilog(filename)\n\n # run simulation\n sim = simulation.Simulator(m, sim=simtype)\n rslt = sim.run(outputfile=outputfile)\n lines = rslt.splitlines()\n if simtype == 'verilator' and lines[-1].startswith('-'):\n rslt = '\\n'.join(lines[:-1])\n return rslt\n\n\nif __name__ == '__main__':\n rslt = run(silent=False, filename='tmp.v')\n print(rslt)\n", "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport functools\n\n\ndef leaky_relu(features, slope, rshift, dtype=None, name=None, par=1,\n features_dtype=None):\n\n if rshift is None:\n rshift = dtype.width if dtype is not None else 31\n\n features_point = 0 if features_dtype is None else features_dtype.point\n out_point = 0 if dtype is None 
else dtype.point\n out_shift = out_point - features_point\n\n negs = (features * slope) >> rshift\n comp = features >= 0\n\n out_op = ((lambda x: x << out_shift) if out_shift >= 0 else\n (lambda x: x >> -out_shift))\n\n ret = out_op(np.where(comp, features, negs))\n\n return ret\n\n\ndef get_leaky_relu_op(slope, rshift=None, dtype=None):\n return functools.partial(leaky_relu,\n slope=slope, rshift=rshift, dtype=dtype)\n" ]
[ [ "numpy.arange", "numpy.zeros" ], [ "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
grantseiter/Tax-Benefits-Of-Parenthood
[ "5350e832e8b877b46c2a3cab070fc8262b914a52", "5350e832e8b877b46c2a3cab070fc8262b914a52" ]
[ "Tax-Calculator-3.0.0/taxcalc/tests/test_data.py", "Tax-Calculator-3.0.0/taxcalc/tests/test_benefits.py" ]
[ "# CODING-STYLE CHECKS:\r\n# pycodestyle test_data.py\r\n\r\nimport os\r\nimport tempfile\r\nimport pytest\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom taxcalc import Data, GrowFactors\r\n\r\n\r\n# Test specification and use of simple Data-derived class.\r\n# This derived class is called Recs and it contains aged data.\r\n#\r\n# The following pytest fixture specifies the VARINFO file for the\r\n# Recs class, which is defined in the test_recs_class function.\r\n\r\n\r\nVARINFO_JSON = \"\"\"\r\n{\r\n \"read\": {\r\n \"RECID\": {\r\n \"required\": true,\r\n \"type\": \"int\",\r\n \"desc\": \"Unique numeric identifier for record\"\r\n },\r\n \"MARS\": {\r\n \"required\": true,\r\n \"type\": \"int\",\r\n \"desc\": \"Filing (marital) status [1..5]\"\r\n },\r\n \"e00300\": {\r\n \"type\": \"float\",\r\n \"desc\": \"Taxable interest income\"\r\n },\r\n \"s006\": {\r\n \"type\": \"float\",\r\n \"desc\": \"Record sampling weight\"\r\n }\r\n },\r\n \"calc\": {\r\n \"expanded_income\": {\r\n \"type\": \"float\"\r\n }\r\n }\r\n}\r\n\"\"\"\r\n\r\n\r\[email protected](scope='module', name='recs_varinfo_file')\r\ndef fixture_recs_varinfo_json_file():\r\n \"\"\"\r\n Define JSON VARINFO file for Data-derived Recs class.\r\n \"\"\"\r\n with tempfile.NamedTemporaryFile(mode='a', delete=False) as pfile:\r\n pfile.write(VARINFO_JSON + '\\n')\r\n pfile.close()\r\n yield pfile\r\n os.remove(pfile.name)\r\n\r\n\r\ndef test_recs_class(recs_varinfo_file, cps_subsample):\r\n \"\"\"\r\n Specify Data-derived Recs class and test it.\r\n \"\"\"\r\n\r\n class Recs(Data):\r\n \"\"\"\r\n The Recs class is derived from the abstract base Data class.\r\n \"\"\"\r\n VARINFO_FILE_NAME = recs_varinfo_file.name\r\n VARINFO_FILE_PATH = ''\r\n\r\n def __init__(self, data, start_year, gfactors, weights):\r\n super().__init__(data, start_year, gfactors, weights)\r\n\r\n def _extrapolate(self, year):\r\n self.e00300 *= self.gfactors.factor_value('AINTS', year)\r\n\r\n # test Recs class for incorrect instantiation\r\n with pytest.raises(ValueError):\r\n Recs(data=list(), start_year=2000,\r\n gfactors=None, weights=None)\r\n with pytest.raises(ValueError):\r\n Recs(data=cps_subsample, start_year=list(),\r\n gfactors=None, weights=None)\r\n with pytest.raises(ValueError):\r\n Recs(data=cps_subsample, start_year=2000,\r\n gfactors=None, weights='')\r\n with pytest.raises(ValueError):\r\n Recs(data=cps_subsample, start_year=2000,\r\n gfactors=GrowFactors(), weights=None)\r\n with pytest.raises(ValueError):\r\n Recs(data=cps_subsample, start_year=2000,\r\n gfactors='', weights='')\r\n # test Recs class for correct instantiation with no aging of data\r\n syr = 2014\r\n rec = Recs(data=cps_subsample, start_year=syr,\r\n gfactors=None, weights=None)\r\n assert isinstance(rec, Recs)\r\n assert np.all(rec.MARS != 0)\r\n assert rec.data_year == syr\r\n assert rec.current_year == syr\r\n sum_e00300_in_syr = rec.e00300.sum()\r\n rec.increment_year()\r\n assert rec.data_year == syr\r\n assert rec.current_year == syr + 1\r\n sum_e00300_in_syr_plus_one = rec.e00300.sum()\r\n assert np.allclose([sum_e00300_in_syr], [sum_e00300_in_syr_plus_one])\r\n del rec\r\n # test Recs class for correct instantiation with aging of data\r\n wghts_path = os.path.join(GrowFactors.FILE_PATH, 'cps_weights.csv.gz')\r\n wghts_df = pd.read_csv(wghts_path)\r\n rec = Recs(data=cps_subsample, start_year=syr,\r\n gfactors=GrowFactors(), weights=wghts_df)\r\n assert isinstance(rec, Recs)\r\n assert np.all(rec.MARS != 0)\r\n assert rec.data_year == syr\r\n assert 
rec.current_year == syr\r\n sum_s006_in_syr = rec.s006.sum()\r\n sum_e00300_in_syr = rec.e00300.sum()\r\n rec.increment_year()\r\n assert rec.data_year == syr\r\n assert rec.current_year == syr + 1\r\n sum_s006_in_syr_plus_one = rec.s006.sum()\r\n assert sum_s006_in_syr_plus_one > sum_s006_in_syr\r\n sum_e00300_in_syr_plus_one = rec.e00300.sum()\r\n # because growfactor for e00300 was less than one in 2015, assert < below:\r\n assert sum_e00300_in_syr_plus_one < sum_e00300_in_syr\r\n # test private methods\r\n rec._read_data(data=None)\r\n rec._read_weights(weights=None)\r\n with pytest.raises(ValueError):\r\n rec._read_weights(weights=list())\r\n", "\"\"\"\r\nTests of Tax-Calculator benefits.\r\n\r\nNote that the CPS-related files that are required to run this program\r\nhave been constructed by the Tax-Calculator development team from publicly\r\navailable Census data files. Hence, the CPS-related files are freely\r\navailable and are part of the Tax-Calculator repository.\r\n\r\nRead Tax-Calculator/TESTING.md for details.\r\n\"\"\"\r\n# CODING-STYLE CHECKS:\r\n# pycodestyle test_benefits.py\r\n# pylint --disable=locally-disabled test_benefits.py\r\n\r\nimport os\r\nimport pytest\r\nimport numpy as np\r\nimport pandas as pd\r\n# pylint: disable=import-error\r\nfrom taxcalc import Policy, Records, Calculator\r\n\r\n\r\[email protected]\r\ndef test_benefits(tests_path, cps_fullsample):\r\n \"\"\"\r\n Test CPS benefits.\r\n \"\"\"\r\n # pylint: disable=too-many-locals\r\n benefit_names = ['ssi', 'mcare', 'mcaid', 'snap', 'wic',\r\n 'tanf', 'vet', 'housing']\r\n # write benefits_actual.csv file\r\n recs = Records.cps_constructor(data=cps_fullsample)\r\n start_year = recs.current_year\r\n calc = Calculator(policy=Policy(), records=recs, verbose=False)\r\n assert calc.current_year == start_year\r\n year_list = list()\r\n bname_list = list()\r\n benamt_list = list()\r\n bencnt_list = list()\r\n benavg_list = list()\r\n for year in range(start_year, Policy.LAST_BUDGET_YEAR + 1):\r\n calc.advance_to_year(year)\r\n size = calc.array('XTOT')\r\n wght = calc.array('s006')\r\n # compute benefit aggregate amounts and head counts and average benefit\r\n # (head counts include all members of filing unit receiving a benefit,\r\n # which means benavg is f.unit benefit amount divided by f.unit size)\r\n for bname in benefit_names:\r\n ben = calc.array('{}_ben'.format(bname))\r\n benamt = round((ben * wght).sum() * 1e-9, 3)\r\n bencnt = round((size[ben > 0] * wght[ben > 0]).sum() * 1e-6, 3)\r\n benavg = round(benamt / bencnt, 1)\r\n year_list.append(year)\r\n bname_list.append(bname)\r\n benamt_list.append(benamt)\r\n bencnt_list.append(bencnt)\r\n benavg_list.append(benavg)\r\n adict = {'year': year_list,\r\n 'bname': bname_list,\r\n 'benamt': benamt_list,\r\n 'bencnt': bencnt_list,\r\n 'benavg': benavg_list}\r\n adf = pd.DataFrame(data=adict,\r\n columns=['year', 'bname', 'benamt', 'bencnt', 'benavg'])\r\n ben_act_path = os.path.join(tests_path, 'benefits_actual.csv')\r\n adf.to_csv(ben_act_path, index=False)\r\n # read benefits_expect.csv file\r\n ben_exp_path = os.path.join(tests_path, 'benefits_expect.csv')\r\n edf = pd.read_csv(ben_exp_path)\r\n # compare benefit information\r\n atol = 0.0001\r\n rtol = 0.0\r\n diffs = False\r\n for col in ['benamt', 'bencnt', 'benavg']:\r\n if not np.allclose(adf[col], edf[col], atol=atol, rtol=rtol):\r\n diffs = True\r\n if diffs:\r\n msg = 'CPS BENEFITS RESULTS DIFFER\\n'\r\n msg += '-------------------------------------------------\\n'\r\n msg += '--- NEW RESULTS 
IN benefits_actual.txt FILE ---\\n'\r\n msg += '--- if new OK, copy benefits_actual.txt to ---\\n'\r\n msg += '--- benefits_expect.txt ---\\n'\r\n msg += '--- and rerun test. ---\\n'\r\n msg += '-------------------------------------------------\\n'\r\n raise ValueError(msg)\r\n os.remove(ben_act_path)\r\n" ]
[ [ "numpy.all", "pandas.read_csv", "numpy.allclose" ], [ "pandas.read_csv", "numpy.allclose", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
michi7x7/pm-mos-model
[ "394d752b1165f5afd96520f1b6e2dbecc27fdc4b" ]
[ "CryMOS/QV.py" ]
[ "import numpy as np\n\nfrom .constants import *\nfrom .Bulk import BulkModel, BulkModelFD, BulkModelTails\nfrom .base import MosModelBase, writeable_property\n\nfrom math import sqrt\nfrom scipy.integrate import quad\n\n__all__ = ['DefaultQV', 'BeckersQVpy',\n 'DiracQVpy', 'TailsQVpy',\n 'GildenblatQVpy', 'DefaultQV']\n\n\nclass BeckersQVpy(MosModelBase, BulkModel):\n \"\"\" modelled after CRYOGENIC MOS TRANSISTOR MODEL \"\"\"\n\n new_params = ('cox', 'N_t', 'psi_t', 'g_t', 'Q_0', '_phi_m')\n params = MosModelBase.params + BulkModel.params + new_params\n pandas_default = ('temp',) # TODO\n\n def __init__(self, **kwargs):\n self.eps_si = eps_si # DO NOT CHANGE! many parts of the model refer to the global eps_si\n self.cox = 0.005755\n\n self._phi_m = None\n\n self.N_t = None\n self.psi_t = []\n self.g_t = 4.\n self.Q_0 = 0. # fixed oxide charge\n\n BulkModel.__init__(self)\n MosModelBase.__init__(self, **kwargs)\n\n self.update_params(**kwargs)\n\n @writeable_property\n def phi_m(self):\n \"\"\" gate work function / electron affinity. Default: degenerately doped silicon E_f = E_c \"\"\"\n return self.chi/e\n\n @property\n def phi_ms(self):\n \"\"\" work function difference between gate/bulk ~ flatband voltage\n\n for a poly-gate, both add chi, thus chi cancels \"\"\"\n return self.phi_m - self.phi_s\n\n @phi_ms.setter\n def phi_ms(self, phi_ms):\n \"\"\" just another way to set phi_m, does not keep phi_ms constant \"\"\"\n self._phi_m = phi_ms + self.phi_s\n\n def fs_ea(self, psi_s, V_ch):\n \"\"\" eq (8)\"\"\"\n return 1. / (1. + self.g_A * self.exp_phi_t(self.psi_a - psi_s + V_ch))\n\n def fb_ea(self):\n \"\"\" eq (9) \"\"\"\n\n assert self.N_A > self.N_D, \"NMOS only\"\n return 1. / (1. + self.g_A * self.exp_phi_t(self.psi_a - self.psi_b))\n\n @property\n def gamma(self):\n return sqrt(2 * e * self.N_A * eps_si) / self.cox\n\n def Es_square(self, psi_s, v_ch, psi_b=None, fb_ea=None):\n \"\"\" eq (7) \"\"\"\n\n # these are kinda hard to calculate, precalculate and use just once\n phi_t = self.phi_t\n psi_b = psi_b or self.psi_b\n fb_ea = fb_ea or self.fb_ea()\n # exp_phi_t = self.exp_phi_t\n exp_phi_t = lambda a: np.exp(a / phi_t)\n\n fs_ea = self.fs_ea(psi_s, v_ch)\n\n fac1 = 2. 
* e / eps_si\n fac2 = exp_phi_t(psi_s - v_ch) + exp_phi_t(-psi_s) - exp_phi_t(psi_b - v_ch) - exp_phi_t(-psi_b)\n fac3 = psi_s - psi_b - phi_t * np.log(fs_ea / fb_ea)\n return fac1 * (self.n_i * phi_t * fac2 + self.N_A * fac3)\n\n def Es(self, psi_s, v_ch, psi_b=None, **kwargs):\n \"\"\" sqrt of eq (7)\"\"\"\n psi_b = psi_b or self.psi_b\n return np.sign(psi_s-psi_b) * np.sqrt(self.Es_square(psi_s, v_ch, psi_b=psi_b, **kwargs))\n\n def v_fb(self, psi_s, v_ch):\n return self.phi_ms + (self.Q_0 - self.Q_it(psi_s, v_ch)) / self.cox\n\n @property\n def v_th0(self):\n \"\"\" approximated threshold voltage \"\"\"\n phi0 = self.psi_th - self.psi_b # + 5.*self.phi_t\n dphi = self.phi_t * np.log(self.fb_ea()) # E_f > E_i, fs_ea == 1\n\n return self.v_fb(self.psi_th, 0.0) + phi0 + \\\n self.gamma * sqrt(phi0 + dphi)\n\n @property\n def v_th(self):\n \"\"\" threshold voltage from full v_gb expression (psi_s = psi_th) \"\"\"\n return self.v_gb(self.psi_th, 0.0)\n\n @property\n def v_th1(self):\n phi_f0 = self.E_g/(2*e) + self.phi_t * np.log(self.N_A/np.sqrt((self.N_c * self.N_v)))\n\n # this includes incomplete ionization if the instance has ionization = incomplete\n phi_f1 = -self.psi_b\n return phi_f0 + self.phi_m - self.chi/e - (self.E_c-self.E_i)/e + self.gamma * np.sqrt(phi_f0 + phi_f1)\n\n def v_gb(self, psi_s, v_ch):\n return self.v_fb(psi_s, v_ch) + eps_si * self.Es(psi_s, v_ch) / self.cox + psi_s - self.psi_b\n\n def psi_s(self, v_ch, v_gb):\n \"\"\"solves the implicit equation (pot_loop) to get the surface potential as a function of v_ch and v_gb\"\"\"\n from scipy.optimize import root_scalar\n v_gb = np.atleast_1d(v_gb)\n psi_s = 0. * v_gb\n bracket = [-2., 2.]\n # bracket = [(self.E_v-self.E_i)/e-v_ch, (self.E_c-self.E_i)/e-v_ch]\n\n psi_b = self.psi_b\n fb_ea = self.fb_ea()\n\n Es = self.Es\n v_fb = self.v_fb\n\n # surface boundary condition:\n # going around the loop, all appearing voltage must cancel each other out, statet a bit before eq. (13)\n def pot_loop(psi_s, v_ch, v_gb):\n return v_fb(psi_s, v_ch) + \\\n eps_si * Es(psi_s, v_ch, psi_b=psi_b, fb_ea=fb_ea) / self.cox + \\\n psi_s - self.psi_b - v_gb\n\n for i, v in enumerate(v_gb):\n res = root_scalar(pot_loop, args=(v_ch, v), bracket=bracket, xtol=1e-30)\n if not res.converged:\n psi_s[i] = np.nan\n raise RuntimeError(\"root did not converge!\")\n else:\n psi_s[i] = res.root\n return psi_s\n\n def Q_m_1(self, psi_s, v_ch):\n \"\"\" Q_m exploiting the charge neutrality, here mobile = holes+electrons \"\"\"\n return self.Q_sc(psi_s, v_ch) - self.Q_f(psi_s, v_ch)\n\n def Q_m(self, psi_s, v_ch):\n \"\"\" Q_m only including electron terms \"\"\"\n log = np.log(self.fs_ea(psi_s, v_ch) / self.fb_ea())\n sqrt1 = - np.sqrt(2. * e * self.n_i * self.phi_t * eps_si * (\n self.exp_phi_t(psi_s - v_ch) - self.exp_phi_t(self.psi_b - v_ch)) + 2. * e * self.N_A * eps_si * (\n psi_s - self.psi_b - self.phi_t * log))\n\n sqrt2 = np.sqrt(2. * e * self.N_A * eps_si * (psi_s - self.psi_b - self.phi_t * log))\n return sqrt1 + sqrt2\n\n def fs_Et(self, g_t, psi_t, psi_s, v_ch):\n return 1. / (1. + g_t * self.exp_phi_t((+psi_t - psi_s + v_ch)))\n\n def Q_sc(self, psi_s, v_ch):\n \"\"\" total semiconductor charge per unit area, text after eq (10)\"\"\"\n return -eps_si * self.Es(psi_s, v_ch)\n\n def Q_f(self, psi_s, v_ch):\n \"\"\" fixed charge density per unit area, eq (11)\"\"\"\n log = np.log(self.fs_ea(psi_s, v_ch) / self.fb_ea())\n return -np.sqrt(\n 2. * e * self.N_A * eps_si * (psi_s - self.psi_b) - 2. 
* e * self.N_A * self.phi_t * eps_si * log)\n\n def Q_it(self, psi_s, v_ch):\n \"\"\" interface charge per unit area, eq (13) and eq (14) and text above\"\"\"\n ret = 0. * psi_s\n if self.N_t is not None and self.psi_t is not None:\n for psi_t, N_t in zip(np.atleast_1d(self.psi_t), np.atleast_1d(self.N_t)):\n # catch the case very complete ionization is assumed in order to avoid computational errors in fs_Et in this case\n if self.g_t != 0.:\n ret = ret + (-e * N_t * self.fs_Et(self.g_t, psi_t, psi_s, v_ch))\n else:\n ret = ret + (-e) * N_t\n return ret\n\n def set_arnout_traps(self, psi_t_c=0.58, N_t=1.2e15, fac=None):\n \"\"\" sets the interface traps similar to what Arnout did in his paper\"\"\"\n fac = fac or np.linspace(-2, 2, 5)\n self.psi_t = psi_t_c + self.phi_t * fac\n self.N_t = np.full_like(self.psi_t, N_t)\n\n def y_psi(self, v_gb, v_ch=0, linlog=0.5, logend=1e-3) -> (np.ndarray, np.ndarray):\n \"\"\" calculate the band structure in vertical direction\n\n returns: y, psi\n \"\"\"\n\n from math import log10, fabs\n\n psi_s = self.psi_s(v_ch, v_gb)\n psi_b = self.psi_b\n\n integr = lambda psi: 1/self.Es(psi, v_ch, psi_b=psi_b)\n\n if np.isclose(psi_s, psi_b):\n return [0, 1e-6], [psi_s, psi_b]\n\n del_psi = psi_s - psi_b\n\n # linear close to the interface, log further away\n # as per suggestion in https://h-gens.github.io/automated-drawing-of-the-mos-band-diagram.html\n psis = psi_b + del_psi*np.hstack((\n np.linspace(1, linlog, 51),\n np.logspace(log10(linlog), log10(logend), 101)[1:]\n ))\n\n @np.vectorize\n def get_y(psi):\n return quad(integr, psi, psi_s)[0]\n\n y = get_y(psis)\n return y, psis\n\n\nclass DiracQVpy(BulkModelFD, BeckersQVpy):\n \"\"\" QV model that uses FD-Integrals for E**2\n\n TODO: check whether psi_b and psi_s fit together in flatband condition!!!\n \"\"\"\n\n def Es_square(self, psi_s, v_ch, psi_b=None, fb_ea=None, E_i=None):\n # calculate Es_square via the fermi dirac integrals\n psi_b = psi_b or self.psi_b\n fac = 2. * e / eps_si\n\n def int_fun(psi):\n return self.n_psi(psi - v_ch) - self.p_psi(psi) + self.N_Am_psi(psi - v_ch) - self.N_Dp_psi(psi)\n\n intfun = lambda psi: fac * quad(int_fun, psi_b, psi)[0]\n return np.vectorize(intfun)(psi_s)\n\n def Q_f(self, psi_s, v_ch, psi_b=None):\n psi_b = psi_b or self.psi_b\n fac = 2. * e / eps_si\n\n def int_fun(psi):\n return fac * quad(\n lambda psi: self.N_Am_psi(psi) - self.N_Dp_psi(psi),\n psi_b - v_ch, psi - v_ch)[0]\n\n Es2 = np.vectorize(int_fun)(psi_s)\n\n # TODO: this is hideous... and probably wrong, is there no better way?\n return -eps_si * np.sign(psi_s - psi_b) * np.sqrt(np.abs(Es2))\n\n def Q_m(self, psi_s, v_ch, psi_b=None):\n fac = 2. 
* e / eps_si\n psi_b = psi_b or self.psi_b\n\n def int_fun_Qsc(psi):\n return self.n_psi(psi - v_ch) + self.N_Am_psi(psi - v_ch)\n\n Es_electrons = np.vectorize(\n lambda psi: fac * quad(int_fun_Qsc, psi_b, psi)[0]\n )(psi_s)\n\n return -eps_si * np.sqrt(Es_electrons) - self.Q_f(psi_s, v_ch, psi_b=psi_b)\n\n\nclass TailsQVpy(BulkModelTails, DiracQVpy):\n \"\"\" QV-model that includes bandtail-states \"\"\"\n pass\n\n\nclass GildenblatQVpy(BeckersQVpy):\n \"\"\" QV model that uses the H(u) description for Es_square\n\n The relevant paper is \"Surface potential equation for bulk MOSFET\" (Gildenblat 2009)\n \"\"\"\n\n @property\n def lam_bulk(self):\n return self.fb_ea()\n\n def Es_square(self, psi_s, v_ch, psi_b=None, fb_ea=None):\n from math import log, exp # this is substantially faster than np\n from warnings import warn\n warn(\"bulk_n, bulk_p and psi_b do not fit togeter: ERROR between psi_s and psi_b!\")\n\n psi_b = psi_b or self.psi_b\n phi_s = psi_s - psi_b\n phi_t = self.phi_t\n\n lam = self.lam_bulk\n\n n_b = self.bulk_n\n p_b = self.bulk_p\n\n k_0 = exp(-v_ch / phi_t)\n\n u = np.array(phi_s / phi_t, dtype=np.longdouble)\n g_fun = 1. / lam * np.log(1. + lam * (np.exp(u) - 1))\n # g_fun = 1. / lam * np.logaddexp(log(1. - lam), log(lam)+u) # only a single call to numpy: faster\n h2 = np.exp(-u) - 1 + g_fun + n_b / p_b * k_0 * (np.exp(u) - 1. - g_fun)\n\n return 2 * e * p_b * phi_t / eps_si * h2\n\n\nclass Dirac2DQV(BeckersQVpy):\n \"\"\" QV model that uses FD-Integrals for E**2 \"\"\"\n def Es_square(self, psi_s, v_ch, psi_b=None, fb_ea=None, E_i=None):\n # calculate es_square via the fermi dirac integrals\n phi_t = self.phi_t\n E_i = E_i or self.E_i\n psi_a = self.psi_a\n psi_b = psi_b or self.psi_b\n exp_phi_t = lambda a: np.exp(a / phi_t)\n fac = 2. * e / eps_si\n\n def fermi_dirac_integral(E, T):\n from fdint import fdk\n return fdk(k=0.5, phi=E / (k * T))\n\n def int_fun(psi):\n n_fd = self.N_c * 2 / np.sqrt(pi) * fermi_dirac_integral(e * (psi - v_ch) + E_i - self.E_c, self.temp)\n p_fd = self.N_v * 2 / np.sqrt(pi) * fermi_dirac_integral(self.E_v - e * psi - E_i, self.temp)\n na_min = self.N_A / (1. + self.g_A * exp_phi_t(psi_a - psi + v_ch))\n return n_fd - p_fd + na_min\n\n intfun = lambda psi: fac * quad(int_fun, psi_b, psi)[0]\n return np.vectorize(intfun)(psi_s)\n\n Q_m = BeckersQVpy.Q_m_1\n\n\n# default implementation\nDefaultQV = BeckersQVpy\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.linspace", "numpy.abs", "scipy.integrate.quad", "numpy.atleast_1d", "numpy.full_like", "numpy.sign", "numpy.vectorize", "numpy.exp", "scipy.optimize.root_scalar", "numpy.array", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "1.5", "1.7", "1.2", "1.8" ], "tensorflow": [] } ]
K2OKOH/da-faster-RCNN-ChineseComment
[ "b88d1821779b9edc3f0f4a595e1f41c3bfdd7cab" ]
[ "lib/model/utils/blob.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Blob helper functions.\"\"\"\n\nimport numpy as np\n# from scipy.misc import imread, imresize\nimport cv2\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef im_list_to_blob(ims):\n \"\"\"Convert a list of images into a network input.\n 把图片列表变化为适合网络的输入格式\n Assumes images are already prepared (means subtracted, BGR order, ...).\n \"\"\"\n # 取出每张图片的最大的长宽和深度\n max_shape = np.array([im.shape for im in ims]).max(axis=0)\n # 求出图片的个数\n num_images = len(ims)\n # 创建一个np数组4维,(图片序号,长,宽,深度)(最大的),用for循环填入图片数据\n blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),\n dtype=np.float32)\n for i in xrange(num_images):\n im = ims[i]\n blob[i, 0:im.shape[0], 0:im.shape[1], :] = im\n # 返回图片的np数组\n return blob\n\ndef prep_im_for_blob(im, pixel_means, target_size, max_size):\n \"\"\"Mean subtract and scale an image for use in a blob.\"\"\"\n im = im.astype(np.float32, copy=False)\n # 减去中值\n im -= pixel_means\n # im = im[:, :, ::-1]\n # 记录维度(三个维度的值)\n im_shape = im.shape\n # 取前两个维度的最大值和最小值\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n # target是短边像素\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n # if np.round(im_scale * im_size_max) > max_size:\n # im_scale = float(max_size) / float(im_size_max)\n # im = imresize(im, im_scale)\n # 沿x,y轴缩放的系数都是im_scale\n im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n\n # 返回缩放后的图形 和 缩放比\n return im, im_scale\n" ]
[ [ "numpy.max", "numpy.array", "numpy.zeros", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
johnnyapol/RPICovidScraper
[ "84a97847c80c320e2eed3fd161e9f175f83d14a5" ]
[ "main.py" ]
[ "#!/usr/bin/env python3\n# Usage: ./main.py\n\"\"\"\nCopyright (C) 2020-2021 John C. Allwein 'johnnyapol' ([email protected])\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport os\nimport pickle\nimport requests\nfrom random import choice\nfrom subprocess import run\nimport sys\nimport traceback\nfrom datetime import date, timedelta, datetime\nfrom copy import deepcopy\nfrom itertools import chain\nfrom io import BytesIO\n\nfrom bs4 import BeautifulSoup\nfrom discord_webhook import DiscordEmbed, DiscordWebhook\nimport matplotlib.pyplot as plot\nimport savepagenow\n\n# Import configuration (if available)\ntry:\n import config\n\n WEBHOOKS = config.webhooks\n PSA = config.PSA\n QUIET = config.QUIET\nexcept:\n print(\"No discord webhooks supplied - data will just be stored locally\")\n traceback.print_exc()\n WEBHOOKS = None\n PSA = None\n QUIET = False\n\nDASHBOARD = \"https://covid19.rpi.edu/dashboard\"\n\n\nclass CovidData:\n def __init__(self):\n self.rpi_array = [0] * 5\n self.last_updated = date.today() - timedelta(days=1)\n self.historicalData = {}\n\n def update(self, case_data):\n today = date.today()\n\n if today != self.last_updated:\n self.last_updated = today\n self.historicalData[today] = case_data\n self.rpi_array = case_data\n\n def get_rolling(self):\n return sum(self.get_rolling_iterator(self.last_updated))\n\n def get_case_data(self):\n return self.rpi_array\n\n def get_rolling_iterator(self, day=date.today()):\n dates = [day - timedelta(days=x) for x in range(13, -1, -1)]\n return [\n self.historicalData[date][0] if date in self.historicalData else 0\n for date in dates\n ]\n\n\ndef check_for_updates():\n global DASHBOARD\n request = requests.get(\n DASHBOARD,\n headers={\n \"User-Agent\": \"RPICovidScraper https://github.com/johnnyapol/RPICovidScraper\"\n },\n )\n soup = BeautifulSoup(request.text, features=\"lxml\")\n header = \"field field--name-field-stats field--type-entity-reference-revisions field--label-hidden field__items\"\n header2 = \"field field--name-field-stat field--type-string field--label-hidden field__item\"\n date_header = \"field field--name-field-stats-caption field--type-string field--label-hidden field__item\"\n\n \"\"\"\n Current data format:\n\n case_data[0] = positive tests (last 24 hours)\n case_data[1] = positive test results (last 7 days)\n case_data[2] = positive test results (since august 17th)\n case_data[3] = total tests (last 7 days)\n case_data[4] = total tests (since august 17th)\n \"\"\"\n return (\n [\n int(\"\".join((\"\".join(x.text.strip().split(\" 
\"))).split(\",\")))\n for x in soup.find(\"div\", {\"class\": header}).findAll(\n \"div\", {\"class\": header2}\n )\n ],\n soup.find(\"div\", {\"class\": date_header}).text,\n )\n\n\ndef case_value_to_string(case_data, previous_case_data, index):\n diff = case_data[index] - previous_case_data[index]\n diff_string = f\"({diff:+,})\" if diff != 0 else \"\"\n return f\"{case_data[index]:,} {diff_string}\"\n\n\ndef get_source_url():\n start = \"https://github.com/johnnyapol/RPICovidScraper/\"\n try:\n return f'{start}commit/{run([\"git\", \"log\", \"--pretty=format:%H\", \"-n\", \"1\"], capture_output=True).stdout.decode(\"ascii\")}'\n except:\n return start\n\n\ndef post_discord(\n rolling, old_rolling, case_data, previous_case_data, date, dashboard_url, graph\n):\n global WEBHOOKS\n global PSA\n global QUIET\n if WEBHOOKS is None:\n return print(\"Skipping posting to discord as no webhooks supplied\")\n\n positive_thumbnails = [\n \"https://www.continentalmessage.com/wp-content/uploads/2015/09/123rf-alert2.jpg\",\n \"https://i.kym-cdn.com/photos/images/newsfeed/000/675/645/2c7.gif\",\n \"https://media.discordapp.net/attachments/783375197604413445/790625854202839100/image0.png\",\n \"https://media.tenor.com/images/6603c0a47ff16ad8d3682e481e727f76/tenor.gif\",\n ]\n\n neutral_thumbnails = [\n \"https://steamcdn-a.akamaihd.net/steamcommunity/public/images/clans/5671259/7923c9b8e0a5799d4d422208b31f5ca0f4f49067.png\",\n \"https://static01.nyt.com/images/2020/01/28/science/28VIRUS-BATS1/28VIRUS-BATS1-videoSixteenByNineJumbo1600.jpg\",\n \"https://ih1.redbubble.net/image.1877589148.0162/ur,mask_flatlay_front,wide_portrait,750x1000.jpg\",\n \"https://media.giphy.com/media/KHEgvyrgYnL9RW08h6/giphy.gif\",\n ]\n\n negative_thumbnails = [\n \"https://media.giphy.com/media/WS0MDT0DITCTLwcNNx/giphy.gif\",\n \"https://cdn.vox-cdn.com/thumbor/iuL4QWaANcy5lyeCDXxIrBq7_uQ=/0x0:3000x2000/1400x1050/filters:focal(1436x422:1916x902):no_upscale()/cdn.vox-cdn.com/uploads/chorus_image/image/68718659/AP_20331457642255.0.jpg\",\n ]\n\n emojis = [\"❤️\", \"✨\", \"🥓\", \"🍺\", \"🧻\", \"🐍\", \"☃️\", \"😷\"]\n\n if QUIET and case_data[0] == 0:\n return\n\n embed = DiscordEmbed()\n\n if case_data[0] > 4:\n embed.set_color(15158332)\n embed.set_thumbnail(url=choice(positive_thumbnails))\n elif case_data[0] > 0:\n embed.set_color(0xFFFF00)\n embed.set_thumbnail(url=choice(neutral_thumbnails))\n else:\n embed.set_color(3066993)\n embed.set_thumbnail(url=choice(negative_thumbnails))\n\n if PSA is not None:\n embed.add_embed_field(name=\"ANNOUNCEMENT\", value=PSA, inline=False)\n embed.color = 15844367\n\n embed.add_embed_field(\n name=\"New Positive Tests\",\n value=f\"{case_data[0]}\",\n inline=False,\n )\n embed.add_embed_field(\n name=\"Positive Tests (7 days)\",\n value=case_value_to_string(case_data, previous_case_data, 1),\n inline=False,\n )\n\n embed.add_embed_field(\n name=\"Positive Tests (14 days)\",\n value=case_value_to_string([rolling], [old_rolling], 0),\n inline=False,\n )\n\n embed.add_embed_field(\n name=\"Weekly Test Count\",\n value=case_value_to_string(case_data, previous_case_data, 3),\n inline=False,\n )\n if case_data[1] != 0:\n # Calculate weekly positivity rate\n pcr = (case_data[1] / case_data[3]) * 100\n embed.add_embed_field(name=\"Weekly Positivity Rate\", value=f\"{round(pcr, 4)}%\")\n embed.add_embed_field(\n name=\"Total Positive Tests\",\n value=case_value_to_string(case_data, previous_case_data, 2),\n )\n\n # Since discord footers don't support \"rich\" content, hack on a footer to the last 
field\n date = \"\".join(date.split(\"\\n\"))\n embed.add_embed_field(\n name=\"Total Tests\",\n value=f\"{case_value_to_string(case_data, previous_case_data, 4)}\\n{date} Made with {choice(emojis)} - [source]({get_source_url()})\",\n inline=False,\n )\n embed.set_author(\n name=\"Click for dashboard\",\n url=dashboard_url,\n icon_url=\"https://i.redd.it/14nqzc0hswy31.png\",\n )\n\n hook = DiscordWebhook(\n url=WEBHOOKS,\n content=choice(\n [\n \"The RPI Covid Dashboard has been updated!\",\n \"I got yer COVID data right here!\",\n \"Special delivery!\",\n \"Beep beep boop\",\n \"I found some data!\",\n ]\n ),\n username=\"RPI Covid Dashboard\",\n avatar_url=\"https://www.minnpost.com/wp-content/uploads/2020/03/coronavirusCDC640.png\",\n )\n\n if graph != None:\n hook.add_file(file=graph.read(), filename=\"graph.png\")\n embed.set_image(url=\"attachment://graph.png\")\n hook.add_embed(embed)\n hook.execute()\n\n\ndef load_previous():\n try:\n with open(\".cache\", \"rb\") as file:\n return pickle.load(file)\n except:\n print(\"Cache read failed\")\n return CovidData()\n\n\ndef save(case_data):\n with open(\".cache\", \"wb\") as file:\n pickle.dump(case_data, file)\n\n\ndef create_graph(data):\n x = [int(z) for z in data.get_rolling_iterator()]\n cum = [x[0]]\n for i in range(1, len(x)):\n cum.append(cum[-1] + x[i])\n # thanks to https://www.tutorialspoint.com/matplotlib/matplotlib_bar_plot.htm for help\n today = date.today()\n monthday = lambda d: f\"{d.month}-{d.day}\"\n dates = [today - timedelta(days=x) for x in range(13, -1, -1)]\n plot.title(f\"Previous 14 days\")\n plot.bar(dates, x, color=\"red\", label=\"daily positive tests\")\n plot.plot(dates, cum, color=\"orange\", label=f\"Positives since {monthday(dates[0])}\")\n # Add individual day labels\n for i, v in zip(dates, x):\n if v == 0:\n continue\n plot.text(i, v, str(v), color=\"blue\", fontweight=\"bold\", ha=\"center\")\n plot.plot(\n dates,\n [sum(data.get_rolling_iterator(date)) for date in dates],\n color=\"green\",\n label=\"Rolling 2 week sum\",\n )\n plot.xticks(dates, [monthday(date) for date in dates], rotation=45)\n plot.legend()\n\n data = BytesIO()\n plot.subplots_adjust(bottom=0.17)\n plot.ylabel(\"Number of positive tests\")\n plot.xlabel(\"Day reported\")\n now = datetime.now()\n plot.figtext(\n 0.5,\n 0.01,\n f\"Generated on {now.strftime('%m/%d/%y %H:%M')} {datetime.now().astimezone().tzinfo.tzname(None)}\",\n ha=\"center\",\n fontsize=8,\n )\n plot.savefig(data, format=\"png\")\n data.seek(0)\n return data\n\n\ndef main():\n global DASHBOARD\n covid_data = load_previous()\n previous_case_data = deepcopy(covid_data.get_case_data())\n current_case_data, date = check_for_updates()\n\n ci = any(x.lower() == \"--ci\" for x in sys.argv)\n force = any(x.lower() == \"--force\" for x in sys.argv)\n\n # Only post under the following conditions:\n # 1. There is new data from RPI\n # - AND -\n # 2. 
there are new positive tests OR new weekly/total numbers reported\n # This avoids the bs updates where all RPI does is reset the daily/weekly numbers\n if (\n force\n or current_case_data != previous_case_data\n and (\n current_case_data[0] != 0\n or any(\n current_case_data[x] != previous_case_data[x]\n for x in range(2, len(current_case_data))\n )\n )\n ):\n dashboard_url = DASHBOARD\n try:\n # We don't want to abuse the Wayback Machine in actions\n if not ci:\n dashboard_url = savepagenow.capture(DASHBOARD, accept_cache=True)\n else:\n print(\"Skipping page archive as we are running in CI mode\")\n except:\n print(f\"Page archived failed\")\n traceback.print_exc()\n\n old_rolling = covid_data.get_rolling()\n covid_data.update(current_case_data)\n\n post_discord(\n covid_data.get_rolling(),\n old_rolling,\n current_case_data,\n previous_case_data,\n date,\n dashboard_url,\n create_graph(covid_data),\n )\n\n save(covid_data)\n print(\n f\"Done. Old: {previous_case_data} New: {current_case_data}\\n Rolling: {covid_data.get_rolling()}\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "matplotlib.pyplot.bar", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nerminsamet/HoughNet-VID
[ "670405e002e1c4d60596434db4790783eaf62846" ]
[ "src/lib/datasets/dataset/vid_eval.py" ]
[ "import numpy as np\nimport scipy.io as sio\nfrom collections import defaultdict\n\nBIG_NUM = 1000000\n\ndef area(box, mode=\"xyxy\"):\n\n if mode == \"xyxy\":\n TO_REMOVE = 1\n area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)\n elif mode == \"xywh\":\n area = box[:, 2] * box[:, 3]\n else:\n raise RuntimeError(\"Should not be here\")\n\n return area\n\n\ndef boxlist_iou(boxlist1, boxlist2):\n\n area1 = area(boxlist1, mode = \"xyxy\")\n area2 = area(boxlist2, mode = \"xyxy\")\n\n lt = np.maximum(np.expand_dims(boxlist1[:, :2], axis=1), boxlist2[:, :2]) # [N,M,2]\n rb = np.minimum(np.expand_dims(boxlist1[:, 2:], axis=1), boxlist2[:, 2:]) # [N,M,2]\n\n TO_REMOVE = 1\n\n wh = np.clip((rb - lt + TO_REMOVE), a_min = 0, a_max=BIG_NUM) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n iou = inter / (area1[:, None] + area2 - inter)\n return iou\n\n\ndef eval_proposals_vid(pred_boxlists, gt_boxlists, iou_thresh=0.5, limit=300):\n assert len(gt_boxlists) == len(\n pred_boxlists\n ), \"Length of gt and pred lists need to be same.\"\n\n gt_overlaps = []\n num_pos = 0\n for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):\n inds = np.argsort(pred_boxlist[\"scores\"])[::-1]\n pred_boxlist['labels'] = pred_boxlist['labels'][inds]\n pred_boxlist['bbox'] = pred_boxlist['bbox'][inds]\n pred_boxlist['scores'] = pred_boxlist['scores'][inds]\n\n if len(pred_boxlist['labels']) > limit:\n pred_boxlist['labels'] = pred_boxlist['labels'][:limit]\n pred_boxlist['bbox'] = pred_boxlist['bbox'][:limit]\n pred_boxlist['scores'] = pred_boxlist['scores'][:limit]\n\n num_pos += len(gt_boxlist['labels'])\n\n if len(gt_boxlist['labels']) == 0:\n continue\n\n if len(pred_boxlist['labels']) == 0:\n continue\n\n overlaps = boxlist_iou(pred_boxlist['bbox'], gt_boxlist['bbox'])\n\n _gt_overlaps = np.zeros(gt_boxlist['bbox'].shape[0])\n for j in range(min(len(pred_boxlist['bbox']), len(gt_boxlist['bbox']))):\n max_overlaps, argmax_overlaps = np.max(overlaps, axis=0), np.argmax(overlaps, axis=0)\n\n gt_ovr, gt_ind = np.max(max_overlaps, axis=0), np.argmax(max_overlaps, axis=0)\n assert gt_ovr >= 0\n\n box_ind = argmax_overlaps[gt_ind]\n\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert _gt_overlaps[j] == gt_ovr\n\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n\n gt_overlaps.append(_gt_overlaps)\n\n gt_overlaps = np.concatenate(gt_overlaps, axis=0)\n gt_overlaps = np.sort(gt_overlaps)\n\n recall = (gt_overlaps >= iou_thresh).astype(float).sum() / float(num_pos)\n\n return {\n \"recall\": recall\n }\n\ndef eval_detection_vid(pred_boxlists,\n gt_boxlists,\n iou_thresh=0.5,\n motion_ranges=[[0.0, 0.7], [0.7, 0.9], [0.9, 1.0]],\n motion_specific=False,\n use_07_metric=False):\n assert len(gt_boxlists) == len(\n pred_boxlists\n ), \"Length of gt and pred lists need to be same.\"\n\n if motion_specific:\n motion_iou_file = \"./evaluation/vid_groundtruth_motion_iou.mat\"\n motion_ious = sio.loadmat(motion_iou_file)\n motion_ious = np.array(\n [[motion_ious['motion_iou'][i][0][j][0] if len(motion_ious['motion_iou'][i][0][j]) != 0 else 0 \\\n for j in range(len(motion_ious['motion_iou'][i][0]))] \\\n for i in range(len(motion_ious['motion_iou']))])\n else:\n motion_ious = None\n\n motion_ap = defaultdict(dict)\n for motion_index, motion_range in enumerate(motion_ranges):\n print(\"Evaluating motion iou range {} - {}\".format(motion_range[0], motion_range[1]))\n prec, rec = calc_detection_vid_prec_rec(\n pred_boxlists=pred_boxlists,\n gt_boxlists=gt_boxlists,\n 
motion_ious=motion_ious,\n iou_thresh=iou_thresh,\n motion_range=motion_range,\n )\n ap = calc_detection_vid_ap(prec, rec, use_07_metric=use_07_metric)\n motion_ap[motion_index] = {\"ap\": ap, \"map\": np.nanmean(ap)}\n return motion_ap\n\n\ndef calc_detection_vid_prec_rec(gt_boxlists, pred_boxlists, motion_ious,\n iou_thresh=0.5, motion_range=[0., 1.]):\n n_pos = defaultdict(int)\n score = defaultdict(list)\n match = defaultdict(list)\n pred_ignore = defaultdict(list)\n if motion_ious is None:\n motion_ious = [None] * len(gt_boxlists)\n empty_weight = 0\n else:\n all_motion_iou = np.concatenate(motion_ious, axis=0)\n empty_weight = sum([(all_motion_iou[i] >= motion_range[0]) & (all_motion_iou[i] <= motion_range[1]) for i in\n range(len(all_motion_iou))]) / float(len(all_motion_iou))\n if empty_weight == 1:\n empty_weight = 0\n for gt_boxlist, pred_boxlist, motion_iou in zip(gt_boxlists, pred_boxlists, motion_ious):\n pred_bbox = pred_boxlist['bbox']\n pred_label = pred_boxlist['labels']\n pred_score = pred_boxlist['scores']\n gt_bbox = gt_boxlist['bbox']\n gt_label = gt_boxlist['labels']\n gt_ignore = np.zeros(len(gt_bbox))\n\n for gt_index, gt in enumerate(gt_bbox):\n if motion_iou:\n if motion_iou[gt_index] < motion_range[0] or motion_iou[gt_index] > motion_range[1]:\n gt_ignore[gt_index] = 1\n else:\n gt_ignore[gt_index] = 0\n\n for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):\n pred_mask_l = pred_label == l\n pred_bbox_l = pred_bbox[pred_mask_l]\n pred_score_l = pred_score[pred_mask_l]\n\n # sort by score\n order = pred_score_l.argsort()[::-1]\n pred_bbox_l = pred_bbox_l[order]\n pred_score_l = pred_score_l[order]\n\n gt_mask_l = gt_label == l\n gt_bbox_l = gt_bbox[gt_mask_l]\n gt_ignore_l = gt_ignore[gt_mask_l]\n\n n_pos[l] += gt_bbox_l.shape[0] - sum(gt_ignore_l)\n score[l].extend(pred_score_l)\n\n if len(pred_bbox_l) == 0:\n continue\n if len(gt_bbox_l) == 0:\n match[l].extend((0,) * pred_bbox_l.shape[0])\n pred_ignore[l].extend((empty_weight,) * pred_bbox_l.shape[0])\n continue\n\n # VID evaluation follows integer typed bounding boxes.\n pred_bbox_l = pred_bbox_l.copy()\n pred_bbox_l[:, 2:] += 1\n gt_bbox_l = gt_bbox_l.copy()\n gt_bbox_l[:, 2:] += 1\n iou = boxlist_iou(pred_bbox_l, gt_bbox_l)\n\n num_obj, num_gt_obj = iou.shape\n\n selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)\n for j in range(0, num_obj):\n iou_match = iou_thresh\n iou_match_ig = -1\n iou_match_nig = -1\n arg_match = -1\n for k in range(0, num_gt_obj):\n if (gt_ignore_l[k] == 1) & (iou[j, k] > iou_match_ig):\n iou_match_ig = iou[j, k]\n if (gt_ignore_l[k] == 0) & (iou[j, k] > iou_match_nig):\n iou_match_nig = iou[j, k]\n if selec[k] or iou[j, k] < iou_match:\n continue\n if iou[j, k] == iou_match:\n if arg_match < 0 or gt_ignore_l[arg_match]:\n arg_match = k\n else:\n arg_match = k\n iou_match = iou[j, k]\n\n if arg_match >= 0:\n match[l].append(1)\n pred_ignore[l].append(gt_ignore_l[arg_match])\n selec[arg_match] = True\n else:\n if iou_match_nig > iou_match_ig:\n pred_ignore[l].append(0)\n elif iou_match_ig > iou_match_nig:\n pred_ignore[l].append(1)\n else:\n pred_ignore[l].append(sum(gt_ignore_l) / float(num_gt_obj))\n match[l].append(0)\n # pred_ignore[l].append(0)\n\n n_fg_class = max(n_pos.keys()) + 1\n print(n_pos)\n prec = [None] * n_fg_class\n rec = [None] * n_fg_class\n\n for l in n_pos.keys():\n score_l = np.array(score[l])\n match_l = np.array(match[l], dtype=np.int8)\n pred_ignore_l = np.array(pred_ignore[l])\n\n order = score_l.argsort()[::-1]\n match_l = match_l[order]\n 
pred_ignore_l = pred_ignore_l[order]\n\n tps = np.logical_and(match_l == 1, np.logical_not(pred_ignore_l == 1))\n fps = np.logical_and(match_l == 0, np.logical_not(pred_ignore_l == 1))\n pred_ignore_l[pred_ignore_l == 0] = 1\n fps = fps * pred_ignore_l\n\n tp = np.cumsum(tps)\n fp = np.cumsum(fps)\n\n # If an element of fp + tp is 0,\n # the corresponding element of prec[l] is nan.\n prec[l] = tp / (fp + tp + np.spacing(1))\n # If n_pos[l] is 0, rec[l] is None.\n if n_pos[l] > 0:\n rec[l] = tp / n_pos[l]\n\n return prec, rec\n\n\ndef calc_detection_vid_ap(prec, rec, use_07_metric=False):\n \"\"\"Calculate average precisions based on evaluation code of VID.\n This function calculates average precisions\n from given precisions and recalls.\n The code is based on the evaluation code used in VID.\n Args:\n prec (list of numpy.array): A list of arrays.\n :obj:`prec[l]` indicates precision for class :math:`l`.\n If :obj:`prec[l]` is :obj:`None`, this function returns\n :obj:`numpy.nan` for class :math:`l`.\n rec (list of numpy.array): A list of arrays.\n :obj:`rec[l]` indicates recall for class :math:`l`.\n If :obj:`rec[l]` is :obj:`None`, this function returns\n :obj:`numpy.nan` for class :math:`l`.\n use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric\n for calculating average precision. The default value is\n :obj:`False`.\n Returns:\n ~numpy.ndarray:\n This function returns an array of average precisions.\n The :math:`l`-th value corresponds to the average precision\n for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is\n :obj:`None`, the corresponding value is set to :obj:`numpy.nan`.\n \"\"\"\n\n n_fg_class = len(prec)\n ap = np.empty(n_fg_class)\n for l in range(n_fg_class):\n if prec[l] is None or rec[l] is None:\n ap[l] = np.nan\n continue\n\n if use_07_metric:\n # 11 point metric\n ap[l] = 0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(rec[l] >= t) == 0:\n p = 0\n else:\n p = np.max(np.nan_to_num(prec[l])[rec[l] >= t])\n ap[l] += p / 11\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))\n mrec = np.concatenate(([0], rec[l], [1]))\n\n mpre = np.maximum.accumulate(mpre[::-1])[::-1]\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n\n return ap\n" ]
[ [ "numpy.expand_dims", "numpy.cumsum", "numpy.nan_to_num", "numpy.concatenate", "numpy.max", "numpy.nanmean", "numpy.where", "numpy.clip", "numpy.arange", "scipy.io.loadmat", "numpy.argmax", "numpy.zeros", "numpy.logical_not", "numpy.spacing", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.sort", "numpy.maximum.accumulate", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
ebolyen/q2-treetime
[ "162ebfca9a120391840ed30ddef18fb2b780165e" ]
[ "q2_treetime/methods.py" ]
[ "import hashlib\n\nimport skbio\nimport numpy as np\n\n\ndef add_node_names(tree: skbio.TreeNode, scheme: str = 'md5-xor') \\\n -> skbio.TreeNode:\n HASH_SIZE = 128 // 8\n\n for node in tree.postorder(include_self=True):\n if not node.children or node.name is not None:\n continue\n\n xor = np.zeros(HASH_SIZE, dtype=np.uint8)\n for child in node.children:\n # child.name will never be None because of the postorder traversal\n digest = hashlib.md5(child.name.encode('utf8')).digest()\n xor ^= np.frombuffer(digest, dtype=np.uint8)\n\n node.name = xor.tobytes().hex()\n\n return tree\n\n\ndef convert():\n pass\n\n\ndef infer_gtr():\n pass\n\n\ndef skyline():\n pass\n\n\ndef ancestral_seqs():\n pass\n\n\ndef ancestral_traits():\n pass\n\n\ndef add_trait_coords():\n pass\n" ]
[ [ "numpy.frombuffer", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SJTMusicTeam/MusicGeneration
[ "2918e151a1b9448e5452179bab70bf565d1eaaf7", "2918e151a1b9448e5452179bab70bf565d1eaaf7" ]
[ "mg/utils/midi2note.py", "mg/utils/music21vspretty_midi.py" ]
[ "from music21 import converter, instrument, note, chord, stream, midi\nimport numpy as np\nimport pandas as pd\n\n\n# Melody-RNN Format is a sequence of 8-bit integers indicating the following:\n# MELODY_NOTE_ON = [0, 127] # (note on at that MIDI pitch)\nMELODY_NOTE_OFF = 128 # (stop playing all previous notes)\nMELODY_NO_EVENT = 129 # (no change from previous event)\n# Each element in the sequence lasts for one sixteenth note.\n# This can encode monophonic music only.\n\ndef streamToNoteArray(stream):\n \"\"\"\n Convert a Music21 sequence to a numpy array of int8s into Melody-RNN format:\n 0-127 - note on at specified pitch\n 128 - note off\n 129 - no event\n \"\"\"\n # Part one, extract from stream\n total_length = np.int(np.round(stream.flat.highestTime / 0.25)) # in semiquavers\n stream_list = []\n for element in stream.flat:\n if isinstance(element, note.Note):\n stream_list.append([np.round(element.offset / 0.25), np.round(element.quarterLength / 0.25), element.pitch.midi])\n elif isinstance(element, chord.Chord):\n stream_list.append([np.round(element.offset / 0.25), np.round(element.quarterLength / 0.25), element.sortAscending().pitches[-1].midi])\n np_stream_list = np.array(stream_list, dtype=np.int)\n df = pd.DataFrame({'pos': np_stream_list.T[0], 'dur': np_stream_list.T[1], 'pitch': np_stream_list.T[2]})\n df = df.sort_values(['pos','pitch'], ascending=[True, False]) # sort the dataframe properly\n df = df.drop_duplicates(subset=['pos']) # drop duplicate values\n # part 2, convert into a sequence of note events\n #output = np.zeros(df.off.max() + 1, dtype=np.int16) + np.int16(MELODY_NO_EVENT)\n output = np.zeros(total_length+2, dtype=np.int16) + np.int16(MELODY_NO_EVENT) # set array full of no events by default.\n # Fill in the output list\n for i in range(total_length):\n if not df[df.pos==i].empty:\n n = df[df.pos==i].iloc[0] # pick the highest pitch at each semiquaver\n output[i] = n.pitch # set note on\n output[i+n.dur] = MELODY_NOTE_OFF\n\n return output\n\ndef noteArrayToDataFrame(note_array):\n \"\"\"\n Convert a numpy array containing a Melody-RNN sequence into a dataframe.\n \"\"\"\n df = pd.DataFrame({\"code\": note_array})\n df['offset'] = df.index\n df['duration'] = df.index\n df = df[df.code != MELODY_NO_EVENT]\n df.duration = df.duration.diff(-1) * -1 * 0.25 # calculate durations and change to quarter note fractions\n df = df.fillna(0.25)\n return df[['code','duration']]\n\ndef noteArrayToStream(note_array):\n \"\"\"\n Convert a numpy array containing a Melody-RNN sequence into a music21 stream.\n \"\"\"\n df = noteArrayToDataFrame(note_array)\n melody_stream = stream.Stream()\n for index, row in df.iterrows():\n if row.code == MELODY_NO_EVENT:\n new_note = note.Rest() # bit of an oversimplification, doesn't produce long notes.\n elif row.code == MELODY_NOTE_OFF:\n new_note = note.Rest()\n else:\n new_note = note.Note(row.code)\n new_note.quarterLength = row.duration\n melody_stream.append(new_note)\n return melody_stream\n", "from music21 import converter, instrument, note, chord, stream, midi#多乐器是否可行\nimport time\nimport numpy as np\nimport pandas as pd\nimport os\nimport random\nimport sys\nimport pretty_midi\nfrom pretty_midi import PrettyMIDI, Note, Instrument\nimport copy\nimport itertools\nimport collections\n\n# Melody-RNN Format is a sequence of 8-bit integers indicating the following:\n# MELODY_NOTE_ON = [0, 127] # (note on at that MIDI pitch)\nMELODY_NOTE_OFF = 128 # (stop playing all previous notes)\nMELODY_NO_EVENT = 129 # (no change from previous 
event)\n# Each element in the sequence lasts for one sixteenth note.\n# This can encode monophonic music only.\n\ndef streamToNoteArray(stream):\n \"\"\"\n Convert a Music21 sequence to a numpy array of int8s into Melody-RNN format:\n 0-127 - note on at specified pitch\n 128 - note off\n 129 - no event\n \"\"\"\n # Part one, extract from stream\n total_length = np.int(np.round(stream.flat.highestTime / 0.25)) # in semiquavers\n stream_list = []\n # cnt = 0\n for element in stream.flat:\n # if cnt<50:\n # print('offset= %f, quarterLength= %f '%(element.offset,element.quarterLength))\n # cnt += 1\n if isinstance(element, note.Note):\n stream_list.append([np.round(element.offset / 0.25), np.round(element.quarterLength / 0.25), element.pitch.midi])\n elif isinstance(element, chord.Chord):\n stream_list.append([np.round(element.offset / 0.25), np.round(element.quarterLength / 0.25), element.sortAscending().pitches[-1].midi])\n np_stream_list = np.array(stream_list, dtype=np.int)\n print(np_stream_list)\n df = pd.DataFrame({'pos': np_stream_list.T[0], 'dur': np_stream_list.T[1], 'pitch': np_stream_list.T[2]})\n df = df.sort_values(['pos','pitch'], ascending=[True, False]) # sort the dataframe properly\n df = df.drop_duplicates(subset=['pos']) # drop duplicate values\n # part 2, convert into a sequence of note events\n #output = np.zeros(df.off.max() + 1, dtype=np.int16) + np.int16(MELODY_NO_EVENT)\n output = np.zeros(total_length+2, dtype=np.int16) + np.int16(MELODY_NO_EVENT) # set array full of no events by default.\n # Fill in the output list\n \"\"\"\n for row in df.iterrows():\n output[row[1].on] = row[1].pitch # set note on\n output[row[1].off] = MELODY_NOTE_OFF\n \"\"\"\n for i in range(total_length):\n if not df[df.pos==i].empty:\n n = df[df.pos==i].iloc[0] # pick the highest pitch at each semiquaver\n output[i] = n.pitch # set note on\n output[i+n.dur] = MELODY_NOTE_OFF\n\n return output\n\ndef noteArrayToDataFrame(note_array):\n \"\"\"\n Convert a numpy array containing a Melody-RNN sequence into a dataframe.\n \"\"\"\n df = pd.DataFrame({\"code\": note_array})\n df['offset'] = df.index\n df['duration'] = df.index\n df = df[df.code != MELODY_NO_EVENT]\n df.duration = df.duration.diff(-1) * -1 * 0.25 # calculate durations and change to quarter note fractions\n df = df.fillna(0.25)\n return df[['code','duration']]\n\ndef noteArrayToStream(note_array):\n \"\"\"\n Convert a numpy array containing a Melody-RNN sequence into a music21 stream.\n \"\"\"\n df = noteArrayToDataFrame(note_array)\n melody_stream = stream.Stream()\n for index, row in df.iterrows():\n if row.code == MELODY_NO_EVENT:\n new_note = note.Rest() # bit of an oversimplification, doesn't produce long notes.\n elif row.code == MELODY_NOTE_OFF:\n new_note = note.Rest()\n else:\n new_note = note.Note(row.code)\n new_note.quarterLength = row.duration\n melody_stream.append(new_note)\n return melody_stream\n\n\n## Play a melody stream\n#sp = midi.realtime.StreamPlayer(melody_stream)\n#sp.play()\nfpath = \"../../egs/dataset/maestro/train/MIDI-UNPROCESSED_01-03_R1_2014_MID--AUDIO_01_R1_2014_wav--3.midi\"\nwm_mid = converter.parse(fpath)\n# print(wm_mid.flat)\n#wm_mid.show()\nwm_mel_rnn = streamToNoteArray(wm_mid)[:50]\nprint(wm_mel_rnn)\n\n#noteArrayToStream(wm_mel_rnn).show()\n#noteArrayToStream(wm_mel_rnn).write(\"midi\", \"../../egs/dataset/tmp_res/music_test.mid\")\n\nmusic = pretty_midi.PrettyMIDI(fpath)\nnotes = itertools.chain(*[\n inst.notes for inst in music.instruments\n if inst.program in range(128) and not 
inst.is_drum])\nprint(notes)\nprint([inst.notes[:50] for inst in music.instruments\n if inst.program in range(128) and not inst.is_drum])" ]
[ [ "pandas.DataFrame", "numpy.int16", "numpy.round", "numpy.array", "numpy.zeros" ], [ "pandas.DataFrame", "numpy.int16", "numpy.round", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Ben3940/scikit-learn
[ "adb47e7c142ce6d699cc5927925d448cb2c1ab91", "adb47e7c142ce6d699cc5927925d448cb2c1ab91" ]
[ "examples/inspection/plot_partial_dependence.py", "sklearn/tests/test_docstrings.py" ]
[ "\"\"\"\n===============================================================\nPartial Dependence and Individual Conditional Expectation Plots\n===============================================================\n\nPartial dependence plots show the dependence between the target function [2]_\nand a set of features of interest, marginalizing over the values of all other\nfeatures (the complement features). Due to the limits of human perception, the\nsize of the set of features of interest must be small (usually, one or two)\nthus they are usually chosen among the most important features.\n\nSimilarly, an individual conditional expectation (ICE) plot [3]_\nshows the dependence between the target function and a feature of interest.\nHowever, unlike partial dependence plots, which show the average effect of the\nfeatures of interest, ICE plots visualize the dependence of the prediction on a\nfeature for each :term:`sample` separately, with one line per sample.\nOnly one feature of interest is supported for ICE plots.\n\nThis example shows how to obtain partial dependence and ICE plots from a\n:class:`~sklearn.neural_network.MLPRegressor` and a\n:class:`~sklearn.ensemble.HistGradientBoostingRegressor` trained on the\nCalifornia housing dataset. The example is taken from [1]_.\n\n.. [1] T. Hastie, R. Tibshirani and J. Friedman, \"Elements of Statistical\n Learning Ed. 2\", Springer, 2009.\n\n.. [2] For classification you can think of it as the regression score before\n the link function.\n\n.. [3] :arxiv:`Goldstein, A., Kapelner, A., Bleich, J., and Pitkin, E. (2015).\n \"Peeking Inside the Black Box: Visualizing Statistical Learning With Plots of\n Individual Conditional Expectation\". Journal of Computational and\n Graphical Statistics, 24(1): 44-65 <1309.6392>`\n\n\"\"\"\n\n# %%\n# California Housing data preprocessing\n# -------------------------------------\n#\n# Center target to avoid gradient boosting init bias: gradient boosting\n# with the 'recursion' method does not account for the initial estimator\n# (here the average target, by default).\n\nimport pandas as pd\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.model_selection import train_test_split\n\ncal_housing = fetch_california_housing()\nX = pd.DataFrame(cal_housing.data, columns=cal_housing.feature_names)\ny = cal_housing.target\n\ny -= y.mean()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)\n\n# %%\n# 1-way partial dependence with different models\n# ----------------------------------------------\n#\n# In this section, we will compute 1-way partial dependence with two different\n# machine-learning models: (i) a multi-layer perceptron and (ii) a\n# gradient-boosting. 
With these two models, we illustrate how to compute and\n# interpret both partial dependence plot (PDP) and individual conditional\n# expectation (ICE).\n#\n# Multi-layer perceptron\n# ......................\n#\n# Let's fit a :class:`~sklearn.neural_network.MLPRegressor` and compute\n# single-variable partial dependence plots.\n\nfrom time import time\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.neural_network import MLPRegressor\n\nprint(\"Training MLPRegressor...\")\ntic = time()\nest = make_pipeline(\n QuantileTransformer(),\n MLPRegressor(\n hidden_layer_sizes=(30, 15),\n learning_rate_init=0.01,\n early_stopping=True,\n random_state=0,\n ),\n)\nest.fit(X_train, y_train)\nprint(f\"done in {time() - tic:.3f}s\")\nprint(f\"Test R2 score: {est.score(X_test, y_test):.2f}\")\n\n# %%\n# We configured a pipeline to scale the numerical input features and tuned the\n# neural network size and learning rate to get a reasonable compromise between\n# training time and predictive performance on a test set.\n#\n# Importantly, this tabular dataset has very different dynamic ranges for its\n# features. Neural networks tend to be very sensitive to features with varying\n# scales and forgetting to preprocess the numeric feature would lead to a very\n# poor model.\n#\n# It would be possible to get even higher predictive performance with a larger\n# neural network but the training would also be significantly more expensive.\n#\n# Note that it is important to check that the model is accurate enough on a\n# test set before plotting the partial dependence since there would be little\n# use in explaining the impact of a given feature on the prediction function of\n# a poor model.\n#\n# We will plot the partial dependence, both individual (ICE) and averaged one\n# (PDP). We limit to only 50 ICE curves to not overcrowd the plot.\n\nfrom sklearn.inspection import PartialDependenceDisplay\n\ncommon_params = {\n \"subsample\": 50,\n \"n_jobs\": 2,\n \"grid_resolution\": 20,\n \"centered\": True,\n \"random_state\": 0,\n}\n\nprint(\"Computing partial dependence plots...\")\ntic = time()\ndisplay = PartialDependenceDisplay.from_estimator(\n est,\n X_train,\n features=[\"MedInc\", \"AveOccup\", \"HouseAge\", \"AveRooms\"],\n kind=\"both\",\n **common_params,\n)\nprint(f\"done in {time() - tic:.3f}s\")\ndisplay.figure_.suptitle(\n \"Partial dependence of house value on non-location features\\n\"\n \"for the California housing dataset, with MLPRegressor\"\n)\ndisplay.figure_.subplots_adjust(hspace=0.3)\n\n# %%\n# Gradient boosting\n# .................\n#\n# Let's now fit a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` and\n# compute the partial dependence on the same features.\n\nfrom sklearn.ensemble import HistGradientBoostingRegressor\n\nprint(\"Training HistGradientBoostingRegressor...\")\ntic = time()\nest = HistGradientBoostingRegressor(random_state=0)\nest.fit(X_train, y_train)\nprint(f\"done in {time() - tic:.3f}s\")\nprint(f\"Test R2 score: {est.score(X_test, y_test):.2f}\")\n\n# %%\n# Here, we used the default hyperparameters for the gradient boosting model\n# without any preprocessing as tree-based models are naturally robust to\n# monotonic transformations of numerical features.\n#\n# Note that on this tabular dataset, Gradient Boosting Machines are both\n# significantly faster to train and more accurate than neural networks. 
It is\n# also significantly cheaper to tune their hyperparameters (the defaults tend\n# to work well while this is not often the case for neural networks).\n#\n# We will plot the partial dependence, both individual (ICE) and averaged one\n# (PDP). We limit to only 50 ICE curves to not overcrowd the plot.\n\nprint(\"Computing partial dependence plots...\")\ntic = time()\ndisplay = PartialDependenceDisplay.from_estimator(\n est,\n X_train,\n features=[\"MedInc\", \"AveOccup\", \"HouseAge\", \"AveRooms\"],\n kind=\"both\",\n **common_params,\n)\nprint(f\"done in {time() - tic:.3f}s\")\ndisplay.figure_.suptitle(\n \"Partial dependence of house value on non-location features\\n\"\n \"for the California housing dataset, with Gradient Boosting\"\n)\ndisplay.figure_.subplots_adjust(wspace=0.4, hspace=0.3)\n\n# %%\n# Analysis of the plots\n# .....................\n#\n# We can clearly see on the PDPs (dashed orange line) that the median house price\n# shows a linear relationship with the median income (top left) and that the\n# house price drops when the average occupants per household increases (top\n# middle). The top right plot shows that the house age in a district does not\n# have a strong influence on the (median) house price; so does the average\n# rooms per household.\n#\n# The ICE curves (light blue lines) complement the analysis: we can see that\n# there are some exceptions (which are better highlighted with the option\n# `centered=True`), where the house price remains constant with respect to\n# median income and average occupants variations.\n# On the other hand, while the house age (top right) does not have a strong\n# influence on the median house price on average, there seems to be a number\n# of exceptions where the house price increases when\n# between the ages 15-25. Similar exceptions can be observed for the average\n# number of rooms (bottom left). Therefore, ICE plots show some individual\n# effect which are attenuated by taking the averages.\n#\n# In all plots, the tick marks on the x-axis represent the deciles of the\n# feature values in the training data.\n#\n# We also observe that :class:`~sklearn.neural_network.MLPRegressor` has much\n# smoother predictions than\n# :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.\n#\n# However, it is worth noting that we are creating potential meaningless\n# synthetic samples if features are correlated.\n\n# %%\n# 2D interaction plots\n# --------------------\n#\n# PDPs with two features of interest enable us to visualize interactions among\n# them. However, ICEs cannot be plotted in an easy manner and thus interpreted.\n# Another consideration is linked to the performance to compute the PDPs. 
With\n# the tree-based algorithm, when only PDPs are requested, they can be computed\n# on an efficient way using the `'recursion'` method.\nimport matplotlib.pyplot as plt\n\nprint(\"Computing partial dependence plots...\")\ntic = time()\n_, ax = plt.subplots(ncols=3, figsize=(9, 4))\n\n# Note that we could have called the method `from_estimator` three times and\n# provide one feature, one kind of plot, and one axis for each call.\ndisplay = PartialDependenceDisplay.from_estimator(\n est,\n X_train,\n features=[\"AveOccup\", \"HouseAge\", (\"AveOccup\", \"HouseAge\")],\n kind=[\"both\", \"both\", \"average\"],\n ax=ax,\n **common_params,\n)\n\nprint(f\"done in {time() - tic:.3f}s\")\ndisplay.figure_.suptitle(\n \"Partial dependence of house value on non-location features\\n\"\n \"for the California housing dataset, with Gradient Boosting\"\n)\ndisplay.figure_.subplots_adjust(wspace=0.4, hspace=0.3)\n\n# %%\n# The two-way partial dependence plot shows the dependence of median house\n# price on joint values of house age and average occupants per household. We\n# can clearly see an interaction between the two features: for an average\n# occupancy greater than two, the house price is nearly independent of the\n# house age, whereas for values less than two there is a strong dependence on\n# age.\n#\n# 3D interaction plots\n# --------------------\n#\n# Let's make the same partial dependence plot for the 2 features interaction,\n# this time in 3 dimensions.\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.inspection import partial_dependence\n\nfig = plt.figure()\n\nfeatures = (\"AveOccup\", \"HouseAge\")\npdp = partial_dependence(\n est, X_train, features=features, kind=\"average\", grid_resolution=10\n)\nXX, YY = np.meshgrid(pdp[\"values\"][0], pdp[\"values\"][1])\nZ = pdp.average[0].T\nax = Axes3D(fig)\nfig.add_axes(ax)\n\nsurf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu, edgecolor=\"k\")\nax.set_xlabel(features[0])\nax.set_ylabel(features[1])\nax.set_zlabel(\"Partial dependence\")\n# pretty init view\nax.view_init(elev=22, azim=122)\nplt.colorbar(surf)\nplt.suptitle(\n \"Partial dependence of house value on median\\n\"\n \"age and average occupancy, with Gradient Boosting\"\n)\nplt.subplots_adjust(top=0.9)\nplt.show()\n", "import re\nfrom inspect import signature\nimport pkgutil\nimport inspect\nimport importlib\nfrom typing import Optional\n\nimport pytest\nfrom sklearn.utils import all_estimators\nimport sklearn\n\nnumpydoc_validation = pytest.importorskip(\"numpydoc.validate\")\n\nFUNCTION_DOCSTRING_IGNORE_LIST = [\n \"sklearn.datasets._kddcup99.fetch_kddcup99\",\n \"sklearn.datasets._lfw.fetch_lfw_pairs\",\n \"sklearn.datasets._lfw.fetch_lfw_people\",\n \"sklearn.datasets._samples_generator.make_gaussian_quantiles\",\n \"sklearn.datasets._samples_generator.make_spd_matrix\",\n \"sklearn.datasets._species_distributions.fetch_species_distributions\",\n \"sklearn.datasets._svmlight_format_io.dump_svmlight_file\",\n \"sklearn.datasets._svmlight_format_io.load_svmlight_file\",\n \"sklearn.datasets._svmlight_format_io.load_svmlight_files\",\n \"sklearn.decomposition._dict_learning.dict_learning\",\n \"sklearn.decomposition._dict_learning.dict_learning_online\",\n \"sklearn.decomposition._fastica.fastica\",\n \"sklearn.decomposition._nmf.non_negative_factorization\",\n \"sklearn.externals._packaging.version.parse\",\n \"sklearn.feature_extraction.image.extract_patches_2d\",\n \"sklearn.feature_extraction.image.img_to_graph\",\n 
\"sklearn.feature_extraction.text.strip_accents_ascii\",\n \"sklearn.feature_extraction.text.strip_accents_unicode\",\n \"sklearn.feature_extraction.text.strip_tags\",\n \"sklearn.feature_selection._univariate_selection.chi2\",\n \"sklearn.feature_selection._univariate_selection.f_oneway\",\n \"sklearn.inspection._partial_dependence.partial_dependence\",\n \"sklearn.inspection._plot.partial_dependence.plot_partial_dependence\",\n \"sklearn.linear_model._least_angle.lars_path_gram\",\n \"sklearn.linear_model._omp.orthogonal_mp_gram\",\n \"sklearn.manifold._locally_linear.locally_linear_embedding\",\n \"sklearn.manifold._t_sne.trustworthiness\",\n \"sklearn.metrics._classification.brier_score_loss\",\n \"sklearn.metrics._classification.cohen_kappa_score\",\n \"sklearn.metrics._classification.fbeta_score\",\n \"sklearn.metrics._classification.hinge_loss\",\n \"sklearn.metrics._classification.jaccard_score\",\n \"sklearn.metrics._classification.log_loss\",\n \"sklearn.metrics._plot.det_curve.plot_det_curve\",\n \"sklearn.metrics._plot.precision_recall_curve.plot_precision_recall_curve\",\n \"sklearn.metrics._ranking.auc\",\n \"sklearn.metrics._ranking.coverage_error\",\n \"sklearn.metrics._ranking.dcg_score\",\n \"sklearn.metrics._ranking.label_ranking_average_precision_score\",\n \"sklearn.metrics._ranking.roc_auc_score\",\n \"sklearn.metrics._ranking.roc_curve\",\n \"sklearn.metrics._ranking.top_k_accuracy_score\",\n \"sklearn.metrics._regression.mean_pinball_loss\",\n \"sklearn.metrics.cluster._bicluster.consensus_score\",\n \"sklearn.metrics.cluster._supervised.adjusted_mutual_info_score\",\n \"sklearn.metrics.cluster._supervised.adjusted_rand_score\",\n \"sklearn.metrics.cluster._supervised.entropy\",\n \"sklearn.metrics.cluster._supervised.fowlkes_mallows_score\",\n \"sklearn.metrics.cluster._supervised.homogeneity_completeness_v_measure\",\n \"sklearn.metrics.cluster._supervised.mutual_info_score\",\n \"sklearn.metrics.cluster._supervised.normalized_mutual_info_score\",\n \"sklearn.metrics.cluster._supervised.pair_confusion_matrix\",\n \"sklearn.metrics.cluster._supervised.rand_score\",\n \"sklearn.metrics.cluster._supervised.v_measure_score\",\n \"sklearn.metrics.pairwise.additive_chi2_kernel\",\n \"sklearn.metrics.pairwise.check_paired_arrays\",\n \"sklearn.metrics.pairwise.check_pairwise_arrays\",\n \"sklearn.metrics.pairwise.chi2_kernel\",\n \"sklearn.metrics.pairwise.cosine_distances\",\n \"sklearn.metrics.pairwise.cosine_similarity\",\n \"sklearn.metrics.pairwise.distance_metrics\",\n \"sklearn.metrics.pairwise.haversine_distances\",\n \"sklearn.metrics.pairwise.kernel_metrics\",\n \"sklearn.metrics.pairwise.paired_manhattan_distances\",\n \"sklearn.metrics.pairwise.pairwise_distances_argmin\",\n \"sklearn.metrics.pairwise.pairwise_distances_argmin_min\",\n \"sklearn.metrics.pairwise.pairwise_distances_chunked\",\n \"sklearn.metrics.pairwise.pairwise_kernels\",\n \"sklearn.metrics.pairwise.polynomial_kernel\",\n \"sklearn.metrics.pairwise.rbf_kernel\",\n \"sklearn.metrics.pairwise.sigmoid_kernel\",\n \"sklearn.model_selection._validation.cross_validate\",\n \"sklearn.model_selection._validation.learning_curve\",\n \"sklearn.model_selection._validation.permutation_test_score\",\n \"sklearn.model_selection._validation.validation_curve\",\n \"sklearn.pipeline.make_union\",\n \"sklearn.preprocessing._data.maxabs_scale\",\n \"sklearn.preprocessing._data.robust_scale\",\n \"sklearn.preprocessing._data.scale\",\n \"sklearn.preprocessing._label.label_binarize\",\n 
\"sklearn.random_projection.johnson_lindenstrauss_min_dim\",\n \"sklearn.svm._bounds.l1_min_c\",\n \"sklearn.tree._export.plot_tree\",\n \"sklearn.utils.axis0_safe_slice\",\n \"sklearn.utils.extmath.density\",\n \"sklearn.utils.extmath.fast_logdet\",\n \"sklearn.utils.extmath.randomized_svd\",\n \"sklearn.utils.extmath.safe_sparse_dot\",\n \"sklearn.utils.extmath.squared_norm\",\n \"sklearn.utils.extmath.stable_cumsum\",\n \"sklearn.utils.extmath.svd_flip\",\n \"sklearn.utils.extmath.weighted_mode\",\n \"sklearn.utils.fixes.delayed\",\n \"sklearn.utils.fixes.linspace\",\n # To be fixed in upstream issue:\n # https://github.com/joblib/threadpoolctl/issues/108\n \"sklearn.utils.fixes.threadpool_info\",\n \"sklearn.utils.fixes.threadpool_limits\",\n \"sklearn.utils.gen_batches\",\n \"sklearn.utils.gen_even_slices\",\n \"sklearn.utils.graph.graph_shortest_path\",\n \"sklearn.utils.graph.single_source_shortest_path_length\",\n \"sklearn.utils.is_scalar_nan\",\n \"sklearn.utils.metaestimators.available_if\",\n \"sklearn.utils.metaestimators.if_delegate_has_method\",\n \"sklearn.utils.multiclass.class_distribution\",\n \"sklearn.utils.multiclass.type_of_target\",\n \"sklearn.utils.multiclass.unique_labels\",\n \"sklearn.utils.resample\",\n \"sklearn.utils.safe_mask\",\n \"sklearn.utils.safe_sqr\",\n \"sklearn.utils.shuffle\",\n \"sklearn.utils.sparsefuncs.count_nonzero\",\n \"sklearn.utils.sparsefuncs.csc_median_axis_0\",\n \"sklearn.utils.sparsefuncs.incr_mean_variance_axis\",\n \"sklearn.utils.sparsefuncs.inplace_swap_column\",\n \"sklearn.utils.sparsefuncs.inplace_swap_row\",\n \"sklearn.utils.sparsefuncs.inplace_swap_row_csc\",\n \"sklearn.utils.sparsefuncs.inplace_swap_row_csr\",\n \"sklearn.utils.sparsefuncs.mean_variance_axis\",\n \"sklearn.utils.validation.check_is_fitted\",\n]\nFUNCTION_DOCSTRING_IGNORE_LIST = set(FUNCTION_DOCSTRING_IGNORE_LIST)\n\n\ndef get_all_methods():\n estimators = all_estimators()\n for name, Estimator in estimators:\n if name.startswith(\"_\"):\n # skip private classes\n continue\n methods = []\n for name in dir(Estimator):\n if name.startswith(\"_\"):\n continue\n method_obj = getattr(Estimator, name)\n if hasattr(method_obj, \"__call__\") or isinstance(method_obj, property):\n methods.append(name)\n methods.append(None)\n\n for method in sorted(methods, key=lambda x: str(x)):\n yield Estimator, method\n\n\ndef _is_checked_function(item):\n if not inspect.isfunction(item):\n return False\n\n if item.__name__.startswith(\"_\"):\n return False\n\n mod = item.__module__\n if not mod.startswith(\"sklearn.\") or mod.endswith(\"estimator_checks\"):\n return False\n\n return True\n\n\ndef get_all_functions_names():\n \"\"\"Get all public functions define in the sklearn module\"\"\"\n modules_to_ignore = {\n \"tests\",\n \"externals\",\n \"setup\",\n \"conftest\",\n \"experimental\",\n \"estimator_checks\",\n }\n\n all_functions_names = set()\n for module_finder, module_name, ispkg in pkgutil.walk_packages(\n path=sklearn.__path__, prefix=\"sklearn.\"\n ):\n module_parts = module_name.split(\".\")\n if (\n any(part in modules_to_ignore for part in module_parts)\n or \"._\" in module_name\n ):\n continue\n\n module = importlib.import_module(module_name)\n functions = inspect.getmembers(module, _is_checked_function)\n for name, func in functions:\n full_name = f\"{func.__module__}.{func.__name__}\"\n all_functions_names.add(full_name)\n\n return sorted(all_functions_names)\n\n\ndef filter_errors(errors, method, Estimator=None):\n \"\"\"\n Ignore some errors based on the 
method type.\n\n These rules are specific for scikit-learn.\"\"\"\n for code, message in errors:\n # We ignore following error code,\n # - RT02: The first line of the Returns section\n # should contain only the type, ..\n # (as we may need refer to the name of the returned\n # object)\n # - GL01: Docstring text (summary) should start in the line\n # immediately after the opening quotes (not in the same line,\n # or leaving a blank line in between)\n # - GL02: If there's a blank line, it should be before the\n # first line of the Returns section, not after (it allows to have\n # short docstrings for properties).\n\n if code in [\"RT02\", \"GL01\", \"GL02\"]:\n continue\n\n # Ignore PR02: Unknown parameters for properties. We sometimes use\n # properties for ducktyping, i.e. SGDClassifier.predict_proba\n if code == \"PR02\" and Estimator is not None and method is not None:\n method_obj = getattr(Estimator, method)\n if isinstance(method_obj, property):\n continue\n\n # Following codes are only taken into account for the\n # top level class docstrings:\n # - ES01: No extended summary found\n # - SA01: See Also section not found\n # - EX01: No examples section found\n\n if method is not None and code in [\"EX01\", \"SA01\", \"ES01\"]:\n continue\n yield code, message\n\n\ndef repr_errors(res, estimator=None, method: Optional[str] = None) -> str:\n \"\"\"Pretty print original docstring and the obtained errors\n\n Parameters\n ----------\n res : dict\n result of numpydoc.validate.validate\n estimator : {estimator, None}\n estimator object or None\n method : str\n if estimator is not None, either the method name or None.\n\n Returns\n -------\n str\n String representation of the error.\n \"\"\"\n if method is None:\n if hasattr(estimator, \"__init__\"):\n method = \"__init__\"\n elif estimator is None:\n raise ValueError(\"At least one of estimator, method should be provided\")\n else:\n raise NotImplementedError\n\n if estimator is not None:\n obj = getattr(estimator, method)\n try:\n obj_signature = str(signature(obj))\n except TypeError:\n # In particular we can't parse the signature of properties\n obj_signature = (\n \"\\nParsing of the method signature failed, \"\n \"possibly because this is a property.\"\n )\n\n obj_name = estimator.__name__ + \".\" + method\n else:\n obj_signature = \"\"\n obj_name = method\n\n msg = \"\\n\\n\" + \"\\n\\n\".join(\n [\n str(res[\"file\"]),\n obj_name + obj_signature,\n res[\"docstring\"],\n \"# Errors\",\n \"\\n\".join(\n \" - {}: {}\".format(code, message) for code, message in res[\"errors\"]\n ),\n ]\n )\n return msg\n\n\[email protected](\"function_name\", get_all_functions_names())\ndef test_function_docstring(function_name, request):\n \"\"\"Check function docstrings using numpydoc.\"\"\"\n if function_name in FUNCTION_DOCSTRING_IGNORE_LIST:\n request.applymarker(\n pytest.mark.xfail(run=False, reason=\"TODO pass numpydoc validation\")\n )\n\n res = numpydoc_validation.validate(function_name)\n\n res[\"errors\"] = list(filter_errors(res[\"errors\"], method=\"function\"))\n\n if res[\"errors\"]:\n msg = repr_errors(res, method=f\"Tested function: {function_name}\")\n\n raise ValueError(msg)\n\n\[email protected](\"Estimator, method\", get_all_methods())\ndef test_docstring(Estimator, method, request):\n base_import_path = Estimator.__module__\n import_path = [base_import_path, Estimator.__name__]\n if method is not None:\n import_path.append(method)\n\n import_path = \".\".join(import_path)\n\n res = numpydoc_validation.validate(import_path)\n\n 
res[\"errors\"] = list(filter_errors(res[\"errors\"], method, Estimator=Estimator))\n\n if res[\"errors\"]:\n msg = repr_errors(res, Estimator, method)\n\n raise ValueError(msg)\n\n\nif __name__ == \"__main__\":\n import sys\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Validate docstring with numpydoc.\")\n parser.add_argument(\"import_path\", help=\"Import path to validate\")\n\n args = parser.parse_args()\n\n res = numpydoc_validation.validate(args.import_path)\n\n import_path_sections = args.import_path.split(\".\")\n # When applied to classes, detect class method. For functions\n # method = None.\n # TODO: this detection can be improved. Currently we assume that we have\n # class # methods if the second path element before last is in camel case.\n if len(import_path_sections) >= 2 and re.match(\n r\"(?:[A-Z][a-z]*)+\", import_path_sections[-2]\n ):\n method = import_path_sections[-1]\n else:\n method = None\n\n res[\"errors\"] = list(filter_errors(res[\"errors\"], method))\n\n if res[\"errors\"]:\n msg = repr_errors(res, method=args.import_path)\n\n print(msg)\n sys.exit(1)\n else:\n print(\"All docstring checks passed for {}!\".format(args.import_path))\n" ]
[ [ "sklearn.neural_network.MLPRegressor", "sklearn.inspection.PartialDependenceDisplay.from_estimator", "sklearn.preprocessing.QuantileTransformer", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "matplotlib.pyplot.subplots", "sklearn.datasets.fetch_california_housing", "sklearn.inspection.partial_dependence", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.suptitle", "numpy.meshgrid", "sklearn.ensemble.HistGradientBoostingRegressor", "matplotlib.pyplot.figure" ], [ "sklearn.utils.all_estimators" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
i2mint/examples
[ "1dd52935f6f6175a17af9b8e5ee9e2b10b08cb7f", "1dd52935f6f6175a17af9b8e5ee9e2b10b08cb7f" ]
[ "synthetic_sounds/classification_problems.py", "synthetic_sounds/outlier_problems.py" ]
[ "from typing import Iterable, Callable\nimport numpy as np\nfrom examples.synthetic_sounds.util import (\n seeds_to_wfs,\n seed_to_wf_chk,\n DFLT_SEEDS,\n DFLT_CHUNKER,\n chk_tag_gen,\n frame_annots_to_chk_annots,\n)\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\n\n\ndef make_frequency_groups(seeds: Iterable, chk_size: int, class_sep: float):\n freq_dict = {}\n for seed in sorted(list(set(seeds))):\n freq_dict[seed] = 100 + class_sep * 1000 * len(freq_dict)\n\n wfs = seeds_to_wfs(seeds, chk_size, freq_dict, seed_to_wf_chk)\n\n annots = []\n for idx, wf in enumerate(wfs):\n annots.append((np.array([chk_size * idx, chk_size * (idx + 1)]), seeds[idx]))\n\n return wfs, annots\n\n\ndef test_classification_model(\n seeds: Iterable = None,\n n_classes: int = None,\n chk_size: int = 2048 * 5,\n class_sep: float = 1.0,\n chunker=DFLT_CHUNKER,\n chunker_chk_size: int = 1024,\n featurizer: Callable = PCA,\n model: Callable = SVC,\n):\n if n_classes is None and seeds is None:\n raise AttributeError(\"Either seeds or n_classes needs to be specified!\")\n elif seeds is None:\n seeds = list(DFLT_SEEDS[:n_classes])\n elif n_classes is None:\n pass\n else:\n assert len(set(seeds)) == n_classes\n\n wfs, annots = make_frequency_groups(seeds, chk_size, class_sep)\n\n chks, tags = zip(*chk_tag_gen(wfs, seeds, chunker=chunker(chunker_chk_size)))\n\n featurizer = featurizer().fit(chks, tags)\n fvs = featurizer(chks)\n\n model = model().fit(fvs, tags)\n scores = model(fvs)\n\n chk_annots = frame_annots_to_chk_annots(annots, chunker_chk_size)\n classification_wf = np.hstack(wfs)\n\n return scores, chk_annots, classification_wf\n", "import librosa\nimport numpy as np\nimport augly.audio as audaugs\nfrom examples.synthetic_sounds.util import (\n times_to_frames_range,\n frames_to_chunks,\n chk_gen,\n DFLT_CHUNKER,\n)\nfrom typing import Iterable, Callable\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import OneClassSVM\n\n\ndef build_click_wf(\n base_wf: np.ndarray,\n times_for_clicks: np.ndarray,\n sample_rate: int = 44100,\n strength_of_click: float = 0,\n click_sample: np.ndarray = None,\n):\n times_for_clicks = np.array(times_for_clicks)\n times_for_clicks = np.append(times_for_clicks, base_wf.shape[-1] / sample_rate)\n\n clicks_audio = librosa.clicks(\n times=times_for_clicks, sr=sample_rate, click=click_sample\n )\n\n new_wf, sample_rate = audaugs.add_background_noise(\n base_wf,\n sample_rate=sample_rate,\n background_audio=clicks_audio,\n snr_level_db=strength_of_click,\n )\n\n if click_sample is not None:\n frames_range = times_to_frames_range(\n times=times_for_clicks, click_len=len(click_sample), sample_rate=sample_rate\n )\n else:\n frames_range = times_to_frames_range(\n times=times_for_clicks, sample_rate=sample_rate\n )\n\n return new_wf, frames_range\n\n\ndef test_outlier_model(\n base_wf: np.ndarray,\n sample_rate: int = 44100,\n seconds_between_clicks: float = None,\n times_for_clicks: Iterable = None,\n strength_of_click: float = 0,\n click_sample: np.ndarray = None,\n chunker=DFLT_CHUNKER,\n chk_size=2048,\n featurizer: Callable = PCA,\n model: Callable = OneClassSVM,\n):\n num_samples = base_wf.shape[-1]\n seconds = num_samples / sample_rate\n\n if seconds_between_clicks is None and times_for_clicks is None:\n raise AttributeError(\n \"Either seconds_between_clicks or times needs to be specified!\"\n )\n elif times_for_clicks is None:\n times_for_clicks = np.arange(0, seconds, seconds_between_clicks)\n elif seconds_between_clicks is None:\n times_for_clicks = 
np.array(times_for_clicks)\n else:\n raise AttributeError(\n \"Only one of times and seconds_between_clicks should be specified!\"\n )\n\n outlier_wf, frames_range = build_click_wf(\n base_wf=base_wf,\n times_for_clicks=times_for_clicks,\n sample_rate=sample_rate,\n strength_of_click=strength_of_click,\n click_sample=click_sample,\n )\n chunker = chunker(chk_size)\n chks = [tup for tup in chk_gen(outlier_wf, chunker)]\n\n featurizer = featurizer().fit(chks)\n fvs = featurizer(chks)\n\n model = model().fit(fvs)\n outlier_scores = model.predict(fvs)\n\n click_chunks = frames_to_chunks(frames_range, chk_size)\n return outlier_scores, click_chunks, outlier_wf\n" ]
[ [ "numpy.hstack", "numpy.array" ], [ "numpy.arange", "numpy.append", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
not-a-hot-dog/parallelized-disease-modeling
[ "e82d052b4841f1c545f4f5c65ce509f1a3418754" ]
[ "generate_data.py" ]
[ "import numpy as np\n\n##Read in data matrix\ndata = np.load(\"data/matrix.npy\")\n\nprint('Creating data...')\n##Generate individual CSVs\nS_init_data = data[:, :, 2]\nI_init_data = data[:, :, 3]\nR_init_data = data[:, :, 4]\nisUS_data = data[:, :, 5]\n#Use fixed beta and gamma for stable simulation\nbeta_data = 0.8 * np.ones(S_init_data.shape)\n# beta_data = data[:, :, 0]*200\ngamma_data = 0.01 * np.ones(S_init_data.shape)\n# gamma_data = data[:, :, 1]/10\ndS_data = 0.01 * np.ones(S_init_data.shape)\ndI_data = 0.3 * np.ones(S_init_data.shape)\ndR_data = 0.01 * np.ones(S_init_data.shape)\n#Prevent diffusion at regions with no people\ndS_data[isUS_data == 0] = 0\ndI_data[isUS_data == 0] = 0\ndR_data[isUS_data == 0] = 0\n\nprint('Saving data...')\nnp.savetxt('data/beta.csv', beta_data, delimiter=',')\nnp.savetxt('data/gamma.csv', gamma_data, delimiter=',')\nnp.savetxt('data/S_init.csv', S_init_data, delimiter=',')\nnp.savetxt('data/I_init.csv', I_init_data, delimiter=',')\nnp.savetxt('data/R_init.csv', R_init_data, delimiter=',')\nnp.savetxt('data/dS.csv', dS_data, delimiter=',')\nnp.savetxt('data/dI.csv', dI_data, delimiter=',')\nnp.savetxt('data/dR.csv', dR_data, delimiter=',')\nnp.savetxt('data/isUS.csv', isUS_data, delimiter=',')" ]
[ [ "numpy.savetxt", "numpy.load", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
awardblvr/pv_mppt_test
[ "366015fd418448791b8470baf82e6ecb3686b17d" ]
[ "Graph_MPPT.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\n Import MPPT CSV data and plot it.\n\n CSV format:\n Volts,volts,amps,watts,state,mode_str,panelSN,resistance,timestamp\n 29.646,29.646,0.0,0.0,0,CR,B41J00052893,100000,20210913_120014.79\n 14.267,14.267,0.354,5.05,1,CR,B41J00052893,40.0,20210913_120016.16\n\n\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport argparse\nfrom datetime import datetime as dt\nimport time\nimport pandas as pd\nfrom numpy import *\nimport numpy as np\nfrom mpl_toolkits.axes_grid1 import host_subplot\nfrom mpl_toolkits import axisartist\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\nfrom csv import reader\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4, depth=4).pprint\npp_str = pprint.PrettyPrinter(indent=4, depth=4).pformat\n\ndef plot_df(df):\n '''\n 0 1 2 3 4 5 6 7 8\n 0 Volts, volts, amps, watts, state, mode_str, panelSN, resistance, timestamp\n 1 29.646, 29.646, 0.0, 0.0, 0, CR, B41J00052893, 100000, 20210913_120014.79 <--Voc\n 2 14.267, 14.267, 0.354, 5.05, 1, CR, B41J00052893, 40.0, 20210913_120016.16\n '''\n print(df)\n title_sn = df['panelSN'][1]\n\n volt_series = df['Volts'][1:]\n\n std_voltage_series = np.arange(50, 0, 0-(50.0 /volt_series.size ))\n\n print(f\"{volt_series.size=}\")\n print(f\"std_voltage_series-> size {len(std_voltage_series)}, {std_voltage_series})\")\n\n amps_series = df['amps'][1:]\n watts_series = df['watts'][1:]\n ohms_series = df['resistance'][1:]\n\n # print(volt_series)\n\n fig, ax1 = plt.subplots()\n\n\n color = 'tab:red'\n ax1.set_xlabel('Voltage')\n # ax1.set_ylabel('Current', color=color)\n ax1.set_ylim(1, 6)\n # ax1.plot(volt_series, amps_series, color=color)\n ax1.plot(std_voltage_series, amps_series, color=color, label='Current')\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:blue'\n # ax2.set_ylabel('Watts', color=color) # we already handled the x-label with ax1\n # ax2.plot(volt_series, watts_series, color=color)\n ax2.plot(std_voltage_series, watts_series, color=color, label='Watts')\n ax2.tick_params(axis='y', labelcolor=color)\n\n plt.title(f\"Panel S/N {title_sn}\")\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.legend()\n plt.show()\n\ndef get_and_plot_mppt(df):\n\n # IL = array(ItemList)\n \n host = host_subplot(111, axes_class=axisartist.Axes) # (1 row, 1 column, plot number 1)\n plt.subplots_adjust(right=0.75)\n\n par1 = host.twinx()\n # par2 = host.twinx()\n\n # par2.axis[\"right\"] = par2.new_fixed_axis(loc=\"right\", offset=(60, 0))\n\n par1.axis[\"right\"].toggle(all=True)\n # OUT_FOR_SINGLE par2.axis[\"right\"].toggle() #all=True)\n\n '''\n 0 1 2 3 4 5 6 7 8\n 0 Volts, volts, amps, watts, state, mode_str, panelSN, resistance, timestamp\n 1 29.646, 29.646, 0.0, 0.0, 0, CR, B41J00052893, 100000, 20210913_120014.79 <--Voc\n 2 14.267, 14.267, 0.354, 5.05, 1, CR, B41J00052893, 40.0, 20210913_120016.16\n '''\n\n # print\n # '\\n'.join(['%i: %s' % (n, l[n]) for n in xrange(len(l))])\n\n # print(f\"Current: {['%.2f'.format(x[0]) for x in IL[2:]]}, \\n {[x[2] for x in `IL`[2:]]}\")\n print(\"Voltage: %s\"%(\", \".join([\"%.1f\"%float(x[0]) for x in IL[2:]]))) # , \\n {[x[2] for x in IL[2:]]}\")\n print(\"Current: %s\"%(\", \".join([\"%.1f\"%float(x[2]) for x in IL[2:]]))) # , \\n {[x[2] for x in IL[2:]]}\")\n # OUT_FOR_SINGLE print(f\"Watts: {[x[3] for x in IL[2:]]}, \\n {[x[3] for x in IL[2:]]}\")\n # OUT_FOR_SINGLE print(f\"Resistance: 
{[x[7] for x in IL[2:]]}, \\n {[x[7] for x in IL[2:]]}\")\n\n\n p1, = host.plot([float(x[0]) for x in IL[2:]], [float(x[2]) for x in IL[2:]], label=\"Current\")\n p2, = par1.plot([float(x[0]) for x in IL[2:]], [float(x[3]) for x in IL[2:]], label=\"Watts\")\n # OUT_FOR_SINGLE p3, = host.plot([x[7] for x in IL[2:]], [x[7] for x in IL[2:]], label=\"Resistance\")\n\n xlim_min = 0 # min([x[0] for x in IL[2:]])\n xlim_max = 50 # max([x[0] for x in IL[2:]])\n print(f\"X-Axis {xlim_min=}, {xlim_max=}\")\n\n ylim_min = min([x[2] for x in IL[2:]])\n ylim_max = max([x[2] for x in IL[2:]])\n print(f\"Y-Axis {ylim_min=}, {ylim_max=}\")\n\n host.set_xlim( xlim_min, xlim_max) # X Axis (Voltage)\n host.set_ylim( ylim_min, ylim_max) # # Left Y Axis (Current)\n par1.set_ylim( 0, 200) # Right Y Axis 1 (Wattage)\n # OUT_FOR_SINGLE par2.set_ylim( IL[2][7], IL[-1][7]) # Right Y Axis 2 (Resistance)\n\n host.set_xlabel(\"Voltage\")\n host.set_ylabel(\"Current (Amps)\")\n par1.set_ylabel(\"Watts\")\n # OUT_FOR_SINGLE par2.set_ylabel(\"Load Resistance\")\n\n host.legend()\n\n host.axis[\"left\"].label.set_color(p1.get_color())\n par1.axis[\"right\"].label.set_color(p2.get_color())\n # OUT_FOR_SINGLE par2.axis[\"right\"].label.set_color(p3.get_color())\n\n # from MAYBE related examples axes.yaxis.set_major_locator(MaxNLocator(5))\n host.yaxis.set_major_locator(MaxNLocator(10))\n host.xaxis.set_major_locator(MaxNLocator(8))\n # par1.yaxis.set_major_locator(MaxNLocator(8))\n\n plt.show()\n\ndef main(arguments=None):\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('infile', help=\"Input file\") # type=argparse.FileType('r'))\n # parser.add_argument('-o', '--outfile', help=\"Output file\",\n # default=sys.stdout, type=argparse.FileType('w'))\n\n args = parser.parse_args(arguments)\n\n # print(pp_str(args))\n\n # read csv file as a list of lists\n\n # with open(args.infile, 'r') as read_obj:\n # # pass the file object to reader() to get the reader object\n # csv_reader = reader(read_obj)\n # # Pass reader object to list() to get a list of lists\n # list_of_rows = list(csv_reader)\n # # print(pp_str(list_of_rows))\n # for i in list_of_rows:\n # print(f\"{i}\")\n\n df = pd.read_csv(args.infile)\n\n # get_and_plot_mppt(df)\n\n plot_df(df)\n\n\n\nif __name__ == '__main__':\n\n main(sys.argv[1:])\n # time.sleep(2.612)\n\n\n sys.exit(0)\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jberkman/pyxfoil
[ "511cb0e2cdc7e2967bc53fa0276351ebf57a90bf" ]
[ "pyxfoil.py" ]
[ "\"\"\"PYXFOIL: XFOIL AUTOMATION USING PYTHON\nLogan Halstrom\nEAE 127\nUCD\nCREATED: 15 SEP 2015\nMODIFIED: 17 OCT 2018\n\nDESCRIPTION: Provides functions for automating XFOIL runs.\nEach function will iteratively build a list of inputs. When you are ready,\nuse the RunXfoil command to run the input list\n\nNOTE: Since input list is predetermined, runs cannot be reiterated.\nMake sure to set the iter limit high enough, that each simulation will\nwork on the first try\n\nTO CALL IN A SCRIPT:\nimport sys\nsys.path.append('path/to/pyxfoil.py')\nimport pyxfoil\n\nFUTURE IMPROVEMENTS:\n\n------------------------------------------------------------------------\nMIT License\n\nCopyright (c) 2017 Logan Halstrom\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n------------------------------------------------------------------------\n\"\"\"\n\nimport os\nimport sys\nimport re\nimport numpy as np\nimport subprocess\nimport pandas as pd\n\n########################################################################\n### GENERAL FILE AND PROCESS UTILITIES #################################\n########################################################################\n\ndef MakeOutputDir(savedir):\n \"\"\"make results output directory if it does not already exist.\n instring --> directory path from script containing folder\n \"\"\"\n #split individual directories\n splitstring = savedir.split('/')\n prestring = ''\n for string in splitstring:\n prestring += string + '/'\n try:\n os.mkdir(prestring)\n except Exception:\n pass\n\ndef GetParentDir(savename):\n \"\"\"Get parent directory from path of file\"\"\"\n #split individual directories\n splitstring = savename.split('/')\n parent = ''\n #concatenate all dirs except bottommost\n for string in splitstring[:-1]:\n parent += string + '/'\n return parent\n\ndef FindBetween(string, before='^', after=None):\n \"\"\"Search 'string' for characters between 'before' and 'after' characters\n If after=None, return everything after 'before'\n Default before is beginning of line\n \"\"\"\n if after == None and before != None:\n match = re.search('{}(.*)$'.format(before), string)\n if match != None:\n return match.group(1)\n else:\n return 'No Match'\n else:\n match = re.search('(?<={})(?P<value>.*?)(?={})'.format(before, after), string)\n if match != None:\n return match.group('value')\n else:\n return 'No Match'\n\ndef IsItWindows():\n \"\"\"Return true if operating system is windows\"\"\"\n return True if os.name == 'nt' else False\n\ndef ErrorMessage(text):\n \"\"\"Format an error output 
message\n \"\"\"\n return \"\\n\\n\" \\\n \"********************************************************************\\n\" \\\n \"{}\\n\" \\\n \"********************************************************************\" \\\n \"\\n\\n\".format(text)\n\n########################################################################\n### XFOIL AUTOMATION CLASS #############################################\n########################################################################\n\nclass Xfoil:\n def __init__(self, foil='0012', naca=True, Re=0, Iter=100,\n xfoilpath=None, headless=True):\n \"\"\"Initialize class for specific airfoil.\n foil --> airfoil name, either NACA digits or path to geometry file\n naca --> True for naca digits, False for geometry file\n Re --> Reynolds number (inviscid if zero)\n Iter --> number of iterations per simulation (XFOIL default: 20)\n xfoilpath --> path to xfoil executable file\n headless --> run xfoil without graphical output (avoids X11/XQuartz dependency)\n \"\"\"\n\n #DETERMINE OPERATING SYSTEM\n self.win = IsItWindows()\n\n #SET PATH TO XFOIL FOR CURRENT OPERATING SYSTEM\n if xfoilpath != None:\n #Manually specify path to Xfoil\n self.xfoilpath = xfoilpath\n elif self.win:\n #Windows default location is in same folder as python script\n self.xfoilpath = 'xfoil.exe'\n #check dependencies\n if not os.path.isfile(self.xfoilpath):\n txt = \"PYXFOIL ERROR: Put xfoil.exe in same folder as pyxfoil.py\"\n sys.exit(ErrorMessage(txt))\n else:\n #Mac Install location\n self.xfoilpath = \"/usr/local/bin/xfoil\"\n #check dependencies\n if not os.path.isfile(self.xfoilpath):\n txt = \"PYXFOIL ERROR: Xfoil is not installed\"\n sys.exit(ErrorMessage(txt))\n if not os.path.isfile('/opt/X11/bin/xquartz'):\n txt = \"PYXFOIL ERROR: X11/xquartz not installed\"\n print(ErrorMessage(txt))\n\n\n #SAVE RUN PARAMETERS\n #Reynolds number\n self.Re = Re\n #Maximum iteration\n self.Iter = Iter\n #MAKE AIRFOIL NAME\n self.naca = naca\n if self.naca:\n #4-digit NACA to be loaded from equation\n self.name = 'naca' + foil\n else:\n #Load airfoil from file\n #airfoil name is between parent path and file extension\n parent = GetParentDir(foil)\n self.name = FindBetween(foil, parent, '\\.')\n #CREATE SAVE DIRECTORY\n #Save in Data/airfoilname/\n self.savepath = 'Data/{}'.format(self.name)\n MakeOutputDir(self.savepath)\n\n #INITIALIZE COMMAND INPUT LIST\n self.input = ''\n\n #TURN OFF GRAPHICS (MAKE XFOIL \"HEADLESS\")\n #avoids XQuartz incompatibility\n if headless:\n self.TurnOffGraphics()\n\n #LOAD AIRFOIL (AND START INPUT LIST)\n self.foil = foil\n self.LoadGeom()\n\n def AddInput(self, cmd):\n \"\"\"Add input command to command list\n cmd --> string command to add\n \"\"\"\n self.input += '{}\\n'.format(cmd)\n\n def RunXfoil(self, quiet=True):\n \"\"\"Once input command list has been built, run all commands with this\n quiet --> true for no XFOIL output to screen\n \"\"\"\n #Supress output if quiet option, otherwise write XFOIl output to screen\n stdout = open(os.devnull, 'wb') if quiet else None\n\n #START XFOIL\n xf = subprocess.Popen(self.xfoilpath,\n stdin=subprocess.PIPE,\n stdout=stdout,\n stderr=None,)\n #XFOIL SUBPROCESS\n self.xf = xf\n #Pipe inputs into xfoil\n res = xf.communicate( self.input.encode('utf-8') )\n #Space output with a few newlines\n if not quiet:\n print('\\n\\n\\n')\n\n def LoadGeom(self):\n \"\"\"Load given airfoil, either NACA number or file path\n \"\"\"\n if self.naca:\n #Load NACA airfoil based on given digits\n self.AddInput( 'naca {}'.format(self.foil) )\n else:\n 
#check dependencies\n if not os.path.isfile(self.foil):\n txt = \"PYXFOIL ERROR: Geometry input file does not exist/\" \\\n \"in wrong location\\n({})\".format(self.foil)\n sys.exit(ErrorMessage(txt))\n if len([l for l in open(self.foil, 'r')]) < 2:\n txt = \"PYXFOIL ERROR: Geometry input file is empty (no data)\" \\\n \"\\nDownload or create new file: ({})\".format(self.foil)\n sys.exit(ErrorMessage(txt))\n\n #Load geometry from file path\n self.AddInput('load {}'.format( self.foil) )\n\n def SaveGeom(self, overwrite=True):\n \"\"\"Save airfoil geometry. MUST BE CALLED IN TOP MENU.\n overwrite --> Overwrite file if it exists\n \"\"\"\n savename = self.SaveNameGeom()\n if not os.path.isfile(savename) and overwrite:\n self.AddInput( 'save {}'.format( savename ) )\n\n def EnterOperMenu(self):\n \"\"\"Set up 'oper' menu for inviscid or viscous operations.\n Call from top menu after loading geometry.\n \"\"\"\n #ENTER OPERATIONS MENU\n self.AddInput('oper')\n if self.Re != 0:\n #VISCOULS SIMULATION WITH GIVEN REYNOLDS NUMBER\n self.AddInput('visc {}'.format( self.Re ) )\n #SET ITERATION NUMBER\n self.AddInput('iter {}'.format( self.Iter ))\n\n def SingleAlfa(self, alf, SaveCP=True):\n \"\"\"Simulate airfoil at a single angle of attack.\n Must be run in 'oper' menu.\n alf --> angle of attack to simulate\n SaveCP --> Save individual surface pressure distributions\n \"\"\"\n self.AddInput('alfa {}'.format( alf ) )\n if SaveCP:\n savename = self.SaveNameSurfCp(alf)\n self.AddInput('cpwr {}'.format(savename) )\n\n def Polar(self, alfs, SaveCP=True, overwrite=True):\n \"\"\"Create and save polar for airfoil. Call in top menu after\n loading geometry.\n alfs --> list of alphas to run\n SaveCP --> Save individual surface pressure distributions\n overwrite --> overwrite polar file (otherwise append new alphas)\n \"\"\"\n\n #STORE RUN INFO\n if type(alfs) == float or type(alfs) == int:\n #angle of attack input must be array-like\n alfs = [alfs]\n self.alfs = alfs\n #SET REYNOLDS NUMBER\n self.EnterOperMenu()\n\n #SET UP POLAR ACCUMULATION\n # if len(alfs) > 1:\n savename = self.SaveNamePolar(alfs)\n\n if os.path.isfile(savename) and overwrite:\n os.remove(savename) #Remove polar file if starting new\n #TURN POLAR ACCUMULATION ON\n self.AddInput('pacc')\n #Submit Polar File Name\n self.AddInput(savename)\n #Skip Polar Dumpfile Name\n self.AddInput('')\n # self.AddInput(self.savename + 'dump.dat')\n # self.AddInput('pacc'; savename; self.savename + 'dump.dat')\n\n #SIMULATE EACH ANGLE OF ATTACK\n for alf in alfs:\n self.SingleAlfa(alf, SaveCP)\n\n # if len(alfs) > 1:\n #TURN POLAR ACCUMULATION OFF\n self.AddInput('pacc')\n\n def Quit(self):\n \"\"\"Quit XFOIL by going to top-most menu and issuing 'quit' command\n \"\"\"\n self.AddInput('')\n self.AddInput('')\n self.AddInput('')\n self.AddInput('')\n self.AddInput('quit')\n\n def TurnOffGraphics(self,):\n \"\"\" Turn off XFOIL graphical output so that XFOIL can run 'headless'.\n Use this to avoid XQuartz compatibility issues and to simplify output to screen.\n \"\"\"\n #Enter Plotting Options Menu\n self.AddInput('plop')\n #Turn graphics option to False\n self.AddInput('g f')\n #Return to main menu\n self.AddInput('')\n\n def SaveNameGeom(self,):\n \"\"\"Make save filename for airfoil geometry\n \"\"\"\n return '{}/{}.dat'.format(self.savepath, self.name)\n\n def SaveNameSurfCp(self, alf):\n \"\"\"Make save filename for airfoil surface pressure based on current\n airfoil, Reynolds number, and angle of attack\n alf --> current angle of attack\n \"\"\"\n 
return '{}/{}_surfCP_Re{:1.2e}a{:1.1f}.dat'.format(\n self.savepath, self.name, self.Re, alf)\n\n def SaveNamePolar(self, alfs):\n \"\"\"Make save filename for airfoil polar based on\n airfoil, Reynolds number, and angle of attack\n alfs --> Range of angles of attack to run\n \"\"\"\n if type(alfs) == float or type(alfs) == int:\n #angle of attack input must be array-like\n alfs = [alfs]\n if len(alfs) == 1:\n #only one provided angle of attack\n alfrange = 'a{:1.2f}'.format(alfs[0])\n else:\n #use least and greatest angle of attack for name\n alfrange = 'a{:1.1f}-{:1.1f}'.format(alfs[0], alfs[-1])\n return '{}/{}_polar_Re{:1.2e}{}.dat'.format(\n self.savepath, self.name, self.Re, alfrange)\n\n\n\n########################################################################\n### XFOIL FILE I/O #####################################################\n########################################################################\n\ndef ReadXfoilAirfoilGeom(filename):\n \"\"\"Read in XFOIL airfoil geometry file data, skipping title lines\n filename --> path to file\n \"\"\"\n df = pd.read_csv(filename, delim_whitespace=True, skiprows=1,\n names=['x', 'z'])\n return df\n\ndef ReadXfoilSurfPress(filename):\n \"\"\"Read in XFOIL surface pressure coefficient data, skipping title lines\n filename --> path to file\n \"\"\"\n if IsItWindows():\n #Windows file format\n names = ['x', 'y', 'Cp']\n skip = 3\n else:\n #Mac file format\n names = ['x', 'Cp']\n skip = 1\n #read file\n df = pd.read_csv(filename, delim_whitespace=True, skiprows=skip, names=names)\n return df\n\ndef ReadXfoilPolar(filename):\n \"\"\"Read in XFOIL polar file data, skipping title lines\n filename --> path to polar data file\n \"\"\"\n df = pd.read_csv(filename, delim_whitespace=True, skiprows=12,\n names=['alpha', 'Cl', 'Cd', 'Cdp', 'Cm', 'Top_Xtr', 'Bot_Xtr'])\n return df\n\ndef WriteXfoilFile(name, x, z):\n \"\"\"Write 2-column XFOIL file with fixed-width separation.\n First line is 'name'. 
Works best for writting geometry.\n \"\"\"\n ofile = open(name, 'w')\n ofile.write('foil\\n')\n for xx, zz in zip(x, z):\n #XYZ POINTS FORMATED IN 3, 16-WIDE COLUMNS\n #< : left-aligned,\n #14 : 14 spaces reserved in column,\n #.7 : 7 spaces reserved after decimal point,\n #f : float\n ofile.write(' {:<14.7f}{:<14.7f}\\n'.format(xx, zz))\n ofile.close()\n\n########################################################################\n### MAIN ###############################################################\n########################################################################\n\ndef GetPolar(foil='0012', naca=True, alfs=[0], Re=0,\n SaveCP=True, Iter=100, pane=False,\n overwrite=True, quiet=True):\n \"\"\"For a single airfoil at a single Reynolds number,\n create a polar with given alphas.\n foil --> naca digits or path to geom file\n naca --> True for naca digits, False for file path\n alfs --> list of alphas to run\n Re --> Reynolds number (default invisc)\n SaveCp --> save each individual pressure distribution\n pane --> smooth geometry before simulation (can cause instability)\n overwrite --> overwrite existing save files\n quiet --> Supress XFOIL output\n \"\"\"\n #INITIALIZE XFOIL OBJECT\n obj = Xfoil(foil, naca, Re, Iter=Iter)\n #GEOMETRY\n #condition panel geometry (use for rough shapes, not on smooth shapes)\n if pane:\n obj.AddInput('pane')\n #Save geometry for later slope calculations\n obj.SaveGeom()\n #RUN AND SAVE ALL POLAR CASES\n obj.Polar(alfs, SaveCP=SaveCP, overwrite=overwrite)\n #Quit XFOIL\n obj.Quit()\n #Run Input List In XFOIL\n obj.RunXfoil(quiet=quiet)\n\n return obj\n\n\n\n\ndef main(foil, naca, alfs, Re, Iter=30):\n \"\"\"\n foil --> path to airfoil file or naca 4-digit number\n naca --> boolean if naca or not\n alfs --> list of angle of attacks for airfoils (deg)\n Re --> Reynolds number to run\n Iter --> maximum number of iterations for each simulation\n \"\"\"\n\n obj = Xfoil(foil, naca, Re, Iter) #initialize xfoil\n obj.SaveGeom() #save airfoil geometry\n obj.EnterOperMenu() #set up operations, reynolds, iteration number\n obj.SingleAlfa(alfs[0]) #command to run single alpha case\n obj.Polar(alfs) #Command to run polar case\n obj.Quit() #command to quit XFOIL when done\n\n obj.RunXfoil() #Run all commands at once\n\n print('done')\n\nif __name__ == \"__main__\":\n\n foils = ['0012', 'Data/s1223.dat']\n nacas = [True, False]\n alfs = [0, 10]\n Re = 2e5\n\n for foil, naca in zip(foils, nacas):\n main(foil, naca, alfs, Re)\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
michael-kuhlmann/padertorch
[ "3925fa693299ffb166d82dd81deed997237e85c7" ]
[ "padertorch/contrib/je/data/mixup.py" ]
[ "from lazy_dataset import Dataset, FilterException\nimport numpy as np\nimport numbers\n\n\nclass MixUpDataset(Dataset):\n \"\"\"\n >>> ds = MixUpDataset(range(10), SampleMixupComponents((.0,1.)), (lambda x: x), buffer_size=2)\n >>> list(ds)\n \"\"\"\n def __init__(self, input_dataset, sample_fn, mixup_fn, buffer_size=100):\n \"\"\"\n Combines examples from input_dataset and mixin_dataset into tuples.\n\n Args:\n input_dataset: lazy dataset providing example dict with key audio_length.\n sample_fn: sample_fn(buffer) returning a list of examples from buffer for mixup.\n \"\"\"\n self.input_dataset = input_dataset\n self.buffer = []\n self.buffer_size = buffer_size\n self.sample_fn = sample_fn\n self.mixup_fn = mixup_fn\n\n def __len__(self):\n return len(self.input_dataset)\n\n def __iter__(self):\n for example in self.input_dataset:\n self.buffer.append(example)\n if len(self.buffer) > self.buffer_size:\n examples = self.sample_fn(self.buffer)\n if len(examples) == 1:\n yield examples[0]\n elif len(examples) > 1:\n yield self.mixup_fn(examples)\n else:\n raise ValueError('sample_fn has to return at least one example')\n self.buffer.pop(0)\n else:\n yield example\n\n def copy(self, freeze=False):\n return self.__class__(\n input_dataset=self.input_dataset.copy(freeze=freeze),\n sample_fn=self.sample_fn,\n mixup_fn=self.mixup_fn,\n buffer_size=self.buffer_size,\n )\n\n @property\n def indexable(self):\n return False\n\n\nclass SampleMixupComponents:\n \"\"\"\n >>> sample_fn = SampleMixupComponents((0,1.))\n >>> buffer = list(range(10))\n >>> sample_fn(buffer)\n >>> buffer\n \"\"\"\n def __init__(self, mixup_prob):\n self.mixup_prob = mixup_prob\n\n def __call__(self, buffer):\n examples = [buffer[-1]]\n num_mixins = np.random.choice(len(self.mixup_prob), p=self.mixup_prob)\n num_mixins = min(num_mixins, len(buffer) - 1)\n if num_mixins > 0:\n idx = np.random.choice(len(buffer)-1, num_mixins, replace=False)\n examples.extend(buffer[i] for i in idx)\n return examples\n\n\nclass SuperposeEvents:\n \"\"\"\n >>> mixup_fn = SuperposeEvents(min_overlap=0.5)\n >>> example1 = {'example_id': '0', 'dataset': '0', 'stft': np.ones((1, 10, 9, 2)), 'events': np.array([0,1,0,0,1]), 'events_alignment': np.array([0,1,0,0,1])[:,None].repeat(10,axis=1)}\n >>> example2 = {'example_id': '1', 'dataset': '1', 'stft': -np.ones((1, 8, 9, 2)), 'events': np.array([0,0,1,0,0]), 'events_alignment': np.array([0,0,1,0,0])[:,None].repeat(8,axis=1)}\n >>> mixup_fn([example1, example2])\n \"\"\"\n def __init__(self, min_overlap=1., max_length=None):\n self.min_overlap = min_overlap\n self.max_length = max_length\n\n def __call__(self, components):\n assert len(components) > 0\n start_indices = [0]\n stop_indices = [components[0]['stft'].shape[1]]\n for comp in components[1:]:\n l = comp['stft'].shape[1]\n min_start = -int(l*(1-self.min_overlap))\n max_start = components[0]['stft'].shape[1] - int(np.ceil(self.min_overlap*l))\n if self.max_length is not None:\n min_start = max(\n min_start, max(stop_indices) - self.max_length\n )\n max_start = min(\n max_start, min(start_indices) + self.max_length - l\n )\n if max_start < min_start:\n raise FilterException\n start_indices.append(\n int(min_start + np.random.rand() * (max_start - min_start + 1))\n )\n stop_indices.append(start_indices[-1] + l)\n start_indices = np.array(start_indices)\n stop_indices = np.array(stop_indices)\n stop_indices -= start_indices.min()\n start_indices -= start_indices.min()\n\n stft_shape = list(components[0]['stft'].shape)\n stft_shape[1] = 
stop_indices.max()\n mixed_stft = np.zeros(stft_shape, dtype=components[0]['stft'].dtype)\n if 'events_alignment' in components[0]:\n assert all(['events_alignment' in comp for comp in components])\n alignment_shape = list(components[0]['events_alignment'].shape)\n alignment_shape[1] = stop_indices.max()\n mixed_alignment = np.zeros(alignment_shape)\n else:\n mixed_alignment = None\n for comp, start, stop in zip(components, start_indices, stop_indices):\n mixed_stft[:, start:stop] += comp['stft']\n if mixed_alignment is not None:\n mixed_alignment[:, start:stop] += comp['events_alignment']\n\n mix = {\n 'example_id': '+'.join([comp['example_id'] for comp in components]),\n 'dataset': '+'.join(sorted(set([comp['dataset'] for comp in components]))),\n 'stft': mixed_stft,\n 'seq_len': mixed_stft.shape[1],\n }\n if all(['events' in comp for comp in components]):\n mix['events'] = (np.sum([comp['events'] for comp in components], axis=0) > .5).astype(components[0]['events'].dtype)\n if mixed_alignment is not None:\n mix['events_alignment'] = (mixed_alignment > .5).astype(components[0]['events_alignment'].dtype)\n return mix\n" ]
[ [ "numpy.ceil", "numpy.random.rand", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
achinta/CategoricalNF
[ "d8717a037e8f13641e9d9a89abf66fba38e23f91", "d8717a037e8f13641e9d9a89abf66fba38e23f91" ]
[ "experiments/graph_coloring/datasets/graph_coloring_generation.py", "experiments/graph_coloring/datasets/mutils.py" ]
[ "import random\nimport numpy as np\nimport networkx as nx\nimport sys, os, json, argparse, itertools\nimport grinpy as gp\nimport time\nfrom glob import glob\nfrom multiprocessing import Pool\nfrom ortools.sat.python import cp_model\n\n\"\"\"\nThis code is based on https://github.com/machine-reasoning-ufrgs/GNN-GCP\n\"\"\"\n\n\ndef solve_csp(M, n_colors, nmin=25):\n model = cp_model.CpModel()\n N = len(M)\n variables = []\n \n variables = [ model.NewIntVar(0, n_colors-1, '{i}'.format(i=i)) for i in range(N) ]\n \n for i in range(N):\n for j in range(i+1,N):\n if M[i][j] == 1:\n model.Add( variables[i] != variables [j] )\n \n solver = cp_model.CpSolver()\n solver.parameters.max_time_in_seconds = int( ((10.0 / nmin) * N) )\n status = solver.Solve(model)\n \n if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL :\n solution = dict()\n for k in range(N):\n solution[k] = solver.Value(variables[k])\n return solution\n elif status == cp_model.INFEASIBLE:\n return None\n else:\n raise Exception(\"CSP is unsure about the problem\")\n\n\ndef is_cn(Ma, cn_i):\n if solve_csp(Ma, cn_i-1) == None:\n return True\n else:\n return False\n\n\ndef multiprocessing_dataset_generation(nmin, nmax, ncolors, path, samples, seed, num_workers=8):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\t# For increasing sampling speed, we create multiple workers/processes in parallel\n\tsamples_per_worker = int(samples//num_workers)\n\tp = Pool()\n\targs_list = [(nmin, nmax, ncolors, path, samples_per_worker, samples_per_worker*i, seed+i) for i in range(num_workers)]\n\tp.map(_create_simple_dataset_tuple, args_list)\n\n\ndef _create_simple_dataset_tuple(args):\n\tnmin, nmax, ncolors, path, samples, start_idx, seed = args\n\tcreate_simple_dataset(nmin, nmax, ncolors, path, samples, start_idx, seed)\n\n\ndef create_simple_dataset(nmin, nmax, ncolors, path, samples, start_idx=0, seed=123):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\t\t\n\texport_pack = 500\n\tall_solutions = {\"N\": np.zeros((export_pack,), dtype=np.uint8), \n\t\t\t\t\t \"adjacency\": -np.ones((export_pack, nmax, nmax), dtype=np.int8), \n\t\t\t\t\t \"nodes\": -np.ones((export_pack, nmax), dtype=np.int8),\n\t\t\t\t\t \"graph_idx\": -np.ones((export_pack,), dtype=np.int32),\n\t\t\t\t\t \"idx\": 0}\n\n\tdef export_solution(Ma, init_sol, z, graph_idx=-1):\n\t\tN, Ma, sol = write_solution(Ma=Ma, init_sol=init_sol, save_path=None)\n\t\tsol_idx = all_solutions[\"idx\"]\n\t\tall_solutions[\"N\"][sol_idx] = N\n\t\tall_solutions[\"adjacency\"][sol_idx,:N,:N] = Ma.astype(np.uint8)\n\t\tall_solutions[\"nodes\"][sol_idx,:N] = sol\n\t\tall_solutions[\"graph_idx\"][sol_idx] = graph_idx\n\n\t\tall_solutions[\"idx\"] += 1\n\t\tif all_solutions[\"idx\"] >= export_pack:\n\t\t\tall_solutions.pop(\"idx\")\n\t\t\tnp.savez_compressed(os.path.join(path, \"samples_%s_%s.npz\" % (str(z-export_pack+2).zfill(7), str(z+1).zfill(7))), \n\t\t\t\t\t\t\t\t**all_solutions)\n\n\t\t\tall_solutions[\"N\"].fill(0)\n\t\t\tall_solutions[\"adjacency\"].fill(-1)\n\t\t\tall_solutions[\"nodes\"].fill(-1)\n\t\t\tall_solutions[\"graph_idx\"].fill(-1)\n\t\t\tall_solutions[\"idx\"] = 0\n\n\t# Adjacency density ratio to sample from. 
\n\tedge_prob_constraints = {3: (0.1, 0.3), 4: (0.15, 0.3)}\n\n\tnp.random.seed(seed)\n\trandom.seed(seed)\n\tz = start_idx\n\tN = np.random.randint(nmin, nmax+1)\n\twhile z in range(start_idx,samples+start_idx):\n\t\tN = np.random.randint(nmin, nmax+1)\n\t\tsave_path = os.path.join(path, \"sample_%s.npz\" % (str(z).zfill(6)))\n\t\tfound_sol = False\n\t\t\n\t\tCn = ncolors\n\t\tlim_inf, lim_sup = edge_prob_constraints[Cn][0], edge_prob_constraints[Cn][1]\n\t\tlim_sup = min(lim_sup, nmax/N*(lim_inf+lim_sup)/2.0)\n\n\t\tp_connected = random.uniform(lim_inf, lim_sup)\n\t\tMa = gen_matrix(N, p_connected)\n\n\t\tinit_sol = solve_csp(Ma, Cn)\n\t\tif init_sol is not None and is_cn(Ma,Cn):\n\t\t\texport_solution(Ma, init_sol, z)\n\t\t\tfound_sol = True\n\n\t\tif found_sol:\n\t\t\tz += 1\n\t\t\tif z % 100 == 0:\n\t\t\t\tprint(\"Completed %i (%4.2f%%) in [%i,%i] samples...\" % (z-start_idx, (z-start_idx)*100.0/samples, start_idx, start_idx+samples))\n\n\ndef write_solution(Ma, init_sol, save_path=None):\n\tN = Ma.shape[0]\n\tsol = np.zeros(N, dtype=np.uint8)\n\tfor i in range(N):\n\t\tsol[i] = int(init_sol[i])\n\tif save_path is not None:\n\t\tnp.savez_compressed(save_path, adjacency=Ma, nodes=sol)\n\telse:\n\t\treturn (N, Ma, sol)\n\n\ndef combine_solution_files(save_path):\n\tprint(\"Combining solution files...\")\n\tsample_files = sorted(glob(os.path.join(save_path, \"sample*.npz\")))\n\tnodes, adjacency = None, None\n\tfor filename in sample_files:\n\t\tdata_arr = np.load(filename)\n\t\tif nodes is None and adjacency is None:\n\t\t\tnodes, adjacency = data_arr[\"nodes\"], data_arr[\"adjacency\"]\n\t\telse:\n\t\t\tnodes = np.concatenate([nodes, data_arr[\"nodes\"]], axis=0)\n\t\t\tadjacency = np.concatenate([adjacency, data_arr[\"adjacency\"]], axis=0)\n\tnp.savez_compressed(os.path.join(save_path, \"samples_combined.npz\"), nodes=nodes, adjacency=adjacency)\n\n\ndef gen_matrix(N, prob):\n\tMa = np.zeros((N,N))\n\tMa = np.random.choice([0,1], size=(N, N), p=[1-prob,prob])\n\ti_lower = np.tril_indices(N, -1)\n\tMa[i_lower] = Ma.T[i_lower] # make the matrix symmetric\n\tnp.fill_diagonal(Ma, 0)\n\n\t# Ensuring that every node has at least 1 connection\n\twhile np.min(Ma.sum(axis=0)) == 0:\n\t\tidx = np.argmin(Ma.sum(axis=0))\n\t\tMa[idx,:] = np.random.choice([0,1], size=(N,), p=[1-prob,prob])\n\t\tMa[:,idx] = Ma[idx,:]\n\t\tMa[idx,idx] = 0\n\n\t# Test that the whole graph is connected\n\tconnect = np.zeros((N,))\n\tconnect[0] = 1\n\tMa_diag = np.eye(N) + Ma\n\twhile (1 - connect).sum() > 0:\n\t\tnew_connect = ((connect[None,:] * Ma_diag).sum(axis=1) > 0).astype(connect.dtype)\n\t\tif np.any(new_connect != connect):\n\t\t\tconnect = new_connect\n\t\telse:\n\t\t\tnum_choices = 3\n\t\t\tstart_nodes = np.random.choice(np.where(connect>0)[0], size=(num_choices,))\n\t\t\tend_nodes = np.random.choice(np.where(connect==0)[0], size=(num_choices,))\n\t\t\tMa[start_nodes, end_nodes] = 1\n\t\t\tMa[end_nodes, start_nodes] = 1\n\t\t\tMa_diag = np.eye(N) + Ma\n\n\treturn Ma\n\n\nif __name__ == '__main__':\n\t# Define argument parser\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--path', help='Path to which the files should be saved.', type=str, required=True)\n\tparser.add_argument('--samples', help='Number of samples to generate', type=int, default=2e5)\n\tparser.add_argument('--nmin', default=25, help='Minimum number of nodes in a graph', type=int)\n\tparser.add_argument('--nmax', default=50, help='Maximum number of nodes in a graph', type=int)\n\tparser.add_argument('--ncolor', default=3, 
help='Number of colors to use for the graph coloring', type=int)\n\tparser.add_argument('--train', help='If train is selected, we use a different seed', action='store_true')\n\n\t# Parse arguments from command line\n\targs = parser.parse_args()\n\tseed = 1327 if args.train else 3712\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\n\t# Start the generation process\n\tprint('Creating {} instances'.format(args.samples))\n\tmultiprocessing_dataset_generation(\n\t\t\targs.nmin, args.nmax,\n\t\t\tncolors=args.ncolor,\n\t\t\tsamples=args.samples,\n\t\t\tpath=args.path,\n\t\t\tseed=seed\n\t\t)\n\tcombine_solution_files(args.path)", "import torch\nimport torch.utils.data as data\nimport numpy as np \nimport matplotlib\nimport matplotlib.pyplot as plt \nfrom statistics import mean, median, stdev\n\n\nclass BucketSampler(data.Sampler):\n\n\tdef __init__(self, dataset, batch_size, len_step=1):\n\t\tsuper().__init__(dataset)\n\t\tself.dataset = dataset\n\t\tself.batch_size = batch_size\n\t\tself.len_step = len_step\n\t\tself._prepare()\n\n\tdef _prepare(self):\n\t\tindices = self.dataset.data_indices\n\t\tlengths = (self.dataset.__class__.DATASET_NODES[indices] >= 0).sum(axis=-1)\n\t\tlengths = lengths // self.len_step\n\t\tlinear_indices = np.arange(indices.shape[0]).astype(np.int32)\n\t\tself.unique_lengths = np.unique(lengths)\n\t\tself.indices_by_lengths = [linear_indices[lengths==l] for l in self.unique_lengths]\n\n\tdef __iter__(self):\n\t\tsampled_indices = []\n\t\tind_by_len = [np.random.permutation(inds) for inds in self.indices_by_lengths]\n\n\t\twhile len(sampled_indices) < len(self):\n\t\t\tp = [inds.shape[0] for inds in ind_by_len]\n\t\t\tp = [e*1.0/sum(p) for e in p]\n\t\t\tglobal_len = np.random.choice(len(ind_by_len), p=p, size=1)[0]\n\n\t\t\tglobal_inds = []\n\n\t\t\tdef add_len(global_inds, local_len):\n\t\t\t\tsize_to_add = self.batch_size - len(global_inds)\n\t\t\t\tglobal_inds += ind_by_len[local_len][:size_to_add].tolist()\n\t\t\t\tif ind_by_len[local_len].shape[0] > size_to_add:\n\t\t\t\t\tind_by_len[local_len] = ind_by_len[local_len][size_to_add:]\n\t\t\t\telse:\n\t\t\t\t\tind_by_len[local_len] = np.array([])\n\t\t\t\treturn global_inds\n\n\t\t\tadd_len(global_inds, global_len)\n\n\t\t\twhile len(global_inds) < self.batch_size:\n\t\t\t\tif all([inds.shape[0]==0 for inds in ind_by_len]):\n\t\t\t\t\tbreak\n\t\t\t\tglobal_len = (global_len + 1) % len(ind_by_len)\n\t\t\t\tadd_len(global_inds, global_len)\n\n\t\t\tsampled_indices += global_inds\n\n\n\t\treturn iter(sampled_indices)\n\n\n\tdef __len__(self):\n\t\treturn len(self.dataset)" ]
[ [ "numpy.random.seed", "numpy.random.choice", "numpy.tril_indices", "numpy.eye", "numpy.ones", "numpy.concatenate", "numpy.savez_compressed", "numpy.fill_diagonal", "numpy.any", "numpy.load", "numpy.zeros", "numpy.where", "numpy.random.randint" ], [ "numpy.arange", "numpy.array", "numpy.random.permutation", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ARKseal/crawlingathome-gpu-hcloud
[ "73f185df36eb5e420e6513bcf1b518dd21499b76" ]
[ "helpers/bloom.py" ]
[ "# use this file inside every minute cron in order to recalculate bloom filters. location: staging server\n# folder structure\n# /home/archiveteam/CAH/\n# |_bloom archiveteam@IP::bloom contains bloom filters\n# |_clipped contains clipped lists\n# |_ds contains files ready to be sent to the eye\n# |_hashes contains list of hashes of files inserted into the dataset\n# |_results archiveteam@IP::CAH incoming folder for the final results from workers\n\n# Stacked bloom filters. Naming convention:\n# frozen filters: filter.bin, filter1.bin, filter2.bin\n# active filters: filter_active.bin\n#\n#\nimport sys\nimport time\nimport requests\nimport pandas as pd\nfrom glob import glob\nfrom pathlib import Path\nfrom datetime import datetime\nfrom bloom_filter2 import BloomFilter\n\n# update the bloom server filters too\nbloomip = \"116.202.162.146\"\n\nserverbloom = BloomFilter(max_elements=10000000, error_rate=0.01, filename=(f\"/home/archiveteam/bloom-{bloomip}.bin\",-1))\nserverclip = BloomFilter(max_elements=10000000, error_rate=0.01, filename=(f\"/home/archiveteam/clip-{bloomip}.bin\",-1))\n\nstart = time.time()\nnow = datetime.now().strftime(\"%Y/%m/%d_%H:%M\")\n\nfailed = BloomFilter(max_elements=10000000, error_rate=0.01, filename=(\"/home/archiveteam/CAH/bloom/failed-domains.bin\",-1))\nfilesfailed = BloomFilter(max_elements=100000, error_rate=0.01, filename=(\"/home/archiveteam/filesfailed.bin\",-1))\n\ntime.sleep(5)\ncounter = 0\nuniques = 0\nfor file in glob(\"/home/archiveteam/CAH/hashes/*.hsh\"):\n stem = Path(file).stem.strip(\".\")\n if stem not in serverbloom:\n with open(file,\"rt\") as f:\n for line in f.readlines():\n counter += 1\n post = {\n 'file': (stem, open(file, 'rb')),\n 'key': (None, 'main'),\n }\n response = requests.post(f'http://{bloomip}:8000/add/', files=post)\n if response.status_code == 200:\n serverbloom.add(stem)\n uniques = int(response.text)\n\nfailed_counter = 0\nfor file in glob(\"/home/archiveteam/CAH/bloom/*.txt\"):\n stem = Path(file).stem.strip(\".\")\n if stem not in filesfailed:\n with open(file,\"rt\") as f:\n for line in f.readlines():\n line = line.strip()\n if line not in failed:\n failed.add(line)\n failed_counter += 1\n filesfailed.add(stem)\n\nclipped_counter = 0\nfor file in glob(\"/home/archiveteam/CAH/clipped/*.clp\"):\n stem = Path(file).stem.strip(\".\")\n if stem not in serverclip:\n post = {\n 'file': (stem, open(file, 'rb')),\n 'key': (None, 'clipped'),\n }\n response = requests.post(f'http://{bloomip}:8000/add/', files=post)\n if response.status_code == 200:\n serverclip.add(stem)\n clipped_counter = int(response.text)\n\npd.set_option('precision', 2)\ndf = pd.read_csv(\"bloom.log\", sep=\" \",header=None, names=[\"Date\", \"a\", \"unique pairs (5%)\", \"b\", \"total including duplicates\",\"c\",\"clipped filter (5%)\",\"d\",\"failed filter\",\"e\"])\ndf[\"Date\"]=df.Date.apply(lambda x: datetime.strptime(x, \"[%Y/%m/%d_%H:%M]\"))\ndf[\"unique pairs (5%)\"]=df[\"unique pairs (5%)\"]/1000000\ndf[\"total including duplicates\"]=df[\"total including duplicates\"]/1000000\ndf[\"clipped filter (5%)\"]=df[\"clipped filter (5%)\"]/1000000\n\nif uniques > 0:\n print(f\"[{now}] added {uniques} \\\"from total of\\\" {counter} \\\"(i.e. 
{round((counter-uniques)*100/(counter+sys.float_info.epsilon),2)}% duplication in {round(time.time()-start,2)} sec) Also added \\\" {clipped_counter} \\\"clipped and\\\" {failed_counter} failed\")\n with open('dashboard.txt', 'w') as file:\n file.write(\"<h5><a href='http://cah.io.community'>Crawling at Home project</a></h5>\\n\")\n file.write(\"<h1>Bloom filters status</h1>\\n\")\n file.write(\"<h2>All time stats</h2>\\n\")\n file.write(\"<h5>initialized from first parquet files</h5>\\n\")\n file.write(str(df.sum(axis=0, numeric_only=True)).replace(\"\\n\",\"<br/>\"))\n file.write(\"<br/><br/>\")\n file.write(\"<h2>Last day stats</h2>\\n\")\n file.write(str(df[df.Date > datetime.now() - pd.to_timedelta(\"1day\")].sum(axis=0, numeric_only=True)).replace(\"\\n\",\"<br/>\"))\n file.write(\"<h2>Last week stats</h2>\\n\")\n file.write(\"<h5>Last reset date: 02 August 2021</h5>\\n\")\n file.write(str(df[df.Date > datetime.now() - pd.to_timedelta(\"7day\")].sum(axis=0, numeric_only=True)).replace(\"\\n\",\"<br/>\"))\n" ]
[ [ "pandas.to_timedelta", "pandas.set_option", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ikingye/scikit-learn
[ "3254e98a79e5c1172c794ad38f222dc45f0fb65c", "3254e98a79e5c1172c794ad38f222dc45f0fb65c", "3254e98a79e5c1172c794ad38f222dc45f0fb65c", "3254e98a79e5c1172c794ad38f222dc45f0fb65c", "3254e98a79e5c1172c794ad38f222dc45f0fb65c" ]
[ "examples/applications/plot_tomography_l1_reconstruction.py", "sklearn/base.py", "sklearn/linear_model/_ridge.py", "sklearn/preprocessing/_label.py", "sklearn/linear_model/tests/test_bayes.py" ]
[ "\"\"\"\n======================================================================\nCompressive sensing: tomography reconstruction with L1 prior (Lasso)\n======================================================================\n\nThis example shows the reconstruction of an image from a set of parallel\nprojections, acquired along different angles. Such a dataset is acquired in\n**computed tomography** (CT).\n\nWithout any prior information on the sample, the number of projections\nrequired to reconstruct the image is of the order of the linear size\n``l`` of the image (in pixels). For simplicity we consider here a sparse\nimage, where only pixels on the boundary of objects have a non-zero\nvalue. Such data could correspond for example to a cellular material.\nNote however that most images are sparse in a different basis, such as\nthe Haar wavelets. Only ``l/7`` projections are acquired, therefore it is\nnecessary to use prior information available on the sample (its\nsparsity): this is an example of **compressive sensing**.\n\nThe tomography projection operation is a linear transformation. In\naddition to the data-fidelity term corresponding to a linear regression,\nwe penalize the L1 norm of the image to account for its sparsity. The\nresulting optimization problem is called the :ref:`lasso`. We use the\nclass :class:`~sklearn.linear_model.Lasso`, that uses the coordinate descent\nalgorithm. Importantly, this implementation is more computationally efficient\non a sparse matrix, than the projection operator used here.\n\nThe reconstruction with L1 penalization gives a result with zero error\n(all pixels are successfully labeled with 0 or 1), even if noise was\nadded to the projections. In comparison, an L2 penalization\n(:class:`~sklearn.linear_model.Ridge`) produces a large number of labeling\nerrors for the pixels. Important artifacts are observed on the\nreconstructed image, contrary to the L1 penalization. 
Note in particular\nthe circular artifact separating the pixels in the corners, that have\ncontributed to fewer projections than the central disk.\n\"\"\"\n\nprint(__doc__)\n\n# Author: Emmanuelle Gouillart <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy import ndimage\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import Ridge\nimport matplotlib.pyplot as plt\n\n\ndef _weights(x, dx=1, orig=0):\n x = np.ravel(x)\n floor_x = np.floor((x - orig) / dx).astype(np.int64)\n alpha = (x - orig - floor_x * dx) / dx\n return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))\n\n\ndef _generate_center_coordinates(l_x):\n X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)\n center = l_x / 2.\n X += 0.5 - center\n Y += 0.5 - center\n return X, Y\n\n\ndef build_projection_operator(l_x, n_dir):\n \"\"\" Compute the tomography design matrix.\n\n Parameters\n ----------\n\n l_x : int\n linear size of image array\n\n n_dir : int\n number of angles at which projections are acquired.\n\n Returns\n -------\n p : sparse matrix of shape (n_dir l_x, l_x**2)\n \"\"\"\n X, Y = _generate_center_coordinates(l_x)\n angles = np.linspace(0, np.pi, n_dir, endpoint=False)\n data_inds, weights, camera_inds = [], [], []\n data_unravel_indices = np.arange(l_x ** 2)\n data_unravel_indices = np.hstack((data_unravel_indices,\n data_unravel_indices))\n for i, angle in enumerate(angles):\n Xrot = np.cos(angle) * X - np.sin(angle) * Y\n inds, w = _weights(Xrot, dx=1, orig=X.min())\n mask = np.logical_and(inds >= 0, inds < l_x)\n weights += list(w[mask])\n camera_inds += list(inds[mask] + i * l_x)\n data_inds += list(data_unravel_indices[mask])\n proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))\n return proj_operator\n\n\ndef generate_synthetic_data():\n \"\"\" Synthetic binary data \"\"\"\n rs = np.random.RandomState(0)\n n_pts = 36\n x, y = np.ogrid[0:l, 0:l]\n mask_outer = (x - l / 2.) ** 2 + (y - l / 2.) ** 2 < (l / 2.) 
** 2\n mask = np.zeros((l, l))\n points = l * rs.rand(2, n_pts)\n mask[(points[0]).astype(int), (points[1]).astype(int)] = 1\n mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)\n res = np.logical_and(mask > mask.mean(), mask_outer)\n return np.logical_xor(res, ndimage.binary_erosion(res))\n\n\n# Generate synthetic images, and projections\nl = 128\nproj_operator = build_projection_operator(l, l // 7)\ndata = generate_synthetic_data()\nproj = proj_operator * data.ravel()[:, np.newaxis]\nproj += 0.15 * np.random.randn(*proj.shape)\n\n# Reconstruction with L2 (Ridge) penalization\nrgr_ridge = Ridge(alpha=0.2)\nrgr_ridge.fit(proj_operator, proj.ravel())\nrec_l2 = rgr_ridge.coef_.reshape(l, l)\n\n# Reconstruction with L1 (Lasso) penalization\n# the best value of alpha was determined using cross validation\n# with LassoCV\nrgr_lasso = Lasso(alpha=0.001)\nrgr_lasso.fit(proj_operator, proj.ravel())\nrec_l1 = rgr_lasso.coef_.reshape(l, l)\n\nplt.figure(figsize=(8, 3.3))\nplt.subplot(131)\nplt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')\nplt.axis('off')\nplt.title('original image')\nplt.subplot(132)\nplt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')\nplt.title('L2 penalization')\nplt.axis('off')\nplt.subplot(133)\nplt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')\nplt.title('L1 penalization')\nplt.axis('off')\n\nplt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,\n right=1)\n\nplt.show()\n", "\"\"\"Base classes for all estimators.\"\"\"\n\n# Author: Gael Varoquaux <[email protected]>\n# License: BSD 3 clause\n\nimport copy\nimport warnings\nfrom collections import defaultdict\nimport platform\nimport inspect\nimport re\n\nimport numpy as np\n\nfrom . import __version__\nfrom ._config import get_config\nfrom .utils import _IS_32BIT\nfrom .utils.validation import check_X_y\nfrom .utils.validation import check_array\nfrom .utils._estimator_html_repr import estimator_html_repr\nfrom .utils.validation import _deprecate_positional_args\n\n_DEFAULT_TAGS = {\n 'non_deterministic': False,\n 'requires_positive_X': False,\n 'requires_positive_y': False,\n 'X_types': ['2darray'],\n 'poor_score': False,\n 'no_validation': False,\n 'multioutput': False,\n \"allow_nan\": False,\n 'stateless': False,\n 'multilabel': False,\n '_skip_test': False,\n '_xfail_checks': False,\n 'multioutput_only': False,\n 'binary_only': False,\n 'requires_fit': True,\n 'requires_y': False,\n }\n\n\n@_deprecate_positional_args\ndef clone(estimator, *, safe=True):\n \"\"\"Constructs a new estimator with the same parameters.\n\n Clone does a deep copy of the model in an estimator\n without actually copying attached data. It yields a new estimator\n with the same parameters that has not been fit on any data.\n\n Parameters\n ----------\n estimator : {list, tuple, set} of estimator instance or a single \\\n estimator instance\n The estimator or group of estimators to be cloned.\n\n safe : bool, default=True\n If safe is False, clone will fall back to a deep copy on objects\n that are not estimators.\n\n \"\"\"\n estimator_type = type(estimator)\n # XXX: not handling dictionaries\n if estimator_type in (list, tuple, set, frozenset):\n return estimator_type([clone(e, safe=safe) for e in estimator])\n elif not hasattr(estimator, 'get_params') or isinstance(estimator, type):\n if not safe:\n return copy.deepcopy(estimator)\n else:\n if isinstance(estimator, type):\n raise TypeError(\"Cannot clone object. 
\" +\n \"You should provide an instance of \" +\n \"scikit-learn estimator instead of a class.\")\n else:\n raise TypeError(\"Cannot clone object '%s' (type %s): \"\n \"it does not seem to be a scikit-learn \"\n \"estimator as it does not implement a \"\n \"'get_params' method.\"\n % (repr(estimator), type(estimator)))\n\n klass = estimator.__class__\n new_object_params = estimator.get_params(deep=False)\n for name, param in new_object_params.items():\n new_object_params[name] = clone(param, safe=False)\n new_object = klass(**new_object_params)\n params_set = new_object.get_params(deep=False)\n\n # quick sanity check of the parameters of the clone\n for name in new_object_params:\n param1 = new_object_params[name]\n param2 = params_set[name]\n if param1 is not param2:\n raise RuntimeError('Cannot clone object %s, as the constructor '\n 'either does not set or modifies parameter %s' %\n (estimator, name))\n return new_object\n\n\ndef _pprint(params, offset=0, printer=repr):\n \"\"\"Pretty print the dictionary 'params'\n\n Parameters\n ----------\n params : dict\n The dictionary to pretty print\n\n offset : int, default=0\n The offset in characters to add at the begin of each line.\n\n printer : callable, default=repr\n The function to convert entries to strings, typically\n the builtin str or repr\n\n \"\"\"\n # Do a multi-line justified repr:\n options = np.get_printoptions()\n np.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, (k, v) in enumerate(sorted(params.items())):\n if type(v) is float:\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' 
+ this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines\n\n\nclass BaseEstimator:\n \"\"\"Base class for all estimators in scikit-learn.\n\n Notes\n -----\n All estimators should specify all the parameters that can be set\n at the class level in their ``__init__`` as explicit keyword\n arguments (no ``*args`` or ``**kwargs``).\n \"\"\"\n\n @classmethod\n def _get_param_names(cls):\n \"\"\"Get parameter names for the estimator\"\"\"\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = inspect.signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [p for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\"\n % (cls, init_signature))\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])\n\n def get_params(self, deep=True):\n \"\"\"\n Get parameters for this estimator.\n\n Parameters\n ----------\n deep : bool, default=True\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : dict\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_param_names():\n try:\n value = getattr(self, key)\n except AttributeError:\n warnings.warn('From version 0.24, get_params will raise an '\n 'AttributeError if a parameter cannot be '\n 'retrieved as an instance attribute. Previously '\n 'it would return None.',\n FutureWarning)\n value = None\n if deep and hasattr(value, 'get_params'):\n deep_items = value.get_params().items()\n out.update((key + '__' + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n def set_params(self, **params):\n \"\"\"\n Set the parameters of this estimator.\n\n The method works on simple estimators as well as on nested objects\n (such as :class:`~sklearn.pipeline.Pipeline`). The latter have\n parameters of the form ``<component>__<parameter>`` so that it's\n possible to update each component of a nested object.\n\n Parameters\n ----------\n **params : dict\n Estimator parameters.\n\n Returns\n -------\n self : estimator instance\n Estimator instance.\n \"\"\"\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition('__')\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s. 
'\n 'Check the list of available parameters '\n 'with `estimator.get_params().keys()`.' %\n (key, self))\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self\n\n def __repr__(self, N_CHAR_MAX=700):\n # N_CHAR_MAX is the (approximate) maximum number of non-blank\n # characters to render. We pass it as an optional parameter to ease\n # the tests.\n\n from .utils._pprint import _EstimatorPrettyPrinter\n\n N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences\n\n # use ellipsis for sequences with a lot of elements\n pp = _EstimatorPrettyPrinter(\n compact=True, indent=1, indent_at_name=True,\n n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW)\n\n repr_ = pp.pformat(self)\n\n # Use bruteforce ellipsis when there are a lot of non-blank characters\n n_nonblank = len(''.join(repr_.split()))\n if n_nonblank > N_CHAR_MAX:\n lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends\n regex = r'^(\\s*\\S){%d}' % lim\n # The regex '^(\\s*\\S){%d}' % n\n # matches from the start of the string until the nth non-blank\n # character:\n # - ^ matches the start of string\n # - (pattern){n} matches n repetitions of pattern\n # - \\s*\\S matches a non-blank char following zero or more blanks\n left_lim = re.match(regex, repr_).end()\n right_lim = re.match(regex, repr_[::-1]).end()\n\n if '\\n' in repr_[left_lim:-right_lim]:\n # The left side and right side aren't on the same line.\n # To avoid weird cuts, e.g.:\n # categoric...ore',\n # we need to start the right side with an appropriate newline\n # character so that it renders properly as:\n # categoric...\n # handle_unknown='ignore',\n # so we add [^\\n]*\\n which matches until the next \\n\n regex += r'[^\\n]*\\n'\n right_lim = re.match(regex, repr_[::-1]).end()\n\n ellipsis = '...'\n if left_lim + len(ellipsis) < len(repr_) - right_lim:\n # Only add ellipsis if it results in a shorter repr\n repr_ = repr_[:left_lim] + '...' + repr_[-right_lim:]\n\n return repr_\n\n def __getstate__(self):\n try:\n state = super().__getstate__()\n except AttributeError:\n state = self.__dict__.copy()\n\n if type(self).__module__.startswith('sklearn.'):\n return dict(state.items(), _sklearn_version=__version__)\n else:\n return state\n\n def __setstate__(self, state):\n if type(self).__module__.startswith('sklearn.'):\n pickle_version = state.pop(\"_sklearn_version\", \"pre-0.18\")\n if pickle_version != __version__:\n warnings.warn(\n \"Trying to unpickle estimator {0} from version {1} when \"\n \"using version {2}. This might lead to breaking code or \"\n \"invalid results. Use at your own risk.\".format(\n self.__class__.__name__, pickle_version, __version__),\n UserWarning)\n try:\n super().__setstate__(state)\n except AttributeError:\n self.__dict__.update(state)\n\n def _more_tags(self):\n return _DEFAULT_TAGS\n\n def _get_tags(self):\n collected_tags = {}\n for base_class in reversed(inspect.getmro(self.__class__)):\n if hasattr(base_class, '_more_tags'):\n # need the if because mixins might not have _more_tags\n # but might do redundant work in estimators\n # (i.e. 
calling more tags on BaseEstimator multiple times)\n more_tags = base_class._more_tags(self)\n collected_tags.update(more_tags)\n return collected_tags\n\n def _check_n_features(self, X, reset):\n \"\"\"Set the `n_features_in_` attribute, or check against it.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The input samples.\n reset : bool\n If True, the `n_features_in_` attribute is set to `X.shape[1]`.\n Else, the attribute must already exist and the function checks\n that it is equal to `X.shape[1]`.\n \"\"\"\n n_features = X.shape[1]\n\n if reset:\n self.n_features_in_ = n_features\n else:\n if not hasattr(self, 'n_features_in_'):\n raise RuntimeError(\n \"The reset parameter is False but there is no \"\n \"n_features_in_ attribute. Is this estimator fitted?\"\n )\n if n_features != self.n_features_in_:\n raise ValueError(\n 'X has {} features, but {} is expecting {} features '\n 'as input.'.format(n_features, self.__class__.__name__,\n self.n_features_in_)\n )\n\n def _validate_data(self, X, y=None, reset=True,\n validate_separately=False, **check_params):\n \"\"\"Validate input data and set or check the `n_features_in_` attribute.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape \\\n (n_samples, n_features)\n The input samples.\n y : array-like of shape (n_samples,), default=None\n The targets. If None, `check_array` is called on `X` and\n `check_X_y` is called otherwise.\n reset : bool, default=True\n Whether to reset the `n_features_in_` attribute.\n If False, the input will be checked for consistency with data\n provided when reset was last True.\n validate_separately : False or tuple of dicts, default=False\n Only used if y is not None.\n If False, call validate_X_y(). Else, it must be a tuple of kwargs\n to be used for calling check_array() on X and y respectively.\n **check_params : kwargs\n Parameters passed to :func:`sklearn.utils.check_array` or\n :func:`sklearn.utils.check_X_y`. Ignored if validate_separately\n is not False.\n\n Returns\n -------\n out : {ndarray, sparse matrix} or tuple of these\n The validated input. A tuple is returned if `y` is not None.\n \"\"\"\n\n if y is None:\n if self._get_tags()['requires_y']:\n raise ValueError(\n f\"This {self.__class__.__name__} estimator \"\n f\"requires y to be passed, but the target y is None.\"\n )\n X = check_array(X, **check_params)\n out = X\n else:\n if validate_separately:\n # We need this because some estimators validate X and y\n # separately, and in general, separately calling check_array()\n # on X and y isn't equivalent to just calling check_X_y()\n # :(\n check_X_params, check_y_params = validate_separately\n X = check_array(X, **check_X_params)\n y = check_array(y, **check_y_params)\n else:\n X, y = check_X_y(X, y, **check_params)\n out = X, y\n\n if check_params.get('ensure_2d', True):\n self._check_n_features(X, reset=reset)\n\n return out\n\n @property\n def _repr_html_(self):\n \"\"\"HTML representation of estimator.\n\n This is redundant with the logic of `_repr_mimebundle_`. 
The latter\n should be favorted in the long term, `_repr_html_` is only\n implemented for consumers who do not interpret `_repr_mimbundle_`.\n \"\"\"\n if get_config()[\"display\"] != 'diagram':\n raise AttributeError(\"_repr_html_ is only defined when the \"\n \"'display' configuration option is set to \"\n \"'diagram'\")\n return self._repr_html_inner\n\n def _repr_html_inner(self):\n \"\"\"This function is returned by the @property `_repr_html_` to make\n `hasattr(estimator, \"_repr_html_\") return `True` or `False` depending\n on `get_config()[\"display\"]`.\n \"\"\"\n return estimator_html_repr(self)\n\n def _repr_mimebundle_(self, **kwargs):\n \"\"\"Mime bundle used by jupyter kernels to display estimator\"\"\"\n output = {\"text/plain\": repr(self)}\n if get_config()[\"display\"] == 'diagram':\n output[\"text/html\"] = estimator_html_repr(self)\n return output\n\n\nclass ClassifierMixin:\n \"\"\"Mixin class for all classifiers in scikit-learn.\"\"\"\n\n _estimator_type = \"classifier\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"\n Return the mean accuracy on the given test data and labels.\n\n In multi-label classification, this is the subset accuracy\n which is a harsh metric since you require for each sample that\n each label set be correctly predicted.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test samples.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n True labels for `X`.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n Mean accuracy of ``self.predict(X)`` wrt. `y`.\n \"\"\"\n from .metrics import accuracy_score\n return accuracy_score(y, self.predict(X), sample_weight=sample_weight)\n\n def _more_tags(self):\n return {'requires_y': True}\n\n\nclass RegressorMixin:\n \"\"\"Mixin class for all regression estimators in scikit-learn.\"\"\"\n _estimator_type = \"regressor\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Return the coefficient of determination :math:`R^2` of the\n prediction.\n\n The coefficient :math:`R^2` is defined as :math:`(1 - \\\\frac{u}{v})`,\n where :math:`u` is the residual sum of squares ``((y_true - y_pred)\n ** 2).sum()`` and :math:`v` is the total sum of squares ``((y_true -\n y_true.mean()) ** 2).sum()``. The best possible score is 1.0 and it\n can be negative (because the model can be arbitrarily worse). A\n constant model that always predicts the expected value of `y`,\n disregarding the input features, would get a :math:`R^2` score of\n 0.0.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test samples. For some estimators this may be a precomputed\n kernel matrix or a list of generic objects instead with shape\n ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``\n is the number of samples used in the fitting for the estimator.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n True values for `X`.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n :math:`R^2` of ``self.predict(X)`` wrt. 
`y`.\n\n Notes\n -----\n The :math:`R^2` score used when calling ``score`` on a regressor uses\n ``multioutput='uniform_average'`` from version 0.23 to keep consistent\n with default value of :func:`~sklearn.metrics.r2_score`.\n This influences the ``score`` method of all the multioutput\n regressors (except for\n :class:`~sklearn.multioutput.MultiOutputRegressor`).\n \"\"\"\n\n from .metrics import r2_score\n y_pred = self.predict(X)\n return r2_score(y, y_pred, sample_weight=sample_weight)\n\n def _more_tags(self):\n return {'requires_y': True}\n\n\nclass ClusterMixin:\n \"\"\"Mixin class for all cluster estimators in scikit-learn.\"\"\"\n _estimator_type = \"clusterer\"\n\n def fit_predict(self, X, y=None):\n \"\"\"\n Perform clustering on `X` and returns cluster labels.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,), dtype=np.int64\n Cluster labels.\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n self.fit(X)\n return self.labels_\n\n\nclass BiclusterMixin:\n \"\"\"Mixin class for all bicluster estimators in scikit-learn.\"\"\"\n\n @property\n def biclusters_(self):\n \"\"\"Convenient way to get row and column indicators together.\n\n Returns the ``rows_`` and ``columns_`` members.\n \"\"\"\n return self.rows_, self.columns_\n\n def get_indices(self, i):\n \"\"\"Row and column indices of the `i`'th bicluster.\n\n Only works if ``rows_`` and ``columns_`` attributes exist.\n\n Parameters\n ----------\n i : int\n The index of the cluster.\n\n Returns\n -------\n row_ind : ndarray, dtype=np.intp\n Indices of rows in the dataset that belong to the bicluster.\n col_ind : ndarray, dtype=np.intp\n Indices of columns in the dataset that belong to the bicluster.\n\n \"\"\"\n rows = self.rows_[i]\n columns = self.columns_[i]\n return np.nonzero(rows)[0], np.nonzero(columns)[0]\n\n def get_shape(self, i):\n \"\"\"Shape of the `i`'th bicluster.\n\n Parameters\n ----------\n i : int\n The index of the cluster.\n\n Returns\n -------\n n_rows : int\n Number of rows in the bicluster.\n\n n_cols : int\n Number of columns in the bicluster.\n \"\"\"\n indices = self.get_indices(i)\n return tuple(len(i) for i in indices)\n\n def get_submatrix(self, i, data):\n \"\"\"Return the submatrix corresponding to bicluster `i`.\n\n Parameters\n ----------\n i : int\n The index of the cluster.\n data : array-like of shape (n_samples, n_features)\n The data.\n\n Returns\n -------\n submatrix : ndarray of shape (n_rows, n_cols)\n The submatrix corresponding to bicluster `i`.\n\n Notes\n -----\n Works with sparse matrices. 
Only works if ``rows_`` and\n ``columns_`` attributes exist.\n \"\"\"\n from .utils.validation import check_array\n data = check_array(data, accept_sparse='csr')\n row_ind, col_ind = self.get_indices(i)\n return data[row_ind[:, np.newaxis], col_ind]\n\n\nclass TransformerMixin:\n \"\"\"Mixin class for all transformers in scikit-learn.\"\"\"\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"\n Fit to data, then transform it.\n\n Fits transformer to `X` and `y` with optional parameters `fit_params`\n and returns a transformed version of `X`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input samples.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\n default=None\n Target values (None for unsupervised transformations).\n\n **fit_params : dict\n Additional fit parameters.\n\n Returns\n -------\n X_new : ndarray array of shape (n_samples, n_features_new)\n Transformed array.\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n if y is None:\n # fit method of arity 1 (unsupervised transformation)\n return self.fit(X, **fit_params).transform(X)\n else:\n # fit method of arity 2 (supervised transformation)\n return self.fit(X, y, **fit_params).transform(X)\n\n\nclass DensityMixin:\n \"\"\"Mixin class for all density estimators in scikit-learn.\"\"\"\n _estimator_type = \"DensityEstimator\"\n\n def score(self, X, y=None):\n \"\"\"Return the score of the model on the data `X`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n score : float\n \"\"\"\n pass\n\n\nclass OutlierMixin:\n \"\"\"Mixin class for all outlier detection estimators in scikit-learn.\"\"\"\n _estimator_type = \"outlier_detector\"\n\n def fit_predict(self, X, y=None):\n \"\"\"Perform fit on X and returns labels for X.\n\n Returns -1 for outliers and 1 for inliers.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, dataframe} of shape \\\n (n_samples, n_features)\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n 1 for inliers, -1 for outliers.\n \"\"\"\n # override for transductive outlier detectors like LocalOulierFactor\n return self.fit(X).predict(X)\n\n\nclass MetaEstimatorMixin:\n _required_parameters = [\"estimator\"]\n \"\"\"Mixin class for all meta estimators in scikit-learn.\"\"\"\n\n\nclass MultiOutputMixin:\n \"\"\"Mixin to mark estimators that support multioutput.\"\"\"\n def _more_tags(self):\n return {'multioutput': True}\n\n\nclass _UnstableArchMixin:\n \"\"\"Mark estimators that are non-determinstic on 32bit or PowerPC\"\"\"\n def _more_tags(self):\n return {'non_deterministic': (\n _IS_32BIT or platform.machine().startswith(('ppc', 'powerpc')))}\n\n\ndef is_classifier(estimator):\n \"\"\"Return True if the given estimator is (probably) a classifier.\n\n Parameters\n ----------\n estimator : object\n Estimator object to test.\n\n Returns\n -------\n out : bool\n True if estimator is a classifier and False otherwise.\n \"\"\"\n return getattr(estimator, \"_estimator_type\", None) == \"classifier\"\n\n\ndef is_regressor(estimator):\n \"\"\"Return True if the given estimator is (probably) a regressor.\n\n Parameters\n ----------\n estimator : estimator instance\n Estimator object to test.\n\n Returns\n -------\n out : bool\n True if estimator is a 
regressor and False otherwise.\n \"\"\"\n return getattr(estimator, \"_estimator_type\", None) == \"regressor\"\n\n\ndef is_outlier_detector(estimator):\n \"\"\"Return True if the given estimator is (probably) an outlier detector.\n\n Parameters\n ----------\n estimator : estimator instance\n Estimator object to test.\n\n Returns\n -------\n out : bool\n True if estimator is an outlier detector and False otherwise.\n \"\"\"\n return getattr(estimator, \"_estimator_type\", None) == \"outlier_detector\"\n", "\"\"\"\nRidge regression\n\"\"\"\n\n# Author: Mathieu Blondel <[email protected]>\n# Reuben Fletcher-Costin <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Michael Eickenberg <[email protected]>\n# License: BSD 3 clause\n\n\nfrom abc import ABCMeta, abstractmethod\nimport warnings\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy import sparse\nfrom scipy.sparse import linalg as sp_linalg\n\nfrom ._base import LinearClassifierMixin, LinearModel, _rescale_data\nfrom ._sag import sag_solver\nfrom ..base import RegressorMixin, MultiOutputMixin, is_classifier\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.extmath import row_norms\nfrom ..utils import check_array\nfrom ..utils import check_consistent_length\nfrom ..utils import compute_sample_weight\nfrom ..utils import column_or_1d\nfrom ..utils.validation import _check_sample_weight\nfrom ..utils.validation import _deprecate_positional_args\nfrom ..preprocessing import LabelBinarizer\nfrom ..model_selection import GridSearchCV\nfrom ..metrics import check_scoring\nfrom ..exceptions import ConvergenceWarning\nfrom ..utils.sparsefuncs import mean_variance_axis\n\n\ndef _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0,\n X_offset=None, X_scale=None):\n\n def _get_rescaled_operator(X):\n\n X_offset_scale = X_offset / X_scale\n\n def matvec(b):\n return X.dot(b) - b.dot(X_offset_scale)\n\n def rmatvec(b):\n return X.T.dot(b) - X_offset_scale * np.sum(b)\n\n X1 = sparse.linalg.LinearOperator(shape=X.shape,\n matvec=matvec,\n rmatvec=rmatvec)\n return X1\n\n n_samples, n_features = X.shape\n\n if X_offset is None or X_scale is None:\n X1 = sp_linalg.aslinearoperator(X)\n else:\n X1 = _get_rescaled_operator(X)\n\n coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)\n\n if n_features > n_samples:\n def create_mv(curr_alpha):\n def _mv(x):\n return X1.matvec(X1.rmatvec(x)) + curr_alpha * x\n return _mv\n else:\n def create_mv(curr_alpha):\n def _mv(x):\n return X1.rmatvec(X1.matvec(x)) + curr_alpha * x\n return _mv\n\n for i in range(y.shape[1]):\n y_column = y[:, i]\n\n mv = create_mv(alpha[i])\n if n_features > n_samples:\n # kernel ridge\n # w = X.T * inv(X X^t + alpha*Id) y\n C = sp_linalg.LinearOperator(\n (n_samples, n_samples), matvec=mv, dtype=X.dtype)\n # FIXME atol\n try:\n coef, info = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')\n except TypeError:\n # old scipy\n coef, info = sp_linalg.cg(C, y_column, tol=tol)\n coefs[i] = X1.rmatvec(coef)\n else:\n # linear ridge\n # w = inv(X^t X + alpha*Id) * X.T y\n y_column = X1.rmatvec(y_column)\n C = sp_linalg.LinearOperator(\n (n_features, n_features), matvec=mv, dtype=X.dtype)\n # FIXME atol\n try:\n coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,\n tol=tol, atol='legacy')\n except TypeError:\n # old scipy\n coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,\n tol=tol)\n\n if info < 0:\n raise ValueError(\"Failed with error code %d\" % info)\n\n if max_iter is None and info > 0 and verbose:\n 
warnings.warn(\"sparse_cg did not converge after %d iterations.\" %\n info, ConvergenceWarning)\n\n return coefs\n\n\ndef _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):\n n_samples, n_features = X.shape\n coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)\n n_iter = np.empty(y.shape[1], dtype=np.int32)\n\n # According to the lsqr documentation, alpha = damp^2.\n sqrt_alpha = np.sqrt(alpha)\n\n for i in range(y.shape[1]):\n y_column = y[:, i]\n info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],\n atol=tol, btol=tol, iter_lim=max_iter)\n coefs[i] = info[0]\n n_iter[i] = info[2]\n\n return coefs, n_iter\n\n\ndef _solve_cholesky(X, y, alpha):\n # w = inv(X^t X + alpha*Id) * X.T y\n n_features = X.shape[1]\n n_targets = y.shape[1]\n\n A = safe_sparse_dot(X.T, X, dense_output=True)\n Xy = safe_sparse_dot(X.T, y, dense_output=True)\n\n one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])\n\n if one_alpha:\n A.flat[::n_features + 1] += alpha[0]\n return linalg.solve(A, Xy, sym_pos=True,\n overwrite_a=True).T\n else:\n coefs = np.empty([n_targets, n_features], dtype=X.dtype)\n for coef, target, current_alpha in zip(coefs, Xy.T, alpha):\n A.flat[::n_features + 1] += current_alpha\n coef[:] = linalg.solve(A, target, sym_pos=True,\n overwrite_a=False).ravel()\n A.flat[::n_features + 1] -= current_alpha\n return coefs\n\n\ndef _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):\n # dual_coef = inv(X X^t + alpha*Id) y\n n_samples = K.shape[0]\n n_targets = y.shape[1]\n\n if copy:\n K = K.copy()\n\n alpha = np.atleast_1d(alpha)\n one_alpha = (alpha == alpha[0]).all()\n has_sw = isinstance(sample_weight, np.ndarray) \\\n or sample_weight not in [1.0, None]\n\n if has_sw:\n # Unlike other solvers, we need to support sample_weight directly\n # because K might be a pre-computed kernel.\n sw = np.sqrt(np.atleast_1d(sample_weight))\n y = y * sw[:, np.newaxis]\n K *= np.outer(sw, sw)\n\n if one_alpha:\n # Only one penalty, we can solve multi-target problems in one time.\n K.flat[::n_samples + 1] += alpha[0]\n\n try:\n # Note: we must use overwrite_a=False in order to be able to\n # use the fall-back solution below in case a LinAlgError\n # is raised\n dual_coef = linalg.solve(K, y, sym_pos=True,\n overwrite_a=False)\n except np.linalg.LinAlgError:\n warnings.warn(\"Singular matrix in solving dual problem. Using \"\n \"least-squares solution instead.\")\n dual_coef = linalg.lstsq(K, y)[0]\n\n # K is expensive to compute and store in memory so change it back in\n # case it was user-given.\n K.flat[::n_samples + 1] -= alpha[0]\n\n if has_sw:\n dual_coef *= sw[:, np.newaxis]\n\n return dual_coef\n else:\n # One penalty per target. 
We need to solve each target separately.\n dual_coefs = np.empty([n_targets, n_samples], K.dtype)\n\n for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):\n K.flat[::n_samples + 1] += current_alpha\n\n dual_coef[:] = linalg.solve(K, target, sym_pos=True,\n overwrite_a=False).ravel()\n\n K.flat[::n_samples + 1] -= current_alpha\n\n if has_sw:\n dual_coefs *= sw[np.newaxis, :]\n\n return dual_coefs.T\n\n\ndef _solve_svd(X, y, alpha):\n U, s, Vt = linalg.svd(X, full_matrices=False)\n idx = s > 1e-15 # same default value as scipy.linalg.pinv\n s_nnz = s[idx][:, np.newaxis]\n UTy = np.dot(U.T, y)\n d = np.zeros((s.size, alpha.size), dtype=X.dtype)\n d[idx] = s_nnz / (s_nnz ** 2 + alpha)\n d_UT_y = d * UTy\n return np.dot(Vt.T, d_UT_y).T\n\n\ndef _get_valid_accept_sparse(is_X_sparse, solver):\n if is_X_sparse and solver in ['auto', 'sag', 'saga']:\n return 'csr'\n else:\n return ['csr', 'csc', 'coo']\n\n\n@_deprecate_positional_args\ndef ridge_regression(X, y, alpha, *, sample_weight=None, solver='auto',\n max_iter=None, tol=1e-3, verbose=0, random_state=None,\n return_n_iter=False, return_intercept=False,\n check_input=True):\n \"\"\"Solve the ridge equation by the method of normal equations.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix, LinearOperator} of shape \\\n (n_samples, n_features)\n Training data\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Target values\n\n alpha : float or array-like of shape (n_targets,)\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``1 / (2C)`` in other linear models such as\n :class:`~sklearn.linear_model.LogisticRegression` or\n :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are\n assumed to be specific to the targets. Hence they must correspond in\n number.\n\n sample_weight : float or array-like of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight. If sample_weight is not None and\n solver='auto', the solver will be set to 'cholesky'.\n\n .. versionadded:: 0.17\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}, \\\n default='auto'\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution via a Cholesky decomposition of\n dot(X.T, X)\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its improved, unbiased version named SAGA. Both methods also use an\n iterative procedure, and are often faster than other solvers when\n both n_samples and n_features are large. 
Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n\n All last five solvers support both dense and sparse data. However, only\n 'sag' and 'sparse_cg' supports sparse input when `fit_intercept` is\n True.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n max_iter : int, default=None\n Maximum number of iterations for conjugate gradient solver.\n For the 'sparse_cg' and 'lsqr' solvers, the default value is determined\n by scipy.sparse.linalg. For 'sag' and saga solver, the default value is\n 1000.\n\n tol : float, default=1e-3\n Precision of the solution.\n\n verbose : int, default=0\n Verbosity level. Setting verbose > 0 will display additional\n information depending on the solver used.\n\n random_state : int, RandomState instance, default=None\n Used when ``solver`` == 'sag' or 'saga' to shuffle the data.\n See :term:`Glossary <random_state>` for details.\n\n return_n_iter : bool, default=False\n If True, the method also returns `n_iter`, the actual number of\n iteration performed by the solver.\n\n .. versionadded:: 0.17\n\n return_intercept : bool, default=False\n If True and if X is sparse, the method also returns the intercept,\n and the solver is automatically changed to 'sag'. This is only a\n temporary fix for fitting the intercept with sparse data. For dense\n data, use sklearn.linear_model._preprocess_data before your regression.\n\n .. versionadded:: 0.17\n\n check_input : bool, default=True\n If False, the input arrays X and y will not be checked.\n\n .. versionadded:: 0.21\n\n Returns\n -------\n coef : ndarray of shape (n_features,) or (n_targets, n_features)\n Weight vector(s).\n\n n_iter : int, optional\n The actual number of iteration performed by the solver.\n Only returned if `return_n_iter` is True.\n\n intercept : float or ndarray of shape (n_targets,)\n The intercept of the model. Only returned if `return_intercept`\n is True and if X is a scipy sparse array.\n\n Notes\n -----\n This function won't compute the intercept.\n \"\"\"\n return _ridge_regression(X, y, alpha,\n sample_weight=sample_weight,\n solver=solver,\n max_iter=max_iter,\n tol=tol,\n verbose=verbose,\n random_state=random_state,\n return_n_iter=return_n_iter,\n return_intercept=return_intercept,\n X_scale=None,\n X_offset=None,\n check_input=check_input)\n\n\ndef _ridge_regression(X, y, alpha, sample_weight=None, solver='auto',\n max_iter=None, tol=1e-3, verbose=0, random_state=None,\n return_n_iter=False, return_intercept=False,\n X_scale=None, X_offset=None, check_input=True):\n\n has_sw = sample_weight is not None\n\n if solver == 'auto':\n if return_intercept:\n # only sag supports fitting intercept directly\n solver = \"sag\"\n elif not sparse.issparse(X):\n solver = \"cholesky\"\n else:\n solver = \"sparse_cg\"\n\n if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):\n raise ValueError(\"Known solvers are 'sparse_cg', 'cholesky', 'svd'\"\n \" 'lsqr', 'sag' or 'saga'. Got %s.\" % solver)\n\n if return_intercept and solver != 'sag':\n raise ValueError(\"In Ridge, only 'sag' solver can directly fit the \"\n \"intercept. 
Please change solver to 'sag' or set \"\n \"return_intercept=False.\")\n\n if check_input:\n _dtype = [np.float64, np.float32]\n _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)\n X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype,\n order=\"C\")\n y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)\n check_consistent_length(X, y)\n\n n_samples, n_features = X.shape\n\n if y.ndim > 2:\n raise ValueError(\"Target y has the wrong shape %s\" % str(y.shape))\n\n ravel = False\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n ravel = True\n\n n_samples_, n_targets = y.shape\n\n if n_samples != n_samples_:\n raise ValueError(\"Number of samples in X and y does not correspond:\"\n \" %d != %d\" % (n_samples, n_samples_))\n\n if has_sw:\n sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\n\n if solver not in ['sag', 'saga']:\n # SAG supports sample_weight directly. For other solvers,\n # we implement sample_weight via a simple rescaling.\n X, y = _rescale_data(X, y, sample_weight)\n\n # There should be either 1 or n_targets penalties\n alpha = np.asarray(alpha, dtype=X.dtype).ravel()\n if alpha.size not in [1, n_targets]:\n raise ValueError(\"Number of targets and number of penalties \"\n \"do not correspond: %d != %d\"\n % (alpha.size, n_targets))\n\n if alpha.size == 1 and n_targets > 1:\n alpha = np.repeat(alpha, n_targets)\n\n n_iter = None\n if solver == 'sparse_cg':\n coef = _solve_sparse_cg(X, y, alpha,\n max_iter=max_iter,\n tol=tol,\n verbose=verbose,\n X_offset=X_offset,\n X_scale=X_scale)\n\n elif solver == 'lsqr':\n coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)\n\n elif solver == 'cholesky':\n if n_features > n_samples:\n K = safe_sparse_dot(X, X.T, dense_output=True)\n try:\n dual_coef = _solve_cholesky_kernel(K, y, alpha)\n\n coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T\n except linalg.LinAlgError:\n # use SVD solver if matrix is singular\n solver = 'svd'\n else:\n try:\n coef = _solve_cholesky(X, y, alpha)\n except linalg.LinAlgError:\n # use SVD solver if matrix is singular\n solver = 'svd'\n\n elif solver in ['sag', 'saga']:\n # precompute max_squared_sum for all targets\n max_squared_sum = row_norms(X, squared=True).max()\n\n coef = np.empty((y.shape[1], n_features), dtype=X.dtype)\n n_iter = np.empty(y.shape[1], dtype=np.int32)\n intercept = np.zeros((y.shape[1], ), dtype=X.dtype)\n for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):\n init = {'coef': np.zeros((n_features + int(return_intercept), 1),\n dtype=X.dtype)}\n coef_, n_iter_, _ = sag_solver(\n X, target.ravel(), sample_weight, 'squared', alpha_i, 0,\n max_iter, tol, verbose, random_state, False, max_squared_sum,\n init, is_saga=solver == 'saga')\n if return_intercept:\n coef[i] = coef_[:-1]\n intercept[i] = coef_[-1]\n else:\n coef[i] = coef_\n n_iter[i] = n_iter_\n\n if intercept.shape[0] == 1:\n intercept = intercept[0]\n coef = np.asarray(coef)\n\n if solver == 'svd':\n if sparse.issparse(X):\n raise TypeError('SVD solver does not support sparse'\n ' inputs currently')\n coef = _solve_svd(X, y, alpha)\n\n if ravel:\n # When y was passed as a 1d-array, we flatten the coefficients.\n coef = coef.ravel()\n\n if return_n_iter and return_intercept:\n return coef, n_iter, intercept\n elif return_intercept:\n return coef, intercept\n elif return_n_iter:\n return coef, n_iter\n else:\n return coef\n\n\nclass _BaseRidge(LinearModel, metaclass=ABCMeta):\n @abstractmethod\n @_deprecate_positional_args\n def __init__(self, alpha=1.0, *, 
fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, solver=\"auto\",\n random_state=None):\n self.alpha = alpha\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.max_iter = max_iter\n self.tol = tol\n self.solver = solver\n self.random_state = random_state\n\n def fit(self, X, y, sample_weight=None):\n\n # all other solvers work at both float precision levels\n _dtype = [np.float64, np.float32]\n _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X),\n self.solver)\n X, y = self._validate_data(X, y,\n accept_sparse=_accept_sparse,\n dtype=_dtype,\n multi_output=True, y_numeric=True)\n if sparse.issparse(X) and self.fit_intercept:\n if self.solver not in ['auto', 'sparse_cg', 'sag']:\n raise ValueError(\n \"solver='{}' does not support fitting the intercept \"\n \"on sparse data. Please set the solver to 'auto' or \"\n \"'sparse_cg', 'sag', or set `fit_intercept=False`\"\n .format(self.solver))\n if (self.solver == 'sag' and self.max_iter is None and\n self.tol > 1e-4):\n warnings.warn(\n '\"sag\" solver requires many iterations to fit '\n 'an intercept with sparse inputs. Either set the '\n 'solver to \"auto\" or \"sparse_cg\", or set a low '\n '\"tol\" and a high \"max_iter\" (especially if inputs are '\n 'not standardized).')\n solver = 'sag'\n else:\n solver = 'sparse_cg'\n else:\n solver = self.solver\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X,\n dtype=X.dtype)\n\n # when X is sparse we only remove offset from y\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X,\n sample_weight=sample_weight, return_mean=True)\n\n if solver == 'sag' and sparse.issparse(X) and self.fit_intercept:\n self.coef_, self.n_iter_, self.intercept_ = _ridge_regression(\n X, y, alpha=self.alpha, sample_weight=sample_weight,\n max_iter=self.max_iter, tol=self.tol, solver='sag',\n random_state=self.random_state, return_n_iter=True,\n return_intercept=True, check_input=False)\n # add the offset which was subtracted by _preprocess_data\n self.intercept_ += y_offset\n\n else:\n if sparse.issparse(X) and self.fit_intercept:\n # required to fit intercept with sparse_cg solver\n params = {'X_offset': X_offset, 'X_scale': X_scale}\n else:\n # for dense matrices or when intercept is set to 0\n params = {}\n\n self.coef_, self.n_iter_ = _ridge_regression(\n X, y, alpha=self.alpha, sample_weight=sample_weight,\n max_iter=self.max_iter, tol=self.tol, solver=solver,\n random_state=self.random_state, return_n_iter=True,\n return_intercept=False, check_input=False, **params)\n self._set_intercept(X_offset, y_offset, X_scale)\n\n return self\n\n\nclass Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge):\n \"\"\"Linear least squares with l2 regularization.\n\n Minimizes the objective function::\n\n ||y - Xw||^2_2 + alpha * ||w||^2_2\n\n This model solves a regression model where the loss function is\n the linear least squares function and regularization is given by\n the l2-norm. Also known as Ridge Regression or Tikhonov regularization.\n This estimator has built-in support for multi-variate regression\n (i.e., when y is a 2d-array of shape (n_samples, n_targets)).\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alpha : {float, ndarray of shape (n_targets,)}, default=1.0\n Regularization strength; must be a positive float. 
Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``1 / (2C)`` in other linear models such as\n :class:`~sklearn.linear_model.LogisticRegression` or\n :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are\n assumed to be specific to the targets. Hence they must correspond in\n number.\n\n fit_intercept : bool, default=True\n Whether to fit the intercept for this model. If set\n to false, no intercept will be used in calculations\n (i.e. ``X`` and ``y`` are expected to be centered).\n\n normalize : bool, default=False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : bool, default=True\n If True, X will be copied; else, it may be overwritten.\n\n max_iter : int, default=None\n Maximum number of iterations for conjugate gradient solver.\n For 'sparse_cg' and 'lsqr' solvers, the default value is determined\n by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.\n\n tol : float, default=1e-3\n Precision of the solution.\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}, \\\n default='auto'\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its improved, unbiased version named SAGA. Both methods also use an\n iterative procedure, and are often faster than other solvers when\n both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n All last five solvers support both dense and sparse data. However, only\n 'sag' and 'sparse_cg' supports sparse input when `fit_intercept` is\n True.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n random_state : int, RandomState instance, default=None\n Used when ``solver`` == 'sag' or 'saga' to shuffle the data.\n See :term:`Glossary <random_state>` for details.\n\n .. versionadded:: 0.17\n `random_state` to support Stochastic Average Gradient.\n\n Attributes\n ----------\n coef_ : ndarray of shape (n_features,) or (n_targets, n_features)\n Weight vector(s).\n\n intercept_ : float or ndarray of shape (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n n_iter_ : None or ndarray of shape (n_targets,)\n Actual number of iterations for each target. 
Available only for\n sag and lsqr solvers. Other solvers will return None.\n\n .. versionadded:: 0.17\n\n See also\n --------\n RidgeClassifier : Ridge classifier\n RidgeCV : Ridge regression with built-in cross validation\n :class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression\n combines ridge regression with the kernel trick\n\n Examples\n --------\n >>> from sklearn.linear_model import Ridge\n >>> import numpy as np\n >>> n_samples, n_features = 10, 5\n >>> rng = np.random.RandomState(0)\n >>> y = rng.randn(n_samples)\n >>> X = rng.randn(n_samples, n_features)\n >>> clf = Ridge(alpha=1.0)\n >>> clf.fit(X, y)\n Ridge()\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, solver=\"auto\",\n random_state=None):\n super().__init__(\n alpha=alpha, fit_intercept=fit_intercept,\n normalize=normalize, copy_X=copy_X,\n max_iter=max_iter, tol=tol, solver=solver,\n random_state=random_state)\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training data\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Target values\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n return super().fit(X, y, sample_weight=sample_weight)\n\n\nclass RidgeClassifier(LinearClassifierMixin, _BaseRidge):\n \"\"\"Classifier using Ridge regression.\n\n This classifier first converts the target values into ``{-1, 1}`` and\n then treats the problem as a regression task (multi-output regression in\n the multiclass case).\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alpha : float, default=1.0\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``1 / (2C)`` in other linear models such as\n :class:`~sklearn.linear_model.LogisticRegression` or\n :class:`~sklearn.svm.LinearSVC`.\n\n fit_intercept : bool, default=True\n Whether to calculate the intercept for this model. If set to false, no\n intercept will be used in calculations (e.g. 
data is expected to be\n already centered).\n\n normalize : bool, default=False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n copy_X : bool, default=True\n If True, X will be copied; else, it may be overwritten.\n\n max_iter : int, default=None\n Maximum number of iterations for conjugate gradient solver.\n The default value is determined by scipy.sparse.linalg.\n\n tol : float, default=1e-3\n Precision of the solution.\n\n class_weight : dict or 'balanced', default=None\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}, \\\n default='auto'\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its unbiased and more flexible version named SAGA. Both methods\n use an iterative procedure, and are often faster than other solvers\n when both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n .. versionadded:: 0.19\n SAGA solver.\n\n random_state : int, RandomState instance, default=None\n Used when ``solver`` == 'sag' or 'saga' to shuffle the data.\n See :term:`Glossary <random_state>` for details.\n\n Attributes\n ----------\n coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)\n Coefficient of the features in the decision function.\n\n ``coef_`` is of shape (1, n_features) when the given problem is binary.\n\n intercept_ : float or ndarray of shape (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n n_iter_ : None or ndarray of shape (n_targets,)\n Actual number of iterations for each target. Available only for\n sag and lsqr solvers. Other solvers will return None.\n\n classes_ : ndarray of shape (n_classes,)\n The classes labels.\n\n See Also\n --------\n Ridge : Ridge regression.\n RidgeClassifierCV : Ridge classifier with built-in cross validation.\n\n Notes\n -----\n For multi-class classification, n_class classifiers are trained in\n a one-versus-all approach. 
Concretely, this is implemented by taking\n advantage of the multi-variate response support in Ridge.\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.linear_model import RidgeClassifier\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> clf = RidgeClassifier().fit(X, y)\n >>> clf.score(X, y)\n 0.9595...\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, class_weight=None,\n solver=\"auto\", random_state=None):\n super().__init__(\n alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,\n copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,\n random_state=random_state)\n self.class_weight = class_weight\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge classifier model.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : ndarray of shape (n_samples,)\n Target values.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n .. versionadded:: 0.17\n *sample_weight* support to Classifier.\n\n Returns\n -------\n self : object\n Instance of the estimator.\n \"\"\"\n _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X),\n self.solver)\n X, y = self._validate_data(X, y, accept_sparse=_accept_sparse,\n multi_output=True, y_numeric=False)\n sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\n\n self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)\n Y = self._label_binarizer.fit_transform(y)\n if not self._label_binarizer.y_type_.startswith('multilabel'):\n y = column_or_1d(y, warn=True)\n else:\n # we don't (yet) support multi-label classification in Ridge\n raise ValueError(\n \"%s doesn't support multi-label classification\" % (\n self.__class__.__name__))\n\n if self.class_weight:\n # modify the sample weights with the corresponding class weight\n sample_weight = (sample_weight *\n compute_sample_weight(self.class_weight, y))\n\n super().fit(X, Y, sample_weight=sample_weight)\n return self\n\n @property\n def classes_(self):\n return self._label_binarizer.classes_\n\n\ndef _check_gcv_mode(X, gcv_mode):\n possible_gcv_modes = [None, 'auto', 'svd', 'eigen']\n if gcv_mode not in possible_gcv_modes:\n raise ValueError(\n \"Unknown value for 'gcv_mode'. \"\n \"Got {} instead of one of {}\" .format(\n gcv_mode, possible_gcv_modes))\n if gcv_mode in ['eigen', 'svd']:\n return gcv_mode\n # if X has more rows than columns, use decomposition of X^T.X,\n # otherwise X.X^T\n if X.shape[0] > X.shape[1]:\n return 'svd'\n return 'eigen'\n\n\ndef _find_smallest_angle(query, vectors):\n \"\"\"Find the column of vectors that is most aligned with the query.\n\n Both query and the columns of vectors must have their l2 norm equal to 1.\n\n Parameters\n ----------\n query : ndarray of shape (n_samples,)\n Normalized query vector.\n\n vectors : ndarray of shape (n_samples, n_features)\n Vectors to which we compare query, as columns. 
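# --- Illustrative sketch (editor addition, not part of the scikit-learn source shown here) ---
# What RidgeClassifier.fit above does, replayed with public estimators: the
# labels are binarized to {-1, +1} columns, a multi-output Ridge regression is
# fitted on that indicator matrix, and predict is an argmax over the resulting
# decision values. Assumes the default dense 'auto' solver on both sides so
# the two fits are deterministic and identical.
import numpy as np
from sklearn.linear_model import Ridge, RidgeClassifier
from sklearn.preprocessing import LabelBinarizer

rng = np.random.RandomState(0)
X = rng.randn(30, 4)
y = rng.randint(0, 3, size=30)                 # three classes

clf = RidgeClassifier(alpha=1.0).fit(X, y)

Y = LabelBinarizer(neg_label=-1, pos_label=1).fit_transform(y)   # (30, 3)
reg = Ridge(alpha=1.0).fit(X, Y)
manual = clf.classes_[reg.predict(X).argmax(axis=1)]
assert np.array_equal(clf.predict(X), manual)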
Must be normalized.\n \"\"\"\n abs_cosine = np.abs(query.dot(vectors))\n index = np.argmax(abs_cosine)\n return index\n\n\nclass _X_CenterStackOp(sparse.linalg.LinearOperator):\n \"\"\"Behaves as centered and scaled X with an added intercept column.\n\n This operator behaves as\n np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]])\n \"\"\"\n\n def __init__(self, X, X_mean, sqrt_sw):\n n_samples, n_features = X.shape\n super().__init__(X.dtype, (n_samples, n_features + 1))\n self.X = X\n self.X_mean = X_mean\n self.sqrt_sw = sqrt_sw\n\n def _matvec(self, v):\n v = v.ravel()\n return safe_sparse_dot(\n self.X, v[:-1], dense_output=True\n ) - self.sqrt_sw * self.X_mean.dot(v[:-1]) + v[-1] * self.sqrt_sw\n\n def _matmat(self, v):\n return (\n safe_sparse_dot(self.X, v[:-1], dense_output=True) -\n self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1]) + v[-1] *\n self.sqrt_sw[:, None])\n\n def _transpose(self):\n return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw)\n\n\nclass _XT_CenterStackOp(sparse.linalg.LinearOperator):\n \"\"\"Behaves as transposed centered and scaled X with an intercept column.\n\n This operator behaves as\n np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T\n \"\"\"\n\n def __init__(self, X, X_mean, sqrt_sw):\n n_samples, n_features = X.shape\n super().__init__(X.dtype, (n_features + 1, n_samples))\n self.X = X\n self.X_mean = X_mean\n self.sqrt_sw = sqrt_sw\n\n def _matvec(self, v):\n v = v.ravel()\n n_features = self.shape[0]\n res = np.empty(n_features, dtype=self.X.dtype)\n res[:-1] = (\n safe_sparse_dot(self.X.T, v, dense_output=True) -\n (self.X_mean * self.sqrt_sw.dot(v))\n )\n res[-1] = np.dot(v, self.sqrt_sw)\n return res\n\n def _matmat(self, v):\n n_features = self.shape[0]\n res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype)\n res[:-1] = (\n safe_sparse_dot(self.X.T, v, dense_output=True) -\n self.X_mean[:, None] * self.sqrt_sw.dot(v)\n )\n res[-1] = np.dot(self.sqrt_sw, v)\n return res\n\n\nclass _IdentityRegressor:\n \"\"\"Fake regressor which will directly output the prediction.\"\"\"\n\n def decision_function(self, y_predict):\n return y_predict\n\n def predict(self, y_predict):\n return y_predict\n\n\nclass _IdentityClassifier(LinearClassifierMixin):\n \"\"\"Fake classifier which will directly output the prediction.\n\n We inherit from LinearClassifierMixin to get the proper shape for the\n output `y`.\n \"\"\"\n def __init__(self, classes):\n self.classes_ = classes\n\n def decision_function(self, y_predict):\n return y_predict\n\n\nclass _RidgeGCV(LinearModel):\n \"\"\"Ridge regression with built-in Leave-one-out Cross-Validation.\n\n This class is not intended to be used directly. 
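# --- Illustrative sketch (editor addition, not part of the scikit-learn source shown here) ---
# The matrix that _X_CenterStackOp above stands in for, checked numerically:
# its matvec rule must agree with the explicit dense
# np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]). Plain NumPy,
# arbitrary X_mean and weights, no sklearn internals required.
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(6, 3)
sqrt_sw = np.sqrt(rng.rand(6) + 0.5)       # square roots of sample weights
X_mean = rng.randn(3)                      # any feature-mean vector will do
v = rng.randn(4)                           # n_features + 1 entries

dense = np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]])
# matvec as implemented by the operator:
matvec = X @ v[:-1] - sqrt_sw * X_mean.dot(v[:-1]) + v[-1] * sqrt_sw
assert np.allclose(dense @ v, matvec)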
Use RidgeCV instead.\n\n Notes\n -----\n\n We want to solve (K + alpha*Id)c = y,\n where K = X X^T is the kernel matrix.\n\n Let G = (K + alpha*Id).\n\n Dual solution: c = G^-1y\n Primal solution: w = X^T c\n\n Compute eigendecomposition K = Q V Q^T.\n Then G^-1 = Q (V + alpha*Id)^-1 Q^T,\n where (V + alpha*Id) is diagonal.\n It is thus inexpensive to inverse for many alphas.\n\n Let loov be the vector of prediction values for each example\n when the model was fitted with all examples but this example.\n\n loov = (KG^-1Y - diag(KG^-1)Y) / diag(I-KG^-1)\n\n Let looe be the vector of prediction errors for each example\n when the model was fitted with all examples but this example.\n\n looe = y - loov = c / diag(G^-1)\n\n The best score (negative mean squared error or user-provided scoring) is\n stored in the `best_score_` attribute, and the selected hyperparameter in\n `alpha_`.\n\n References\n ----------\n http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf\n https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, alphas=(0.1, 1.0, 10.0), *,\n fit_intercept=True, normalize=False,\n scoring=None, copy_X=True,\n gcv_mode=None, store_cv_values=False,\n is_clf=False, alpha_per_target=False):\n self.alphas = np.asarray(alphas)\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.scoring = scoring\n self.copy_X = copy_X\n self.gcv_mode = gcv_mode\n self.store_cv_values = store_cv_values\n self.is_clf = is_clf\n self.alpha_per_target = alpha_per_target\n\n @staticmethod\n def _decomp_diag(v_prime, Q):\n # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))\n return (v_prime * Q ** 2).sum(axis=-1)\n\n @staticmethod\n def _diag_dot(D, B):\n # compute dot(diag(D), B)\n if len(B.shape) > 1:\n # handle case where B is > 1-d\n D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]\n return D * B\n\n def _compute_gram(self, X, sqrt_sw):\n \"\"\"Computes the Gram matrix XX^T with possible centering.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The preprocessed design matrix.\n\n sqrt_sw : ndarray of shape (n_samples,)\n square roots of sample weights\n\n Returns\n -------\n gram : ndarray of shape (n_samples, n_samples)\n The Gram matrix.\n X_mean : ndarray of shape (n_feature,)\n The weighted mean of ``X`` for each feature.\n\n Notes\n -----\n When X is dense the centering has been done in preprocessing\n so the mean is 0 and we just compute XX^T.\n\n When X is sparse it has not been centered in preprocessing, but it has\n been scaled by sqrt(sample weights).\n\n When self.fit_intercept is False no centering is done.\n\n The centered X is never actually computed because centering would break\n the sparsity of X.\n \"\"\"\n center = self.fit_intercept and sparse.issparse(X)\n if not center:\n # in this case centering has been done in preprocessing\n # or we are not fitting an intercept.\n X_mean = np.zeros(X.shape[1], dtype=X.dtype)\n return safe_sparse_dot(X, X.T, dense_output=True), X_mean\n # X is sparse\n n_samples = X.shape[0]\n sample_weight_matrix = sparse.dia_matrix(\n (sqrt_sw, 0), shape=(n_samples, n_samples))\n X_weighted = sample_weight_matrix.dot(X)\n X_mean, _ = mean_variance_axis(X_weighted, axis=0)\n X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)\n X_mX = sqrt_sw[:, None] * safe_sparse_dot(\n X_mean, X.T, dense_output=True)\n X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)\n return (safe_sparse_dot(X, X.T, 
dense_output=True) + X_mX_m\n - X_mX - X_mX.T, X_mean)\n\n def _compute_covariance(self, X, sqrt_sw):\n \"\"\"Computes covariance matrix X^TX with possible centering.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n The preprocessed design matrix.\n\n sqrt_sw : ndarray of shape (n_samples,)\n square roots of sample weights\n\n Returns\n -------\n covariance : ndarray of shape (n_features, n_features)\n The covariance matrix.\n X_mean : ndarray of shape (n_feature,)\n The weighted mean of ``X`` for each feature.\n\n Notes\n -----\n Since X is sparse it has not been centered in preprocessing, but it has\n been scaled by sqrt(sample weights).\n\n When self.fit_intercept is False no centering is done.\n\n The centered X is never actually computed because centering would break\n the sparsity of X.\n \"\"\"\n if not self.fit_intercept:\n # in this case centering has been done in preprocessing\n # or we are not fitting an intercept.\n X_mean = np.zeros(X.shape[1], dtype=X.dtype)\n return safe_sparse_dot(X.T, X, dense_output=True), X_mean\n # this function only gets called for sparse X\n n_samples = X.shape[0]\n sample_weight_matrix = sparse.dia_matrix(\n (sqrt_sw, 0), shape=(n_samples, n_samples))\n X_weighted = sample_weight_matrix.dot(X)\n X_mean, _ = mean_variance_axis(X_weighted, axis=0)\n X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw)\n weight_sum = sqrt_sw.dot(sqrt_sw)\n return (safe_sparse_dot(X.T, X, dense_output=True) -\n weight_sum * np.outer(X_mean, X_mean),\n X_mean)\n\n def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw):\n \"\"\"Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T)\n without explicitely centering X nor computing X.dot(A)\n when X is sparse.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n\n A : ndarray of shape (n_features, n_features)\n\n X_mean : ndarray of shape (n_features,)\n\n sqrt_sw : ndarray of shape (n_features,)\n square roots of sample weights\n\n Returns\n -------\n diag : np.ndarray, shape (n_samples,)\n The computed diagonal.\n \"\"\"\n intercept_col = scale = sqrt_sw\n batch_size = X.shape[1]\n diag = np.empty(X.shape[0], dtype=X.dtype)\n for start in range(0, X.shape[0], batch_size):\n batch = slice(start, min(X.shape[0], start + batch_size), 1)\n X_batch = np.empty(\n (X[batch].shape[0], X.shape[1] + self.fit_intercept),\n dtype=X.dtype\n )\n if self.fit_intercept:\n X_batch[:, :-1] = X[batch].A - X_mean * scale[batch][:, None]\n X_batch[:, -1] = intercept_col[batch]\n else:\n X_batch = X[batch].A\n diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1)\n return diag\n\n def _eigen_decompose_gram(self, X, y, sqrt_sw):\n \"\"\"Eigendecomposition of X.X^T, used when n_samples <= n_features.\"\"\"\n # if X is dense it has already been centered in preprocessing\n K, X_mean = self._compute_gram(X, sqrt_sw)\n if self.fit_intercept:\n # to emulate centering X with sample weights,\n # ie removing the weighted average, we add a column\n # containing the square roots of the sample weights.\n # by centering, it is orthogonal to the other columns\n K += np.outer(sqrt_sw, sqrt_sw)\n eigvals, Q = linalg.eigh(K)\n QT_y = np.dot(Q.T, y)\n return X_mean, eigvals, Q, QT_y\n\n def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y):\n \"\"\"Compute dual coefficients and diagonal of G^-1.\n\n Used when we have a decomposition of X.X^T (n_samples <= n_features).\n \"\"\"\n w = 1. 
/ (eigvals + alpha)\n if self.fit_intercept:\n # the vector containing the square roots of the sample weights (1\n # when no sample weights) is the eigenvector of XX^T which\n # corresponds to the intercept; we cancel the regularization on\n # this dimension. the corresponding eigenvalue is\n # sum(sample_weight).\n normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)\n intercept_dim = _find_smallest_angle(normalized_sw, Q)\n w[intercept_dim] = 0 # cancel regularization for the intercept\n\n c = np.dot(Q, self._diag_dot(w, QT_y))\n G_inverse_diag = self._decomp_diag(w, Q)\n # handle case where y is 2-d\n if len(y.shape) != 1:\n G_inverse_diag = G_inverse_diag[:, np.newaxis]\n return G_inverse_diag, c\n\n def _eigen_decompose_covariance(self, X, y, sqrt_sw):\n \"\"\"Eigendecomposition of X^T.X, used when n_samples > n_features\n and X is sparse.\n \"\"\"\n n_samples, n_features = X.shape\n cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype)\n cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw)\n if not self.fit_intercept:\n cov = cov[:-1, :-1]\n # to emulate centering X with sample weights,\n # ie removing the weighted average, we add a column\n # containing the square roots of the sample weights.\n # by centering, it is orthogonal to the other columns\n # when all samples have the same weight we add a column of 1\n else:\n cov[-1] = 0\n cov[:, -1] = 0\n cov[-1, -1] = sqrt_sw.dot(sqrt_sw)\n nullspace_dim = max(0, n_features - n_samples)\n eigvals, V = linalg.eigh(cov)\n # remove eigenvalues and vectors in the null space of X^T.X\n eigvals = eigvals[nullspace_dim:]\n V = V[:, nullspace_dim:]\n return X_mean, eigvals, V, X\n\n def _solve_eigen_covariance_no_intercept(\n self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):\n \"\"\"Compute dual coefficients and diagonal of G^-1.\n\n Used when we have a decomposition of X^T.X\n (n_samples > n_features and X is sparse), and not fitting an intercept.\n \"\"\"\n w = 1 / (eigvals + alpha)\n A = (V * w).dot(V.T)\n AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True))\n y_hat = safe_sparse_dot(X, AXy, dense_output=True)\n hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)\n if len(y.shape) != 1:\n # handle case where y is 2-d\n hat_diag = hat_diag[:, np.newaxis]\n return (1 - hat_diag) / alpha, (y - y_hat) / alpha\n\n def _solve_eigen_covariance_intercept(\n self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):\n \"\"\"Compute dual coefficients and diagonal of G^-1.\n\n Used when we have a decomposition of X^T.X\n (n_samples > n_features and X is sparse),\n and we are fitting an intercept.\n \"\"\"\n # the vector [0, 0, ..., 0, 1]\n # is the eigenvector of X^TX which\n # corresponds to the intercept; we cancel the regularization on\n # this dimension. the corresponding eigenvalue is\n # sum(sample_weight), e.g. 
n when uniform sample weights.\n intercept_sv = np.zeros(V.shape[0])\n intercept_sv[-1] = 1\n intercept_dim = _find_smallest_angle(intercept_sv, V)\n w = 1 / (eigvals + alpha)\n w[intercept_dim] = 1 / eigvals[intercept_dim]\n A = (V * w).dot(V.T)\n # add a column to X containing the square roots of sample weights\n X_op = _X_CenterStackOp(X, X_mean, sqrt_sw)\n AXy = A.dot(X_op.T.dot(y))\n y_hat = X_op.dot(AXy)\n hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)\n # return (1 - hat_diag), (y - y_hat)\n if len(y.shape) != 1:\n # handle case where y is 2-d\n hat_diag = hat_diag[:, np.newaxis]\n return (1 - hat_diag) / alpha, (y - y_hat) / alpha\n\n def _solve_eigen_covariance(\n self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):\n \"\"\"Compute dual coefficients and diagonal of G^-1.\n\n Used when we have a decomposition of X^T.X\n (n_samples > n_features and X is sparse).\n \"\"\"\n if self.fit_intercept:\n return self._solve_eigen_covariance_intercept(\n alpha, y, sqrt_sw, X_mean, eigvals, V, X)\n return self._solve_eigen_covariance_no_intercept(\n alpha, y, sqrt_sw, X_mean, eigvals, V, X)\n\n def _svd_decompose_design_matrix(self, X, y, sqrt_sw):\n # X already centered\n X_mean = np.zeros(X.shape[1], dtype=X.dtype)\n if self.fit_intercept:\n # to emulate fit_intercept=True situation, add a column\n # containing the square roots of the sample weights\n # by centering, the other columns are orthogonal to that one\n intercept_column = sqrt_sw[:, None]\n X = np.hstack((X, intercept_column))\n U, singvals, _ = linalg.svd(X, full_matrices=0)\n singvals_sq = singvals ** 2\n UT_y = np.dot(U.T, y)\n return X_mean, singvals_sq, U, UT_y\n\n def _solve_svd_design_matrix(\n self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):\n \"\"\"Compute dual coefficients and diagonal of G^-1.\n\n Used when we have an SVD decomposition of X\n (n_samples > n_features and X is dense).\n \"\"\"\n w = ((singvals_sq + alpha) ** -1) - (alpha ** -1)\n if self.fit_intercept:\n # detect intercept column\n normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)\n intercept_dim = _find_smallest_angle(normalized_sw, U)\n # cancel the regularization for the intercept\n w[intercept_dim] = - (alpha ** -1)\n c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y\n G_inverse_diag = self._decomp_diag(w, U) + (alpha ** -1)\n if len(y.shape) != 1:\n # handle case where y is 2-d\n G_inverse_diag = G_inverse_diag[:, np.newaxis]\n return G_inverse_diag, c\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model with gcv.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training data. Will be cast to float64 if necessary.\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Target values. Will be cast to float64 if necessary.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n Returns\n -------\n self : object\n \"\"\"\n X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'],\n dtype=[np.float64],\n multi_output=True, y_numeric=True)\n\n # alpha_per_target cannot be used in classifier mode. 
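# --- Illustrative sketch (editor addition, not part of the scikit-learn source shown here) ---
# Numeric check of the leave-one-out identity quoted in the _RidgeGCV notes
# above: with G = X X^T + alpha * I and dual coefficients c = G^-1 y, the LOO
# residual of sample i is c_i / (G^-1)_{ii}. Brute-force refits of a ridge
# model without intercept reproduce it; kept tiny so the Python loop is cheap.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
n, p, alpha = 8, 12, 1.0
X, y = rng.randn(n, p), rng.randn(n)

G_inv = np.linalg.inv(X @ X.T + alpha * np.eye(n))
c = G_inv @ y
looe_closed_form = c / np.diag(G_inv)

looe_brute = np.empty(n)
for i in range(n):
    keep = np.arange(n) != i
    w = Ridge(alpha=alpha, fit_intercept=False).fit(X[keep], y[keep]).coef_
    looe_brute[i] = y[i] - X[i] @ w

assert np.allclose(looe_closed_form, looe_brute)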
All subclasses\n # of _RidgeGCV that are classifiers keep alpha_per_target at its\n # default value: False, so the condition below should never happen.\n assert not (self.is_clf and self.alpha_per_target)\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X,\n dtype=X.dtype)\n\n if np.any(self.alphas <= 0):\n raise ValueError(\n \"alphas must be positive. Got {} containing some \"\n \"negative or null value instead.\".format(self.alphas))\n\n X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X,\n sample_weight=sample_weight)\n\n gcv_mode = _check_gcv_mode(X, self.gcv_mode)\n\n if gcv_mode == 'eigen':\n decompose = self._eigen_decompose_gram\n solve = self._solve_eigen_gram\n elif gcv_mode == 'svd':\n if sparse.issparse(X):\n decompose = self._eigen_decompose_covariance\n solve = self._solve_eigen_covariance\n else:\n decompose = self._svd_decompose_design_matrix\n solve = self._solve_svd_design_matrix\n\n n_samples = X.shape[0]\n\n if sample_weight is not None:\n X, y = _rescale_data(X, y, sample_weight)\n sqrt_sw = np.sqrt(sample_weight)\n else:\n sqrt_sw = np.ones(n_samples, dtype=X.dtype)\n\n X_mean, *decomposition = decompose(X, y, sqrt_sw)\n\n scorer = check_scoring(self, scoring=self.scoring, allow_none=True)\n error = scorer is None\n\n n_y = 1 if len(y.shape) == 1 else y.shape[1]\n n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)\n\n if self.store_cv_values:\n self.cv_values_ = np.empty(\n (n_samples * n_y, n_alphas), dtype=X.dtype)\n\n best_coef, best_score, best_alpha = None, None, None\n\n for i, alpha in enumerate(np.atleast_1d(self.alphas)):\n G_inverse_diag, c = solve(\n float(alpha), y, sqrt_sw, X_mean, *decomposition)\n if error:\n squared_errors = (c / G_inverse_diag) ** 2\n if self.alpha_per_target:\n alpha_score = -squared_errors.mean(axis=0)\n else:\n alpha_score = -squared_errors.mean()\n if self.store_cv_values:\n self.cv_values_[:, i] = squared_errors.ravel()\n else:\n predictions = y - (c / G_inverse_diag)\n if self.store_cv_values:\n self.cv_values_[:, i] = predictions.ravel()\n\n if self.is_clf:\n identity_estimator = _IdentityClassifier(\n classes=np.arange(n_y)\n )\n alpha_score = scorer(identity_estimator,\n predictions, y.argmax(axis=1))\n else:\n identity_estimator = _IdentityRegressor()\n if self.alpha_per_target:\n alpha_score = np.array([\n scorer(identity_estimator,\n predictions[:, j], y[:, j])\n for j in range(n_y)\n ])\n else:\n alpha_score = scorer(identity_estimator,\n predictions.ravel(), y.ravel())\n\n # Keep track of the best model\n if best_score is None:\n # initialize\n if self.alpha_per_target and n_y > 1:\n best_coef = c\n best_score = np.atleast_1d(alpha_score)\n best_alpha = np.full(n_y, alpha)\n else:\n best_coef = c\n best_score = alpha_score\n best_alpha = alpha\n else:\n # update\n if self.alpha_per_target and n_y > 1:\n to_update = alpha_score > best_score\n best_coef[:, to_update] = c[:, to_update]\n best_score[to_update] = alpha_score[to_update]\n best_alpha[to_update] = alpha\n elif alpha_score > best_score:\n best_coef, best_score, best_alpha = c, alpha_score, alpha\n\n self.alpha_ = best_alpha\n self.best_score_ = best_score\n self.dual_coef_ = best_coef\n self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)\n\n X_offset += X_mean * X_scale\n self._set_intercept(X_offset, y_offset, X_scale)\n\n if self.store_cv_values:\n if len(y.shape) == 1:\n cv_values_shape = n_samples, n_alphas\n else:\n cv_values_shape = 
n_samples, n_y, n_alphas\n self.cv_values_ = self.cv_values_.reshape(cv_values_shape)\n\n return self\n\n\nclass _BaseRidgeCV(LinearModel):\n @_deprecate_positional_args\n def __init__(self, alphas=(0.1, 1.0, 10.0), *,\n fit_intercept=True, normalize=False, scoring=None,\n cv=None, gcv_mode=None, store_cv_values=False,\n alpha_per_target=False):\n self.alphas = np.asarray(alphas)\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.scoring = scoring\n self.cv = cv\n self.gcv_mode = gcv_mode\n self.store_cv_values = store_cv_values\n self.alpha_per_target = alpha_per_target\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model with cv.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training data. If using GCV, will be cast to float64\n if necessary.\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Target values. Will be cast to X's dtype if necessary.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n Returns\n -------\n self : object\n\n Notes\n -----\n When sample_weight is provided, the selected hyperparameter may depend\n on whether we use leave-one-out cross-validation (cv=None or cv='auto')\n or another form of cross-validation, because only leave-one-out\n cross-validation takes the sample weights into account when computing\n the validation score.\n \"\"\"\n cv = self.cv\n if cv is None:\n estimator = _RidgeGCV(self.alphas,\n fit_intercept=self.fit_intercept,\n normalize=self.normalize,\n scoring=self.scoring,\n gcv_mode=self.gcv_mode,\n store_cv_values=self.store_cv_values,\n is_clf=is_classifier(self),\n alpha_per_target=self.alpha_per_target)\n estimator.fit(X, y, sample_weight=sample_weight)\n self.alpha_ = estimator.alpha_\n self.best_score_ = estimator.best_score_\n if self.store_cv_values:\n self.cv_values_ = estimator.cv_values_\n else:\n if self.store_cv_values:\n raise ValueError(\"cv!=None and store_cv_values=True\"\n \" are incompatible\")\n if self.alpha_per_target:\n raise ValueError(\"cv!=None and alpha_per_target=True\"\n \" are incompatible\")\n parameters = {'alpha': self.alphas}\n solver = 'sparse_cg' if sparse.issparse(X) else 'auto'\n model = RidgeClassifier if is_classifier(self) else Ridge\n gs = GridSearchCV(model(fit_intercept=self.fit_intercept,\n normalize=self.normalize,\n solver=solver),\n parameters, cv=cv, scoring=self.scoring)\n gs.fit(X, y, sample_weight=sample_weight)\n estimator = gs.best_estimator_\n self.alpha_ = gs.best_estimator_.alpha\n self.best_score_ = gs.best_score_\n\n self.coef_ = estimator.coef_\n self.intercept_ = estimator.intercept_\n self.n_features_in_ = estimator.n_features_in_\n\n return self\n\n\nclass RidgeCV(MultiOutputMixin, RegressorMixin, _BaseRidgeCV):\n \"\"\"Ridge regression with built-in cross-validation.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n By default, it performs Leave-One-Out Cross-Validation, which is a form of\n efficient Leave-One-Out cross-validation.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)\n Array of alpha values to try.\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. 
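# --- Illustrative sketch (editor addition, not part of the scikit-learn source shown here) ---
# How the alpha loop above surfaces in the public API: with the default
# scoring (None) and cv=None, cv_values_ holds the squared LOO residuals for
# every alpha and alpha_ is the grid value whose mean is smallest. Assumes a
# release matching this source, where store_cv_values is still available
# (and, as enforced above, only compatible with cv=None).
import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.linear_model import RidgeCV

X, y = load_diabetes(return_X_y=True)
alphas = np.array([0.01, 0.1, 1.0, 10.0])

reg = RidgeCV(alphas=alphas, store_cv_values=True).fit(X, y)
print(reg.cv_values_.shape)                                   # (442, 4)
assert reg.alpha_ == alphas[reg.cv_values_.mean(axis=0).argmin()]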
Larger values specify stronger regularization.\n Alpha corresponds to ``1 / (2C)`` in other linear models such as\n :class:`~sklearn.linear_model.LogisticRegression` or\n :class:`~sklearn.svm.LinearSVC`.\n If using Leave-One-Out cross-validation, alphas must be positive.\n\n fit_intercept : bool, default=True\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (i.e. data is expected to be centered).\n\n normalize : bool, default=False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n scoring : string, callable, default=None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n If None, the negative mean squared error if cv is 'auto' or None\n (i.e. when using leave-one-out cross-validation), and r2 score\n otherwise.\n\n cv : int, cross-validation generator or an iterable, default=None\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the efficient Leave-One-Out cross-validation\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n gcv_mode : {'auto', 'svd', eigen'}, default='auto'\n Flag indicating which strategy to use when performing\n Leave-One-Out Cross-Validation. Options are::\n\n 'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen'\n 'svd' : force use of singular value decomposition of X when X is\n dense, eigenvalue decomposition of X^T.X when X is sparse.\n 'eigen' : force computation via eigendecomposition of X.X^T\n\n The 'auto' mode is the default and is intended to pick the cheaper\n option of the two depending on the shape of the training data.\n\n store_cv_values : bool, default=False\n Flag indicating if the cross-validation values corresponding to\n each alpha should be stored in the ``cv_values_`` attribute (see\n below). This flag is only compatible with ``cv=None`` (i.e. using\n Leave-One-Out Cross-Validation).\n\n alpha_per_target : bool, default=False\n Flag indicating whether to optimize the alpha value (picked from the\n `alphas` parameter list) for each target separately (for multi-output\n settings: multiple prediction targets). When set to `True`, after\n fitting, the `alpha_` attribute will contain a value for each target.\n When set to `False`, a single alpha is used for all targets.\n\n .. versionadded:: 0.24\n\n Attributes\n ----------\n cv_values_ : ndarray of shape (n_samples, n_alphas) or \\\n shape (n_samples, n_targets, n_alphas), optional\n Cross-validation values for each alpha (only available if\n ``store_cv_values=True`` and ``cv=None``). 
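# --- Illustrative sketch (editor addition, not part of the scikit-learn source shown here) ---
# The alpha_per_target flag documented above (new in 0.24): with a 2-D y, one
# regularization strength is kept per output column instead of a single
# shared value. The two synthetic targets below differ only in noise level,
# so they may well end up with different alphas.
import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
Y = np.column_stack([X @ rng.randn(5) + 0.01 * rng.randn(50),   # low noise
                     X @ rng.randn(5) + 2.00 * rng.randn(50)])  # high noise

reg = RidgeCV(alphas=[1e-3, 1e-1, 10.0], alpha_per_target=True).fit(X, Y)
print(reg.alpha_)        # ndarray of shape (2,): one alpha per target
print(reg.best_score_)   # matching per-target scores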
After ``fit()`` has been\n called, this attribute will contain the mean squared errors\n (by default) or the values of the ``{loss,score}_func`` function\n (if provided in the constructor).\n\n coef_ : ndarray of shape (n_features) or (n_targets, n_features)\n Weight vector(s).\n\n intercept_ : float or ndarray of shape (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n alpha_ : float or ndarray of shape (n_targets,)\n Estimated regularization parameter, or, if ``alpha_per_target=True``,\n the estimated regularization parameter for each target.\n\n best_score_ : float or ndarray of shape (n_targets,)\n Score of base estimator with best alpha, or, if\n ``alpha_per_target=True``, a score for each target.\n\n Examples\n --------\n >>> from sklearn.datasets import load_diabetes\n >>> from sklearn.linear_model import RidgeCV\n >>> X, y = load_diabetes(return_X_y=True)\n >>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)\n >>> clf.score(X, y)\n 0.5166...\n\n See also\n --------\n Ridge : Ridge regression\n RidgeClassifier : Ridge classifier\n RidgeClassifierCV : Ridge classifier with built-in cross validation\n \"\"\"\n\n\nclass RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n \"\"\"Ridge classifier with built-in cross-validation.\n\n See glossary entry for :term:`cross-validation estimator`.\n\n By default, it performs Leave-One-Out Cross-Validation. Currently,\n only the n_features > n_samples case is handled efficiently.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)\n Array of alpha values to try.\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``1 / (2C)`` in other linear models such as\n :class:`~sklearn.linear_model.LogisticRegression` or\n :class:`~sklearn.svm.LinearSVC`.\n\n fit_intercept : bool, default=True\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (i.e. 
data is expected to be centered).\n\n normalize : bool, default=False\n This parameter is ignored when ``fit_intercept`` is set to False.\n If True, the regressors X will be normalized before regression by\n subtracting the mean and dividing by the l2-norm.\n If you wish to standardize, please use\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\n on an estimator with ``normalize=False``.\n\n scoring : string, callable, default=None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n cv : int, cross-validation generator or an iterable, default=None\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the efficient Leave-One-Out cross-validation\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n class_weight : dict or 'balanced', default=None\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n store_cv_values : bool, default=False\n Flag indicating if the cross-validation values corresponding to\n each alpha should be stored in the ``cv_values_`` attribute (see\n below). This flag is only compatible with ``cv=None`` (i.e. using\n Leave-One-Out Cross-Validation).\n\n Attributes\n ----------\n cv_values_ : ndarray of shape (n_samples, n_targets, n_alphas), optional\n Cross-validation values for each alpha (if ``store_cv_values=True`` and\n ``cv=None``). After ``fit()`` has been called, this attribute will\n contain the mean squared errors (by default) or the values of the\n ``{loss,score}_func`` function (if provided in the constructor). This\n attribute exists only when ``store_cv_values`` is True.\n\n coef_ : ndarray of shape (1, n_features) or (n_targets, n_features)\n Coefficient of the features in the decision function.\n\n ``coef_`` is of shape (1, n_features) when the given problem is binary.\n\n intercept_ : float or ndarray of shape (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n alpha_ : float\n Estimated regularization parameter.\n\n best_score_ : float\n Score of base estimator with best alpha.\n\n classes_ : ndarray of shape (n_classes,)\n The classes labels.\n\n Examples\n --------\n >>> from sklearn.datasets import load_breast_cancer\n >>> from sklearn.linear_model import RidgeClassifierCV\n >>> X, y = load_breast_cancer(return_X_y=True)\n >>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)\n >>> clf.score(X, y)\n 0.9630...\n\n See also\n --------\n Ridge : Ridge regression\n RidgeClassifier : Ridge classifier\n RidgeCV : Ridge regression with built-in cross validation\n\n Notes\n -----\n For multi-class classification, n_class classifiers are trained in\n a one-versus-all approach. 
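# --- Illustrative sketch (editor addition, not part of the scikit-learn source shown here) ---
# Continuing the breast-cancer example above with store_cv_values=True: the
# LOO squared errors are kept per alpha with shape
# (n_samples, n_targets, n_alphas); for this binary problem n_targets is 1.
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import RidgeClassifierCV

X, y = load_breast_cancer(return_X_y=True)
clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1],
                        store_cv_values=True).fit(X, y)
print(clf.cv_values_.shape)   # (569, 1, 4)
print(clf.alpha_)             # selected regularization strength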
Concretely, this is implemented by taking\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, alphas=(0.1, 1.0, 10.0), *, fit_intercept=True,\n normalize=False, scoring=None, cv=None, class_weight=None,\n store_cv_values=False):\n super().__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge classifier with cv.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features. When using GCV,\n will be cast to float64 if necessary.\n\n y : ndarray of shape (n_samples,)\n Target values. Will be cast to X's dtype if necessary.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n Returns\n -------\n self : object\n \"\"\"\n X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'],\n multi_output=True, y_numeric=False)\n sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\n\n self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)\n Y = self._label_binarizer.fit_transform(y)\n if not self._label_binarizer.y_type_.startswith('multilabel'):\n y = column_or_1d(y, warn=True)\n\n if self.class_weight:\n # modify the sample weights with the corresponding class weight\n sample_weight = (sample_weight *\n compute_sample_weight(self.class_weight, y))\n\n target = Y if self.cv is None else y\n _BaseRidgeCV.fit(self, X, target, sample_weight=sample_weight)\n return self\n\n @property\n def classes_(self):\n return self._label_binarizer.classes_\n\n def _more_tags(self):\n return {\n '_xfail_checks': {\n 'check_sample_weights_invariance':\n 'zero sample_weight is not equivalent to removing samples',\n }\n }\n", "# Authors: Alexandre Gramfort <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Olivier Grisel <[email protected]>\n# Andreas Mueller <[email protected]>\n# Joel Nothman <[email protected]>\n# Hamzeh Alsalhi <[email protected]>\n# License: BSD 3 clause\n\nfrom collections import defaultdict\nimport itertools\nimport array\nimport warnings\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom ..base import BaseEstimator, TransformerMixin\n\nfrom ..utils.sparsefuncs import min_max_axis\nfrom ..utils import column_or_1d\nfrom ..utils.validation import check_array\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import _num_samples\nfrom ..utils.validation import _deprecate_positional_args\nfrom ..utils.multiclass import unique_labels\nfrom ..utils.multiclass import type_of_target\nfrom ..utils._encode import _encode, _unique\n\n\n__all__ = [\n 'label_binarize',\n 'LabelBinarizer',\n 'LabelEncoder',\n 'MultiLabelBinarizer',\n]\n\n\nclass LabelEncoder(TransformerMixin, BaseEstimator):\n \"\"\"Encode target labels with value between 0 and n_classes-1.\n\n This transformer should be used to encode target values, *i.e.* `y`, and\n not the input `X`.\n\n Read more in the :ref:`User Guide <preprocessing_targets>`.\n\n .. 
versionadded:: 0.12\n\n Attributes\n ----------\n classes_ : ndarray of shape (n_classes,)\n Holds the label for each class.\n\n Examples\n --------\n `LabelEncoder` can be used to normalize labels.\n\n >>> from sklearn import preprocessing\n >>> le = preprocessing.LabelEncoder()\n >>> le.fit([1, 2, 2, 6])\n LabelEncoder()\n >>> le.classes_\n array([1, 2, 6])\n >>> le.transform([1, 1, 2, 6])\n array([0, 0, 1, 2]...)\n >>> le.inverse_transform([0, 0, 1, 2])\n array([1, 1, 2, 6])\n\n It can also be used to transform non-numerical labels (as long as they are\n hashable and comparable) to numerical labels.\n\n >>> le = preprocessing.LabelEncoder()\n >>> le.fit([\"paris\", \"paris\", \"tokyo\", \"amsterdam\"])\n LabelEncoder()\n >>> list(le.classes_)\n ['amsterdam', 'paris', 'tokyo']\n >>> le.transform([\"tokyo\", \"tokyo\", \"paris\"])\n array([2, 2, 1]...)\n >>> list(le.inverse_transform([2, 2, 1]))\n ['tokyo', 'tokyo', 'paris']\n\n See also\n --------\n sklearn.preprocessing.OrdinalEncoder : Encode categorical features\n using an ordinal encoding scheme.\n\n sklearn.preprocessing.OneHotEncoder : Encode categorical features\n as a one-hot numeric array.\n \"\"\"\n\n def fit(self, y):\n \"\"\"Fit label encoder.\n\n Parameters\n ----------\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n y = column_or_1d(y, warn=True)\n self.classes_ = _unique(y)\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit label encoder and return encoded labels.\n\n Parameters\n ----------\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n y : array-like of shape (n_samples,)\n \"\"\"\n y = column_or_1d(y, warn=True)\n self.classes_, y = _unique(y, return_inverse=True)\n return y\n\n def transform(self, y):\n \"\"\"Transform labels to normalized encoding.\n\n Parameters\n ----------\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n y : array-like of shape (n_samples,)\n \"\"\"\n check_is_fitted(self)\n y = column_or_1d(y, warn=True)\n # transform of empty array is empty array\n if _num_samples(y) == 0:\n return np.array([])\n\n return _encode(y, uniques=self.classes_)\n\n def inverse_transform(self, y):\n \"\"\"Transform labels back to original encoding.\n\n Parameters\n ----------\n y : ndarray of shape (n_samples,)\n Target values.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n \"\"\"\n check_is_fitted(self)\n y = column_or_1d(y, warn=True)\n # inverse transform of empty array is empty array\n if _num_samples(y) == 0:\n return np.array([])\n\n diff = np.setdiff1d(y, np.arange(len(self.classes_)))\n if len(diff):\n raise ValueError(\n \"y contains previously unseen labels: %s\" % str(diff))\n y = np.asarray(y)\n return self.classes_[y]\n\n def _more_tags(self):\n return {'X_types': ['1dlabels']}\n\n\nclass LabelBinarizer(TransformerMixin, BaseEstimator):\n \"\"\"Binarize labels in a one-vs-all fashion.\n\n Several regression and binary classification algorithms are\n available in scikit-learn. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n At learning time, this simply consists in learning one regressor\n or binary classifier per class. In doing so, one needs to convert\n multi-class labels to binary labels (belong or does not belong\n to the class). 
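# --- Illustrative sketch (editor addition, not part of the scikit-learn source shown here) ---
# The guard in LabelEncoder.inverse_transform above: encoded values that were
# never produced by fit are rejected instead of being wrapped or clipped.
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder().fit(["paris", "tokyo"])
print(le.inverse_transform([1, 0]))    # ['tokyo' 'paris']
try:
    le.inverse_transform([2])          # only two classes were seen in fit
except ValueError as exc:
    print(exc)                         # y contains previously unseen labels: [2]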
LabelBinarizer makes this process easy with the\n transform method.\n\n At prediction time, one assigns the class for which the corresponding\n model gave the greatest confidence. LabelBinarizer makes this easy\n with the inverse_transform method.\n\n Read more in the :ref:`User Guide <preprocessing_targets>`.\n\n Parameters\n ----------\n\n neg_label : int, default=0\n Value with which negative labels must be encoded.\n\n pos_label : int, default=1\n Value with which positive labels must be encoded.\n\n sparse_output : bool, default=False\n True if the returned array from transform is desired to be in sparse\n CSR format.\n\n Attributes\n ----------\n\n classes_ : ndarray of shape (n_classes,)\n Holds the label for each class.\n\n y_type_ : str\n Represents the type of the target data as evaluated by\n utils.multiclass.type_of_target. Possible type are 'continuous',\n 'continuous-multioutput', 'binary', 'multiclass',\n 'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.\n\n sparse_input_ : bool\n True if the input data to transform is given as a sparse matrix, False\n otherwise.\n\n Examples\n --------\n >>> from sklearn import preprocessing\n >>> lb = preprocessing.LabelBinarizer()\n >>> lb.fit([1, 2, 6, 4, 2])\n LabelBinarizer()\n >>> lb.classes_\n array([1, 2, 4, 6])\n >>> lb.transform([1, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n Binary targets transform to a column vector\n\n >>> lb = preprocessing.LabelBinarizer()\n >>> lb.fit_transform(['yes', 'no', 'no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n\n Passing a 2D matrix for multilabel classification\n\n >>> import numpy as np\n >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))\n LabelBinarizer()\n >>> lb.classes_\n array([0, 1, 2])\n >>> lb.transform([0, 1, 2, 1])\n array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [0, 1, 0]])\n\n See also\n --------\n label_binarize : function to perform the transform operation of\n LabelBinarizer with fixed classes.\n sklearn.preprocessing.OneHotEncoder : encode categorical features\n using a one-hot aka one-of-K scheme.\n \"\"\"\n\n @_deprecate_positional_args\n def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False):\n if neg_label >= pos_label:\n raise ValueError(\"neg_label={0} must be strictly less than \"\n \"pos_label={1}.\".format(neg_label, pos_label))\n\n if sparse_output and (pos_label == 0 or neg_label != 0):\n raise ValueError(\"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n \"pos_label={0} and neg_label={1}\"\n \"\".format(pos_label, neg_label))\n\n self.neg_label = neg_label\n self.pos_label = pos_label\n self.sparse_output = sparse_output\n\n def fit(self, y):\n \"\"\"Fit label binarizer.\n\n Parameters\n ----------\n y : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Target values. 
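# --- Illustrative sketch (editor addition, not part of the scikit-learn source shown here) ---
# The neg_label / pos_label knobs validated in __init__ above: the {-1, +1}
# encoding relied on by the ridge classifiers earlier in this document, and
# the check that neg_label must stay strictly below pos_label.
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer(neg_label=-1, pos_label=1)
print(lb.fit_transform([2, 5, 2, 9]))
# [[ 1 -1 -1]
#  [-1  1 -1]
#  [ 1 -1 -1]
#  [-1 -1  1]]
try:
    LabelBinarizer(neg_label=2, pos_label=1)
except ValueError as exc:
    print(exc)    # neg_label=2 must be strictly less than pos_label=1.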
The 2-d matrix should only contain 0 and 1,\n represents multilabel classification.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self.y_type_ = type_of_target(y)\n if 'multioutput' in self.y_type_:\n raise ValueError(\"Multioutput target data is not supported with \"\n \"label binarization\")\n if _num_samples(y) == 0:\n raise ValueError('y has 0 samples: %r' % y)\n\n self.sparse_input_ = sp.issparse(y)\n self.classes_ = unique_labels(y)\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit label binarizer and transform multi-class labels to binary\n labels.\n\n The output of transform is sometimes referred to as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : {ndarray, sparse matrix} of shape (n_samples,) or \\\n (n_samples, n_classes)\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. Sparse matrix\n will be of CSR format.\n \"\"\"\n return self.fit(y).transform(y)\n\n def transform(self, y):\n \"\"\"Transform multi-class labels to binary labels.\n\n The output of transform is sometimes referred to by some authors as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : {array, sparse matrix} of shape (n_samples,) or \\\n (n_samples, n_classes)\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. Sparse matrix\n will be of CSR format.\n \"\"\"\n check_is_fitted(self)\n\n y_is_multilabel = type_of_target(y).startswith('multilabel')\n if y_is_multilabel and not self.y_type_.startswith('multilabel'):\n raise ValueError(\"The object was not fitted with multilabel\"\n \" input.\")\n\n return label_binarize(y, classes=self.classes_,\n pos_label=self.pos_label,\n neg_label=self.neg_label,\n sparse_output=self.sparse_output)\n\n def inverse_transform(self, Y, threshold=None):\n \"\"\"Transform binary labels back to multi-class labels.\n\n Parameters\n ----------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Target values. All sparse matrices are converted to CSR before\n inverse transformation.\n\n threshold : float, default=None\n Threshold used in the binary and multi-label cases.\n\n Use 0 when ``Y`` contains the output of decision_function\n (classifier).\n Use 0.5 when ``Y`` contains the output of predict_proba.\n\n If None, the threshold is assumed to be half way between\n neg_label and pos_label.\n\n Returns\n -------\n y : {ndarray, sparse matrix} of shape (n_samples,)\n Target values. Sparse matrix will be of CSR format.\n\n Notes\n -----\n In the case when the binary labels are fractional\n (probabilistic), inverse_transform chooses the class with the\n greatest value. 
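# --- Illustrative sketch (editor addition, not part of the scikit-learn source shown here) ---
# The threshold argument described above, used to invert raw scores: 0 suits
# decision_function-style output, while the default is the midpoint between
# neg_label and pos_label (0.5 for the default 0/1 encoding).
import numpy as np
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer().fit(['no', 'yes'])            # binary target
decision = np.array([[-0.2], [1.3], [0.4]])         # decision_function scores
print(lb.inverse_transform(decision, threshold=0))  # ['no' 'yes' 'yes']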
Typically, this allows to use the output of a\n linear model's decision_function method directly as the input\n of inverse_transform.\n \"\"\"\n check_is_fitted(self)\n\n if threshold is None:\n threshold = (self.pos_label + self.neg_label) / 2.\n\n if self.y_type_ == \"multiclass\":\n y_inv = _inverse_binarize_multiclass(Y, self.classes_)\n else:\n y_inv = _inverse_binarize_thresholding(Y, self.y_type_,\n self.classes_, threshold)\n\n if self.sparse_input_:\n y_inv = sp.csr_matrix(y_inv)\n elif sp.issparse(y_inv):\n y_inv = y_inv.toarray()\n\n return y_inv\n\n def _more_tags(self):\n return {'X_types': ['1dlabels']}\n\n\n@_deprecate_positional_args\ndef label_binarize(y, *, classes, neg_label=0, pos_label=1,\n sparse_output=False):\n \"\"\"Binarize labels in a one-vs-all fashion.\n\n Several regression and binary classification algorithms are\n available in scikit-learn. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n This function makes it possible to compute this transformation for a\n fixed set of class labels known ahead of time.\n\n Parameters\n ----------\n y : array-like\n Sequence of integer labels or multilabel data to encode.\n\n classes : array-like of shape (n_classes,)\n Uniquely holds the label for each class.\n\n neg_label : int, default=0\n Value with which negative labels must be encoded.\n\n pos_label : int, default=1\n Value with which positive labels must be encoded.\n\n sparse_output : bool, default=False,\n Set to true if output binary array is desired in CSR sparse format.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. Sparse matrix will\n be of CSR format.\n\n Examples\n --------\n >>> from sklearn.preprocessing import label_binarize\n >>> label_binarize([1, 6], classes=[1, 2, 4, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n The class ordering is preserved:\n\n >>> label_binarize([1, 6], classes=[1, 6, 4, 2])\n array([[1, 0, 0, 0],\n [0, 1, 0, 0]])\n\n Binary targets transform to a column vector\n\n >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n\n See also\n --------\n LabelBinarizer : class used to wrap the functionality of label_binarize and\n allow for fitting to classes independently of the transform operation\n \"\"\"\n if not isinstance(y, list):\n # XXX Workaround that will be removed when list of list format is\n # dropped\n y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)\n else:\n if _num_samples(y) == 0:\n raise ValueError('y has 0 samples: %r' % y)\n if neg_label >= pos_label:\n raise ValueError(\"neg_label={0} must be strictly less than \"\n \"pos_label={1}.\".format(neg_label, pos_label))\n\n if (sparse_output and (pos_label == 0 or neg_label != 0)):\n raise ValueError(\"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n \"pos_label={0} and neg_label={1}\"\n \"\".format(pos_label, neg_label))\n\n # To account for pos_label == 0 in the dense case\n pos_switch = pos_label == 0\n if pos_switch:\n pos_label = -neg_label\n\n y_type = type_of_target(y)\n if 'multioutput' in y_type:\n raise ValueError(\"Multioutput target data is not supported with label \"\n \"binarization\")\n if y_type == 'unknown':\n raise ValueError(\"The type of target data is not known\")\n\n n_samples = y.shape[0] if sp.issparse(y) else len(y)\n n_classes = len(classes)\n classes = 
np.asarray(classes)\n\n if y_type == \"binary\":\n if n_classes == 1:\n if sparse_output:\n return sp.csr_matrix((n_samples, 1), dtype=int)\n else:\n Y = np.zeros((len(y), 1), dtype=int)\n Y += neg_label\n return Y\n elif len(classes) >= 3:\n y_type = \"multiclass\"\n\n sorted_class = np.sort(classes)\n if y_type == \"multilabel-indicator\":\n y_n_classes = y.shape[1] if hasattr(y, 'shape') else len(y[0])\n if classes.size != y_n_classes:\n raise ValueError(\"classes {0} mismatch with the labels {1}\"\n \" found in the data\"\n .format(classes, unique_labels(y)))\n\n if y_type in (\"binary\", \"multiclass\"):\n y = column_or_1d(y)\n\n # pick out the known labels from y\n y_in_classes = np.in1d(y, classes)\n y_seen = y[y_in_classes]\n indices = np.searchsorted(sorted_class, y_seen)\n indptr = np.hstack((0, np.cumsum(y_in_classes)))\n\n data = np.empty_like(indices)\n data.fill(pos_label)\n Y = sp.csr_matrix((data, indices, indptr),\n shape=(n_samples, n_classes))\n elif y_type == \"multilabel-indicator\":\n Y = sp.csr_matrix(y)\n if pos_label != 1:\n data = np.empty_like(Y.data)\n data.fill(pos_label)\n Y.data = data\n else:\n raise ValueError(\"%s target data is not supported with label \"\n \"binarization\" % y_type)\n\n if not sparse_output:\n Y = Y.toarray()\n Y = Y.astype(int, copy=False)\n\n if neg_label != 0:\n Y[Y == 0] = neg_label\n\n if pos_switch:\n Y[Y == pos_label] = 0\n else:\n Y.data = Y.data.astype(int, copy=False)\n\n # preserve label ordering\n if np.any(classes != sorted_class):\n indices = np.searchsorted(sorted_class, classes)\n Y = Y[:, indices]\n\n if y_type == \"binary\":\n if sparse_output:\n Y = Y.getcol(-1)\n else:\n Y = Y[:, -1].reshape((-1, 1))\n\n return Y\n\n\ndef _inverse_binarize_multiclass(y, classes):\n \"\"\"Inverse label binarization transformation for multiclass.\n\n Multiclass uses the maximal score instead of a threshold.\n \"\"\"\n classes = np.asarray(classes)\n\n if sp.issparse(y):\n # Find the argmax for each row in y where y is a CSR matrix\n\n y = y.tocsr()\n n_samples, n_outputs = y.shape\n outputs = np.arange(n_outputs)\n row_max = min_max_axis(y, 1)[1]\n row_nnz = np.diff(y.indptr)\n\n y_data_repeated_max = np.repeat(row_max, row_nnz)\n # picks out all indices obtaining the maximum per row\n y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)\n\n # For corner case where last row has a max of 0\n if row_max[-1] == 0:\n y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])\n\n # Gets the index of the first argmax in each row from y_i_all_argmax\n index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])\n # first argmax of each row\n y_ind_ext = np.append(y.indices, [0])\n y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]\n # Handle rows of all 0\n y_i_argmax[np.where(row_nnz == 0)[0]] = 0\n\n # Handles rows with max of 0 that contain negative numbers\n samples = np.arange(n_samples)[(row_nnz > 0) &\n (row_max.ravel() == 0)]\n for i in samples:\n ind = y.indices[y.indptr[i]:y.indptr[i + 1]]\n y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]\n\n return classes[y_i_argmax]\n else:\n return classes.take(y.argmax(axis=1), mode=\"clip\")\n\n\ndef _inverse_binarize_thresholding(y, output_type, classes, threshold):\n \"\"\"Inverse label binarization transformation using thresholding.\"\"\"\n\n if output_type == \"binary\" and y.ndim == 2 and y.shape[1] > 2:\n raise ValueError(\"output_type='binary', but y.shape = {0}\".\n format(y.shape))\n\n if output_type != \"binary\" and y.shape[1] != len(classes):\n raise 
ValueError(\"The number of class is not equal to the number of \"\n \"dimension of y.\")\n\n classes = np.asarray(classes)\n\n # Perform thresholding\n if sp.issparse(y):\n if threshold > 0:\n if y.format not in ('csr', 'csc'):\n y = y.tocsr()\n y.data = np.array(y.data > threshold, dtype=int)\n y.eliminate_zeros()\n else:\n y = np.array(y.toarray() > threshold, dtype=int)\n else:\n y = np.array(y > threshold, dtype=int)\n\n # Inverse transform data\n if output_type == \"binary\":\n if sp.issparse(y):\n y = y.toarray()\n if y.ndim == 2 and y.shape[1] == 2:\n return classes[y[:, 1]]\n else:\n if len(classes) == 1:\n return np.repeat(classes[0], len(y))\n else:\n return classes[y.ravel()]\n\n elif output_type == \"multilabel-indicator\":\n return y\n\n else:\n raise ValueError(\"{0} format is not supported\".format(output_type))\n\n\nclass MultiLabelBinarizer(TransformerMixin, BaseEstimator):\n \"\"\"Transform between iterable of iterables and a multilabel format.\n\n Although a list of sets or tuples is a very intuitive format for multilabel\n data, it is unwieldy to process. This transformer converts between this\n intuitive format and the supported multilabel format: a (samples x classes)\n binary matrix indicating the presence of a class label.\n\n Parameters\n ----------\n classes : array-like of shape (n_classes,), default=None\n Indicates an ordering for the class labels.\n All entries should be unique (cannot contain duplicate classes).\n\n sparse_output : bool, default=False\n Set to True if output binary array is desired in CSR sparse format.\n\n Attributes\n ----------\n classes_ : ndarray of shape (n_classes,)\n A copy of the `classes` parameter when provided.\n Otherwise it corresponds to the sorted set of classes found\n when fitting.\n\n Examples\n --------\n >>> from sklearn.preprocessing import MultiLabelBinarizer\n >>> mlb = MultiLabelBinarizer()\n >>> mlb.fit_transform([(1, 2), (3,)])\n array([[1, 1, 0],\n [0, 0, 1]])\n >>> mlb.classes_\n array([1, 2, 3])\n\n >>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}])\n array([[0, 1, 1],\n [1, 0, 0]])\n >>> list(mlb.classes_)\n ['comedy', 'sci-fi', 'thriller']\n\n A common mistake is to pass in a list, which leads to the following issue:\n\n >>> mlb = MultiLabelBinarizer()\n >>> mlb.fit(['sci-fi', 'thriller', 'comedy'])\n MultiLabelBinarizer()\n >>> mlb.classes_\n array(['-', 'c', 'd', 'e', 'f', 'h', 'i', 'l', 'm', 'o', 'r', 's', 't',\n 'y'], dtype=object)\n\n To correct this, the list of labels should be passed in as:\n\n >>> mlb = MultiLabelBinarizer()\n >>> mlb.fit([['sci-fi', 'thriller', 'comedy']])\n MultiLabelBinarizer()\n >>> mlb.classes_\n array(['comedy', 'sci-fi', 'thriller'], dtype=object)\n\n See also\n --------\n sklearn.preprocessing.OneHotEncoder : encode categorical features\n using a one-hot aka one-of-K scheme.\n \"\"\"\n\n @_deprecate_positional_args\n def __init__(self, *, classes=None, sparse_output=False):\n self.classes = classes\n self.sparse_output = sparse_output\n\n def fit(self, y):\n \"\"\"Fit the label sets binarizer, storing :term:`classes_`.\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. 
If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n self : returns this MultiLabelBinarizer instance\n \"\"\"\n self._cached_dict = None\n if self.classes is None:\n classes = sorted(set(itertools.chain.from_iterable(y)))\n elif len(set(self.classes)) < len(self.classes):\n raise ValueError(\"The classes argument contains duplicate \"\n \"classes. Remove these duplicates before passing \"\n \"them to MultiLabelBinarizer.\")\n else:\n classes = self.classes\n dtype = int if all(isinstance(c, int) for c in classes) else object\n self.classes_ = np.empty(len(classes), dtype=dtype)\n self.classes_[:] = classes\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit the label sets binarizer and transform the given label sets.\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n A matrix such that `y_indicator[i, j] = 1` i.f.f. `classes_[j]`\n is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR\n format.\n \"\"\"\n self._cached_dict = None\n\n if self.classes is not None:\n return self.fit(y).transform(y)\n\n # Automatically increment on new class\n class_mapping = defaultdict(int)\n class_mapping.default_factory = class_mapping.__len__\n yt = self._transform(y, class_mapping)\n\n # sort classes and reorder columns\n tmp = sorted(class_mapping, key=class_mapping.get)\n\n # (make safe for tuples)\n dtype = int if all(isinstance(c, int) for c in tmp) else object\n class_mapping = np.empty(len(tmp), dtype=dtype)\n class_mapping[:] = tmp\n self.classes_, inverse = np.unique(class_mapping, return_inverse=True)\n # ensure yt.indices keeps its current dtype\n yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,\n copy=False)\n\n if not self.sparse_output:\n yt = yt.toarray()\n\n return yt\n\n def transform(self, y):\n \"\"\"Transform the given label sets.\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n y_indicator : array or CSR matrix, shape (n_samples, n_classes)\n A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in\n `y[i]`, and 0 otherwise.\n \"\"\"\n check_is_fitted(self)\n\n class_to_index = self._build_cache()\n yt = self._transform(y, class_to_index)\n\n if not self.sparse_output:\n yt = yt.toarray()\n\n return yt\n\n def _build_cache(self):\n if self._cached_dict is None:\n self._cached_dict = dict(zip(self.classes_,\n range(len(self.classes_))))\n\n return self._cached_dict\n\n def _transform(self, y, class_mapping):\n \"\"\"Transforms the label sets with a given mapping\n\n Parameters\n ----------\n y : iterable of iterables\n class_mapping : Mapping\n Maps from label to column index in label indicator matrix.\n\n Returns\n -------\n y_indicator : sparse matrix of shape (n_samples, n_classes)\n Label indicator matrix. 
Will be of CSR format.\n \"\"\"\n indices = array.array('i')\n indptr = array.array('i', [0])\n unknown = set()\n for labels in y:\n index = set()\n for label in labels:\n try:\n index.add(class_mapping[label])\n except KeyError:\n unknown.add(label)\n indices.extend(index)\n indptr.append(len(indices))\n if unknown:\n warnings.warn('unknown class(es) {0} will be ignored'\n .format(sorted(unknown, key=str)))\n data = np.ones(len(indices), dtype=int)\n\n return sp.csr_matrix((data, indices, indptr),\n shape=(len(indptr) - 1, len(class_mapping)))\n\n def inverse_transform(self, yt):\n \"\"\"Transform the given indicator matrix into label sets.\n\n Parameters\n ----------\n yt : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n A matrix containing only 1s ands 0s.\n\n Returns\n -------\n y : list of tuples\n The set of labels for each sample such that `y[i]` consists of\n `classes_[j]` for each `yt[i, j] == 1`.\n \"\"\"\n check_is_fitted(self)\n\n if yt.shape[1] != len(self.classes_):\n raise ValueError('Expected indicator for {0} classes, but got {1}'\n .format(len(self.classes_), yt.shape[1]))\n\n if sp.issparse(yt):\n yt = yt.tocsr()\n if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:\n raise ValueError('Expected only 0s and 1s in label indicator.')\n return [tuple(self.classes_.take(yt.indices[start:end]))\n for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]\n else:\n unexpected = np.setdiff1d(yt, [0, 1])\n if len(unexpected) > 0:\n raise ValueError('Expected only 0s and 1s in label indicator. '\n 'Also got {0}'.format(unexpected))\n return [tuple(self.classes_.compress(indicators)) for indicators\n in yt]\n\n def _more_tags(self):\n return {'X_types': ['2dlabels']}\n", "# Author: Alexandre Gramfort <[email protected]>\n# Fabian Pedregosa <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom math import log\n\nimport numpy as np\nfrom scipy.linalg import pinvh\nimport pytest\n\n\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_array_less\nfrom sklearn.utils._testing import assert_raise_message\nfrom sklearn.utils import check_random_state\nfrom sklearn.linear_model import BayesianRidge, ARDRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn import datasets\nfrom sklearn.utils.extmath import fast_logdet\n\ndiabetes = datasets.load_diabetes()\n\n\ndef test_n_iter():\n \"\"\"Check value of n_iter.\"\"\"\n X = np.array([[1], [2], [6], [8], [10]])\n y = np.array([1, 2, 6, 8, 10])\n clf = BayesianRidge(n_iter=0)\n msg = \"n_iter should be greater than or equal to 1.\"\n assert_raise_message(ValueError, msg, clf.fit, X, y)\n\n\ndef test_bayesian_ridge_scores():\n \"\"\"Check scores attribute shape\"\"\"\n X, y = diabetes.data, diabetes.target\n\n clf = BayesianRidge(compute_score=True)\n clf.fit(X, y)\n\n assert clf.scores_.shape == (clf.n_iter_ + 1,)\n\n\ndef test_bayesian_ridge_score_values():\n \"\"\"Check value of score on toy example.\n\n Compute log marginal likelihood with equation (36) in Sparse Bayesian\n Learning and the Relevance Vector Machine (Tipping, 2001):\n\n - 0.5 * (log |Id/alpha + X.X^T/lambda| +\n y^T.(Id/alpha + X.X^T/lambda).y + n * log(2 * pi))\n + lambda_1 * log(lambda) - lambda_2 * lambda\n + alpha_1 * log(alpha) - alpha_2 * alpha\n\n and check equality with the score computed during training.\n \"\"\"\n\n X, y = diabetes.data, diabetes.target\n n_samples = X.shape[0]\n # check with initial values of alpha and lambda 
(see code for the values)\n eps = np.finfo(np.float64).eps\n alpha_ = 1. / (np.var(y) + eps)\n lambda_ = 1.\n\n # value of the parameters of the Gamma hyperpriors\n alpha_1 = 0.1\n alpha_2 = 0.1\n lambda_1 = 0.1\n lambda_2 = 0.1\n\n # compute score using formula of docstring\n score = lambda_1 * log(lambda_) - lambda_2 * lambda_\n score += alpha_1 * log(alpha_) - alpha_2 * alpha_\n M = 1. / alpha_ * np.eye(n_samples) + 1. / lambda_ * np.dot(X, X.T)\n M_inv = pinvh(M)\n score += - 0.5 * (fast_logdet(M) + np.dot(y.T, np.dot(M_inv, y)) +\n n_samples * log(2 * np.pi))\n\n # compute score with BayesianRidge\n clf = BayesianRidge(alpha_1=alpha_1, alpha_2=alpha_2,\n lambda_1=lambda_1, lambda_2=lambda_2,\n n_iter=1, fit_intercept=False, compute_score=True)\n clf.fit(X, y)\n\n assert_almost_equal(clf.scores_[0], score, decimal=9)\n\n\ndef test_bayesian_ridge_parameter():\n # Test correctness of lambda_ and alpha_ parameters (GitHub issue #8224)\n X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])\n y = np.array([1, 2, 3, 2, 0, 4, 5]).T\n\n # A Ridge regression model using an alpha value equal to the ratio of\n # lambda_ and alpha_ from the Bayesian Ridge model must be identical\n br_model = BayesianRidge(compute_score=True).fit(X, y)\n rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(X, y)\n assert_array_almost_equal(rr_model.coef_, br_model.coef_)\n assert_almost_equal(rr_model.intercept_, br_model.intercept_)\n\n\ndef test_bayesian_sample_weights():\n # Test correctness of the sample_weights method\n X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])\n y = np.array([1, 2, 3, 2, 0, 4, 5]).T\n w = np.array([4, 3, 3, 1, 1, 2, 3]).T\n\n # A Ridge regression model using an alpha value equal to the ratio of\n # lambda_ and alpha_ from the Bayesian Ridge model must be identical\n br_model = BayesianRidge(compute_score=True).fit(X, y, sample_weight=w)\n rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(\n X, y, sample_weight=w)\n assert_array_almost_equal(rr_model.coef_, br_model.coef_)\n assert_almost_equal(rr_model.intercept_, br_model.intercept_)\n\n\ndef test_toy_bayesian_ridge_object():\n # Test BayesianRidge on toy\n X = np.array([[1], [2], [6], [8], [10]])\n Y = np.array([1, 2, 6, 8, 10])\n clf = BayesianRidge(compute_score=True)\n clf.fit(X, Y)\n\n # Check that the model could approximately learn the identity function\n test = [[1], [3], [4]]\n assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)\n\n\ndef test_bayesian_initial_params():\n # Test BayesianRidge with initial values (alpha_init, lambda_init)\n X = np.vander(np.linspace(0, 4, 5), 4)\n y = np.array([0., 1., 0., -1., 0.]) # y = (x^3 - 6x^2 + 8x) / 3\n\n # In this case, starting from the default initial values will increase\n # the bias of the fitted curve. 
So, lambda_init should be small.\n reg = BayesianRidge(alpha_init=1., lambda_init=1e-3)\n # Check the R2 score nearly equals to one.\n r2 = reg.fit(X, y).score(X, y)\n assert_almost_equal(r2, 1.)\n\n\ndef test_prediction_bayesian_ridge_ard_with_constant_input():\n # Test BayesianRidge and ARDRegression predictions for edge case of\n # constant target vectors\n n_samples = 4\n n_features = 5\n random_state = check_random_state(42)\n constant_value = random_state.rand()\n X = random_state.random_sample((n_samples, n_features))\n y = np.full(n_samples, constant_value,\n dtype=np.array(constant_value).dtype)\n expected = np.full(n_samples, constant_value,\n dtype=np.array(constant_value).dtype)\n\n for clf in [BayesianRidge(), ARDRegression()]:\n y_pred = clf.fit(X, y).predict(X)\n assert_array_almost_equal(y_pred, expected)\n\n\ndef test_std_bayesian_ridge_ard_with_constant_input():\n # Test BayesianRidge and ARDRegression standard dev. for edge case of\n # constant target vector\n # The standard dev. should be relatively small (< 0.01 is tested here)\n n_samples = 10\n n_features = 5\n random_state = check_random_state(42)\n constant_value = random_state.rand()\n X = random_state.random_sample((n_samples, n_features))\n y = np.full(n_samples, constant_value,\n dtype=np.array(constant_value).dtype)\n expected_upper_boundary = 0.01\n\n for clf in [BayesianRidge(), ARDRegression()]:\n _, y_std = clf.fit(X, y).predict(X, return_std=True)\n assert_array_less(y_std, expected_upper_boundary)\n\n\ndef test_update_of_sigma_in_ard():\n # Checks that `sigma_` is updated correctly after the last iteration\n # of the ARDRegression algorithm. See issue #10128.\n X = np.array([[1, 0],\n [0, 0]])\n y = np.array([0, 0])\n clf = ARDRegression(n_iter=1)\n clf.fit(X, y)\n # With the inputs above, ARDRegression prunes both of the two coefficients\n # in the first iteration. 
Hence, the expected shape of `sigma_` is (0, 0).\n assert clf.sigma_.shape == (0, 0)\n # Ensure that no error is thrown at prediction stage\n clf.predict(X, return_std=True)\n\n\ndef test_toy_ard_object():\n # Test BayesianRegression ARD classifier\n X = np.array([[1], [2], [3]])\n Y = np.array([1, 2, 3])\n clf = ARDRegression(compute_score=True)\n clf.fit(X, Y)\n\n # Check that the model could approximately learn the identity function\n test = [[1], [3], [4]]\n assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)\n\n\[email protected]('seed', range(100))\[email protected]('n_samples, n_features', ((10, 100), (100, 10)))\ndef test_ard_accuracy_on_easy_problem(seed, n_samples, n_features):\n # Check that ARD converges with reasonable accuracy on an easy problem\n # (Github issue #14055)\n X = np.random.RandomState(seed=seed).normal(size=(250, 3))\n y = X[:, 1]\n\n regressor = ARDRegression()\n regressor.fit(X, y)\n\n abs_coef_error = np.abs(1 - regressor.coef_[1])\n assert abs_coef_error < 1e-10\n\n\ndef test_return_std():\n # Test return_std option for both Bayesian regressors\n def f(X):\n return np.dot(X, w) + b\n\n def f_noise(X, noise_mult):\n return f(X) + np.random.randn(X.shape[0]) * noise_mult\n\n d = 5\n n_train = 50\n n_test = 10\n\n w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])\n b = 1.0\n\n X = np.random.random((n_train, d))\n X_test = np.random.random((n_test, d))\n\n for decimal, noise_mult in enumerate([1, 0.1, 0.01]):\n y = f_noise(X, noise_mult)\n\n m1 = BayesianRidge()\n m1.fit(X, y)\n y_mean1, y_std1 = m1.predict(X_test, return_std=True)\n assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)\n\n m2 = ARDRegression()\n m2.fit(X, y)\n y_mean2, y_std2 = m2.predict(X_test, return_std=True)\n assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)\n\n\[email protected]('seed', range(10))\ndef test_update_sigma(seed):\n # make sure the two update_sigma() helpers are equivalent. The woodbury\n # formula is used when n_samples < n_features, and the other one is used\n # otherwise.\n\n rng = np.random.RandomState(seed)\n\n # set n_samples == n_features to avoid instability issues when inverting\n # the matrices. Using the woodbury formula would be unstable when\n # n_samples > n_features\n n_samples = n_features = 10\n X = rng.randn(n_samples, n_features)\n alpha = 1\n lmbda = np.arange(1, n_features + 1)\n keep_lambda = np.array([True] * n_features)\n\n reg = ARDRegression()\n\n sigma = reg._update_sigma(X, alpha, lmbda, keep_lambda)\n sigma_woodbury = reg._update_sigma_woodbury(X, alpha, lmbda, keep_lambda)\n\n np.testing.assert_allclose(sigma, sigma_woodbury)\n" ]
[ [ "matplotlib.pyplot.imshow", "scipy.ndimage.binary_erosion", "numpy.linspace", "numpy.random.randn", "numpy.hstack", "scipy.sparse.coo_matrix", "numpy.arange", "sklearn.linear_model.Lasso", "numpy.sin", "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.axis", "numpy.ravel", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "sklearn.linear_model.Ridge", "numpy.floor", "numpy.logical_and", "matplotlib.pyplot.show", "numpy.random.RandomState", "scipy.ndimage.gaussian_filter", "numpy.cos" ], [ "numpy.nonzero", "numpy.set_printoptions", "numpy.get_printoptions" ], [ "numpy.dot", "scipy.linalg.svd", "numpy.sqrt", "numpy.asarray", "numpy.any", "scipy.sparse.dia_matrix", "scipy.sparse.linalg.lsqr", "numpy.hstack", "scipy.sparse.issparse", "numpy.arange", "scipy.sparse.linalg.cg", "scipy.linalg.lstsq", "numpy.full", "numpy.atleast_1d", "scipy.linalg.eigh", "numpy.argmax", "scipy.sparse.linalg.aslinearoperator", "numpy.outer", "numpy.repeat", "scipy.linalg.solve", "numpy.zeros", "numpy.ndim", "scipy.sparse.linalg.LinearOperator", "numpy.sum", "numpy.linalg.norm", "numpy.ones", "numpy.empty" ], [ "scipy.sparse.issparse", "numpy.unique", "numpy.asarray", "numpy.empty_like", "numpy.in1d", "numpy.arange", "numpy.cumsum", "numpy.sort", "scipy.sparse.csr_matrix", "numpy.flatnonzero", "numpy.setdiff1d", "numpy.append", "numpy.diff", "numpy.any", "numpy.searchsorted", "numpy.repeat", "numpy.array", "numpy.where" ], [ "numpy.dot", "numpy.linspace", "sklearn.datasets.load_diabetes", "sklearn.utils._testing.assert_array_less", "sklearn.utils._testing.assert_almost_equal", "numpy.random.randn", "numpy.var", "sklearn.linear_model.ARDRegression", "numpy.arange", "numpy.eye", "numpy.finfo", "scipy.linalg.pinvh", "sklearn.utils._testing.assert_raise_message", "sklearn.linear_model.Ridge", "sklearn.linear_model.BayesianRidge", "numpy.testing.assert_allclose", "numpy.array", "numpy.random.RandomState", "sklearn.utils.extmath.fast_logdet", "numpy.random.random", "numpy.abs", "sklearn.utils._testing.assert_array_almost_equal", "sklearn.utils.check_random_state" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
physycom/slides
[ "ff73de94997e39673d6d5c82b1bb4d9d0069fee6" ]
[ "tools/dubrovnik/router_map.py" ]
[ "#! /usr/bin/env python3\n\nimport os\nimport json\nimport folium\nimport argparse\nimport pandas as pd\nimport mysql.connector\nfrom matplotlib import cm\nfrom matplotlib.colors import to_hex\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--cfg', type=str, required=True)\n\n args = parser.parse_args()\n\n with open(args.cfg, encoding='utf-8') as f:\n config = json.load(f)\n\n conf = config['model_data']['params']['dubrovnik']['mysql']\n db = mysql.connector.connect(\n host = conf['host'],\n port = conf['port'],\n user = conf['user'],\n passwd = conf['pwd'],\n database = conf['db']\n )\n cursor = db.cursor()\n try:\n query = f\"\"\"\n SELECT\n ds.id AS id,\n ds.name AS name,\n ds.serial AS serial,\n ds.lat AS lat,\n ds.lng AS lon,\n ds.networkId,\n ds.status as status\n FROM\t\n Devices ds \n \"\"\" \n # print(query)\n cursor.execute(query)\n result = cursor.fetchall()\n print(f'Received {len(result)} mysql data in {query}')\n stationsmeta = pd.DataFrame(result)\n stationsmeta.columns = cursor.column_names\n except Exception as e:\n print('Connection error : {}'.format(e))\n\n if 0 == 1: # To be activate if and when the longitude will be fixed from Meraki\n data = list(stationsmeta.T.to_dict().values())\n with open('dubrovnik_router.json', 'w') as out:\n json.dump(data, out, indent=2, ensure_ascii=False)\n else:\n station_json = os.path.join(os.environ['WORKSPACE'], 'slides', 'vars', 'extra', 'dubrovnik_router.json')\n stationsmeta = pd.DataFrame.from_dict(json.load(open(station_json)))\n\n map_file = 'dubrovnik_router.html'\n cmap = cm.get_cmap('viridis', len(stationsmeta))\n stationsmeta['color'] = [ to_hex(c) for c in cmap.colors ]\n stationsmeta.index = stationsmeta.id\n stationsmeta = stationsmeta.drop(columns='id')\n stationsmeta = stationsmeta[stationsmeta.lon > 0]\n print(len(stationsmeta))\n map_center = stationsmeta[['lat', 'lon']].mean()\n\n m = folium.Map(location=map_center, control_scale=True, tiles = 'Stamen Terrain')\n layerlabel = '<span style=\"color: {col};\">{txt}</span>'\n for sid, data in stationsmeta.iterrows():\n layer_sel = folium.FeatureGroup(name=layerlabel.format(col=f'{data.color}', txt=f'Router {sid}'))\n pt = folium.CircleMarker(\n location=[data.lat, data.lon],\n radius=5,\n color=f'{data.color}',\n fill=True,\n fill_color=f'{data.color}',\n fill_opacity=1,\n popup=folium.Popup(f'<p>Router <b>{sid}</b></br> Name <b>{data[0]}</b></br> Serial <b>{data.serial}</b></br></p>', show=False, sticky=True, max_width=300),\n )\n layer_sel.add_child(pt)\n m.add_child(layer_sel)\n \n folium.map.LayerControl(collapsed=False).add_to(m)\n s, w = stationsmeta[['lat', 'lon']].min()\n n, e = stationsmeta[['lat', 'lon']].max()\n m.fit_bounds([ [s,w], [n,e] ])\n m.save(f'dubrovnik_router_map.html')" ]
[ [ "matplotlib.colors.to_hex", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
applejenny66/snoopy
[ "916700661976aef121c16c3cf1418f395eff54a6" ]
[ "test.py" ]
[ "import numpy\nimport argparse\nimport cv2\n\nimage = cv2.imread('pikachu.jpg')\ncv2.imshow(\"Original\", image)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"Gray\", gray)\n\neq = cv2.equalizeHist(gray)\n##cv2.imshow(\"Gray EQ\", eq)\n\n#display two images in a figure\ncv2.imshow(\"Histogram Equalization\", numpy.hstack([gray, eq]))\n\ncv2.imwrite(\"pikachu_eq.jpg\", numpy.hstack([gray, eq]))\n\n\nif(cv2.waitKey(0)==27):\n cv2.destroyAllWindows()\n" ]
[ [ "numpy.hstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tehcoderer/GamestonkTerminal
[ "54a1b6f545a0016c576e9e00eef5c003d229dacf", "54a1b6f545a0016c576e9e00eef5c003d229dacf", "54a1b6f545a0016c576e9e00eef5c003d229dacf", "54a1b6f545a0016c576e9e00eef5c003d229dacf", "54a1b6f545a0016c576e9e00eef5c003d229dacf", "54a1b6f545a0016c576e9e00eef5c003d229dacf", "54a1b6f545a0016c576e9e00eef5c003d229dacf" ]
[ "openbb_terminal/cryptocurrency/discovery/coinmarketcap_model.py", "bots/stocks/government/lastcontracts.py", "openbb_terminal/common/technical_analysis/custom_indicators_model.py", "tests/openbb_terminal/economy/test_finnhub_model.py", "tests/openbb_terminal/etf/discovery/test_disc_controller.py", "openbb_terminal/stocks/dark_pool_shorts/quandl_model.py", "openbb_terminal/econometrics/regression_view.py" ]
[ "\"\"\"CoinMarketCap model\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\n\nimport pandas as pd\nfrom coinmarketcapapi import CoinMarketCapAPI, CoinMarketCapAPIError\n\nimport openbb_terminal.config_terminal as cfg\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\nFILTERS = [\"Symbol\", \"CMC_Rank\", \"LastPrice\", \"DayPctChange\", \"MarketCap\"]\n\n\n@log_start_end(log=logger)\ndef get_cmc_top_n() -> pd.DataFrame:\n \"\"\"Shows top n coins. [Source: CoinMarketCap]\n\n Returns\n -------\n pd.DataFrame\n Top coin on CoinMarketCap\n\n \"\"\"\n df = pd.DataFrame()\n\n try:\n cmc = CoinMarketCapAPI(cfg.API_CMC_KEY)\n ratings = cmc.cryptocurrency_listings_latest().data\n\n symbol, rank, price, pchange1d, mkt_cap = [], [], [], [], []\n\n for coin in ratings:\n symbol.append(coin[\"symbol\"])\n rank.append(coin[\"cmc_rank\"])\n price.append(coin[\"quote\"][\"USD\"][\"price\"])\n pchange1d.append(coin[\"quote\"][\"USD\"][\"percent_change_24h\"])\n mkt_cap.append(coin[\"quote\"][\"USD\"][\"market_cap\"] / (10**9))\n\n df = pd.DataFrame(data=[symbol, rank, price, pchange1d, mkt_cap]).transpose()\n df.columns = [\n \"Symbol\",\n \"CMC_Rank\",\n \"Last Price\",\n \"1 Day Pct Change\",\n \"Market Cap ($B)\",\n ]\n except CoinMarketCapAPIError as e:\n if \"API Key\" in str(e):\n console.print(\"[red]Invalid API Key[/red]\\n\")\n else:\n console.print(e)\n\n return df\n", "import logging\n\nimport disnake\nimport pandas as pd\n\nfrom bots import imps\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.stocks.government import quiverquant_model\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef lastcontracts_command(past_transactions_days: int = 2, num: int = 20):\n \"\"\"Displays last government contracts [quiverquant.com]\"\"\"\n\n # Debug user input\n if imps.DEBUG:\n logger.debug(\"gov lastcontracts %s %s\", past_transactions_days, num)\n\n df_contracts = quiverquant_model.get_government_trading(\"contracts\")\n\n if df_contracts.empty:\n logger.debug(\"No government contracts found\")\n raise Exception(\"No government contracts found\")\n\n df_contracts.sort_values(\"Date\", ascending=False)\n\n df_contracts[\"Date\"] = pd.to_datetime(df_contracts[\"Date\"])\n df_contracts[\"Date\"] = df_contracts[\"Date\"].dt.date\n\n df_contracts.drop_duplicates(inplace=True)\n df_contracts = df_contracts[\n df_contracts[\"Date\"].isin(\n df_contracts[\"Date\"].unique()[:past_transactions_days]\n )\n ]\n\n df_contracts = df_contracts[[\"Date\", \"Ticker\", \"Amount\", \"Agency\"]][:num]\n choices = [\n disnake.SelectOption(label=\"Overview\", value=\"0\", emoji=\"🟢\"),\n ]\n title = \"Stocks: [quiverquant.com] Top buy government trading\"\n initial_str = \"Overview\"\n i = 1\n for col_name in df_contracts[\"Ticker\"].values:\n menu = f\"\\nPage {i}: {col_name}\"\n initial_str += f\"\\nPage {i}: {col_name}\"\n choices.append(\n disnake.SelectOption(label=menu, value=f\"{i}\", emoji=\"🟢\"),\n )\n i += 1\n\n embeds = []\n df_contracts = df_contracts.T\n reports = [f\"{initial_str}\"]\n embeds.append(\n disnake.Embed(\n title=title,\n description=initial_str,\n colour=imps.COLOR,\n ).set_author(\n name=imps.AUTHOR_NAME,\n icon_url=imps.AUTHOR_ICON_URL,\n )\n )\n for column in df_contracts.columns.values:\n description = \"```\" + df_contracts[column].fillna(\"\").to_string() + \"```\"\n embeds.append(\n disnake.Embed(description=description, 
colour=imps.COLOR,).set_author(\n name=imps.AUTHOR_NAME,\n icon_url=imps.AUTHOR_ICON_URL,\n )\n )\n reports.append(f\"{description}\")\n\n return {\n \"view\": imps.Menu,\n \"title\": title,\n \"description\": reports,\n \"embed\": embeds,\n \"choices\": choices,\n }\n", "\"\"\"Custom Indicator Models\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nfrom typing import Any, Tuple\n\nimport pandas as pd\n\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef calculate_fib_levels(\n df_stock: pd.DataFrame,\n period: int = 120,\n open_date: Any = None,\n close_date: Any = None,\n) -> Tuple[pd.DataFrame, pd.Timestamp, pd.Timestamp, float, float]:\n \"\"\"Calculate Fibonacci levels\n\n Parameters\n ----------\n df_stock : pd.DataFrame\n Dataframe of prices\n period : int\n Days to look back for retracement\n open_date : Any\n Custom start date for retracement\n close_date : Any\n Custom end date for retracement\n\n Returns\n -------\n df : pd.DataFrame\n Dataframe of fib levels\n min_date: pd.Timestamp\n Date of min point\n max_date: pd.Timestamp:\n Date of max point\n min_pr: float\n Price at min point\n max_pr: float\n Price at max point\n \"\"\"\n if open_date and close_date:\n if open_date not in df_stock.index:\n date0 = df_stock.index[df_stock.index.get_loc(open_date, method=\"nearest\")]\n console.print(f\"Start date not in df_stock. Using nearest: {date0}\")\n else:\n date0 = open_date\n if close_date not in df_stock.index:\n date1 = df_stock.index[df_stock.index.get_loc(close_date, method=\"nearest\")]\n console.print(f\"End date not in df_stock. Using nearest: {date1}\")\n else:\n date1 = close_date\n\n df_stock0 = df_stock.loc[date0, \"Adj Close\"]\n df_stock1 = df_stock.loc[date1, \"Adj Close\"]\n\n min_pr = min(df_stock0, df_stock1)\n max_pr = max(df_stock0, df_stock1)\n\n if min_pr == df_stock0:\n min_date = date0\n max_date = date1\n else:\n min_date = date1\n max_date = date0\n else:\n data_to_use = df_stock.iloc[period:][\"Adj Close\"]\n\n min_pr = data_to_use.min()\n min_date = data_to_use.idxmin()\n max_pr = data_to_use.max()\n max_date = data_to_use.idxmax()\n\n fib_levels = [0, 0.235, 0.382, 0.5, 0.618, 0.65, 1]\n price_dif = max_pr - min_pr\n\n levels = [round(max_pr - price_dif * f_lev, 2) for f_lev in fib_levels]\n\n df = pd.DataFrame()\n df[\"Level\"] = fib_levels\n df[\"Level\"] = df[\"Level\"].apply(lambda x: str(x * 100) + \"%\")\n df[\"Price\"] = levels\n\n return df, min_date, max_date, min_pr, max_pr\n", "# IMPORTATION STANDARD\n\n# IMPORTATION THIRDPARTY\nimport pandas as pd\nimport pytest\n\n# IMPORTATION INTERNAL\nfrom openbb_terminal.economy import finnhub_model\n\n\[email protected](scope=\"module\")\ndef vcr_config():\n return {\n \"filter_query_parameters\": [(\"token\", \"MOCK_TOKEN\")],\n }\n\n\[email protected](record_mode=\"none\")\ndef test_get_economy_calendar_events(mocker):\n # MOCK JSON\n mock_json = pd.DataFrame()\n mock_json[\"economicCalendar\"] = [\"MOCK_ROW_1\", \"MOCK_ROW_2\"]\n\n # MOCK GET\n attrs = {\n \"status_code\": 200,\n \"json.return_value\": mock_json,\n }\n mock_response = mocker.Mock(**attrs)\n mocker.patch(target=\"requests.get\", new=mocker.Mock(return_value=mock_response))\n\n result_df = finnhub_model.get_economy_calendar_events()\n\n assert not result_df.empty\n\n\[email protected](record_mode=\"none\")\ndef test_get_economy_calendar_events_no_response(mocker):\n # MOCK GET\n attrs = {\n 
\"json.return_value\": {\"error\": \"mock error message\"},\n }\n mock_response = mocker.Mock(**attrs)\n\n mocker.patch(target=\"requests.get\", new=mocker.Mock(return_value=mock_response))\n\n result_df = finnhub_model.get_economy_calendar_events()\n\n assert isinstance(result_df, pd.DataFrame)\n assert result_df.empty\n", "# IMPORTATION STANDARD\nimport os\n\n# IMPORTATION THIRDPARTY\nimport pandas as pd\nimport pytest\n\n# IMPORTATION INTERNAL\nfrom openbb_terminal.etf.discovery import disc_controller\n\n# pylint: disable=E1101\n# pylint: disable=W0603\n# pylint: disable=E1111\n\nEMPTY_DF = pd.DataFrame()\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"queue, expected\",\n [\n ([\"load\", \"help\"], []),\n ([\"quit\", \"help\"], [\"help\"]),\n ],\n)\ndef test_menu_with_queue(expected, mocker, queue):\n path_controller = \"openbb_terminal.etf.discovery.disc_controller\"\n\n # MOCK SWITCH\n mocker.patch(\n target=f\"{path_controller}.DiscoveryController.switch\",\n return_value=[\"quit\"],\n )\n result_menu = disc_controller.DiscoveryController(queue=queue).menu()\n\n assert result_menu == expected\n\n\[email protected](record_mode=\"none\")\ndef test_menu_without_queue_completion(mocker):\n path_controller = \"openbb_terminal.etf.discovery.disc_controller\"\n\n # ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU\n mocker.patch(\n target=\"openbb_terminal.feature_flags.USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=\"openbb_terminal.parent_classes.session\",\n )\n mocker.patch(\n target=\"openbb_terminal.parent_classes.session.prompt\",\n return_value=\"quit\",\n )\n mocker.patch(\n target=\"openbb_terminal.etf.financedatabase_model.get_etfs_categories\",\n return_value=[\"Bank Loan\"],\n )\n\n # DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER\n mocker.patch.object(\n target=disc_controller.obbff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=f\"{path_controller}.session\",\n )\n mocker.patch(\n target=f\"{path_controller}.session.prompt\",\n return_value=\"quit\",\n )\n\n result_menu = disc_controller.DiscoveryController(queue=None).menu()\n\n assert result_menu == []\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"mock_input\",\n [\"help\", \"homee help\", \"home help\", \"mock\"],\n)\ndef test_menu_without_queue_sys_exit(mock_input, mocker):\n path_controller = \"openbb_terminal.etf.discovery.disc_controller\"\n\n # DISABLE AUTO-COMPLETION\n mocker.patch.object(\n target=disc_controller.obbff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=False,\n )\n mocker.patch(\n target=f\"{path_controller}.session\",\n return_value=None,\n )\n\n # MOCK USER INPUT\n mocker.patch(\"builtins.input\", return_value=mock_input)\n\n # MOCK SWITCH\n class SystemExitSideEffect:\n def __init__(self):\n self.first_call = True\n\n def __call__(self, *args, **kwargs):\n if self.first_call:\n self.first_call = False\n raise SystemExit()\n return [\"quit\"]\n\n mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())\n mocker.patch(\n target=f\"{path_controller}.DiscoveryController.switch\",\n new=mock_switch,\n )\n\n result_menu = disc_controller.DiscoveryController(queue=None).menu()\n\n assert result_menu == []\n\n\[email protected](record_mode=\"none\")\[email protected]_stdout\ndef test_print_help():\n controller = disc_controller.DiscoveryController(queue=None)\n controller.print_help()\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"home\", 
\"help\"]),\n (\"help/help\", [\"help\", \"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"disc\",\n ],\n ),\n ],\n)\ndef test_switch(an_input, expected_queue):\n controller = disc_controller.DiscoveryController(queue=None)\n queue = controller.switch(an_input=an_input)\n\n assert queue == expected_queue\n\n\[email protected](record_mode=\"none\")\ndef test_call_cls(mocker):\n mocker.patch(\"os.system\")\n\n controller = disc_controller.DiscoveryController(queue=None)\n controller.call_cls([])\n\n assert controller.queue == []\n os.system.assert_called_once_with(\"cls||clear\")\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"func, queue, expected_queue\",\n [\n (\n \"call_exit\",\n [],\n [\"quit\", \"quit\", \"quit\"],\n ),\n (\"call_exit\", [\"help\"], [\"quit\", \"quit\", \"quit\", \"help\"]),\n (\"call_home\", [], [\"quit\", \"quit\"]),\n (\"call_help\", [], []),\n (\"call_quit\", [], [\"quit\"]),\n (\"call_quit\", [\"help\"], [\"quit\", \"help\"]),\n (\n \"call_reset\",\n [],\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"disc\",\n ],\n ),\n (\n \"call_reset\",\n [\"help\"],\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"disc\",\n \"help\",\n ],\n ),\n ],\n)\ndef test_call_func_expect_queue(expected_queue, func, queue):\n controller = disc_controller.DiscoveryController(queue=queue)\n result = getattr(controller, func)([])\n\n assert result is None\n assert controller.queue == expected_queue\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"tested_func, other_args, mocked_func, called_args, called_kwargs\",\n [\n (\n \"call_gainers\",\n [\"-l=10\"],\n \"wsj_view.show_top_mover\",\n [\"gainers\", 10, \"\"],\n dict(),\n ),\n (\n \"call_decliners\",\n [\"-l=10\"],\n \"wsj_view.show_top_mover\",\n [\"decliners\", 10, \"\"],\n dict(),\n ),\n (\n \"call_active\",\n [\"-l=10\"],\n \"wsj_view.show_top_mover\",\n [\"active\", 10, \"\"],\n dict(),\n ),\n ],\n)\ndef test_call_func_test(\n tested_func, mocked_func, other_args, called_args, called_kwargs, mocker\n):\n path_controller = \"openbb_terminal.etf.discovery.disc_controller\"\n\n if mocked_func:\n mock = mocker.Mock()\n mocker.patch(\n target=f\"{path_controller}.{mocked_func}\",\n new=mock,\n )\n\n controller = disc_controller.DiscoveryController(queue=None)\n\n getattr(controller, tested_func)(other_args)\n\n if called_args or called_kwargs:\n mock.assert_called_once_with(*called_args, **called_kwargs)\n else:\n mock.assert_called_once()\n else:\n controller = disc_controller.DiscoveryController(queue=None)\n getattr(controller, tested_func)(other_args)\n", "\"\"\" Quandl Model \"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nfrom multiprocessing import AuthenticationError\n\nimport pandas as pd\nimport quandl\n\nfrom openbb_terminal import config_terminal as cfg\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef get_short_interest(ticker: str, nyse: bool) -> pd.DataFrame:\n \"\"\"Plots the short interest of a stock. This corresponds to the\n number of shares that have been sold short but have not yet been\n covered or closed out. 
Either NASDAQ or NYSE [Source: Quandl]\n\n Parameters\n ----------\n ticker : str\n ticker to get short interest from\n nyse : bool\n data from NYSE if true, otherwise NASDAQ\n\n Returns\n ----------\n pd.DataFrame\n short interest volume data\n \"\"\"\n quandl.ApiConfig.api_key = cfg.API_KEY_QUANDL\n\n df = pd.DataFrame()\n\n try:\n\n if nyse:\n df = quandl.get(f\"FINRA/FNYX_{ticker}\")\n else:\n df = quandl.get(f\"FINRA/FNSQ_{ticker}\")\n\n except AuthenticationError:\n console.print(\"[red]Invalid API Key[/red]\\n\")\n # Catch invalid ticker\n except Exception as e:\n console.print(e)\n\n return df\n", "\"\"\"Regression View\"\"\"\n__docformat__ = \"numpy\"\n\nfrom typing import Optional, List, Tuple, Dict, Any\nimport os\nimport logging\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom openbb_terminal.config_plot import PLOT_DPI\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.helper_funcs import plot_autoscale, export_data\nfrom openbb_terminal.rich_config import console\nfrom openbb_terminal.econometrics import regression_model\nfrom openbb_terminal.helper_funcs import (\n print_rich_table,\n)\nfrom openbb_terminal.config_terminal import theme\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef display_panel(\n regression_type: str,\n regression_variables: List[Tuple],\n data: Dict[str, pd.DataFrame],\n datasets: Dict[pd.DataFrame, Any],\n entity_effects: bool = False,\n time_effects: bool = False,\n export: str = \"\",\n):\n \"\"\"Based on the regression type, this function decides what regression to run.\n\n Parameters\n ----------\n regression_type: str\n The type of regression you wish to execute.\n regression_variables : list\n The regressions variables entered where the first variable is\n the dependent variable.\n data : dict\n A dictionary containing the datasets.\n datasets: dict\n A dictionary containing the column and dataset names of\n each column/dataset combination.\n entity_effects: bool\n Whether to apply Fixed Effects on entities.\n time_effects: bool\n Whether to apply Fixed Effects on time.\n export : str\n Format to export data\n\n Returns\n -------\n The dataset used, the dependent variable, the independent variable and\n the regression model.\n \"\"\"\n (\n regression_df,\n dependent,\n independent,\n model,\n ) = regression_model.get_regressions_results(\n regression_type,\n regression_variables,\n data,\n datasets,\n entity_effects,\n time_effects,\n )\n\n if export:\n results_as_html = model.summary.tables[1].as_html()\n df = pd.read_html(results_as_html, header=0, index_col=0)[0]\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n f\"{dependent}_{regression_type}_regression\",\n df,\n )\n\n return regression_df, dependent, independent, model\n\n\n@log_start_end(log=logger)\ndef display_dwat(\n dependent_variable: pd.Series,\n residual: pd.DataFrame,\n plot: bool = False,\n export: str = \"\",\n external_axes: Optional[List[plt.axes]] = None,\n):\n \"\"\"Show Durbin-Watson autocorrelation tests\n\n Parameters\n ----------\n dependent_variable : pd.Series\n The dependent variable.\n residual : OLS Model\n The residual of an OLS model.\n plot : bool\n Whether to plot the residuals\n export : str\n Format to export data\n external_axes: Optional[List[plt.axes]]\n External axes to plot on\n \"\"\"\n autocorrelation = regression_model.get_dwat(residual)\n\n if 1.5 < autocorrelation < 2.5:\n console.print(\n f\"The result {autocorrelation} is within the range 1.5 and 2.5 
which therefore indicates \"\n f\"autocorrelation not to be problematic.\"\n )\n else:\n console.print(\n f\"The result {autocorrelation} is outside the range 1.5 and 2.5 and therefore autocorrelation \"\n f\"can be problematic. Please consider lags of the dependent or independent variable.\"\n )\n\n if plot:\n if external_axes is None:\n _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n else:\n ax = external_axes[0]\n\n ax.scatter(dependent_variable, residual)\n ax.axhline(y=0, color=\"r\", linestyle=\"-\")\n ax.set_ylabel(\"Residual\")\n ax.set_xlabel(dependent_variable.name.capitalize())\n ax.set_title(\"Plot of Residuals\")\n theme.style_primary_axis(ax)\n\n if external_axes is None:\n theme.visualize_output()\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n f\"{dependent_variable.name}_dwat\",\n autocorrelation,\n )\n\n console.print()\n\n\n@log_start_end(log=logger)\ndef display_bgod(model: pd.DataFrame, lags: int, export: str = \"\"):\n \"\"\"Show Breusch-Godfrey autocorrelation test\n\n Parameters\n ----------\n model : OLS Model\n Model containing residual values.\n lags : int\n The amount of lags included.\n export : str\n Format to export data\n \"\"\"\n (\n lm_stat,\n p_value,\n f_stat,\n fp_value,\n ) = regression_model.get_bgod(model, lags)\n\n df = pd.DataFrame(\n [lm_stat, p_value, f_stat, fp_value],\n index=[\"LM-stat\", \"p-value\", \"f-stat\", \"fp-value\"],\n )\n\n print_rich_table(\n df,\n headers=list([\"Breusch-Godfrey\"]),\n show_index=True,\n title=f\"Breusch-Godfrey autocorrelation test [Lags: {lags}]\",\n )\n\n if p_value > 0.05:\n console.print(\n f\"The result {round(p_value, 2)} indicates the existence of autocorrelation. Consider re-estimating \"\n f\"with clustered standard errors and applying the Random Effects or Fixed Effects model.\"\n )\n else:\n console.print(\n f\"The result {round(p_value, 2)} indicates no existence of autocorrelation.\"\n )\n\n export_data(export, os.path.dirname(os.path.abspath(__file__)), \"results_bgod\", df)\n\n console.print()\n\n\n@log_start_end(log=logger)\ndef display_bpag(model: pd.DataFrame, export: str = \"\"):\n \"\"\"Show Breusch-Pagan heteroscedasticity test\n\n Parameters\n ----------\n model : OLS Model\n Model containing residual values.\n export : str\n Format to export data\n \"\"\"\n (\n lm_stat,\n p_value,\n f_stat,\n fp_value,\n ) = regression_model.get_bpag(model)\n\n df = pd.DataFrame(\n [lm_stat, p_value, f_stat, fp_value],\n index=[\"lm-stat\", \"p-value\", \"f-stat\", \"fp-value\"],\n )\n\n print_rich_table(\n df,\n headers=list([\"Breusch-Pagan\"]),\n show_index=True,\n title=\"Breusch-Pagan heteroscedasticity test\",\n )\n\n if p_value > 0.05:\n console.print(\n f\"The result {round(p_value, 2)} indicates the existence of heteroscedasticity. Consider taking the log \"\n f\"or a rate for the dependent variable.\"\n )\n else:\n console.print(\n f\"The result {round(p_value, 2)} indicates no existence of heteroscedasticity.\"\n )\n\n export_data(export, os.path.dirname(os.path.abspath(__file__)), \"results_bpag\", df)\n\n console.print()\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.to_datetime" ], [ "pandas.DataFrame" ], [ "pandas.DataFrame" ], [ "pandas.DataFrame" ], [ "pandas.DataFrame" ], [ "pandas.DataFrame", "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
techkang/mmcv
[ "333eb6a8f964f005d4c0f34c3214ac2036bb228c" ]
[ "mmcv/parallel/collate.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections.abc import Mapping, Sequence\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data.dataloader import default_collate\n\nfrom .data_container import DataContainer\n\n\ndef collate(batch, samples_per_gpu=1):\n \"\"\"Puts each data field into a tensor/DataContainer with outer dimension\n batch size.\n\n Extend default_collate to add support for\n :type:`~mmcv.parallel.DataContainer`. There are 3 cases.\n\n 1. cpu_only = True, e.g., meta data\n 2. cpu_only = False, stack = True, e.g., images tensors\n 3. cpu_only = False, stack = False, e.g., gt bboxes\n \"\"\"\n\n if not isinstance(batch, Sequence):\n raise TypeError(f'{batch.dtype} is not supported.')\n\n if isinstance(batch[0], DataContainer):\n stacked = []\n if batch[0].cpu_only:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(\n stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)\n elif batch[0].stack:\n for i in range(0, len(batch), samples_per_gpu):\n assert isinstance(batch[i].data, torch.Tensor)\n\n if batch[i].pad_dims is not None:\n ndim = batch[i].dim()\n assert ndim > batch[i].pad_dims\n max_shape = [0 for _ in range(batch[i].pad_dims)]\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = batch[i].size(-dim)\n for sample in batch[i:i + samples_per_gpu]:\n for dim in range(0, ndim - batch[i].pad_dims):\n assert batch[i].size(dim) == sample.size(dim)\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = max(max_shape[dim - 1],\n sample.size(-dim))\n padded_samples = []\n for sample in batch[i:i + samples_per_gpu]:\n pad = [0 for _ in range(batch[i].pad_dims * 2)]\n for dim in range(1, batch[i].pad_dims + 1):\n pad[2 * dim -\n 1] = max_shape[dim - 1] - sample.size(-dim)\n padded_samples.append(\n F.pad(\n sample.data, pad, value=sample.padding_value))\n stacked.append(default_collate(padded_samples))\n elif batch[i].pad_dims is None:\n stacked.append(\n default_collate([\n sample.data\n for sample in batch[i:i + samples_per_gpu]\n ]))\n else:\n raise ValueError(\n 'pad_dims should be either None or integers (1-3)')\n\n else:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(stacked, batch[0].stack, batch[0].padding_value)\n elif isinstance(batch[0], Sequence) and not isinstance(batch[0], (str, bytes)):\n transposed = zip(*batch)\n return [collate(samples, samples_per_gpu) for samples in transposed]\n elif isinstance(batch[0], Mapping):\n return {\n key: collate([d[key] for d in batch], samples_per_gpu)\n for key in batch[0]\n }\n else:\n return default_collate(batch)\n" ]
[ [ "torch.nn.functional.pad", "torch.utils.data.dataloader.default_collate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Tejas-Nanaware/Learning-OpenCV
[ "4956db7f7f90d9d4a44327aebc2f809e4d9b2ca3" ]
[ "corner detection.py" ]
[ "import cv2\nimport numpy as np\n\nimg = cv2.imread('corner detection.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ngray = np.float32(gray)\n\ncorners = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)\ncorners = np.int0(corners)\n\nfor corner in corners:\n\tx, y = corner.ravel()\n\tcv2.circle(img, (x,y), 3, 255, -1)\n\ncv2.imshow('Corner', img)\ncv2.waitKey(0)" ]
[ [ "numpy.int0", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Irme/MONAI
[ "49e693c4e7df83dc1f8ab87349373de9263188a9", "49e693c4e7df83dc1f8ab87349373de9263188a9", "49e693c4e7df83dc1f8ab87349373de9263188a9", "49e693c4e7df83dc1f8ab87349373de9263188a9" ]
[ "tests/test_mask_intensityd.py", "tests/test_data_statsd.py", "tests/test_handler_lr_scheduler.py", "tests/test_rand_spatial_cropd.py" ]
[ "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import MaskIntensityd\n\nTEST_CASE_1 = [\n {\"keys\": \"img\", \"mask_data\": np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]]])},\n {\"img\": np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, 6, 6]]])},\n np.array([[[0, 0, 0], [0, 2, 0], [0, 0, 0]], [[0, 0, 0], [0, 5, 0], [0, 0, 0]]]),\n]\n\nTEST_CASE_2 = [\n {\"keys\": \"img\", \"mask_data\": np.array([[[0, 0, 0], [0, 5, 0], [0, 0, 0]]])},\n {\"img\": np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, 6, 6]]])},\n np.array([[[0, 0, 0], [0, 2, 0], [0, 0, 0]], [[0, 0, 0], [0, 5, 0], [0, 0, 0]]]),\n]\n\nTEST_CASE_3 = [\n {\"keys\": \"img\", \"mask_data\": np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 0], [0, 1, 0], [0, 1, 0]]])},\n {\"img\": np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, 6, 6]]])},\n np.array([[[0, 0, 0], [0, 2, 0], [0, 0, 0]], [[0, 4, 0], [0, 5, 0], [0, 6, 0]]]),\n]\n\n\nclass TestMaskIntensityd(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])\n def test_value(self, argments, image, expected_data):\n result = MaskIntensityd(**argments)(image)\n np.testing.assert_allclose(result[\"img\"], expected_data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.transforms import DataStatsd\n\nTEST_CASE_1 = [\n {\n \"keys\": \"img\",\n \"prefix\": \"test data\",\n \"data_shape\": False,\n \"value_range\": False,\n \"data_value\": False,\n \"additional_info\": None,\n },\n {\"img\": np.array([[0, 1], [1, 2]])},\n \"test data statistics:\",\n]\n\nTEST_CASE_2 = [\n {\n \"keys\": \"img\",\n \"prefix\": \"test data\",\n \"data_shape\": True,\n \"value_range\": False,\n \"data_value\": False,\n \"additional_info\": None,\n },\n {\"img\": np.array([[0, 1], [1, 2]])},\n \"test data statistics:\\nShape: (2, 2)\",\n]\n\nTEST_CASE_3 = [\n {\n \"keys\": \"img\",\n \"prefix\": \"test data\",\n \"data_shape\": True,\n \"value_range\": True,\n \"data_value\": False,\n \"additional_info\": None,\n },\n {\"img\": np.array([[0, 1], [1, 2]])},\n \"test data statistics:\\nShape: (2, 2)\\nValue range: (0, 2)\",\n]\n\nTEST_CASE_4 = [\n {\n 
\"keys\": \"img\",\n \"prefix\": \"test data\",\n \"data_shape\": True,\n \"value_range\": True,\n \"data_value\": True,\n \"additional_info\": None,\n },\n {\"img\": np.array([[0, 1], [1, 2]])},\n \"test data statistics:\\nShape: (2, 2)\\nValue range: (0, 2)\\nValue: [[0 1]\\n [1 2]]\",\n]\n\nTEST_CASE_5 = [\n {\n \"keys\": \"img\",\n \"prefix\": \"test data\",\n \"data_shape\": True,\n \"value_range\": True,\n \"data_value\": True,\n \"additional_info\": lambda x: np.mean(x),\n },\n {\"img\": np.array([[0, 1], [1, 2]])},\n \"test data statistics:\\nShape: (2, 2)\\nValue range: (0, 2)\\nValue: [[0 1]\\n [1 2]]\\nAdditional info: 1.0\",\n]\n\nTEST_CASE_6 = [\n {\n \"keys\": \"img\",\n \"prefix\": \"test data\",\n \"data_shape\": True,\n \"value_range\": True,\n \"data_value\": True,\n \"additional_info\": lambda x: torch.mean(x.float()),\n },\n {\"img\": torch.tensor([[0, 1], [1, 2]])},\n (\n \"test data statistics:\\nShape: torch.Size([2, 2])\\nValue range: (0, 2)\\n\"\n \"Value: tensor([[0, 1],\\n [1, 2]])\\nAdditional info: 1.0\"\n ),\n]\n\nTEST_CASE_7 = [\n {\n \"keys\": (\"img\", \"affine\"),\n \"prefix\": (\"image\", \"affine\"),\n \"data_shape\": True,\n \"value_range\": (True, False),\n \"data_value\": (False, True),\n \"additional_info\": (lambda x: np.mean(x), None),\n },\n {\"img\": np.array([[0, 1], [1, 2]]), \"affine\": np.eye(2, 2)},\n \"affine statistics:\\nShape: (2, 2)\\nValue: [[1. 0.]\\n [0. 1.]]\",\n]\n\nTEST_CASE_8 = [\n {\"img\": np.array([[0, 1], [1, 2]])},\n \"test data statistics:\\nShape: (2, 2)\\nValue range: (0, 2)\\nValue: [[0 1]\\n [1 2]]\\nAdditional info: 1.0\\n\",\n]\n\n\nclass TestDataStatsd(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7])\n def test_value(self, input_param, input_data, expected_print):\n transform = DataStatsd(**input_param)\n _ = transform(input_data)\n self.assertEqual(transform.printer.output, expected_print)\n\n @parameterized.expand([TEST_CASE_8])\n def test_file(self, input_data, expected_print):\n with tempfile.TemporaryDirectory() as tempdir:\n filename = os.path.join(tempdir, \"test_stats.log\")\n handler = logging.FileHandler(filename, mode=\"w\")\n input_param = {\n \"keys\": \"img\",\n \"prefix\": \"test data\",\n \"data_shape\": True,\n \"value_range\": True,\n \"data_value\": True,\n \"additional_info\": lambda x: np.mean(x),\n \"logger_handler\": handler,\n }\n transform = DataStatsd(**input_param)\n _ = transform(input_data)\n handler.stream.close()\n transform.printer._logger.removeHandler(handler)\n with open(filename, \"r\") as f:\n content = f.read()\n self.assertEqual(content, expected_print)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport sys\nimport unittest\n\nimport numpy as np\nimport torch\nfrom ignite.engine import Engine, Events\n\nfrom monai.handlers import LrScheduleHandler\n\n\nclass 
TestHandlerLrSchedule(unittest.TestCase):\n def test_content(self):\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n data = [0] * 8\n\n # set up engine\n def _train_func(engine, batch):\n pass\n\n val_engine = Engine(_train_func)\n train_engine = Engine(_train_func)\n\n @train_engine.on(Events.EPOCH_COMPLETED)\n def run_validation(engine):\n val_engine.run(data)\n val_engine.state.metrics[\"val_loss\"] = 1\n\n # set up testing handler\n net = torch.nn.PReLU()\n\n def _reduce_lr_on_plateau():\n optimizer = torch.optim.SGD(net.parameters(), 0.1)\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=1)\n handler = LrScheduleHandler(lr_scheduler, step_transform=lambda x: val_engine.state.metrics[\"val_loss\"])\n handler.attach(train_engine)\n return lr_scheduler\n\n def _reduce_on_step():\n optimizer = torch.optim.SGD(net.parameters(), 0.1)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)\n handler = LrScheduleHandler(lr_scheduler)\n handler.attach(train_engine)\n return lr_scheduler\n\n schedulers = _reduce_lr_on_plateau(), _reduce_on_step()\n\n train_engine.run(data, max_epochs=5)\n for scheduler in schedulers:\n np.testing.assert_allclose(scheduler._last_lr[0], 0.001)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import RandSpatialCropd\n\nTEST_CASE_0 = [\n {\"keys\": \"img\", \"roi_size\": [3, 3, -1], \"random_center\": True},\n {\"img\": np.random.randint(0, 2, size=[3, 3, 3, 5])},\n (3, 3, 3, 5),\n]\n\nTEST_CASE_1 = [\n {\"keys\": \"img\", \"roi_size\": [3, 3, 3], \"random_center\": True},\n {\"img\": np.random.randint(0, 2, size=[3, 3, 3, 3])},\n (3, 3, 3, 3),\n]\n\nTEST_CASE_2 = [\n {\"keys\": \"img\", \"roi_size\": [3, 3, 3], \"random_center\": False},\n {\"img\": np.random.randint(0, 2, size=[3, 3, 3, 3])},\n (3, 3, 3, 3),\n]\n\nTEST_CASE_3 = [\n {\"keys\": \"img\", \"roi_size\": [3, 3], \"random_center\": False},\n {\"img\": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]])},\n]\n\n\nclass TestRandSpatialCropd(unittest.TestCase):\n @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2])\n def test_shape(self, input_param, input_data, expected_shape):\n result = RandSpatialCropd(**input_param)(input_data)\n self.assertTupleEqual(result[\"img\"].shape, expected_shape)\n\n @parameterized.expand([TEST_CASE_3])\n def test_value(self, input_param, input_data):\n cropper = RandSpatialCropd(**input_param)\n result = cropper(input_data)\n roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size]\n np.testing.assert_allclose(result[\"img\"], input_data[\"img\"][:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.testing.assert_allclose" ], [ "numpy.eye", "numpy.array", "numpy.mean", "torch.tensor" ], [ "torch.nn.PReLU", "torch.optim.lr_scheduler.ReduceLROnPlateau", "numpy.testing.assert_allclose", "torch.optim.lr_scheduler.StepLR" ], [ "numpy.array", "numpy.testing.assert_allclose", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
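The MONAI test cases in the record above drive DataStatsd entirely through its constructor keywords and a plain dictionary input. As a minimal standalone sketch (assuming monai and numpy are installed and that DataStatsd is importable from monai.transforms, as the tests imply), the same transform can be invoked directly; the "img" key, the keyword arguments, and the np.mean callable are taken from TEST_CASE_5 above:

import numpy as np
from monai.transforms import DataStatsd

# Mirrors TEST_CASE_5: report shape, value range, raw values, and a mean
# computed by the additional_info callable for the "img" entry.
stats = DataStatsd(
    keys="img",
    prefix="test data",
    data_shape=True,
    value_range=True,
    data_value=True,
    additional_info=lambda x: np.mean(x),
)
_ = stats({"img": np.array([[0, 1], [1, 2]])})
# The statistics string is emitted through the transform's internal printer/logger,
# which is exactly what TestDataStatsd asserts against.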
Ditwoo/catalyst
[ "3126390f9f679ebcfedbe01707b416678a2732ac" ]
[ "catalyst/metrics/accuracy.py" ]
[ "\"\"\"\nVarious accuracy metrics:\n * :func:`accuracy`\n * :func:`multi_label_accuracy`\n\"\"\"\nfrom typing import Optional, Sequence, Union\n\nimport numpy as np\n\nimport torch\n\nfrom catalyst.metrics.functional import process_multilabel_components\nfrom catalyst.utils.torch import get_activation_fn\n\n\ndef accuracy(\n outputs: torch.Tensor,\n targets: torch.Tensor,\n topk: Sequence[int] = (1,),\n activation: Optional[str] = None,\n) -> Sequence[torch.Tensor]:\n \"\"\"\n Computes multi-class accuracy@topk for the specified values of `topk`.\n\n Args:\n outputs: model outputs, logits\n with shape [bs; num_classes]\n targets: ground truth, labels\n with shape [bs; 1]\n activation: activation to use for model output\n topk: `topk` for accuracy@topk computing\n\n Returns:\n list with computed accuracy@topk\n \"\"\"\n activation_fn = get_activation_fn(activation)\n outputs = activation_fn(outputs)\n\n max_k = max(topk)\n batch_size = targets.size(0)\n\n if len(outputs.shape) == 1 or outputs.shape[1] == 1:\n # binary accuracy\n pred = outputs.t()\n else:\n # multi-class accuracy\n _, pred = outputs.topk(max_k, 1, True, True) # noqa: WPS425\n pred = pred.t()\n correct = pred.eq(targets.long().view(1, -1).expand_as(pred))\n\n output = []\n for k in topk:\n correct_k = (\n correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)\n )\n output.append(correct_k.mul_(1.0 / batch_size))\n return output\n\n\ndef multi_label_accuracy(\n outputs: torch.Tensor,\n targets: torch.Tensor,\n threshold: Union[float, torch.Tensor],\n activation: Optional[str] = None,\n) -> torch.Tensor:\n \"\"\"\n Computes multi-label accuracy for the specified activation and threshold.\n\n Args:\n outputs: NxK tensor that for each of the N examples\n indicates the probability of the example belonging to each of\n the K classes, according to the model.\n targets: binary NxK tensor that encodes which of the K\n classes are associated with the N-th input\n (eg: a row [0, 1, 0, 1] indicates that the example is\n associated with classes 2 and 4)\n threshold: threshold for model output\n activation: activation to use for model output\n\n Returns:\n computed multi-label accuracy\n \"\"\"\n outputs, targets, _ = process_multilabel_components(\n outputs=outputs, targets=targets\n )\n activation_fn = get_activation_fn(activation)\n outputs = activation_fn(outputs)\n\n outputs = (outputs > threshold).long()\n output = (targets.long() == outputs.long()).sum().float() / np.prod(\n targets.shape\n )\n return output\n\n\n__all__ = [\"accuracy\", \"multi_label_accuracy\"]\n" ]
[ [ "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
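For orientation, a minimal usage sketch of the accuracy helper defined in catalyst/metrics/accuracy.py above (assuming the catalyst package from the pinned commit is installed so that the import path resolves; the multi_label_accuracy variant follows the same pattern with a probability threshold):

import torch
from catalyst.metrics.accuracy import accuracy

# Multi-class case: three samples, four classes, one row of logits per sample.
logits = torch.tensor([[2.0, 0.1, 0.1, 0.1],
                       [0.1, 0.1, 3.0, 0.1],
                       [0.3, 2.5, 0.2, 0.1]])
targets = torch.tensor([0, 2, 3])

# Returns one tensor per requested k; here 2 of 3 samples are correct at top-1
# and top-2, since the third target never appears among the top predictions.
top1, top2 = accuracy(logits, targets, topk=(1, 2))
print(top1.item(), top2.item())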
ackness/GazeFlow
[ "ca6b7d548571f85af84bdec77292758ab5d36449", "ca6b7d548571f85af84bdec77292758ab5d36449" ]
[ "layers/spadebn.py", "flows/squeeze_test.py" ]
[ "#!/usr/bin/env python3\n\nimport tensorflow as tf\n\nfrom layers.spectral_normalization import SpectralNormalization\n\n\nclass SpadeBN(tf.keras.layers.Layer):\n \"\"\"SPADE BatchNormalization\n\n Sources:\n\n https://towardsdatascience.com/implementing-spade-using-fastai-6ad86b94030a\n \"\"\"\n\n def __init__(self, width: int = 128, kernel_size=3, **kwargs):\n self.bn = tf.keras.layers.experimental.SyncBatchNormalization()\n self.conv0 = SpectralNormalization(\n tf.keras.layers.Conv2D(width, kernel_size=kernel_size, activation=\"relu\")\n )\n self.conv1 = SpectralNormalization(\n tf.keras.layers.Conv2D(width, kernel_size=kernel_size, activation=\"relu\")\n )\n self.conv2 = SpectralNormalization(\n tf.keras.layers.Conv2D(width, kernel_size=kernel_size, activation=\"relu\")\n )\n\n def call(self, x: tf.Tensor, cond: tf.Tensor):\n interim_conv = self.conv0(cond)\n gamma = self.conv1(interim_conv)\n beta = self.conv2(interim_conv)\n outputs = self.bn(x) * gamma + beta\n return outputs\n\n def get_config(self):\n config = super().get_config()\n config_update = {\"width\": self.width, \"kernel_size\": 3}\n config.update(config_update)\n return config\n", "#!/usr/bin/env python3\n\nimport tensorflow as tf\n\nfrom flows.squeeze import Squeeze, Squeeze2DWithMask\n\n\nclass Squeeze2DTest(tf.test.TestCase):\n def setUp(self):\n super().setUp()\n self.squeeze = Squeeze2DWithMask()\n\n def testSqueezeWithOutAnythng(self):\n x = tf.random.normal([32, 16, 8])\n y, mask = self.squeeze(x, inverse=False)\n rev_x, mask = self.squeeze(y, inverse=True)\n self.assertAllEqual(x, rev_x)\n zaux = tf.random.normal([32, 16, 16])\n y, mask, new_zaux = self.squeeze(x, zaux=zaux, inverse=False)\n rev_x, mask, rev_zaux = self.squeeze(y, zaux=new_zaux, inverse=True)\n self.assertAllEqual(x, rev_x)\n self.assertAllEqual(zaux, rev_zaux)\n\n\nclass SqueezeTest(tf.test.TestCase):\n def setUp(self):\n super().setUp()\n self.squeeze = Squeeze(with_zaux=True)\n\n def testSqueezeWithOutAnythng(self):\n x = tf.random.normal([32, 16, 16, 8])\n y = self.squeeze(x, inverse=False)\n rev_x = self.squeeze(y, inverse=True)\n self.assertAllEqual(x, rev_x)\n zaux = tf.random.normal([32, 16, 16, 12])\n y, new_zaux = self.squeeze(x, zaux=zaux, inverse=False)\n rev_x, rev_zaux = self.squeeze(y, zaux=new_zaux, inverse=True)\n self.assertAllEqual(x, rev_x)\n self.assertAllEqual(zaux, rev_zaux)\n" ]
[ [ "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.experimental.SyncBatchNormalization" ], [ "tensorflow.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
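The Squeeze tests in the record above only check that the forward and inverse passes round-trip a tensor while reshaping it. The same space-to-depth idea can be sketched with stock TensorFlow ops (a rough illustration only; GazeFlow's own Squeeze layer may order the output channels differently):

import tensorflow as tf

# A flow "squeeze" trades spatial resolution for channels:
# [N, H, W, C] -> [N, H/2, W/2, 4C], and the inverse undoes it exactly.
x = tf.random.normal([32, 16, 16, 8])
y = tf.nn.space_to_depth(x, block_size=2)      # forward squeeze
x_rec = tf.nn.depth_to_space(y, block_size=2)  # inverse squeeze
print(y.shape)                                 # (32, 8, 8, 32)
tf.debugging.assert_equal(x, x_rec)            # pure element permutation, lossless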
woojinsong/PyTorch-tutorials-kr
[ "36fefd556f45c2b1f5db912793172c0369430fd4", "36fefd556f45c2b1f5db912793172c0369430fd4", "36fefd556f45c2b1f5db912793172c0369430fd4", "36fefd556f45c2b1f5db912793172c0369430fd4" ]
[ "docs/_downloads/295945daa9a2749eebb39cf0af107ee2/polynomial_custom_function.py", "docs/_downloads/94a9d45986dd030a54a92e1793ccaf15/text_sentiment_ngrams_tutorial.py", "docs/_downloads/45357057c93bfed1360f925e84da2682/deploy_seq2seq_hybrid_frontend_tutorial.py", "docs/_downloads/d1cc6fb4e467c3f398247dc8a72f0c42/autograd_tutorial_old.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nPyTorch: 새 autograd Function 정의하기\n----------------------------------------\n\n:math:`y=\\sin(x)` 을 예측할 수 있도록, :math:`-\\pi` 부터 :math:`pi` 까지\n유클리드 거리(Euclidean distance)를 최소화하도록 3차 다항식을 학습합니다.\n다항식을 :math:`y=a+bx+cx^2+dx^3` 라고 쓰는 대신 :math:`y=a+b P_3(c+dx)` 로 다항식을 적겠습니다.\n여기서 :math:`P_3(x)=\\frac{1}{2}\\left(5x^3-3x\\right)` 은 3차\n`르장드르 다항식(Legendre polynomial)`_ 입니다.\n\n.. _르장드르 다항식(Legendre polynomial):\n https://en.wikipedia.org/wiki/Legendre_polynomials\n\n이 구현은 PyTorch 텐서 연산을 사용하여 순전파 단계를 계산하고, PyTorch autograd를 사용하여\n변화도(gradient)를 계산합니다.\n\n아래 구현에서는 :math:`P_3'(x)` 을 수행하기 위해 사용자 정의 autograd Function를 구현합니다.\n수학적으로는 :math:`P_3'(x)=\\frac{3}{2}\\left(5x^2-1\\right)` 입니다.\n\"\"\"\nimport torch\nimport math\n\n\nclass LegendrePolynomial3(torch.autograd.Function):\n \"\"\"\n torch.autograd.Function을 상속받아 사용자 정의 autograd Function을 구현하고,\n 텐서 연산을 하는 순전파 단계와 역전파 단계를 구현해보겠습니다.\n \"\"\"\n\n @staticmethod\n def forward(ctx, input):\n \"\"\"\n 순전파 단계에서는 입력을 갖는 텐서를 받아 출력을 갖는 텐서를 반환합니다.\n ctx는 컨텍스트 객체(context object)로 역전파 연산을 위한 정보 저장에 사용합니다.\n ctx.save_for_backward 메소드를 사용하여 역전파 단계에서 사용할 어떤 객체도\n 저장(cache)해 둘 수 있습니다.\n \"\"\"\n ctx.save_for_backward(input)\n return 0.5 * (5 * input ** 3 - 3 * input)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n 역전파 단계에서는 출력에 대한 손실(loss)의 변화도(gradient)를 갖는 텐서를 받고,\n 입력에 대한 손실의 변화도를 계산해야 합니다.\n \"\"\"\n input, = ctx.saved_tensors\n return grad_output * 1.5 * (5 * input ** 2 - 1)\n\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") # GPU에서 실행하려면 이 주석을 제거하세요\n\n# 입력값과 출력값을 갖는 텐서들을 생성합니다.\n# requires_grad=False가 기본값으로 설정되어 역전파 단계 중에 이 텐서들에 대한 변화도를 계산할\n# 필요가 없음을 나타냅니다.\nx = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)\ny = torch.sin(x)\n\n# 가중치를 갖는 임의의 텐서를 생성합니다. 3차 다항식이므로 4개의 가중치가 필요합니다:\n# y = a + b * P3(c + d * x) \n# 이 가중치들이 수렴(convergence)하기 위해서는 정답으로부터 너무 멀리 떨어지지 않은 값으로\n# 초기화가 되어야 합니다. \n# requires_grad=True로 설정하여 역전파 단계 중에 이 텐서들에 대한 변화도를 계산할 필요가\n# 있음을 나타냅니다. 
\na = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)\nb = torch.full((), -1.0, device=device, dtype=dtype, requires_grad=True)\nc = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)\nd = torch.full((), 0.3, device=device, dtype=dtype, requires_grad=True)\n\nlearning_rate = 5e-6\nfor t in range(2000):\n # 사용자 정의 Function을 적용하기 위해 Function.apply 메소드를 사용합니다.\n # 여기에 'P3'라고 이름을 붙였습니다.\n P3 = LegendrePolynomial3.apply\n\n # 순전파 단계: 연산을 하여 예측값 y를 계산합니다; \n # 사용자 정의 autograd 연산을 사용하여 P3를 계산합니다.\n y_pred = a + b * P3(c + d * x)\n\n # 손실을 계산하고 출력합니다.\n loss = (y_pred - y).pow(2).sum()\n if t % 100 == 99:\n print(t, loss.item())\n\n # autograd를 사용하여 역전파 단계를 계산합니다.\n loss.backward()\n\n # 경사하강법(gradient descent)을 사용하여 가중치를 갱신합니다.\n with torch.no_grad():\n a -= learning_rate * a.grad\n b -= learning_rate * b.grad\n c -= learning_rate * c.grad\n d -= learning_rate * d.grad\n\n # 가중치 갱신 후에는 변화도를 직접 0으로 만듭니다.\n a.grad = None\n b.grad = None\n c.grad = None\n d.grad = None\n\nprint(f'Result: y = {a.item()} + {b.item()} * P3({c.item()} + {d.item()} x)')\n", "\"\"\"\ntorchtext 라이브러리로 텍스트 분류하기\n===============================================\n\n**번역**: `김강민 <https://github.com/gangsss>`_ , `김진현 <https://github.com/lewha0>`_\n\n이 튜토리얼에서는 torchtext 라이브러리를 사용하여 어떻게 텍스트 분류 분석을 위한 데이터셋을 만드는지를 살펴보겠습니다.\n다음과 같은 내용들을 알게 됩니다:\n\n - 반복자(iterator)로 가공되지 않은 데이터(raw data)에 접근하기\n - 가공되지 않은 텍스트 문장들을 모델 학습에 사용할 수 있는 ``torch.Tensor`` 로 변환하는 데이터 처리 파이프라인 만들기\n - `torch.utils.data.DataLoader <https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader>`__ 를 사용하여 데이터를 섞고 반복하기(shuffle and iterate)\n\"\"\"\n\n######################################################################\n# 기초 데이터셋 반복자(raw data iterator)에 접근하기\n# -------------------------------------------------------------\n#\n# torchtext 라이브러리는 가공되지 않은 텍스트 문장들을 만드는(yield) 몇 가지 기초 데이터셋 반복자(raw dataset iterator)를 제공합니다.\n# 예를 들어, ``AG_NEWS`` 데이터셋 반복자는 레이블(label)과 문장의 튜플(tuple) 형태로 가공되지 않은 데이터를 만듭니다.\n\nimport torch\nfrom torchtext.datasets import AG_NEWS\ntrain_iter = AG_NEWS(split='train')\n\n######################################################################\n# ::\n#\n# next(train_iter)\n# >>> (3, \"Wall St. 
Bears Claw Back Into the Black (Reuters) Reuters -\n# Short-sellers, Wall Street's dwindling\\\\band of ultra-cynics, are seeing green\n# again.\")\n#\n# next(train_iter)\n# >>> (3, 'Carlyle Looks Toward Commercial Aerospace (Reuters) Reuters - Private\n# investment firm Carlyle Group,\\\\which has a reputation for making well-timed\n# and occasionally\\\\controversial plays in the defense industry, has quietly\n# placed\\\\its bets on another part of the market.')\n#\n# next(train_iter)\n# >>> (3, \"Oil and Economy Cloud Stocks' Outlook (Reuters) Reuters - Soaring\n# crude prices plus worries\\\\about the economy and the outlook for earnings are\n# expected to\\\\hang over the stock market next week during the depth of\n# the\\\\summer doldrums.\")\n#\n\n######################################################################\n# 데이터 처리 파이프라인 준비하기\n# ---------------------------------\n#\n# 어휘집(vocab), 단어 벡터(word vector), 토크나이저(tokenizer)를 포함하여 torchtext 라이브러리의 가장 기본적인 구성요소를 재검토했습니다.\n# 이들은 가공되지 않은 텍스트 문자열에 대한 기본적인 데이터 처리 빌딩 블록(data processing building block)입니다.\n#\n# 다음은 토크나이저 및 어휘집을 사용한 일반적인 NLP 데이터 처리의 예입니다.\n# 첫번째 단계는 가공되지 않은 학습 데이터셋으로 어휘집을 만드는 것입니다.\n# 여기에서는 토큰의 목록 또는 반복자를 받는 내장(built-in) 팩토리 함수(factory function) `build_vocab_from_iterator` 를 사용합니다.\n# 사용자는 어휘집에 추가할 특수 기호(special symbol) 같은 것들을 전달할 수도 있습니다.\n\nfrom torchtext.data.utils import get_tokenizer\nfrom torchtext.vocab import build_vocab_from_iterator\n\ntokenizer = get_tokenizer('basic_english')\ntrain_iter = AG_NEWS(split='train')\n\ndef yield_tokens(data_iter):\n for _, text in data_iter:\n yield tokenizer(text)\n\nvocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=[\"<unk>\"])\nvocab.set_default_index(vocab[\"<unk>\"])\n\n######################################################################\n# 어휘집 블록(vocabulary block)은 토큰 목록을 정수로 변환합니다.\n#\n# ::\n#\n# vocab(['here', 'is', 'an', 'example'])\n# >>> [475, 21, 30, 5286]\n#\n# 토크나이저와 어휘집을 갖춘 텍스트 처리 파이프라인을 준비합니다.\n# 텍스트 파이프라인과 레이블(label) 파이프라인은 데이터셋 반복자로부터 얻어온 가공되지 않은 문장 데이터를 처리하기 위해 사용됩니다.\n\ntext_pipeline = lambda x: vocab(tokenizer(x))\nlabel_pipeline = lambda x: int(x) - 1\n\n\n######################################################################\n# 텍스트 파이프라인은 어휘집에 정의된 룩업 테이블(순람표; lookup table)에 기반하여 텍스트 문장을 정수 목록으로 변환합니다.\n# 레이블(label) 파이프라인은 레이블을 정수로 변환합니다. 예를 들어,\n#\n# ::\n#\n# text_pipeline('here is the an example')\n# >>> [475, 21, 2, 30, 5286]\n# label_pipeline('10')\n# >>> 9\n#\n\n\n\n######################################################################\n# 데이터 배치(batch)와 반복자 생성하기\n# ----------------------------------------\n#\n# `torch.utils.data.DataLoader <https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader>`__ 를\n# 권장합니다. (튜토리얼은 `여기 <https://tutorials.pytorch.kr/beginner/data_loading_tutorial.html>`__ 있습니다.)\n# 이는 ``getitem()`` 과 ``len()`` 프로토콜을 구현한 맵 형태(map-style)의 데이터셋으로 동작하며, 맵(map)처럼 인덱스/키로 데이터 샘플을 얻어옵니다.\n# 또한, 셔플(shuffle) 인자를 ``False`` 로 설정하면 순회 가능한(iterable) 데이터셋처럼 동작합니다.\n#\n# 모델로 보내기 전, ``collate_fn`` 함수는 ``DataLoader`` 로부터 생성된 샘플 배치로 동작합니다.\n# ``collate_fn`` 의 입력은 ``DataLoader`` 에 배치 크기(batch size)가 있는 배치(batch) 데이터이며,\n# ``collate_fn`` 은 이를 미리 선언된 데이터 처리 파이프라인에 따라 처리합니다.\n# ``collate_fn`` 이 최상위 수준으로 정의(top level def)되었는지 확인합니다. 
이렇게 하면 모든 워커에서 이 함수를 사용할 수 있습니다.\n#\n# 아래 예제에서, 주어진(original) 데이터 배치의 텍스트 항목들은 리스트(list)에 담긴(pack) 뒤 ``nn.EmbeddingBag`` 의 입력을 위한 하나의 tensor로 합쳐(concatenate)집니다.\n# 오프셋(offset)은 텍스트 tensor에서 개별 시퀀스 시작 인덱스를 표현하기 위한 구분자(delimiter) tensor입니다.\n# 레이블(label)은 개별 텍스트 항목의 레이블을 저장하는 tensor입니다.\n\n\nfrom torch.utils.data import DataLoader\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef collate_batch(batch):\n label_list, text_list, offsets = [], [], [0]\n for (_label, _text) in batch:\n label_list.append(label_pipeline(_label))\n processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)\n text_list.append(processed_text)\n offsets.append(processed_text.size(0))\n label_list = torch.tensor(label_list, dtype=torch.int64)\n offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)\n text_list = torch.cat(text_list)\n return label_list.to(device), text_list.to(device), offsets.to(device)\n\ntrain_iter = AG_NEWS(split='train')\ndataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)\n\n\n######################################################################\n# 모델 정의하기\n# ---------------\n#\n# 모델은\n# `nn.EmbeddingBag <https://pytorch.org/docs/stable/nn.html?highlight=embeddingbag#torch.nn.EmbeddingBag>`__\n# 레이어와 분류(classification) 목적을 위한 선형 레이어로 구성됩니다.\n# 기본 모드가 \"평균(mean)\"인 ``nn.EmbeddingBag`` 은 임베딩들의 \"가방(bag)\"의 평균 값을 계산합니다.\n# 이때 텍스트(text) 항목들은 각기 그 길이가 다를 수 있지만, ``nn.EmbeddingBag`` 모듈은 텍스트의 길이를\n# 오프셋(offset)으로 저장하고 있으므로 패딩(padding)이 필요하지는 않습니다.\n#\n# 덧붙여서, ``nn.EmbeddingBag`` 은 임베딩의 평균을 즉시 계산하기 때문에,\n# tensor들의 시퀀스를 처리할 때 성능 및 메모리 효율성 측면에서의 장점도\n# 갖고 있습니다.\n#\n# .. image:: ../_static/img/text_sentiment_ngrams_model.png\n#\n\n\nfrom torch import nn\n\nclass TextClassificationModel(nn.Module):\n\n def __init__(self, vocab_size, embed_dim, num_class):\n super(TextClassificationModel, self).__init__()\n self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)\n self.fc = nn.Linear(embed_dim, num_class)\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.5\n self.embedding.weight.data.uniform_(-initrange, initrange)\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc.bias.data.zero_()\n\n def forward(self, text, offsets):\n embedded = self.embedding(text, offsets)\n return self.fc(embedded)\n\n\n######################################################################\n# 인스턴스 생성하기\n# -----------------\n#\n# ``AG_NEWS`` 데이터셋에는 4종류의 레이블이 존재하므로 클래스의 개수도 4개입니다.\n#\n# ::\n#\n# 1 : World (세계)\n# 2 : Sports (스포츠)\n# 3 : Business (경제)\n# 4 : Sci/Tec (과학/기술)\n#\n# 임베딩 차원이 64인 모델을 만듭니다.\n# 어휘집의 크기(Vocab size)는 어휘집(vocab)의 길이와 같습니다.\n# 클래스의 개수는 레이블의 개수와 같습니다.\n#\n\ntrain_iter = AG_NEWS(split='train')\nnum_class = len(set([label for (label, text) in train_iter]))\nvocab_size = len(vocab)\nemsize = 64\nmodel = TextClassificationModel(vocab_size, emsize, num_class).to(device)\n\n\n######################################################################\n# 모델을 학습하고 결과를 평가하는 함수 정의하기\n# ---------------------------------------------\n#\n\n\nimport time\n\ndef train(dataloader):\n model.train()\n total_acc, total_count = 0, 0\n log_interval = 500\n start_time = time.time()\n for idx, (label, text, offsets) in enumerate(dataloader):\n optimizer.zero_grad()\n predited_label = model(text, offsets)\n loss = criterion(predited_label, label)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)\n optimizer.step()\n total_acc += (predited_label.argmax(1) == label).sum().item()\n total_count += 
label.size(0)\n if idx % log_interval == 0 and idx > 0:\n elapsed = time.time() - start_time\n print('| epoch {:3d} | {:5d}/{:5d} batches '\n '| accuracy {:8.3f}'.format(epoch, idx, len(dataloader),\n total_acc/total_count))\n total_acc, total_count = 0, 0\n start_time = time.time()\n\ndef evaluate(dataloader):\n model.eval()\n total_acc, total_count = 0, 0\n\n with torch.no_grad():\n for idx, (label, text, offsets) in enumerate(dataloader):\n predited_label = model(text, offsets)\n loss = criterion(predited_label, label)\n total_acc += (predited_label.argmax(1) == label).sum().item()\n total_count += label.size(0)\n return total_acc/total_count\n\n\n######################################################################\n# 데이터셋을 분할하고 모델 수행하기\n# ---------------------------------\n#\n# 원본 ``AG_NEWS`` 에는 검증용 데이터가 포함되어 있지 않기 때문에, 우리는 학습\n# 데이터를 학습 및 검증 데이터로 분할하려 합니다. 이때 데이터를 분할하는\n# 비율은 0.95(학습)와 0.05(검증) 입니다. 우리는 여기서 PyTorch의\n# 핵심 라이브러리 중 하나인\n# `torch.utils.data.dataset.random_split <https://pytorch.org/docs/stable/data.html?highlight=random_split#torch.utils.data.random_split>`__\n# 함수를 사용합니다.\n#\n# `CrossEntropyLoss <https://pytorch.org/docs/stable/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss>`__\n# 기준(criterion)은 각 클래스에 대해 ``nn.LogSoftmax()`` 와 ``nn.NLLLoss()`` 를\n# 합쳐놓은 방식입니다.\n# `SGD <https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html>`__\n# optimizer는 확률적 경사 하강법를 구현해놓은 것입니다. 처음의 학습률은\n# 5.0으로 두었습니다. 매 에폭을 진행하면서 학습률을 조절할 때는\n# `StepLR <https://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#StepLR>`__\n# 을 사용합니다.\n#\n\nfrom torch.utils.data.dataset import random_split\nfrom torchtext.data.functional import to_map_style_dataset\n# Hyperparameters\nEPOCHS = 10 # epoch\nLR = 5 # learning rate\nBATCH_SIZE = 64 # batch size for training\n\ncriterion = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=LR)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)\ntotal_accu = None\ntrain_iter, test_iter = AG_NEWS()\ntrain_dataset = to_map_style_dataset(train_iter)\ntest_dataset = to_map_style_dataset(test_iter)\nnum_train = int(len(train_dataset) * 0.95)\nsplit_train_, split_valid_ = \\\n random_split(train_dataset, [num_train, len(train_dataset) - num_train])\n\ntrain_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,\n shuffle=True, collate_fn=collate_batch)\nvalid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,\n shuffle=True, collate_fn=collate_batch)\ntest_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE,\n shuffle=True, collate_fn=collate_batch)\n\nfor epoch in range(1, EPOCHS + 1):\n epoch_start_time = time.time()\n train(train_dataloader)\n accu_val = evaluate(valid_dataloader)\n if total_accu is not None and total_accu > accu_val:\n scheduler.step()\n else:\n total_accu = accu_val\n print('-' * 59)\n print('| end of epoch {:3d} | time: {:5.2f}s | '\n 'valid accuracy {:8.3f} '.format(epoch,\n time.time() - epoch_start_time,\n accu_val))\n print('-' * 59)\n\n\n\n######################################################################\n# 평가 데이터로 모델 평가하기\n# -------------------------------\n#\n\n\n\n######################################################################\n# 평가 데이터셋을 통한 결과를 확인합니다...\n\nprint('Checking the results of test dataset.')\naccu_test = evaluate(test_dataloader)\nprint('test accuracy {:8.3f}'.format(accu_test))\n\n\n\n\n######################################################################\n# 임의의 뉴스로 평가하기\n# ----------------------\n#\n# 현재까지 최고의 모델로 
골프 뉴스를 테스트해보겠습니다.\n#\n\nag_news_label = {1: \"World\",\n 2: \"Sports\",\n 3: \"Business\",\n 4: \"Sci/Tec\"}\n\ndef predict(text, text_pipeline):\n with torch.no_grad():\n text = torch.tensor(text_pipeline(text))\n output = model(text, torch.tensor([0]))\n return output.argmax(1).item() + 1\n\nex_text_str = \"MEMPHIS, Tenn. – Four days ago, Jon Rahm was \\\n enduring the season’s worst weather conditions on Sunday at The \\\n Open on his way to a closing 75 at Royal Portrush, which \\\n considering the wind and the rain was a respectable showing. \\\n Thursday’s first round at the WGC-FedEx St. Jude Invitational \\\n was another story. With temperatures in the mid-80s and hardly any \\\n wind, the Spaniard was 13 strokes better in a flawless round. \\\n Thanks to his best putting performance on the PGA Tour, Rahm \\\n finished with an 8-under 62 for a three-stroke lead, which \\\n was even more impressive considering he’d never played the \\\n front nine at TPC Southwind.\"\n\nmodel = model.to(\"cpu\")\n\nprint(\"This is a %s news\" %ag_news_label[predict(ex_text_str, text_pipeline)])\n\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDeploying a Seq2Seq Model with TorchScript\n==================================================\n**Author:** `Matthew Inkawhich <https://github.com/MatthewInkawhich>`_\n\"\"\"\n\n\n######################################################################\n# This tutorial will walk through the process of transitioning a\n# sequence-to-sequence model to TorchScript using the TorchScript\n# API. The model that we will convert is the chatbot model from the\n# `Chatbot tutorial <https://tutorials.pytorch.kr/beginner/chatbot_tutorial.html>`__.\n# You can either treat this tutorial as a “Part 2” to the Chatbot tutorial\n# and deploy your own pretrained model, or you can start with this\n# document and use a pretrained model that we host. In the latter case,\n# you can reference the original Chatbot tutorial for details\n# regarding data preprocessing, model theory and definition, and model\n# training.\n#\n# What is TorchScript?\n# ----------------------------\n#\n# During the research and development phase of a deep learning-based\n# project, it is advantageous to interact with an **eager**, imperative\n# interface like PyTorch’s. This gives users the ability to write\n# familiar, idiomatic Python, allowing for the use of Python data\n# structures, control flow operations, print statements, and debugging\n# utilities. Although the eager interface is a beneficial tool for\n# research and experimentation applications, when it comes time to deploy\n# the model in a production environment, having a **graph**-based model\n# representation is very beneficial. A deferred graph representation\n# allows for optimizations such as out-of-order execution, and the ability\n# to target highly optimized hardware architectures. Also, a graph-based\n# representation enables framework-agnostic model exportation. PyTorch\n# provides mechanisms for incrementally converting eager-mode code into\n# TorchScript, a statically analyzable and optimizable subset of Python\n# that Torch uses to represent deep learning programs independently from\n# the Python runtime.\n#\n# The API for converting eager-mode PyTorch programs into TorchScript is\n# found in the torch.jit module. This module has two core modalities for\n# converting an eager-mode model to a TorchScript graph representation:\n# **tracing** and **scripting**. 
The ``torch.jit.trace`` function takes a\n# module or function and a set of example inputs. It then runs the example\n# input through the function or module while tracing the computational\n# steps that are encountered, and outputs a graph-based function that\n# performs the traced operations. **Tracing** is great for straightforward\n# modules and functions that do not involve data-dependent control flow,\n# such as standard convolutional neural networks. However, if a function\n# with data-dependent if statements and loops is traced, only the\n# operations called along the execution route taken by the example input\n# will be recorded. In other words, the control flow itself is not\n# captured. To convert modules and functions containing data-dependent\n# control flow, a **scripting** mechanism is provided. The\n# ``torch.jit.script`` function/decorator takes a module or function and\n# does not requires example inputs. Scripting then explicitly converts\n# the module or function code to TorchScript, including all control flows.\n# One caveat with using scripting is that it only supports a subset of\n# Python, so you might need to rewrite the code to make it compatible\n# with the TorchScript syntax.\n#\n# For all details relating to the supported features, see the `TorchScript\n# language reference <https://pytorch.org/docs/master/jit.html>`__.\n# To provide the maximum flexibility, you can also mix tracing and scripting\n# modes together to represent your whole program, and these techniques can\n# be applied incrementally.\n#\n# .. figure:: /_static/img/chatbot/pytorch_workflow.png\n# :align: center\n# :alt: workflow\n#\n\n\n\n######################################################################\n# Acknowledgements\n# ----------------\n#\n# This tutorial was inspired by the following sources:\n#\n# 1) Yuan-Kuei Wu’s pytorch-chatbot implementation:\n# https://github.com/ywk991112/pytorch-chatbot\n#\n# 2) Sean Robertson’s practical-pytorch seq2seq-translation example:\n# https://github.com/spro/practical-pytorch/tree/master/seq2seq-translation\n#\n# 3) FloydHub’s Cornell Movie Corpus preprocessing code:\n# https://github.com/floydhub/textutil-preprocess-cornell-movie-corpus\n#\n\n\n######################################################################\n# Prepare Environment\n# -------------------\n#\n# First, we will import the required modules and set some constants. If\n# you are planning on using your own model, be sure that the\n# ``MAX_LENGTH`` constant is set correctly. As a reminder, this constant\n# defines the maximum allowed sentence length during training and the\n# maximum length output that the model is capable of producing.\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport re\nimport os\nimport unicodedata\nimport numpy as np\n\ndevice = torch.device(\"cpu\")\n\n\nMAX_LENGTH = 10 # Maximum sentence length\n\n# Default word tokens\nPAD_token = 0 # Used for padding short sentences\nSOS_token = 1 # Start-of-sentence token\nEOS_token = 2 # End-of-sentence token\n\n\n######################################################################\n# Model Overview\n# --------------\n#\n# As mentioned, the model that we are using is a\n# `sequence-to-sequence <https://arxiv.org/abs/1409.3215>`__ (seq2seq)\n# model. 
This type of model is used in cases when our input is a\n# variable-length sequence, and our output is also a variable length\n# sequence that is not necessarily a one-to-one mapping of the input. A\n# seq2seq model is comprised of two recurrent neural networks (RNNs) that\n# work cooperatively: an **encoder** and a **decoder**.\n#\n# .. figure:: /_static/img/chatbot/seq2seq_ts.png\n# :align: center\n# :alt: model\n#\n#\n# Image source:\n# https://jeddy92.github.io/JEddy92.github.io/ts_seq2seq_intro/\n#\n# Encoder\n# ~~~~~~~\n#\n# The encoder RNN iterates through the input sentence one token\n# (e.g. word) at a time, at each time step outputting an “output” vector\n# and a “hidden state” vector. The hidden state vector is then passed to\n# the next time step, while the output vector is recorded. The encoder\n# transforms the context it saw at each point in the sequence into a set\n# of points in a high-dimensional space, which the decoder will use to\n# generate a meaningful output for the given task.\n#\n# Decoder\n# ~~~~~~~\n#\n# The decoder RNN generates the response sentence in a token-by-token\n# fashion. It uses the encoder’s context vectors, and internal hidden\n# states to generate the next word in the sequence. It continues\n# generating words until it outputs an *EOS_token*, representing the end\n# of the sentence. We use an `attention\n# mechanism <https://arxiv.org/abs/1409.0473>`__ in our decoder to help it\n# to “pay attention” to certain parts of the input when generating the\n# output. For our model, we implement `Luong et\n# al. <https://arxiv.org/abs/1508.04025>`__\\ ’s “Global attention” module,\n# and use it as a submodule in our decode model.\n#\n\n\n######################################################################\n# Data Handling\n# -------------\n#\n# Although our models conceptually deal with sequences of tokens, in\n# reality, they deal with numbers like all machine learning models do. In\n# this case, every word in the model’s vocabulary, which was established\n# before training, is mapped to an integer index. We use a ``Voc`` object\n# to contain the mappings from word to index, as well as the total number\n# of words in the vocabulary. We will load the object later before we run\n# the model.\n#\n# Also, in order for us to be able to run evaluations, we must provide a\n# tool for processing our string inputs. The ``normalizeString`` function\n# converts all characters in a string to lowercase and removes all\n# non-letter characters. 
The ``indexesFromSentence`` function takes a\n# sentence of words and returns the corresponding sequence of word\n# indexes.\n#\n\nclass Voc:\n def __init__(self, name):\n self.name = name\n self.trimmed = False\n self.word2index = {}\n self.word2count = {}\n self.index2word = {PAD_token: \"PAD\", SOS_token: \"SOS\", EOS_token: \"EOS\"}\n self.num_words = 3 # Count SOS, EOS, PAD\n\n def addSentence(self, sentence):\n for word in sentence.split(' '):\n self.addWord(word)\n\n def addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.num_words\n self.word2count[word] = 1\n self.index2word[self.num_words] = word\n self.num_words += 1\n else:\n self.word2count[word] += 1\n\n # Remove words below a certain count threshold\n def trim(self, min_count):\n if self.trimmed:\n return\n self.trimmed = True\n keep_words = []\n for k, v in self.word2count.items():\n if v >= min_count:\n keep_words.append(k)\n\n print('keep_words {} / {} = {:.4f}'.format(\n len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)\n ))\n # Reinitialize dictionaries\n self.word2index = {}\n self.word2count = {}\n self.index2word = {PAD_token: \"PAD\", SOS_token: \"SOS\", EOS_token: \"EOS\"}\n self.num_words = 3 # Count default tokens\n for word in keep_words:\n self.addWord(word)\n\n\n# Lowercase and remove non-letter characters\ndef normalizeString(s):\n s = s.lower()\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s\n\n\n# Takes string sentence, returns sentence of word indexes\ndef indexesFromSentence(voc, sentence):\n return [voc.word2index[word] for word in sentence.split(' ')] + [EOS_token]\n\n\n######################################################################\n# Define Encoder\n# --------------\n#\n# We implement our encoder’s RNN with the ``torch.nn.GRU`` module which we\n# feed a batch of sentences (vectors of word embeddings) and it internally\n# iterates through the sentences one token at a time calculating the\n# hidden states. We initialize this module to be bidirectional, meaning\n# that we have two independent GRUs: one that iterates through the\n# sequences in chronological order, and another that iterates in reverse\n# order. We ultimately return the sum of these two GRUs’ outputs. Since\n# our model was trained using batching, our ``EncoderRNN`` model’s\n# ``forward`` function expects a padded input batch. To batch\n# variable-length sentences, we allow a maximum of *MAX_LENGTH* tokens in\n# a sentence, and all sentences in the batch that have less than\n# *MAX_LENGTH* tokens are padded at the end with our dedicated *PAD_token*\n# tokens. To use padded batches with a PyTorch RNN module, we must wrap\n# the forward pass call with ``torch.nn.utils.rnn.pack_padded_sequence``\n# and ``torch.nn.utils.rnn.pad_packed_sequence`` data transformations.\n# Note that the ``forward`` function also takes an ``input_lengths`` list,\n# which contains the length of each sentence in the batch. This input is\n# used by the ``torch.nn.utils.rnn.pack_padded_sequence`` function when\n# padding.\n#\n# TorchScript Notes:\n# ~~~~~~~~~~~~~~~~~~~~~~\n#\n# Since the encoder’s ``forward`` function does not contain any\n# data-dependent control flow, we will use **tracing** to convert it to\n# script mode. When tracing a module, we can leave the module definition\n# as-is. 
We will initialize all models towards the end of this document\n# before we run evaluations.\n#\n\nclass EncoderRNN(nn.Module):\n def __init__(self, hidden_size, embedding, n_layers=1, dropout=0):\n super(EncoderRNN, self).__init__()\n self.n_layers = n_layers\n self.hidden_size = hidden_size\n self.embedding = embedding\n\n # Initialize GRU; the input_size and hidden_size params are both set to 'hidden_size'\n # because our input size is a word embedding with number of features == hidden_size\n self.gru = nn.GRU(hidden_size, hidden_size, n_layers,\n dropout=(0 if n_layers == 1 else dropout), bidirectional=True)\n\n def forward(self, input_seq, input_lengths, hidden=None):\n # type: (Tensor, Tensor, Optional[Tensor]) -> Tuple[Tensor, Tensor]\n # Convert word indexes to embeddings\n embedded = self.embedding(input_seq)\n # Pack padded batch of sequences for RNN module\n packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)\n # Forward pass through GRU\n outputs, hidden = self.gru(packed, hidden)\n # Unpack padding\n outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)\n # Sum bidirectional GRU outputs\n outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:]\n # Return output and final hidden state\n return outputs, hidden\n\n\n######################################################################\n# Define Decoder’s Attention Module\n# ---------------------------------\n#\n# Next, we’ll define our attention module (``Attn``). Note that this\n# module will be used as a submodule in our decoder model. Luong et\n# al. consider various “score functions”, which take the current decoder\n# RNN output and the entire encoder output, and return attention\n# “energies”. This attention energies tensor is the same size as the\n# encoder output, and the two are ultimately multiplied, resulting in a\n# weighted tensor whose largest values represent the most important parts\n# of the query sentence at a particular time-step of decoding.\n#\n\n# Luong attention layer\nclass Attn(nn.Module):\n def __init__(self, method, hidden_size):\n super(Attn, self).__init__()\n self.method = method\n if self.method not in ['dot', 'general', 'concat']:\n raise ValueError(self.method, \"is not an appropriate attention method.\")\n self.hidden_size = hidden_size\n if self.method == 'general':\n self.attn = nn.Linear(self.hidden_size, hidden_size)\n elif self.method == 'concat':\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\n self.v = nn.Parameter(torch.FloatTensor(hidden_size))\n\n def dot_score(self, hidden, encoder_output):\n return torch.sum(hidden * encoder_output, dim=2)\n\n def general_score(self, hidden, encoder_output):\n energy = self.attn(encoder_output)\n return torch.sum(hidden * energy, dim=2)\n\n def concat_score(self, hidden, encoder_output):\n energy = self.attn(torch.cat((hidden.expand(encoder_output.size(0), -1, -1), encoder_output), 2)).tanh()\n return torch.sum(self.v * energy, dim=2)\n\n def forward(self, hidden, encoder_outputs):\n # Calculate the attention weights (energies) based on the given method\n if self.method == 'general':\n attn_energies = self.general_score(hidden, encoder_outputs)\n elif self.method == 'concat':\n attn_energies = self.concat_score(hidden, encoder_outputs)\n elif self.method == 'dot':\n attn_energies = self.dot_score(hidden, encoder_outputs)\n\n # Transpose max_length and batch_size dimensions\n attn_energies = attn_energies.t()\n\n # Return the softmax normalized probability scores (with added dimension)\n 
return F.softmax(attn_energies, dim=1).unsqueeze(1)\n\n\n######################################################################\n# Define Decoder\n# --------------\n#\n# Similarly to the ``EncoderRNN``, we use the ``torch.nn.GRU`` module for\n# our decoder’s RNN. This time, however, we use a unidirectional GRU. It\n# is important to note that unlike the encoder, we will feed the decoder\n# RNN one word at a time. We start by getting the embedding of the current\n# word and applying a\n# `dropout <https://pytorch.org/docs/stable/nn.html?highlight=dropout#torch.nn.Dropout>`__.\n# Next, we forward the embedding and the last hidden state to the GRU and\n# obtain a current GRU output and hidden state. We then use our ``Attn``\n# module as a layer to obtain the attention weights, which we multiply by\n# the encoder’s output to obtain our attended encoder output. We use this\n# attended encoder output as our ``context`` tensor, which represents a\n# weighted sum indicating what parts of the encoder’s output to pay\n# attention to. From here, we use a linear layer and softmax normalization\n# to select the next word in the output sequence.\n\n# TorchScript Notes:\n# ~~~~~~~~~~~~~~~~~~~~~~\n#\n# Similarly to the ``EncoderRNN``, this module does not contain any\n# data-dependent control flow. Therefore, we can once again use\n# **tracing** to convert this model to TorchScript after it\n# is initialized and its parameters are loaded.\n#\n\nclass LuongAttnDecoderRNN(nn.Module):\n def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1):\n super(LuongAttnDecoderRNN, self).__init__()\n\n # Keep for reference\n self.attn_model = attn_model\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.n_layers = n_layers\n self.dropout = dropout\n\n # Define layers\n self.embedding = embedding\n self.embedding_dropout = nn.Dropout(dropout)\n self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout))\n self.concat = nn.Linear(hidden_size * 2, hidden_size)\n self.out = nn.Linear(hidden_size, output_size)\n\n self.attn = Attn(attn_model, hidden_size)\n\n def forward(self, input_step, last_hidden, encoder_outputs):\n # Note: we run this one step (word) at a time\n # Get embedding of current input word\n embedded = self.embedding(input_step)\n embedded = self.embedding_dropout(embedded)\n # Forward through unidirectional GRU\n rnn_output, hidden = self.gru(embedded, last_hidden)\n # Calculate attention weights from the current GRU output\n attn_weights = self.attn(rnn_output, encoder_outputs)\n # Multiply attention weights to encoder outputs to get new \"weighted sum\" context vector\n context = attn_weights.bmm(encoder_outputs.transpose(0, 1))\n # Concatenate weighted context vector and GRU output using Luong eq. 5\n rnn_output = rnn_output.squeeze(0)\n context = context.squeeze(1)\n concat_input = torch.cat((rnn_output, context), 1)\n concat_output = torch.tanh(self.concat(concat_input))\n # Predict next word using Luong eq. 6\n output = self.out(concat_output)\n output = F.softmax(output, dim=1)\n # Return output and final hidden state\n return output, hidden\n\n\n######################################################################\n# Define Evaluation\n# -----------------\n#\n# Greedy Search Decoder\n# ~~~~~~~~~~~~~~~~~~~~~\n#\n# As in the chatbot tutorial, we use a ``GreedySearchDecoder`` module to\n# facilitate the actual decoding process. 
This module has the trained\n# encoder and decoder models as attributes, and drives the process of\n# encoding an input sentence (a vector of word indexes), and iteratively\n# decoding an output response sequence one word (word index) at a time.\n#\n# Encoding the input sequence is straightforward: simply forward the\n# entire sequence tensor and its corresponding lengths vector to the\n# ``encoder``. It is important to note that this module only deals with\n# one input sequence at a time, **NOT** batches of sequences. Therefore,\n# when the constant **1** is used for declaring tensor sizes, this\n# corresponds to a batch size of 1. To decode a given decoder output, we\n# must iteratively run forward passes through our decoder model, which\n# outputs softmax scores corresponding to the probability of each word\n# being the correct next word in the decoded sequence. We initialize the\n# ``decoder_input`` to a tensor containing an *SOS_token*. After each pass\n# through the ``decoder``, we *greedily* append the word with the highest\n# softmax probability to the ``decoded_words`` list. We also use this word\n# as the ``decoder_input`` for the next iteration. The decoding process\n# terminates either if the ``decoded_words`` list has reached a length of\n# *MAX_LENGTH* or if the predicted word is the *EOS_token*.\n#\n# TorchScript Notes:\n# ~~~~~~~~~~~~~~~~~~~~~~\n#\n# The ``forward`` method of this module involves iterating over the range\n# of :math:`[0, max\\_length)` when decoding an output sequence one word at\n# a time. Because of this, we should use **scripting** to convert this\n# module to TorchScript. Unlike with our encoder and decoder models,\n# which we can trace, we must make some necessary changes to the\n# ``GreedySearchDecoder`` module in order to initialize an object without\n# error. In other words, we must ensure that our module adheres to the\n# rules of the TorchScript mechanism, and does not utilize any language\n# features outside of the subset of Python that TorchScript includes.\n#\n# To get an idea of some manipulations that may be required, we will go\n# over the diffs between the ``GreedySearchDecoder`` implementation from\n# the chatbot tutorial and the implementation that we use in the cell\n# below. Note that the lines highlighted in red are lines removed from the\n# original implementation and the lines highlighted in green are new.\n#\n# .. figure:: /_static/img/chatbot/diff.png\n# :align: center\n# :alt: diff\n#\n# Changes:\n# ^^^^^^^^\n#\n# - Added ``decoder_n_layers`` to the constructor arguments\n#\n# - This change stems from the fact that the encoder and decoder\n# models that we pass to this module will be a child of\n# ``TracedModule`` (not ``Module``). Therefore, we cannot access the\n# decoder’s number of layers with ``decoder.n_layers``. Instead, we\n# plan for this, and pass this value in during module construction.\n#\n#\n# - Store away new attributes as constants\n#\n# - In the original implementation, we were free to use variables from\n# the surrounding (global) scope in our ``GreedySearchDecoder``\\ ’s\n# ``forward`` method. However, now that we are using scripting, we\n# do not have this freedom, as the assumption with scripting is that\n# we cannot necessarily hold on to Python objects, especially when\n# exporting. 
An easy solution to this is to store these values from\n# the global scope as attributes to the module in the constructor,\n# and add them to a special list called ``__constants__`` so that\n# they can be used as literal values when constructing the graph in\n# the ``forward`` method. An example of this usage is on NEW line\n# 19, where instead of using the ``device`` and ``SOS_token`` global\n# values, we use our constant attributes ``self._device`` and\n# ``self._SOS_token``.\n#\n#\n# - Enforce types of ``forward`` method arguments\n#\n# - By default, all parameters to a TorchScript function are assumed\n# to be Tensor. If we need to pass an argument of a different type,\n# we can use function type annotations as introduced in `PEP\n# 3107 <https://www.python.org/dev/peps/pep-3107/>`__. In addition,\n# it is possible to declare arguments of different types using\n# MyPy-style type annotations (see\n# `doc <https://pytorch.org/docs/master/jit.html#types>`__).\n#\n#\n# - Change initialization of ``decoder_input``\n#\n# - In the original implementation, we initialized our\n# ``decoder_input`` tensor with ``torch.LongTensor([[SOS_token]])``.\n# When scripting, we are not allowed to initialize tensors in a\n# literal fashion like this. Instead, we can initialize our tensor\n# with an explicit torch function such as ``torch.ones``. In this\n# case, we can easily replicate the scalar ``decoder_input`` tensor\n# by multiplying 1 by our SOS_token value stored in the constant\n# ``self._SOS_token``.\n#\n\nclass GreedySearchDecoder(nn.Module):\n def __init__(self, encoder, decoder, decoder_n_layers):\n super(GreedySearchDecoder, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self._device = device\n self._SOS_token = SOS_token\n self._decoder_n_layers = decoder_n_layers\n\n __constants__ = ['_device', '_SOS_token', '_decoder_n_layers']\n\n def forward(self, input_seq : torch.Tensor, input_length : torch.Tensor, max_length : int):\n # Forward input through encoder model\n encoder_outputs, encoder_hidden = self.encoder(input_seq, input_length)\n # Prepare encoder's final hidden layer to be first hidden input to the decoder\n decoder_hidden = encoder_hidden[:self._decoder_n_layers]\n # Initialize decoder input with SOS_token\n decoder_input = torch.ones(1, 1, device=self._device, dtype=torch.long) * self._SOS_token\n # Initialize tensors to append decoded words to\n all_tokens = torch.zeros([0], device=self._device, dtype=torch.long)\n all_scores = torch.zeros([0], device=self._device)\n # Iteratively decode one word token at a time\n for _ in range(max_length):\n # Forward pass through decoder\n decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)\n # Obtain most likely word token and its softmax score\n decoder_scores, decoder_input = torch.max(decoder_output, dim=1)\n # Record token and score\n all_tokens = torch.cat((all_tokens, decoder_input), dim=0)\n all_scores = torch.cat((all_scores, decoder_scores), dim=0)\n # Prepare current token to be next decoder input (add a dimension)\n decoder_input = torch.unsqueeze(decoder_input, 0)\n # Return collections of word tokens and scores\n return all_tokens, all_scores\n\n\n\n######################################################################\n# Evaluating an Input\n# ~~~~~~~~~~~~~~~~~~~\n#\n# Next, we define some functions for evaluating an input. 
The ``evaluate``\n# function takes a normalized string sentence, processes it to a tensor of\n# its corresponding word indexes (with batch size of 1), and passes this\n# tensor to a ``GreedySearchDecoder`` instance called ``searcher`` to\n# handle the encoding/decoding process. The searcher returns the output\n# word index vector and a scores tensor corresponding to the softmax\n# scores for each decoded word token. The final step is to convert each\n# word index back to its string representation using ``voc.index2word``.\n#\n# We also define two functions for evaluating an input sentence. The\n# ``evaluateInput`` function prompts a user for an input, and evaluates\n# it. It will continue to ask for another input until the user enters ‘q’\n# or ‘quit’.\n#\n# The ``evaluateExample`` function simply takes a string input sentence as\n# an argument, normalizes it, evaluates it, and prints the response.\n#\n\ndef evaluate(searcher, voc, sentence, max_length=MAX_LENGTH):\n ### Format input sentence as a batch\n # words -> indexes\n indexes_batch = [indexesFromSentence(voc, sentence)]\n # Create lengths tensor\n lengths = torch.tensor([len(indexes) for indexes in indexes_batch])\n # Transpose dimensions of batch to match models' expectations\n input_batch = torch.LongTensor(indexes_batch).transpose(0, 1)\n # Use appropriate device\n input_batch = input_batch.to(device)\n lengths = lengths.to(device)\n # Decode sentence with searcher\n tokens, scores = searcher(input_batch, lengths, max_length)\n # indexes -> words\n decoded_words = [voc.index2word[token.item()] for token in tokens]\n return decoded_words\n\n\n# Evaluate inputs from user input (stdin)\ndef evaluateInput(searcher, voc):\n input_sentence = ''\n while(1):\n try:\n # Get input sentence\n input_sentence = input('> ')\n # Check if it is quit case\n if input_sentence == 'q' or input_sentence == 'quit': break\n # Normalize sentence\n input_sentence = normalizeString(input_sentence)\n # Evaluate sentence\n output_words = evaluate(searcher, voc, input_sentence)\n # Format and print response sentence\n output_words[:] = [x for x in output_words if not (x == 'EOS' or x == 'PAD')]\n print('Bot:', ' '.join(output_words))\n\n except KeyError:\n print(\"Error: Encountered unknown word.\")\n\n# Normalize input sentence and call evaluate()\ndef evaluateExample(sentence, searcher, voc):\n print(\"> \" + sentence)\n # Normalize sentence\n input_sentence = normalizeString(sentence)\n # Evaluate sentence\n output_words = evaluate(searcher, voc, input_sentence)\n output_words[:] = [x for x in output_words if not (x == 'EOS' or x == 'PAD')]\n print('Bot:', ' '.join(output_words))\n\n\n######################################################################\n# Load Pretrained Parameters\n# --------------------------\n#\n# Ok, its time to load our model!\n#\n# Use hosted model\n# ~~~~~~~~~~~~~~~~\n#\n# To load the hosted model:\n#\n# 1) Download the model `here <https://download.pytorch.org/models/tutorials/4000_checkpoint.tar>`__.\n#\n# 2) Set the ``loadFilename`` variable to the path to the downloaded\n# checkpoint file.\n#\n# 3) Leave the ``checkpoint = torch.load(loadFilename)`` line uncommented,\n# as the hosted model was trained on CPU.\n#\n# Use your own model\n# ~~~~~~~~~~~~~~~~~~\n#\n# To load your own pre-trained model:\n#\n# 1) Set the ``loadFilename`` variable to the path to the checkpoint file\n# that you wish to load. 
Note that if you followed the convention for\n# saving the model from the chatbot tutorial, this may involve changing\n# the ``model_name``, ``encoder_n_layers``, ``decoder_n_layers``,\n# ``hidden_size``, and ``checkpoint_iter`` (as these values are used in\n# the model path).\n#\n# 2) If you trained the model on a CPU, make sure that you are opening the\n# checkpoint with the ``checkpoint = torch.load(loadFilename)`` line.\n# If you trained the model on a GPU and are running this tutorial on a\n# CPU, uncomment the\n# ``checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))``\n# line.\n#\n# TorchScript Notes:\n# ~~~~~~~~~~~~~~~~~~~~~~\n#\n# Notice that we initialize and load parameters into our encoder and\n# decoder models as usual. If you are using tracing mode(`torch.jit.trace`)\n# for some part of your models, you must call .to(device) to set the device\n# options of the models and .eval() to set the dropout layers to test mode\n# **before** tracing the models. `TracedModule` objects do not inherit the\n# ``to`` or ``eval`` methods. Since in this tutorial we are only using\n# scripting instead of tracing, we only need to do this before we do\n# evaluation (which is the same as we normally do in eager mode).\n#\n\nsave_dir = os.path.join(\"data\", \"save\")\ncorpus_name = \"cornell movie-dialogs corpus\"\n\n# Configure models\nmodel_name = 'cb_model'\nattn_model = 'dot'\n#attn_model = 'general'\n#attn_model = 'concat'\nhidden_size = 500\nencoder_n_layers = 2\ndecoder_n_layers = 2\ndropout = 0.1\nbatch_size = 64\n\n# If you're loading your own model\n# Set checkpoint to load from\ncheckpoint_iter = 4000\n# loadFilename = os.path.join(save_dir, model_name, corpus_name,\n# '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),\n# '{}_checkpoint.tar'.format(checkpoint_iter))\n\n# If you're loading the hosted model\nloadFilename = 'data/4000_checkpoint.tar'\n\n# Load model\n# Force CPU device options (to match tensors in this tutorial)\ncheckpoint = torch.load(loadFilename, map_location=torch.device('cpu'))\nencoder_sd = checkpoint['en']\ndecoder_sd = checkpoint['de']\nencoder_optimizer_sd = checkpoint['en_opt']\ndecoder_optimizer_sd = checkpoint['de_opt']\nembedding_sd = checkpoint['embedding']\nvoc = Voc(corpus_name)\nvoc.__dict__ = checkpoint['voc_dict']\n\n\nprint('Building encoder and decoder ...')\n# Initialize word embeddings\nembedding = nn.Embedding(voc.num_words, hidden_size)\nembedding.load_state_dict(embedding_sd)\n# Initialize encoder & decoder models\nencoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)\ndecoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)\n# Load trained model params\nencoder.load_state_dict(encoder_sd)\ndecoder.load_state_dict(decoder_sd)\n# Use appropriate device\nencoder = encoder.to(device)\ndecoder = decoder.to(device)\n# Set dropout layers to eval mode\nencoder.eval()\ndecoder.eval()\nprint('Models built and ready to go!')\n\n\n######################################################################\n# Convert Model to TorchScript\n# -----------------------------\n#\n# Encoder\n# ~~~~~~~\n#\n# As previously mentioned, to convert the encoder model to TorchScript,\n# we use **scripting**. The encoder model takes an input sequence and\n# a corresponding lengths tensor. 
Therefore, we create an example input\n# sequence tensor ``test_seq``, which is of appropriate size (MAX_LENGTH,\n# 1), contains numbers in the appropriate range\n# :math:`[0, voc.num\\_words)`, and is of the appropriate type (int64). We\n# also create a ``test_seq_length`` scalar which realistically contains\n# the value corresponding to how many words are in the ``test_seq``. The\n# next step is to use the ``torch.jit.trace`` function to trace the model.\n# Notice that the first argument we pass is the module that we want to\n# trace, and the second is a tuple of arguments to the module’s\n# ``forward`` method.\n#\n# Decoder\n# ~~~~~~~\n#\n# We perform the same process for tracing the decoder as we did for the\n# encoder. Notice that we call forward on a set of random inputs to the\n# traced_encoder to get the output that we need for the decoder. This is\n# not required, as we could also simply manufacture a tensor of the\n# correct shape, type, and value range. This method is possible because in\n# our case we do not have any constraints on the values of the tensors\n# because we do not have any operations that could fault on out-of-range\n# inputs.\n#\n# GreedySearchDecoder\n# ~~~~~~~~~~~~~~~~~~~\n#\n# Recall that we scripted our searcher module due to the presence of\n# data-dependent control flow. In the case of scripting, we do necessary\n# language changes to make sure the implementation complies with\n# TorchScript. We initialize the scripted searcher the same way that we\n# would initialize an un-scripted variant.\n#\n\n### Compile the whole greedy search model to TorchScript model\n# Create artificial inputs\ntest_seq = torch.LongTensor(MAX_LENGTH, 1).random_(0, voc.num_words).to(device)\ntest_seq_length = torch.LongTensor([test_seq.size()[0]]).to(device)\n# Trace the model\ntraced_encoder = torch.jit.trace(encoder, (test_seq, test_seq_length))\n\n### Convert decoder model\n# Create and generate artificial inputs\ntest_encoder_outputs, test_encoder_hidden = traced_encoder(test_seq, test_seq_length)\ntest_decoder_hidden = test_encoder_hidden[:decoder.n_layers]\ntest_decoder_input = torch.LongTensor(1, 1).random_(0, voc.num_words)\n# Trace the model\ntraced_decoder = torch.jit.trace(decoder, (test_decoder_input, test_decoder_hidden, test_encoder_outputs))\n\n### Initialize searcher module by wrapping ``torch.jit.script`` call\nscripted_searcher = torch.jit.script(GreedySearchDecoder(traced_encoder, traced_decoder, decoder.n_layers))\n\n\n\n\n######################################################################\n# Print Graphs\n# ------------\n#\n# Now that our models are in TorchScript form, we can print the graphs of\n# each to ensure that we captured the computational graph appropriately.\n# Since TorchScript allow us to recursively compile the whole model\n# hierarchy and inline the ``encoder`` and ``decoder`` graph into a single\n# graph, we just need to print the `scripted_searcher` graph\n\nprint('scripted_searcher graph:\\n', scripted_searcher.graph)\n\n\n######################################################################\n# Run Evaluation\n# --------------\n#\n# Finally, we will run evaluation of the chatbot model using the TorchScript\n# models. If converted correctly, the models will behave exactly as they\n# would in their eager-mode representation.\n#\n# By default, we evaluate a few common query sentences. 
If you want to\n# chat with the bot yourself, uncomment the ``evaluateInput`` line and\n# give it a spin.\n#\n\n\n# Use appropriate device\nscripted_searcher.to(device)\n# Set dropout layers to eval mode\nscripted_searcher.eval()\n\n# Evaluate examples\nsentences = [\"hello\", \"what's up?\", \"who are you?\", \"where am I?\", \"where are you from?\"]\nfor s in sentences:\n evaluateExample(s, scripted_searcher, voc)\n\n# Evaluate your input\n#evaluateInput(traced_encoder, traced_decoder, scripted_searcher, voc)\n\n\n######################################################################\n# Save Model\n# ----------\n#\n# Now that we have successfully converted our model to TorchScript, we\n# will serialize it for use in a non-Python deployment environment. To do\n# this, we can simply save our ``scripted_searcher`` module, as this is\n# the user-facing interface for running inference against the chatbot\n# model. When saving a Script module, use script_module.save(PATH) instead\n# of torch.save(model, PATH).\n#\n\nscripted_searcher.save(\"scripted_chatbot.pth\")\n", "# -*- coding: utf-8 -*-\n\"\"\"\nAutograd\n========\n\nAutograd는 자동 미분을 수행하는 torch의 핵심 패키지로, 자동 미분을 위해\n테잎(tape) 기반 시스템을 사용합니다.\n\n순전파(forward) 단계에서 autograd 테잎은 수행하는 모든 연산을 기억합니다.\n그리고, 역전파(backward) 단계에서 연산들을 재현(replay)합니다.\n\n연산 기록을 추적하는 Tensor\n----------------------------\n\nAutograd에서 ``requires_grad=True`` 로 설정된 입력 ``Tensor`` 의 연산은\n기록됩니다. 역전파 단계 연산 후에, 이 Tensor에 대한 변화도(grdient)는 ``.grad``\n속성에 누적됩니다.\n\nAutograd 구현에서 매우 중요한 클래스가 하나 더 있는데, 이것은 바로 ``Function``\n클래스입니다. ``Tensor`` 와 ``Function`` 은 서로 연결되어 있으며, 모든 연산 과정을\n부호화(encode)하여 순환하지 않는 그래프(acyclic graph)를 생성합니다. 각 변수는\n``.grad_fn`` 속성을 갖고 있는데, 이는 ``Tensor`` 를 생성한 ``Function`` 을\n참조하고 있습니다. (단, 사용자가 만든 Tensor는 예외로, 이 때 ``grad_fn`` 은\n``None`` 입니다.)\n\n도함수를 계산하기 위해서는 ``Tensor`` 의 ``.backward()`` 를 호출하면 됩니다.\n``Tensor`` 가 스칼라(scalar)인 경우(예. 하나의 요소만 갖는 등)에는, ``backward`` 에\n인자를 정해줄 필요가 없습니다. 하지만 여러 개의 요소를 갖고 있을\n때는 tensor의 모양을 ``gradient`` 의 인자로 지정할 필요가 있습니다.\n\"\"\"\n\nimport torch\n\n###############################################################\n# tensor를 생성하고 requires_grad=True를 설정하여 연산을 기록합니다.\nx = torch.ones(2, 2, requires_grad=True)\nprint(x)\n\n###############################################################\n#\nprint(x.data)\n\n###############################################################\n#\nprint(x.grad)\n\n###############################################################\n#\n\nprint(x.grad_fn) # x는 직접 생성하였기 때문에 아무런 값도 없습니다.\n\n###############################################################\n# x에 연산을 수행합니다:\n\ny = x + 2\nprint(y)\n\n###############################################################\n# y 는 연산의 결과로 생성된 것이므로, grad_fn을 갖습니다.\nprint(y.grad_fn)\n\n###############################################################\n# y에 다른 연산을 수행합니다.\n\nz = y * y * 3\nout = z.mean()\n\nprint(z, out)\n\n################################################################\n# ``.requires_grad_( ... )`` 는 기존 Tensor의 ``requires_grad`` 값을 바꿔치기하여\n# 변경합니다. 
입력값이 지정되지 않으면 기본값은 ``False`` 입니다.\na = torch.randn(2, 2)\na = ((a * 3) / (a - 1))\nprint(a.requires_grad)\na.requires_grad_(True)\nprint(a.requires_grad)\nb = (a * a).sum()\nprint(b.grad_fn)\n\n###############################################################\n# 변화도(Gradient)\n# ----------------\n#\n# 이제 역전파를 한 후 변화도 d(out)/dx를 출력해보겠습니다.\n\nout.backward()\nprint(x.grad)\n\n\n###############################################################\n# 기본적으로 변화도 연산은 그래프 내의 모든 내부 버퍼를 날려버리므로,\n# 그래프의 일부를 2번 역전파하려면 첫번째 역전파 시에 미리\n# ``retain_graph = True`` 을 지정해둘 필요가 있습니다.\n\nx = torch.ones(2, 2, requires_grad=True)\ny = x + 2\ny.backward(torch.ones(2, 2), retain_graph=True)\n# retain_graph는 내부 버퍼들이 지워지는 것을 막습니다.\nprint(x.grad)\n\n###############################################################\n#\nz = y * y\nprint(z)\n\n###############################################################\n#\n# 무작위 경사도로 역전파해보겠습니다\n\ngradient = torch.randn(2, 2)\n\n# 만약 앞에서 retain_graph를 하지 않았다면 여기서 에러가 발생할 것입니다.\ny.backward(gradient)\n\nprint(x.grad)\n\n###############################################################\n# 또한 ``with torch.no_grad():`` 로 코드 블럭을 감싸서 autograd가\n# ``.requires_grad=True`` 인 Tensor들의 연산 기록을 추적하는 것을 멈출 수 있습니다:\nprint(x.requires_grad)\nprint((x ** 2).requires_grad)\n\nwith torch.no_grad():\n\tprint((x ** 2).requires_grad)\n" ]
[ [ "torch.linspace", "torch.full", "torch.sin", "torch.no_grad", "torch.device" ], [ "torch.nn.CrossEntropyLoss", "torch.cat", "torch.utils.data.DataLoader", "torch.tensor", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available", "torch.nn.EmbeddingBag", "torch.optim.lr_scheduler.StepLR" ], [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.jit.trace", "torch.LongTensor", "torch.ones", "torch.cat", "torch.zeros", "torch.max", "torch.nn.GRU", "torch.sum", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.Embedding", "torch.unsqueeze", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "torch.FloatTensor", "torch.device" ], [ "torch.randn", "torch.no_grad", "torch.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sarnthil/emotion-classification-roles
[ "1fdd3a8cbdac5ab2ad9598a101b763882df78280" ]
[ "scripts/indicator_experiment/calculate-fscores-from-aggregations.py" ]
[ "import json\nfrom pathlib import Path\nfrom collections import defaultdict, Counter\n\nfrom sklearn.metrics import precision_recall_fscore_support\n\nPRECISION_FALLBACK = RECALL_FALLBACK = 1\n\n# dataset -> setting -> emotion -> measure -> score\nresults = {}\n\nfor dataset_path in Path(\"workdata/indicator-experiment/predictions\").glob(\"*\"):\n dataset = dataset_path.name\n for file in dataset_path.glob(\"*.aggregated\"):\n part = file.stem\n results.setdefault(dataset, {}).setdefault(part, {})\n confusion_matrix = defaultdict(Counter)\n y_true, y_pred = [], []\n instances = 0\n with file.open() as f:\n for line in f:\n data = json.loads(line)\n confusion_matrix[data[\"gold\"]][data[\"prediction\"]] += 1\n y_true.append(data[\"gold\"])\n y_pred.append(data[\"prediction\"])\n instances += 1\n for emotion in confusion_matrix:\n tp = confusion_matrix[emotion][emotion]\n fn = sum(\n confusion_matrix[emotion][other]\n for other in confusion_matrix[emotion]\n if other != emotion\n )\n fp = sum(\n confusion_matrix[other][emotion]\n for other in confusion_matrix\n if other != emotion\n )\n tn = instances - tp - fn - fp\n precision = tp / (tp + fp) if tp + fp else PRECISION_FALLBACK\n recall = tp / (tp + fn) if tp + fn else RECALL_FALLBACK\n f1 = (\n 2 * ((precision * recall) / (precision + recall))\n if precision and recall\n else 0\n )\n results[dataset][part][emotion] = {\n \"precision\": precision,\n \"recall\": recall,\n \"f1\": f1,\n }\n emos = list(results[dataset][part].keys())\n\n for average in [\"macro\", \"micro\"]:\n p, r, f, s = precision_recall_fscore_support(\n y_true, y_pred, zero_division=1, average=average\n )\n results[dataset][part][f\"all_{average}\"] = {\n \"precision\": p,\n \"recall\": r,\n \"f1\": f,\n }\n results[dataset][part][\"all_unweighted_mean\"] = {\n \"precision\": sum(\n results[dataset][part][emo][\"precision\"] for emo in emos\n )\n / len(emos),\n \"recall\": sum(results[dataset][part][emo][\"recall\"] for emo in emos)\n / len(emos),\n \"f1\": sum(results[dataset][part][emo][\"f1\"] for emo in emos)\n / len(emos),\n }\n results[dataset][part][\"all_weighted_mean\"] = {\n \"precision\": sum(\n results[dataset][part][emo][\"precision\"]\n * sum(\n confusion_matrix[emo][other]\n for other in confusion_matrix[emo]\n )\n for emo in emos\n )\n / instances,\n \"recall\": sum(\n results[dataset][part][emo][\"recall\"]\n * sum(\n confusion_matrix[emo][other]\n for other in confusion_matrix[emo]\n )\n for emo in emos\n )\n / instances,\n \"f1\": sum(\n results[dataset][part][emo][\"f1\"]\n * sum(\n confusion_matrix[emo][other]\n for other in confusion_matrix[emo]\n )\n for emo in emos\n )\n / instances,\n }\n\n with Path(\"workdata/indicator-experiment/results.json\").open(\"w\") as f:\n json.dump(results, f)\n" ]
[ [ "sklearn.metrics.precision_recall_fscore_support" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RikPi/bluerov2
[ "811dcca43241221b918425cf8351219d183f4c03" ]
[ "bluerov2_hmi/src/bluerov2_hmi/__init__.py" ]
[ "import rospy\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import Range, BatteryState, CameraInfo, Temperature, NavSatFix, Image\nfrom geometry_msgs.msg import PoseWithCovarianceStamped, PointStamped\nfrom std_msgs.msg import Float64, Header\nfrom mavros_msgs.msg import State\nimport numpy as np\nfrom image_geometry import PinholeCameraModel\nfrom tf2_ros import TransformListener, Buffer\nimport tf2_geometry_msgs\nfrom message_filters import TimeSynchronizer, Subscriber\nfrom bluerov2_navigation.helpers import math\nfrom pathlib import Path\nfrom PyQt5.QtWidgets import QWidget, QMessageBox\nfrom qt_gui.plugin import Plugin\nfrom python_qt_binding import loadUi\nimport rospkg\nfrom bluerov2_msgs.srv import SetAutopilot, SetAutopilotResponse, SetAutopilotRequest\nfrom bluerov2_msgs.msg import Autopilot\n\n\nclass MsgMonitor:\n \"\"\" Checks if a message has been received within a timeout. \"\"\"\n def __init__(self, timeout: float = 5.0):\n self._last_time = rospy.Time.now()\n self._timeout = timeout\n\n def is_valid(self):\n return (rospy.Time.now() - self._last_time).to_sec() < self._timeout\n\n def __call__(self, msg: rospy.AnyMsg):\n self._last_time = rospy.Time.now()\n\n\nclass AnnotationFormat:\n def __init__(self, color: tuple = (255, 255, 255), thickness: int = 2, font: int = cv2.FONT_HERSHEY_SIMPLEX):\n self.color = color\n self.thickness = thickness\n self.font = font\n\n\nclass HudOverlay:\n def __init__(self):\n self._c = 0\n self._cvbridge = CvBridge()\n # --------- Params ---------\n self._fontsize = rospy.get_param(\"fontsize\", 1.5)\n self._skip_frames = int(rospy.get_param(\"skip_frames\", 5))\n # --------- Topic Defs -------\n self._info_in_topic = \"image_in/camera_info\"\n self._image_in_topic = \"image_in/image_raw\"\n self._image_out_topic = \"image_out/image_raw\"\n self._heading_topic = \"mavros/global_position/compass_hdg\"\n self._alt_topic = \"mavros/distance_sensor/rangefinder_pub\"\n self._state_topic = \"mavros/state\"\n self._batt_topic = \"mavros/battery\"\n self._temp_topic = \"mavros/imu/temperature_baro\"\n self._pose_topic = \"waterlinked/pose_with_cov_stamped\"\n self._latlon_topic = \"mavros/global_position/global\"\n self._sog_topic = \"guidance/sog\"\n self._cog_topic = \"guidance/cog\"\n self._depth_topic = \"mavros/global_position/rel_alt\"\n # ------- MSG Defs ---------\n self._heading = Float64()\n self._sog = Float64()\n self._cog = Float64\n self._alt = Range()\n self._depth = Float64()\n self._battery = BatteryState()\n self._state = State()\n self._temperature = Temperature()\n self._pose = PoseWithCovarianceStamped()\n self._latlon = NavSatFix()\n # ------- MSG Monitors --------\n self._heading_mon = MsgMonitor()\n self._sog_mon = MsgMonitor()\n self._cog_mon = MsgMonitor()\n self._alt_mon = MsgMonitor()\n self._depth_mon = MsgMonitor()\n self._battery_mon = MsgMonitor()\n self._state_mon = MsgMonitor()\n self._temperature_mon = MsgMonitor()\n self._pose_mon = MsgMonitor()\n self._latlon_mon = MsgMonitor()\n # ------- Image Geometry ----------\n self._cam_model = PinholeCameraModel()\n self._home = None # Frame projected onto camera\n # ------- TF\n self._tf_buffer = Buffer()\n TransformListener(self._tf_buffer)\n # ---------- Annotation -------\n self._valid_annotation = AnnotationFormat()\n self._invalid_annotation = AnnotationFormat(color=(0, 0, 255))\n # ------- Subscribers -------\n self._image_in_sub = Subscriber(self._image_in_topic, Image)\n self._info_in_sub = Subscriber(self._info_in_topic, 
CameraInfo)\n ts = TimeSynchronizer([self._image_in_sub, self._info_in_sub], 10)\n ts.registerCallback(self._annotate_img)\n rospy.Subscriber(self._heading_topic, Float64, self._update_heading)\n rospy.Subscriber(self._depth_topic, Float64, self._update_depth)\n rospy.Subscriber(self._alt_topic, Range, self._update_alt)\n rospy.Subscriber(self._state_topic, State, self._update_state)\n rospy.Subscriber(self._batt_topic, BatteryState, self._update_bat)\n rospy.Subscriber(self._temp_topic, Temperature, self._update_temperature)\n rospy.Subscriber(self._pose_topic, PoseWithCovarianceStamped, self._update_pose)\n rospy.Subscriber(self._latlon_topic, NavSatFix, self._update_latlon)\n rospy.Subscriber(self._cog_topic, Float64, self._update_cog)\n rospy.Subscriber(self._sog_topic, Float64, self._update_sog)\n # ---------- Publisher --------\n self._pub = rospy.Publisher(self._image_out_topic, Image, queue_size=10)\n\n\n def _update_cog(self, msg: Float64):\n self._cog_mon(msg)\n self._cog = msg\n\n def _update_sog(self, msg: Float64):\n self._sog_mon(msg)\n self._sog = msg\n\n def _update_heading(self, msg: Float64):\n self._heading_mon(msg)\n self._heading = msg\n\n def _update_state(self, msg: State):\n self._state_mon(msg)\n self._state = msg\n\n def _update_depth(self, msg: Float64):\n self._depth_mon(msg)\n self._depth = msg\n\n def _update_alt(self, msg: Range):\n self._alt_mon(msg)\n self._alt = msg\n\n def _update_bat(self, msg: BatteryState):\n self._battery_mon(msg)\n self._battery = msg\n\n def _update_temperature(self, msg: Temperature):\n self._temperature_mon(msg)\n self._temperature = msg\n\n def _update_pose(self, msg: PoseWithCovarianceStamped):\n self._pose_mon(msg)\n self._pose = msg\n\n def _update_latlon(self, msg: NavSatFix):\n self._latlon_mon(msg)\n self._latlon = msg\n\n def _degToCompass(self, num):\n val = int((num / 22.5) + .5)\n arr = [\"N\", \"NNE\", \"NE\", \"ENE\", \"E\", \"ESE\", \"SE\", \"SSE\", \"S\", \"SSW\", \"SW\", \"WSW\", \"W\", \"WNW\", \"NW\", \"NNW\"]\n return arr[(val % 16)]\n\n def _gen_annotation(self, fmt: str=\"{}\", value: tuple = (np.inf,), anno: AnnotationFormat = AnnotationFormat()):\n st = fmt.format(*value) if value is not None else fmt.format(None)\n box, _ = cv2.getTextSize(st, anno.font, self._fontsize, anno.thickness)\n w, h = box\n b, g, r = anno.color\n return st, w, h, b, g, r\n\n def _annotate_img(self, image_msg: Image, info_msg: CameraInfo):\n if self._c % self._skip_frames == 0:\n # Convert image to opencv format\n img = self._cvbridge.imgmsg_to_cv2(image_msg)\n # Construct pinhole camera model\n self._cam_model.fromCameraInfo(info_msg)\n # Get the frame origin projected into image coordiantes\n self._home = None\n try:\n if self._tf_buffer.can_transform(self._cam_model.tf_frame, \"waterlinked\", rospy.Time.from_sec(0)):\n point = self._tf_buffer.transform(PointStamped(Header(0, \"waterlinked\", rospy.Time.now()), None),\n self._cam_model.tf_frame)\n self._home = self._cam_model.project3dToPixel(point.point.x, point.point.y, point.point.z)\n except Exception as e:\n rospy.logerr_throttle(10.0, f\"{rospy.get_name()} | {e}\")\n\n # TOP CENTRAL BOX: IMPORTANT DATA (Altitude, Heading, Depth)\n st, w, h, b, g, r = self._gen_annotation(\"Alt: {:02.1f} m\", (self._alt.range,), self._valid_annotation) if self._alt_mon.is_valid() else self._gen_annotation(\"Alt: {:02.1f} m\", (self._alt.range,), self._invalid_annotation)\n pos = (int(img.shape[1] - w/2), 10)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, 
(b,g,r), self._valid_annotation.thickness, cv2.LINE_AA)\n\n st, w1, h1, b, g, r = self._gen_annotation(\"Hdg: {:03d} {}\", (int(self._heading.data), self._degToCompass(self._heading.data)),\n self._valid_annotation) if self._heading_mon.is_valid() else self._gen_annotation(\n \"Hdg: {:03d} {}\", (self._heading.data, self._degToCompass(self._heading.data)), self._invalid_annotation)\n pos = (int(img.shape[1] - w1/2) + w + 5, 10)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b,g,r), self._valid_annotation.thickness, cv2.LINE_AA)\n\n st, w2, h2, b, g, r = self._gen_annotation(\"Dpt: {:02.1f} m\",\n (self._depth.data,),\n self._valid_annotation) if self._depth_mon.is_valid() else self._gen_annotation(\n \"Dpt: {:02.1f} m\", (self._depth.data), self._invalid_annotation)\n pos = (int(img.shape[1] - w2 / 2) + w1 + w2 + 10, 10)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n\n #BOTTOM LEFT BOX: SYSTEM HEALTH (State, Battery voltage, Temperature)\n # State ROV\n st, w, h, b, g, r = self._gen_annotation(\"Bat: {:02.1f} V\", (self._battery.voltage,),\n self._valid_annotation) if self._alt_mon.is_valid() else self._gen_annotation(\n \"Bat: {:02.1f} V\", (self._battery.voltage,), self._invalid_annotation)\n pos = (0, img.shape[0] - 3*h)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n fmt = \"St: Armed\" if self._state.armed else \"St: Disarmed\"\n st, w1, h1, b, g, r = self._gen_annotation(fmt, None,\n self._valid_annotation) if self._alt_mon.is_valid() else self._gen_annotation(\n fmt, None, self._invalid_annotation)\n pos = (0, img.shape[0] - 2*h + 5 )\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n # Temperature\n st, w2, h2, b, g, r = self._gen_annotation(\"Temp: {:02.1f} degC\", (self._temperature.temperature,),\n self._valid_annotation) if self._alt_mon.is_valid() else self._gen_annotation(\n \"Temp: {:02.1f} degC\", (self._temperature.temperature,), self._invalid_annotation)\n pos = (0, img.shape[0] - 2*h + h1 + 10 )\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n\n # TOP LEFT BOX: POSITION RELATIVE TO BOAT (LOS distance, LOS angle)\n distance = np.sqrt(self._pose.pose.pose.position.x ** 2 + self._pose.pose.pose.position.y ** 2)\n\n\n st, w, h, b, g, r = self._gen_annotation(\"Ship Distance: {:02.1f} m\", (distance,),\n self._valid_annotation) if self._pose_mon.is_valid() else self._gen_annotation(\n \"Ship distance: {:02.1f} m\", (distance,), self._invalid_annotation)\n pos = (0, 150)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n\n bearing = math.rad2deg(math.atan2(self._pose.pose.pose.position.y,\n self._pose.pose.pose.position.x)) # This is ENU, convert to NED\n bearing = 90 - bearing\n bearing = 360 + bearing if bearing < 0 else bearing\n relative_bearing = bearing - self._heading.data\n relative_bearing = relative_bearing + 360 if abs(relative_bearing) > 180 and relative_bearing < 0 else relative_bearing\n relative_bearing = relative_bearing - 360 if abs(relative_bearing) > 180 and relative_bearing > 0 else relative_bearing\n st, w1, h1, b, g, r = self._gen_annotation(\"Ship Bearing: {:02.1f} deg\", 
(relative_bearing,),\n self._valid_annotation) if self._pose_mon.is_valid() else self._gen_annotation(\n \"Ship Bearing: {:02.1f} deg\", (relative_bearing,), self._invalid_annotation)\n pos = (0, 150 + h + 20)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n\n # TOP RIGHT BOX: POSITION GLOBAL (Latitude, Longitude, SoG, CoG)\n # # Latitude\n # pos = (img.shape[1] - w/3 - 100, 150)\n # st = \"Lat: {:02.2f}deg\".format(self._latlon.latitude)\n # img = cv2.putText(img, st, (pos[0] + 2, pos[1]), font, self._fontsize, color, thickness, cv2.LINE_AA)\n # # Longitude\n # pos = (img.shape[1] - w/3 - 100, 150 + h + 20)\n # st = \"Lon: {:02.2f}deg\".format(self._latlon.longitude)\n # img = cv2.putText(img, st, (pos[0] + 2, pos[1] + 2), font, self._fontsize, color, thickness, cv2.LINE_AA)\n # # SoG\n # pos = (img.shape[1] - w/3 - 100, 150 + 2*(h + 20))\n # st = \"SoG: {:02.2f}km/h\".format(self._sogcog.sog)\n # img = cv2.putText(img, st, (pos[0] + 2, pos[1] + 2), font, self._fontsize, color, thickness, cv2.LINE_AA)\n # # CoG\n # pos = (img.shape[1] - w/3 - 100, 150 + 3*(h + 20))\n # st = \"CoG: {:02.2f}deg\".format(self._sogcog.cog)\n # img = cv2.putText(img, st, (pos[0] + 2, pos[1] + 2), font, self._fontsize, color, thickness, cv2.LINE_AA)\n\n if self._home is not None:\n self._home = [int(i) for i in self._home]\n img = cv2.circle(img, self._home, 5, (0, 255, 0), -1)\n box, baseline = cv2.getTextSize(\"SHIP\", cv2.FONT_HERSHEY_PLAIN, self._fontsize, 2)\n pos = (int(self._home[0] - box[0] / 2), self._home[1] - 5 - baseline - 3)\n img = cv2.putText(img, \"SHIP\", pos, cv2.FONT_HERSHEY_PLAIN, self._fontsize, (0, 255, 0), 2)\n\n out = self._cvbridge.cv2_to_imgmsg(img, encoding=\"bgr8\")\n self._pub.publish(out)\n self._c = 0\n else :\n self._c = self._c + 1\n\n\nclass AutopilotInterface(Plugin):\n def __init__(self, context):\n super().__init__(context)\n # Give QObjects reasonable names\n self.setObjectName('Autopilot Interface')\n # Create QWidget\n self._widget = QWidget()\n # Get path to UI file which should be in the \"resource\" folder of this package\n ui_file = str(Path(rospkg.RosPack().get_path('bluerov2_hmi') + '/resource/AutopilotInterface.ui'))\n # Extend the widget with all attributes and children from UI file\n loadUi(ui_file, self._widget)\n # Give QObjects reasonable names\n self._widget.setObjectName('AutopilotInterface')\n\n self._autopilot_msg = Autopilot()\n\n self._widget.SetAutopilotButton.clicked.connect(self.on_button_clicked)\n self._widget.AMSLRadio.clicked.connect(self.amsl_radio_clicked)\n self._widget.BottomRadio.clicked.connect(self.bottom_radio_clicked)\n self._widget.DepthRadio.clicked.connect(self.depth_radio_clicked)\n self._widget.HeightSetText.editingFinished.connect(self.height_text_finished)\n self._widget.SpeedSetText.editingFinished.connect(self.speed_text_finished)\n self._widget.HeadingDial.valueChanged.connect(self.height_dial_changed)\n\n # Show _widget.windowTitle on left-top of each plugin (when\n # it's set in _widget). This is useful when you open multiple\n # plugins at once. 
Also if you open multiple instances of your\n # plugin at once, these lines add number to make it easy to\n # tell from pane to pane.\n self._autopilot_service = rospy.ServiceProxy(\"autopilot/set\", SetAutopilot)\n if context.serial_number() > 1:\n self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))\n # Add widget to the user interface\n context.add_widget(self._widget)\n\n def amsl_radio_clicked(self):\n self._autopilot_msg.height_reference = self._autopilot_msg.MSL\n\n def bottom_radio_clicked(self):\n self._autopilot_msg.height_reference = self._autopilot_msg.BTM\n\n def depth_radio_clicked(self):\n self._autopilot_msg.height_reference = self._autopilot_msg.DPT\n\n def height_text_finished(self):\n try:\n self._autopilot_msg.Z = float(self._widget.HeightSetText.text())\n except ValueError as e:\n alert = QMessageBox()\n alert.setText(str(e))\n alert.exec()\n\n def speed_text_finished(self):\n try:\n self._autopilot_msg.U = float(self._widget.SpeedSetText.text())\n except ValueError as e:\n alert = QMessageBox()\n alert.setText(str(e))\n alert.exec()\n\n def height_dial_changed(self):\n value = self._widget.HeadingDial.value()\n if value > 180:\n value = value - 180\n else:\n value = value + 180\n self._autopilot_msg.heading = value\n self._widget.HeadingLabel.setText(\"{:03d}N deg\".format(int(self._autopilot_msg.heading)))\n\n def on_button_clicked(self):\n req = SetAutopilotRequest()\n req.settings = self._autopilot_msg\n try:\n self._autopilot_service.wait_for_service(1.0)\n res = self._autopilot_service.call(req)\n except Exception as e:\n alert = QMessageBox()\n alert.setText(str(e))\n alert.exec()\n if res.success:\n alert = QMessageBox()\n alert.setText(\"Autopilot Active\\nSpeed: {:2f} m/s\\nHeading: {:03d} deg\\nHeight: {:.2f} m {}\".format(self._autopilot_msg.U,\n int(self._autopilot_msg.heading),\n self._autopilot_msg.Z,\n self._autopilot_msg.height_reference))\n alert.exec()\n else:\n alert = QMessageBox()\n alert.setText(\"Autopilot Not Set.\")\n alert.exec()\n\n\n def shutdown_plugin(self):\n # TODO unregister all publishers here\n pass\n\n def save_settings(self, plugin_settings, instance_settings):\n # TODO save intrinsic configuration, usually using:\n # instance_settings.set_value(k, v)\n pass\n\n def restore_settings(self, plugin_settings, instance_settings):\n # TODO restore intrinsic configuration, usually using:\n # v = instance_settings.value(k)\n pass\n\n # def trigger_configuration(self):\n # Comment in to signal that the plugin has a way to configure\n # This will enable a setting button (gear icon) in each dock widget title bar\n # Usually used to open a modal configuration dialog\n" ]
[ [ "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neptune-ml/pytorch-lightning
[ "3bcaed52454f3e6c3bce5513032e34302e5b1bb6", "3bcaed52454f3e6c3bce5513032e34302e5b1bb6", "3bcaed52454f3e6c3bce5513032e34302e5b1bb6" ]
[ "tests/strategies/test_deepspeed_strategy.py", "tests/loops/test_loops.py", "pytorch_lightning/utilities/apply_func.py" ]
[ "import contextlib\nimport json\nimport logging\nimport os\nfrom typing import Any, Dict, Optional\nfrom unittest import mock\n\nimport pytest\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\nfrom torchmetrics import Accuracy\n\nfrom pytorch_lightning import LightningDataModule, LightningModule, seed_everything, Trainer\nfrom pytorch_lightning.callbacks import Callback, LearningRateMonitor, ModelCheckpoint\nfrom pytorch_lightning.plugins import DeepSpeedPrecisionPlugin\nfrom pytorch_lightning.strategies import DeepSpeedStrategy\nfrom pytorch_lightning.strategies.deepspeed import LightningDeepSpeedModule\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _DEEPSPEED_AVAILABLE\nfrom pytorch_lightning.utilities.meta import init_meta_context\nfrom tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset\nfrom tests.helpers.datamodules import ClassifDataModule\nfrom tests.helpers.runif import RunIf\n\nif _DEEPSPEED_AVAILABLE:\n import deepspeed\n from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict\n\n\nclass ModelParallelBoringModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.layer = None\n\n def configure_sharded_model(self) -> None:\n self.layer = torch.nn.Linear(32, 2)\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n self.configure_sharded_model()\n\n\nclass ModelParallelBoringModelNoSchedulers(ModelParallelBoringModel):\n def configure_optimizers(self):\n return torch.optim.SGD(self.layer.parameters(), lr=0.1)\n\n\nclass ModelParallelBoringModelManualOptim(BoringModel):\n def __init__(self):\n super().__init__()\n self.layer = None\n\n def training_step(self, batch, batch_idx):\n opt = self.optimizers()\n output = self(batch)\n loss = self.loss(batch, output)\n opt.zero_grad()\n self.manual_backward(loss)\n opt.step()\n\n def configure_sharded_model(self) -> None:\n self.layer = torch.nn.Linear(32, 2)\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n self.configure_sharded_model()\n\n @property\n def automatic_optimization(self) -> bool:\n return False\n\n\ndef test_deepspeed_lightning_module(tmpdir):\n \"\"\"Test to ensure that a model wrapped in `LightningDeepSpeedModule` moves types and device correctly.\"\"\"\n\n model = BoringModel()\n module = LightningDeepSpeedModule(model, precision=16)\n\n module.half()\n assert module.dtype == torch.half\n assert model.dtype == torch.half\n\n module.to(torch.double)\n assert module.dtype == torch.double\n assert model.dtype == torch.double\n\n\n@RunIf(min_gpus=1)\ndef test_deepspeed_lightning_module_precision(tmpdir):\n \"\"\"Test to ensure that a model wrapped in `LightningDeepSpeedModule` moves tensors to half when precision\n 16.\"\"\"\n\n model = BoringModel()\n module = LightningDeepSpeedModule(model, precision=16)\n\n module.cuda().half()\n assert module.dtype == torch.half\n assert model.dtype == torch.half\n\n x = torch.randn((1, 32), dtype=torch.float).cuda()\n out = module(x)\n\n assert out.dtype == torch.half\n\n module.to(torch.double)\n assert module.dtype == torch.double\n assert model.dtype == torch.double\n\n\[email protected]\ndef deepspeed_config():\n return {\n \"optimizer\": {\"type\": \"SGD\", \"params\": {\"lr\": 3e-5}},\n \"scheduler\": {\n \"type\": \"WarmupLR\",\n \"params\": {\"last_batch_iteration\": -1, 
\"warmup_min_lr\": 0, \"warmup_max_lr\": 3e-5, \"warmup_num_steps\": 100},\n },\n }\n\n\[email protected]\ndef deepspeed_zero_config(deepspeed_config):\n return {**deepspeed_config, \"zero_allow_untested_optimizer\": True, \"zero_optimization\": {\"stage\": 2}}\n\n\n@RunIf(deepspeed=True)\[email protected](\"strategy\", (\"deepspeed\", DeepSpeedStrategy))\ndef test_deepspeed_strategy_string(tmpdir, strategy):\n \"\"\"Test to ensure that the strategy can be passed via string or instance, and parallel devices is correctly\n set.\"\"\"\n\n trainer = Trainer(\n fast_dev_run=True, default_root_dir=tmpdir, strategy=strategy if isinstance(strategy, str) else strategy()\n )\n\n assert isinstance(trainer.strategy, DeepSpeedStrategy)\n assert trainer.strategy.parallel_devices == [torch.device(\"cpu\")]\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_strategy_env(tmpdir, monkeypatch, deepspeed_config):\n \"\"\"Test to ensure that the strategy can be passed via a string with an environment variable.\"\"\"\n config_path = os.path.join(tmpdir, \"temp.json\")\n with open(config_path, \"w\") as f:\n f.write(json.dumps(deepspeed_config))\n monkeypatch.setenv(\"PL_DEEPSPEED_CONFIG_PATH\", config_path)\n\n trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, strategy=\"deepspeed\")\n\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n assert strategy.parallel_devices == [torch.device(\"cpu\")]\n assert strategy.config == deepspeed_config\n\n\n@RunIf(deepspeed=True)\[email protected](\"precision\", [16, \"mixed\"])\[email protected](\n \"amp_backend\",\n [\"native\", pytest.param(\"apex\", marks=RunIf(amp_apex=True))],\n)\ndef test_deepspeed_precision_choice(amp_backend, precision, tmpdir):\n \"\"\"Test to ensure precision plugin is also correctly chosen.\n\n DeepSpeed handles precision via Custom DeepSpeedPrecisionPlugin\n \"\"\"\n\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n strategy=\"deepspeed\",\n amp_backend=amp_backend,\n precision=precision,\n )\n\n assert isinstance(trainer.strategy, DeepSpeedStrategy)\n assert isinstance(trainer.strategy.precision_plugin, DeepSpeedPrecisionPlugin)\n assert trainer.strategy.precision_plugin.precision == precision\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_with_invalid_config_path(tmpdir):\n \"\"\"Test to ensure if we pass an invalid config path we throw an exception.\"\"\"\n\n with pytest.raises(\n MisconfigurationException, match=\"You passed in a path to a DeepSpeed config but the path does not exist\"\n ):\n DeepSpeedStrategy(config=\"invalid_path.json\")\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_with_env_path(tmpdir, monkeypatch, deepspeed_config):\n \"\"\"Test to ensure if we pass an env variable, we load the config from the path.\"\"\"\n config_path = os.path.join(tmpdir, \"temp.json\")\n with open(config_path, \"w\") as f:\n f.write(json.dumps(deepspeed_config))\n monkeypatch.setenv(\"PL_DEEPSPEED_CONFIG_PATH\", config_path)\n strategy = DeepSpeedStrategy()\n assert strategy.config == deepspeed_config\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_defaults(tmpdir):\n \"\"\"Ensure that defaults are correctly set as a config for DeepSpeed if no arguments are passed.\"\"\"\n strategy = DeepSpeedStrategy()\n assert strategy.config is not None\n assert isinstance(strategy.config[\"zero_optimization\"], dict)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_warn_deepspeed_ignored(tmpdir):\n class TestModel(BoringModel):\n def backward(self, loss: 
Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:\n return loss.backward()\n\n model = TestModel()\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(),\n accelerator=\"gpu\",\n devices=1,\n precision=16,\n track_grad_norm=2,\n )\n from pytorch_lightning.plugins.precision.deepspeed import warning_cache\n\n with pytest.warns(UserWarning, match=\"will be ignored since DeepSpeed handles the backward\"):\n trainer.fit(model)\n assert any(\"track_grad_norm=2.0)' but this is not supported\" in w for w in warning_cache)\n\n\n@RunIf(min_gpus=1, deepspeed=True)\[email protected](\n [\"dataset_cls\", \"value\"],\n [(RandomDataset, \"auto\"), (RandomDataset, 10), (RandomIterableDataset, \"auto\"), (RandomIterableDataset, 10)],\n)\[email protected](\"deepspeed.init_distributed\", autospec=True)\[email protected](\"pytorch_lightning.Trainer.log_dir\", new_callable=mock.PropertyMock, return_value=\"abc\")\ndef test_deepspeed_auto_batch_size_config_select(mock_deepspeed_distributed, mock_log_dir, tmpdir, dataset_cls, value):\n \"\"\"Test to ensure that the batch size is correctly set as expected for deepspeed logging purposes.\"\"\"\n\n class TestModel(BoringModel):\n def train_dataloader(self):\n return DataLoader(dataset_cls(32, 64))\n\n class AssertCallback(Callback):\n def setup(self, trainer, pl_module, stage: Optional[str] = None) -> None:\n assert isinstance(trainer.strategy, DeepSpeedStrategy)\n config = trainer.strategy.config\n\n # int value overrides auto mode\n expected_value = value if isinstance(value, int) else 1\n if dataset_cls == RandomDataset:\n expected_value = pl_module.train_dataloader().batch_size if value == \"auto\" else value\n\n assert config[\"train_micro_batch_size_per_gpu\"] == expected_value\n raise SystemExit\n\n ck = AssertCallback()\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n callbacks=ck,\n gpus=1,\n strategy=DeepSpeedStrategy(logging_batch_size_per_gpu=value, zero_optimization=False),\n )\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_run_configure_optimizers(tmpdir):\n \"\"\"Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation), whilst using\n configure_optimizers for optimizers and schedulers.\"\"\"\n\n class TestCB(Callback):\n def on_train_start(self, trainer, pl_module) -> None:\n from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer\n\n assert isinstance(trainer.optimizers[0], FP16_DeepSpeedZeroOptimizer)\n assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)\n assert isinstance(trainer.lr_scheduler_configs[0].scheduler, torch.optim.lr_scheduler.StepLR)\n # check that the lr_scheduler config was preserved\n assert trainer.lr_scheduler_configs[0].name == \"Sean\"\n\n class TestModel(BoringModel):\n def configure_optimizers(self):\n [optimizer], [scheduler] = super().configure_optimizers()\n return {\"optimizer\": optimizer, \"lr_scheduler\": {\"scheduler\": scheduler, \"name\": \"Sean\"}}\n\n model = TestModel()\n lr_monitor = LearningRateMonitor()\n trainer = Trainer(\n strategy=DeepSpeedStrategy(), # disable ZeRO so our optimizers are not wrapped\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n callbacks=[TestCB(), lr_monitor],\n )\n trainer.fit(model)\n\n assert lr_monitor.lrs == {\"Sean\": [0.1]}\n\n _assert_save_model_is_equal(model, 
tmpdir, trainer)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_config(tmpdir, deepspeed_zero_config):\n \"\"\"Test to ensure deepspeed works correctly when passed a DeepSpeed config object including\n optimizers/schedulers and saves the model weights to load correctly.\"\"\"\n\n class TestCB(Callback):\n def on_train_start(self, trainer, pl_module) -> None:\n from deepspeed.runtime.lr_schedules import WarmupLR\n from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer\n\n assert isinstance(trainer.optimizers[0], FP16_DeepSpeedZeroOptimizer)\n assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)\n assert isinstance(trainer.lr_scheduler_configs[0].scheduler, WarmupLR)\n assert trainer.lr_scheduler_configs[0].interval == \"step\"\n assert trainer.lr_scheduler_configs[0].opt_idx == 0\n\n model = BoringModel()\n lr_monitor = LearningRateMonitor()\n trainer = Trainer(\n strategy=DeepSpeedStrategy(config=deepspeed_zero_config),\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n devices=1,\n log_every_n_steps=1,\n limit_train_batches=4,\n limit_val_batches=4,\n limit_test_batches=4,\n max_epochs=2,\n precision=16,\n callbacks=[TestCB(), lr_monitor],\n )\n\n trainer.fit(model)\n trainer.test(model)\n assert list(lr_monitor.lrs) == [\"lr-SGD\"]\n assert len(set(lr_monitor.lrs[\"lr-SGD\"])) == 8\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_custom_precision_params(tmpdir):\n \"\"\"Ensure if we modify the FP16 parameters via the DeepSpeedStrategy, the deepspeed config contains these\n changes.\"\"\"\n\n class TestCB(Callback):\n def on_train_start(self, trainer, pl_module) -> None:\n assert trainer.strategy.config[\"fp16\"][\"loss_scale\"] == 10\n assert trainer.strategy.config[\"fp16\"][\"initial_scale_power\"] == 10\n assert trainer.strategy.config[\"fp16\"][\"loss_scale_window\"] == 10\n assert trainer.strategy.config[\"fp16\"][\"hysteresis\"] == 10\n assert trainer.strategy.config[\"fp16\"][\"min_loss_scale\"] == 10\n raise SystemExit()\n\n model = BoringModel()\n ds = DeepSpeedStrategy(\n loss_scale=10, initial_scale_power=10, loss_scale_window=10, hysteresis=10, min_loss_scale=10\n )\n trainer = Trainer(\n default_root_dir=tmpdir, strategy=ds, precision=16, accelerator=\"gpu\", devices=1, callbacks=[TestCB()]\n )\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_custom_activation_checkpointing_params(tmpdir):\n \"\"\"Ensure if we modify the activation checkpointing parameters, the deepspeed config contains these changes.\"\"\"\n ds = DeepSpeedStrategy(\n partition_activations=True,\n cpu_checkpointing=True,\n contiguous_memory_optimization=True,\n synchronize_checkpoint_boundary=True,\n )\n checkpoint_config = ds.config[\"activation_checkpointing\"]\n assert checkpoint_config[\"partition_activations\"]\n assert checkpoint_config[\"cpu_checkpointing\"]\n assert checkpoint_config[\"contiguous_memory_optimization\"]\n assert checkpoint_config[\"synchronize_checkpoint_boundary\"]\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_custom_activation_checkpointing_params_forwarded(tmpdir):\n \"\"\"Ensure if we modify the activation checkpointing parameters, we pass these to\n deepspeed.checkpointing.configure correctly.\"\"\"\n ds = DeepSpeedStrategy(\n partition_activations=True,\n cpu_checkpointing=True,\n contiguous_memory_optimization=True,\n synchronize_checkpoint_boundary=True,\n )\n\n model = BoringModel()\n trainer = Trainer(\n 
default_root_dir=tmpdir,\n enable_progress_bar=False,\n fast_dev_run=1,\n strategy=ds,\n precision=16,\n accelerator=\"gpu\",\n devices=1,\n )\n with mock.patch(\n \"deepspeed.checkpointing.configure\", wraps=deepspeed.checkpointing.configure\n ) as deepspeed_checkpointing_configure:\n trainer.fit(model)\n\n deepspeed_checkpointing_configure.assert_called_with(\n mpu_=None, partition_activations=True, contiguous_checkpointing=True, checkpoint_in_cpu=True, profile=None\n )\n\n\n@RunIf(min_gpus=1, deepspeed=True)\ndef test_deepspeed_assert_config_zero_offload_disabled(tmpdir, deepspeed_zero_config):\n \"\"\"Ensure if we use a config and turn off offload_optimizer, that this is set to False within the config.\"\"\"\n\n deepspeed_zero_config[\"zero_optimization\"][\"offload_optimizer\"] = False\n\n class TestCallback(Callback):\n def setup(self, trainer, pl_module, stage=None) -> None:\n assert trainer.strategy.config[\"zero_optimization\"][\"offload_optimizer\"] is False\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n enable_progress_bar=False,\n max_epochs=1,\n strategy=DeepSpeedStrategy(config=deepspeed_zero_config),\n precision=16,\n gpus=1,\n callbacks=[TestCallback()],\n )\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu(tmpdir):\n \"\"\"Test to ensure that DeepSpeed with multiple GPUs works and deepspeed distributed is initialized\n correctly.\"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n with mock.patch(\"deepspeed.init_distributed\", wraps=deepspeed.init_distributed) as mock_deepspeed_distributed:\n trainer.fit(model)\n mock_deepspeed_distributed.assert_called_once()\n trainer.test(model)\n\n _assert_save_model_is_equal(model, tmpdir, trainer)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_fp32_works(tmpdir):\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir, accelerator=\"gpu\", devices=1, strategy=\"deepspeed_stage_3\", fast_dev_run=True\n )\n trainer.fit(model)\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_stage_3_save_warning(tmpdir):\n \"\"\"Test to ensure that DeepSpeed Stage 3 gives a warning when saving on rank zero.\"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n\n # both ranks need to call save checkpoint, however only rank 0 needs to check the warning\n context_manager = (\n pytest.warns(UserWarning, match=\"each worker will save a shard of the checkpoint within a directory.\")\n if trainer.is_global_zero\n else contextlib.suppress()\n )\n with context_manager:\n trainer.save_checkpoint(checkpoint_path)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_single_file(tmpdir):\n \"\"\"Test to ensure that DeepSpeed loads from a single file checkpoint.\"\"\"\n model = BoringModel()\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n trainer.fit(model)\n trainer.save_checkpoint(checkpoint_path)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n 
accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n assert not strategy.load_full_weights\n with pytest.raises(MisconfigurationException, match=\"DeepSpeed was unable to load the checkpoint.\"):\n trainer.test(model, ckpt_path=checkpoint_path)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3, load_full_weights=True),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n assert strategy.load_full_weights\n trainer.test(model, ckpt_path=checkpoint_path)\n\n\nclass ModelParallelClassificationModel(LightningModule):\n def __init__(self, lr: float = 0.01, num_blocks: int = 5):\n super().__init__()\n self.lr = lr\n self.num_blocks = num_blocks\n self.prepare_data_per_node = True\n\n self.train_acc = Accuracy()\n self.valid_acc = Accuracy()\n self.test_acc = Accuracy()\n\n def make_block(self):\n return nn.Sequential(nn.Linear(32, 32, bias=False), nn.ReLU())\n\n def configure_sharded_model(self) -> None:\n self.model = nn.Sequential(*(self.make_block() for x in range(self.num_blocks)), nn.Linear(32, 3))\n\n def forward(self, x):\n x = self.model(x)\n # Ensure output is in float32 for softmax operation\n x = x.float()\n logits = F.softmax(x, dim=1)\n return logits\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n loss = F.cross_entropy(logits, y)\n self.log(\"train_loss\", loss, prog_bar=True)\n self.log(\"train_acc\", self.train_acc(logits, y), prog_bar=True, sync_dist=True)\n return {\"loss\": loss}\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n self.log(\"val_loss\", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)\n self.log(\"val_acc\", self.valid_acc(logits, y), prog_bar=True, sync_dist=True)\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n self.log(\"test_loss\", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)\n self.log(\"test_acc\", self.test_acc(logits, y), prog_bar=True, sync_dist=True)\n\n def predict_step(self, batch, batch_idx, dataloader_idx=0):\n x, y = batch\n logits = self.forward(x)\n self.test_acc(logits, y)\n return self.test_acc.compute()\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)\n return [optimizer], [{\"scheduler\": lr_scheduler, \"interval\": \"step\"}]\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n if not hasattr(self, \"model\"):\n self.configure_sharded_model()\n\n # Lightning saves the lr schedulers, but DeepSpeed saves the optimizer states separately\n assert len(checkpoint[\"lr_schedulers\"]) == 1\n assert \"optimizer_states\" not in checkpoint\n\n\nclass ManualModelParallelClassificationModel(ModelParallelClassificationModel):\n @property\n def automatic_optimization(self) -> bool:\n return False\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n loss = F.cross_entropy(logits, y)\n opt = self.optimizers()\n self.log(\"train_loss\", loss, prog_bar=True)\n self.log(\"train_acc\", self.train_acc(logits, y), prog_bar=True, sync_dist=True)\n opt.zero_grad()\n self.manual_backward(loss)\n opt.step()\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef 
test_deepspeed_multigpu_stage_3(tmpdir, deepspeed_config):\n \"\"\"Test to ensure ZeRO Stage 3 works with a parallel model.\"\"\"\n model = ModelParallelBoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n trainer.test(model)\n\n _assert_save_model_is_equal(model, tmpdir, trainer)\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_3_manual_optimization(tmpdir, deepspeed_config):\n \"\"\"Test to ensure ZeRO Stage 3 works with a parallel model.\"\"\"\n model = ModelParallelBoringModelManualOptim()\n model.training_epoch_end = None\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n trainer.test(model)\n\n _assert_save_model_is_equal(model, tmpdir, trainer)\n\n\[email protected]((\"accumulate_grad_batches\", \"automatic_optimization\"), [(1, False), (2, True)])\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_3_checkpointing(tmpdir, automatic_optimization, accumulate_grad_batches):\n seed_everything(1)\n if automatic_optimization:\n model = ModelParallelClassificationModel()\n else:\n model = ManualModelParallelClassificationModel()\n dm = ClassifDataModule()\n ck = ModelCheckpoint(monitor=\"val_acc\", mode=\"max\", save_last=True, save_top_k=-1)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=10,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n precision=16,\n accumulate_grad_batches=accumulate_grad_batches,\n callbacks=[ck],\n )\n trainer.fit(model, datamodule=dm)\n\n results = trainer.test(datamodule=dm)\n assert results[0][\"test_acc\"] > 0.7\n saved_results = trainer.test(ckpt_path=ck.best_model_path, datamodule=dm)\n assert saved_results[0][\"test_acc\"] > 0.7\n assert saved_results == results\n\n if automatic_optimization:\n model = ModelParallelClassificationModel()\n else:\n model = ManualModelParallelClassificationModel()\n trainer = Trainer(default_root_dir=tmpdir, gpus=2, strategy=DeepSpeedStrategy(stage=3), precision=16)\n\n results = trainer.test(model, datamodule=dm, ckpt_path=ck.best_model_path)\n assert results[0][\"test_acc\"] > 0.7\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_3_warns_resume_training(tmpdir):\n \"\"\"Test to ensure with Stage 3 and multiple GPUs that we can resume from training, throwing a warning that the\n optimizer state and scheduler states cannot be restored.\"\"\"\n dm = ClassifDataModule()\n model = BoringModel()\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n trainer.fit(model)\n trainer.save_checkpoint(checkpoint_path)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n strategy=DeepSpeedStrategy(stage=3, load_full_weights=True),\n accelerator=\"gpu\",\n devices=1,\n precision=16,\n )\n with pytest.warns(\n UserWarning,\n match=\"A single checkpoint file has been given. This means optimizer states cannot be restored. 
\"\n \"If you'd like to restore these states, you must \"\n \"provide a path to the originally saved DeepSpeed checkpoint.\",\n ):\n trainer.fit(model, datamodule=dm, ckpt_path=checkpoint_path)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_3_resume_training(tmpdir):\n \"\"\"Test to ensure with Stage 3 and single GPU that we can resume training.\"\"\"\n initial_model = ModelParallelClassificationModel()\n dm = ClassifDataModule()\n\n ck = ModelCheckpoint(monitor=\"val_acc\", mode=\"max\", save_last=True, save_top_k=-1)\n initial_trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=2,\n limit_val_batches=2,\n limit_test_batches=2,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=1,\n precision=16,\n callbacks=[ck],\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n initial_trainer.fit(initial_model, datamodule=dm)\n\n class TestCallback(Callback):\n def on_train_batch_start(\n self, trainer: Trainer, pl_module: LightningModule, batch: Any, batch_idx: int\n ) -> None:\n original_deepspeed_strategy = initial_trainer.strategy\n current_deepspeed_strategy = trainer.strategy\n\n assert isinstance(original_deepspeed_strategy, DeepSpeedStrategy)\n assert isinstance(current_deepspeed_strategy, DeepSpeedStrategy)\n # assert optimizer states are the correctly loaded\n original_optimizer_dict = original_deepspeed_strategy.deepspeed_engine.optimizer.state_dict()\n current_optimizer_dict = current_deepspeed_strategy.deepspeed_engine.optimizer.state_dict()\n for orig_tensor, current_tensor in zip(\n original_optimizer_dict[\"fp32_flat_groups\"], current_optimizer_dict[\"fp32_flat_groups\"]\n ):\n assert torch.all(orig_tensor.eq(current_tensor))\n # assert model state is loaded correctly\n for current_param, initial_param in zip(pl_module.parameters(), initial_model.parameters()):\n assert torch.equal(current_param.cpu(), initial_param.cpu())\n # assert epoch has correctly been restored\n assert trainer.current_epoch == 1\n\n # assert lr-scheduler states are loaded correctly\n original_lr_scheduler = initial_trainer.lr_scheduler_configs[0].scheduler\n current_lr_scheduler = trainer.lr_scheduler_configs[0].scheduler\n assert original_lr_scheduler.state_dict() == current_lr_scheduler.state_dict()\n\n model = ModelParallelClassificationModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=1,\n precision=16,\n callbacks=TestCallback(),\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model, datamodule=dm, ckpt_path=ck.best_model_path)\n\n\[email protected](\"offload_optimizer\", [False, True])\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer):\n \"\"\"Test to ensure with Stage 2 and multiple GPUs, accumulated grad batches works.\"\"\"\n seed_everything(42)\n\n class VerificationCallback(Callback):\n def __init__(self):\n self.on_train_batch_start_called = False\n\n def on_train_batch_start(self, trainer, pl_module: LightningModule, batch: Any, batch_idx: int) -> None:\n deepspeed_engine = trainer.strategy.model\n assert trainer.global_step == deepspeed_engine.global_steps\n self.on_train_batch_start_called = True\n\n model = ModelParallelClassificationModel()\n dm = ClassifDataModule()\n verification_callback = VerificationCallback()\n trainer = Trainer(\n default_root_dir=tmpdir,\n 
enable_progress_bar=False,\n # TODO: this test fails with max_epochs >1 as there are leftover batches per epoch.\n # there's divergence in how Lightning handles the last batch of the epoch with how DeepSpeed does it.\n # we step the optimizers on the last batch but DeepSpeed keeps the accumulation for the next epoch\n max_epochs=1,\n strategy=DeepSpeedStrategy(stage=2, offload_optimizer=offload_optimizer),\n accelerator=\"gpu\",\n devices=2,\n limit_train_batches=5,\n limit_val_batches=2,\n precision=16,\n accumulate_grad_batches=2,\n callbacks=[verification_callback],\n )\n assert trainer.limit_train_batches % trainer.accumulate_grad_batches != 0, \"leftover batches should be tested\"\n trainer.fit(model, datamodule=dm)\n assert verification_callback.on_train_batch_start_called\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_test(tmpdir):\n \"\"\"Test to ensure we can use DeepSpeed with just test using ZeRO Stage 3.\"\"\"\n model = ModelParallelBoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.test(model)\n\n\n# TODO(Sean): Once partial parameter partitioning is supported this test should be re-enabled\[email protected](\"Partial parameter partitioning for DeepSpeed is currently broken.\")\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_partial_partition_parameters(tmpdir):\n \"\"\"Test to ensure that a module that defines a layer inside the ``__init__`` and ``configure_sharded_model``\n correctly converts all parameters to float16 when ``precision=16`` and runs successfully.\"\"\"\n\n class TestModel(ModelParallelBoringModel):\n def __init__(self):\n super().__init__()\n self.layer_2 = torch.nn.Linear(32, 32)\n\n def configure_sharded_model(self) -> None:\n self.layer = torch.nn.Linear(32, 2)\n\n def forward(self, x):\n x = self.layer_2(x)\n return self.layer(x)\n\n def on_train_epoch_start(self) -> None:\n assert all([x.dtype == torch.float16 for x in self.parameters()])\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_test_rnn(tmpdir):\n \"\"\"Test to ensure that turning off explicit partitioning of the entire module for ZeRO Stage 3 works when\n training with certain layers which will crash with explicit partitioning.\"\"\"\n\n class TestModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.rnn = torch.nn.GRU(32, 32)\n\n def on_train_epoch_start(self) -> None:\n assert all([x.dtype == torch.float16 for x in self.parameters()])\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n\n\n@RunIf(deepspeed=True)\[email protected](\"deepspeed.init_distributed\", autospec=True)\[email protected](\"platform\", [\"Linux\", \"Windows\"])\ndef test_deepspeed_strategy_env_variables(mock_deepspeed_distributed, tmpdir, platform):\n \"\"\"Test to ensure that we setup distributed communication using correctly.\n\n When using windows, ranks environment variables should not be set, and deepspeed should handle this.\n \"\"\"\n trainer = Trainer(default_root_dir=tmpdir, 
strategy=DeepSpeedStrategy(stage=3))\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n with mock.patch(\"platform.system\", return_value=platform) as mock_platform:\n strategy._init_deepspeed_distributed()\n mock_deepspeed_distributed.assert_called()\n mock_platform.assert_called()\n if platform == \"Windows\":\n # assert no env variables have been set within the DeepSpeedStrategy\n assert all(k not in os.environ for k in (\"MASTER_PORT\", \"MASTER_ADDR\", \"RANK\", \"WORLD_SIZE\", \"LOCAL_RANK\"))\n else:\n assert os.environ[\"MASTER_ADDR\"] == str(trainer.strategy.cluster_environment.main_address)\n assert os.environ[\"MASTER_PORT\"] == str(trainer.strategy.cluster_environment.main_port)\n assert os.environ[\"RANK\"] == str(trainer.strategy.global_rank)\n assert os.environ[\"WORLD_SIZE\"] == str(trainer.strategy.world_size)\n assert os.environ[\"LOCAL_RANK\"] == str(trainer.strategy.local_rank)\n\n\ndef _assert_save_model_is_equal(model, tmpdir, trainer):\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n checkpoint_path = trainer.strategy.broadcast(checkpoint_path)\n trainer.save_checkpoint(checkpoint_path)\n trainer.strategy.barrier()\n\n # carry out the check only on rank 0\n if trainer.is_global_zero:\n single_ckpt_path = os.path.join(tmpdir, \"single_model.pt\")\n convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, single_ckpt_path)\n state_dict = torch.load(single_ckpt_path)\n\n model = model.cpu()\n # Assert model parameters are identical after loading\n for orig_param, saved_model_param in zip(model.parameters(), state_dict.values()):\n if model.dtype == torch.half:\n # moved model to float32 for comparison with single fp32 saved weights\n saved_model_param = saved_model_param.half()\n assert torch.equal(orig_param, saved_model_param)\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_no_schedulers(tmpdir):\n \"\"\"Test to ensure ZeRO Stage 3 works with a parallel model and no schedulers.\"\"\"\n model = ModelParallelBoringModelNoSchedulers()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n\n _assert_save_model_is_equal(model, tmpdir, trainer)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_skip_backward_raises(tmpdir):\n class TestModel(BoringModel):\n def training_step(self, batch, batch_idx):\n return None\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n with pytest.raises(MisconfigurationException, match=\"returning `None` .* is not supported\"):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_setup_train_dataloader(tmpdir):\n \"\"\"Test DeepSpeed works when setup is required to call in the DataModule.\"\"\"\n\n class TestSetupIsCalledDataModule(LightningDataModule):\n def __init__(self):\n super().__init__()\n self._setup = False\n\n def setup(self, stage: Optional[str] = None) -> None:\n self._setup = True\n\n def train_dataloader(self):\n assert self._setup\n return DataLoader(RandomDataset(32, 64), batch_size=2)\n\n def val_dataloader(self):\n assert self._setup\n return DataLoader(RandomDataset(32, 64), batch_size=2)\n\n def test_dataloader(self):\n assert self._setup\n return DataLoader(RandomDataset(32, 64), batch_size=2)\n\n model = 
BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(logging_level=logging.INFO),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n )\n dm = TestSetupIsCalledDataModule()\n with mock.patch(\"deepspeed.utils.logging.logger.warning\", autospec=True) as mock_object:\n trainer.fit(model, datamodule=dm)\n assert any(\"Tried to infer the batch size\" in str(arg) for arg in mock_object.call_args_list)\n\n\[email protected](\"torch.optim.lr_scheduler.StepLR.step\", autospec=True)\[email protected](\"interval\", [\"step\", \"epoch\"])\[email protected](\"max_epoch\", [2])\[email protected](\"limit_train_batches\", [2])\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_scheduler_step_count(mock_step, max_epoch, limit_train_batches, interval):\n \"\"\"Test to ensure that the scheduler is called the correct amount of times during training when scheduler is\n set to step or epoch.\"\"\"\n\n class TestModel(BoringModel):\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\"scheduler\": scheduler, \"interval\": interval},\n }\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=limit_train_batches,\n limit_val_batches=0,\n max_epochs=max_epoch,\n accelerator=\"gpu\",\n devices=1,\n strategy=\"deepspeed\",\n )\n trainer.fit(model)\n if interval == \"epoch\":\n # assert called once at init and once during training\n assert mock_step.call_count == 1 + max_epoch\n else:\n # assert called once at init and once during training\n assert mock_step.call_count == 1 + (max_epoch * limit_train_batches)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_configure_gradient_clipping(tmpdir):\n \"\"\"Test to ensure that a warning is raised when `LightningModule.configure_gradient_clipping` is overridden in\n case of deepspeed.\"\"\"\n\n class TestModel(BoringModel):\n def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm):\n if optimizer_idx == 0:\n self.clip_gradients(optimizer, gradient_clip_val, gradient_clip_algorithm)\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n devices=1,\n strategy=\"deepspeed\",\n fast_dev_run=True,\n )\n with pytest.warns(UserWarning, match=\"handles gradient clipping internally\"):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_gradient_clip_by_value(tmpdir):\n \"\"\"Test to ensure that an exception is raised when using `gradient_clip_algorithm='value'`.\"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n devices=1,\n strategy=\"deepspeed\",\n gradient_clip_algorithm=\"value\",\n )\n with pytest.raises(MisconfigurationException, match=\"does not support clipping gradients by value\"):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_different_accumulate_grad_batches_fails(tmpdir):\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir, accumulate_grad_batches={1: 2}, accelerator=\"gpu\", devices=1, strategy=\"deepspeed\"\n )\n with pytest.raises(\n MisconfigurationException, match=\"DeepSpeed currently does not support different `accumulate_grad_batches`\"\n ):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=2, standalone=True, 
deepspeed=True)\ndef test_specific_gpu_device_id(tmpdir):\n class TestCallback(Callback):\n def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:\n assert model.device.index == 1\n\n def on_train_batch_start(\n self,\n trainer: Trainer,\n pl_module: LightningModule,\n batch: Any,\n batch_idx: int,\n ) -> None:\n assert batch.device.index == 1\n\n def on_test_start(self, trainer: Trainer, pl_module: LightningModule) -> None:\n assert model.device.index == 1\n\n def on_test_batch_start(\n self,\n trainer: Trainer,\n pl_module: LightningModule,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int,\n ) -> None:\n assert batch.device.index == 1\n\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=[1],\n strategy=\"deepspeed\",\n callbacks=TestCallback(),\n )\n trainer.fit(model)\n trainer.test(model)\n\n\n@RunIf(min_gpus=2, min_torch=\"1.10.0\", standalone=True, deepspeed=True)\ndef test_deepspeed_with_meta_device(tmpdir):\n with init_meta_context():\n model = BoringModel()\n assert model.layer.weight.device.type == \"meta\"\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n assert model.layer.weight.device.type == \"cpu\"\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multi_save_same_filepath(tmpdir):\n \"\"\"Test that verifies that deepspeed saves only latest checkpoint in the specified path and deletes the old\n sharded checkpoints.\"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=\"deepspeed\",\n accelerator=\"gpu\",\n devices=2,\n callbacks=[ModelCheckpoint(save_top_k=1, save_last=True)],\n limit_train_batches=1,\n limit_val_batches=0,\n num_sanity_val_steps=0,\n max_epochs=2,\n )\n trainer.fit(model)\n ckpt_path = os.path.join(trainer.checkpoint_callback.dirpath, \"last.ckpt\")\n expected = [\"latest\", \"zero_to_fp32.py\", \"checkpoint\"]\n assert set(expected) == set(os.listdir(ckpt_path))\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Iterator\nfrom unittest import mock\nfrom unittest.mock import ANY\n\nimport pytest\nimport torch\nfrom torch.utils.data.dataloader import _MultiProcessingDataLoaderIter, DataLoader\n\nfrom pytorch_lightning import LightningModule, Trainer\nfrom pytorch_lightning.callbacks import Callback, ModelCheckpoint\nfrom pytorch_lightning.loops import EvaluationLoop, Loop, TrainingBatchLoop, TrainingEpochLoop\nfrom pytorch_lightning.trainer.progress import BaseProgress\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.helpers import BoringModel, RandomDataset\nfrom tests.helpers.runif import RunIf\n\n\nclass NestedLoop(Loop):\n def __init__(self):\n super().__init__()\n 
self.child_loop0 = None\n self.child_loop1 = None\n\n @property\n def done(self) -> bool:\n return False\n\n def connect(self, child0, child1):\n self.child_loop0 = child0\n self.child_loop1 = child1\n\n def reset(self) -> None:\n pass\n\n def advance(self, *args, **kwargs):\n pass\n\n\[email protected](\"loop_name\", [\"fit_loop\", \"validate_loop\", \"test_loop\", \"predict_loop\"])\ndef test_connect_loops_direct(loop_name):\n \"\"\"Test Trainer references in loops on assignment.\"\"\"\n loop = NestedLoop()\n\n with pytest.raises(RuntimeError, match=\"The loop is not attached to a Trainer\"):\n _ = loop.trainer\n\n trainer = Trainer()\n\n # trainer.loop_name = loop\n setattr(trainer, loop_name, loop)\n assert loop.trainer is trainer\n\n\ndef test_connect_loops_recursive():\n \"\"\"Test Trainer references in a nested loop assigned to a Trainer.\"\"\"\n main_loop = NestedLoop()\n child0 = NestedLoop()\n child1 = NestedLoop()\n main_loop.connect(child0, child1)\n\n with pytest.raises(RuntimeError, match=\"The loop is not attached to a Trainer\"):\n _ = main_loop.trainer\n\n with pytest.raises(RuntimeError, match=\"The loop is not attached to a Trainer\"):\n _ = main_loop.child_loop0.trainer\n\n trainer = Trainer()\n trainer.fit_loop = main_loop\n assert child0.trainer is trainer\n assert child1.trainer is trainer\n\n\ndef test_restarting_loops_recursive():\n class MyLoop(NestedLoop):\n def __init__(self, loop=None):\n super().__init__()\n self.child = loop\n\n loop = MyLoop(MyLoop(MyLoop()))\n\n assert not loop.restarting\n assert not loop.child.restarting\n assert not loop.child.child.restarting\n loop.restarting = True\n assert loop.restarting\n assert loop.child.restarting\n assert loop.child.child.restarting\n\n\ndef test_connect_subloops(tmpdir):\n \"\"\"Test connecting individual subloops by calling `trainer.x.y.connect()`\"\"\"\n model = BoringModel()\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n\n epoch_loop = trainer.fit_loop.epoch_loop\n new_batch_loop = TrainingBatchLoop()\n epoch_loop.connect(batch_loop=new_batch_loop)\n assert epoch_loop.batch_loop is new_batch_loop\n\n with pytest.raises(RuntimeError, match=\"The loop is not attached to a Trainer\"):\n _ = new_batch_loop.trainer\n\n trainer.fit(model)\n assert new_batch_loop.trainer is trainer\n\n\ndef test_replace_loops():\n class TestLoop(TrainingEpochLoop):\n def __init__(self, foo):\n super().__init__()\n\n trainer = Trainer(min_steps=123, max_steps=321)\n\n with pytest.raises(\n MisconfigurationException, match=r\"FitLoop.replace\\(TestLoop\\)`.*`__init__`.*`TrainingEpochLoop`\"\n ):\n trainer.fit_loop.replace(epoch_loop=TestLoop)\n\n class TestLoop(TrainingEpochLoop):\n ...\n\n # test passing a loop where previous state should be connected\n old_loop = trainer.fit_loop.epoch_loop\n trainer.fit_loop.replace(epoch_loop=TestLoop)\n new_loop = trainer.fit_loop.epoch_loop\n\n assert isinstance(new_loop, TestLoop)\n assert trainer.fit_loop.epoch_loop is new_loop\n assert new_loop.min_steps == 123\n assert new_loop.max_steps == 321\n assert new_loop.batch_loop is old_loop.batch_loop\n assert new_loop.val_loop is old_loop.val_loop\n assert new_loop.trainer is trainer\n\n class MyBatchLoop(TrainingBatchLoop):\n ...\n\n class MyEvalLoop(EvaluationLoop):\n ...\n\n # test passing more than one where one is an instance and the other a class\n trainer.fit_loop.epoch_loop.replace(batch_loop=MyBatchLoop, val_loop=MyEvalLoop())\n new_batch_loop = trainer.fit_loop.epoch_loop.batch_loop\n new_val_loop = 
trainer.fit_loop.epoch_loop.val_loop\n\n assert isinstance(new_batch_loop, MyBatchLoop)\n assert isinstance(new_val_loop, MyEvalLoop)\n\n\nclass CustomException(Exception):\n pass\n\n\ndef test_loop_restore():\n class Simple(Loop):\n def __init__(self, dataset: Iterator):\n super().__init__()\n self.iteration_count = 0\n self.dataset = dataset\n\n @property\n def skip(self) -> bool:\n return False\n\n @property\n def done(self) -> bool:\n return self.iteration_count > len(self.dataset)\n\n def reset(self) -> None:\n self.iter_dataset = iter(self.dataset)\n if self.restarting:\n for _ in range(self.iteration_count):\n next(self.iter_dataset)\n self.iteration_count += 1\n else:\n self.outputs = []\n\n def advance(self) -> None:\n value = next(self.iter_dataset)\n\n if self.iteration_count == 5:\n raise CustomException\n\n self.outputs.append(value)\n\n def on_advance_end(self) -> None:\n self.iteration_count += 1\n\n def state_dict(self) -> Dict:\n return {\"iteration_count\": self.iteration_count, \"outputs\": self.outputs}\n\n def load_state_dict(self, state_dict: Dict) -> None:\n self.iteration_count = state_dict[\"iteration_count\"]\n self.outputs = state_dict[\"outputs\"]\n\n trainer = Trainer()\n\n data = range(10)\n loop = Simple(data)\n loop.trainer = trainer\n try:\n loop.run()\n state_dict = {}\n except CustomException:\n state_dict = loop.state_dict()\n\n loop = Simple(data)\n loop.trainer = trainer\n loop.load_state_dict(state_dict)\n loop.restarting = True\n loop.run()\n\n assert not loop.restarting\n assert loop.outputs == list(range(10))\n\n\ndef test_loop_hierarchy():\n @dataclass\n class SimpleProgress(BaseProgress):\n increment: int = 0\n\n class Simple(Loop):\n def __init__(self, a):\n super().__init__()\n self.a = a\n self.progress = SimpleProgress()\n\n def advance(self, *args: Any, **kwargs: Any) -> None:\n loop = getattr(self, \"loop_child\", None)\n if not loop:\n return\n loop.run()\n\n def on_advance_end(self):\n self.progress.increment += 1\n\n @property\n def done(self) -> bool:\n return self.progress.increment > 0\n\n def reset(self) -> None:\n ...\n\n def on_save_checkpoint(self) -> Dict:\n return {\"a\": self.a}\n\n def on_load_checkpoint(self, state_dict: Dict) -> None:\n self.a = state_dict[\"a\"]\n\n loop_parent = Simple(1)\n loop_child = Simple(2)\n loop_parent.loop_child = loop_child\n\n # check the trainer reference is propagated\n loop_parent.trainer = Trainer()\n assert loop_child.trainer is loop_parent.trainer\n\n state_dict = loop_parent.state_dict()\n assert state_dict == {\n \"state_dict\": {\"a\": 1},\n \"progress\": {\"increment\": 0},\n \"loop_child.state_dict\": {\"a\": 2},\n \"loop_child.progress\": {\"increment\": 0},\n }\n\n state_dict[\"loop_child.state_dict\"][\"a\"] = 3\n # check restarting after `load_state_dict`\n loop_parent.load_state_dict(state_dict)\n assert loop_parent.restarting\n\n loop_parent.run()\n\n # check the new state after `run`\n state_dict = loop_parent.state_dict()\n assert state_dict == {\n \"state_dict\": {\"a\": 1},\n \"progress\": {\"increment\": 1},\n \"loop_child.state_dict\": {\"a\": 3},\n \"loop_child.progress\": {\"increment\": 1},\n }\n\n loop_parent_copy = deepcopy(loop_parent)\n assert loop_parent_copy.state_dict() == loop_parent.state_dict()\n\n assert loop_parent_copy.on_save_checkpoint() == state_dict[\"state_dict\"]\n assert loop_parent_copy.loop_child.on_save_checkpoint() == state_dict[\"loop_child.state_dict\"]\n\n loop_parent = Simple(1)\n loop_child = Simple(2)\n loop_parent.loop_child = loop_child\n 
loop_parent.load_state_dict(state_dict)\n assert loop_parent.progress.increment == 1\n assert loop_parent.loop_child.progress.increment == 1\n\n del loop_parent.loop_child\n state_dict = loop_parent.state_dict()\n assert state_dict == {\"state_dict\": {\"a\": 1}, \"progress\": {\"increment\": 1}}\n\n\[email protected](os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": \"1\"})\[email protected](\"stop_epoch\", (1, 2))\[email protected](\"stop_batch\", (1, 2))\[email protected](\"n_dataloaders,stop_dataloader\", [(2, 0), (2, 1), (3, 2)])\ndef test_loop_restart_progress_multiple_dataloaders(tmpdir, n_dataloaders, stop_dataloader, stop_epoch, stop_batch):\n n_batches = 5\n n_epochs = 3\n\n class ValidationModel(BoringModel):\n def __init__(self):\n super().__init__()\n\n def validation_step(self, batch, batch_idx, dataloader_idx):\n if self.current_epoch == stop_epoch and batch_idx == stop_batch and dataloader_idx == stop_dataloader:\n raise CustomException\n return super().validation_step(batch, batch_idx)\n\n def val_dataloader(self):\n return [super(ValidationModel, self).val_dataloader() for _ in range(n_dataloaders)]\n\n model = ValidationModel()\n model.validation_epoch_end = None\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=n_epochs,\n limit_train_batches=1,\n limit_val_batches=n_batches,\n )\n\n # simulate a failure\n with pytest.raises(CustomException):\n trainer.fit(model)\n\n ckpt_path = str(tmpdir / \".pl_auto_save.ckpt\")\n checkpoint = torch.load(ckpt_path)[\"loops\"][\"fit_loop\"]\n\n total_dataloader = stop_epoch * n_dataloaders + stop_dataloader\n expected = {\n \"total\": {\"ready\": total_dataloader + 1, \"completed\": total_dataloader},\n \"current\": {\"ready\": stop_dataloader + 1, \"completed\": stop_dataloader},\n }\n assert checkpoint[\"epoch_loop.val_loop.dataloader_progress\"] == expected\n\n trainer.fit_loop.load_state_dict(checkpoint)\n\n # `nbe_`: non-breaking epoch, as in, no exception will be raised. 
`be_`: breaking epoch\n # the fit-validation total batch progress is reset per epoch so it's not counted for the total value.\n nbe_total_val_batch = 0 # stop_epoch * n_dataloaders * n_batches\n be_total_val_batch = stop_dataloader * n_batches + stop_batch\n total_val_batch = nbe_total_val_batch + be_total_val_batch\n expected = {\n \"total\": {\n \"ready\": total_val_batch + 1,\n \"started\": total_val_batch + 1,\n \"processed\": total_val_batch,\n \"completed\": total_val_batch,\n },\n \"current\": {\n \"ready\": stop_batch + 1,\n \"started\": stop_batch + 1,\n \"processed\": stop_batch,\n \"completed\": stop_batch,\n },\n \"is_last_batch\": False,\n }\n assert trainer.fit_loop.epoch_loop.val_loop.epoch_loop.batch_progress.state_dict() == expected\n\n\[email protected](os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": \"1\"})\[email protected](\"accumulate_grad_batches\", (1, 2, 3))\[email protected](\"n_optimizers\", (1, 3, 5))\[email protected](\"stop_epoch\", (1, 2))\[email protected](\"stop_batch\", (1, 2))\[email protected](\"stop_optimizer\", (1, 2))\ndef test_loop_state_on_exception(accumulate_grad_batches, stop_epoch, stop_batch, stop_optimizer, n_optimizers, tmpdir):\n stop_optimizer = stop_optimizer if stop_optimizer < n_optimizers else 0\n n_epochs = 3\n n_batches = 3\n\n class TestModel(BoringModel):\n def __init__(self):\n super().__init__()\n if n_optimizers > 1:\n self.configure_optimizers = self.configure_optimizers_multiple\n\n def training_step(self, batch, batch_idx, optimizer_idx=0):\n if self.trainer.current_epoch == stop_epoch and batch_idx == stop_batch and optimizer_idx == stop_optimizer:\n raise CustomException\n return super().training_step(batch, batch_idx)\n\n def configure_optimizers_multiple(self):\n optimizers = [torch.optim.Adam(self.layer.parameters(), lr=0.1) for _ in range(n_optimizers)]\n\n lr_scheduler_0 = torch.optim.lr_scheduler.StepLR(optimizers[0], step_size=1)\n lr_scheduler_1 = torch.optim.lr_scheduler.StepLR(optimizers[1], step_size=1)\n # no scheduler for optimizer_2\n lr_schedulers = [lr_scheduler_0, {\"scheduler\": lr_scheduler_1, \"interval\": \"step\"}]\n\n return optimizers, lr_schedulers\n\n model = TestModel()\n model.training_epoch_end = None\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=n_epochs,\n limit_train_batches=n_batches,\n limit_val_batches=0,\n accumulate_grad_batches=accumulate_grad_batches,\n enable_progress_bar=False,\n logger=False,\n enable_checkpointing=False,\n )\n\n # simulate a failure\n with pytest.raises(CustomException):\n trainer.fit(model)\n\n ckpt_path = str(tmpdir / \".pl_auto_save.ckpt\")\n assert os.path.exists(ckpt_path)\n checkpoint = torch.load(ckpt_path)\n\n optim_progress = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.optim_progress\n sch_progress = trainer.fit_loop.epoch_loop.scheduler_progress\n\n # `nbe_`: non-breaking epoch, as in, no exception will be raised. 
`be_`: breaking epoch\n nbe_batches_completed = stop_epoch * n_batches\n be_batches_completed = stop_batch\n be_batches_ready = stop_batch + 1\n # lightning applies leftover accumulated gradients when the epoch ends\n has_leftover_accumulation_batches = n_batches % accumulate_grad_batches != 0\n # number of batches that will call `optimizer.step()` during non-breaking and breaking epochs\n nbe_stepping_batches = nbe_batches_completed // accumulate_grad_batches\n be_stepping_batches = be_batches_completed // accumulate_grad_batches\n\n nbe_total_opt_steps = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers\n does_last_be_batch_step = be_batches_ready % accumulate_grad_batches == 0 or has_leftover_accumulation_batches\n be_total_opt_steps = be_stepping_batches * n_optimizers + does_last_be_batch_step * stop_optimizer\n assert optim_progress.optimizer_steps == nbe_total_opt_steps + be_total_opt_steps\n assert optim_progress.optimizer.step.current.completed == be_total_opt_steps\n has_opt_stepped_in_be = stop_batch + 1 >= accumulate_grad_batches\n\n nbe_total_zero_grad = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers\n does_last_be_batch_zero_grad = be_batches_completed % accumulate_grad_batches == 0\n # `max` because the first batch always zero-grads\n be_total_zero_grad = max(1, be_stepping_batches) * n_optimizers + stop_optimizer * does_last_be_batch_zero_grad\n assert optim_progress.optimizer.zero_grad.total.completed == nbe_total_zero_grad + be_total_zero_grad\n assert optim_progress.optimizer.zero_grad.current.completed == be_total_zero_grad\n\n nbe_sch_steps = stop_epoch\n be_sch_steps = 0 # the current epoch did not complete\n if n_optimizers > 1:\n # assumes that the scheduler config is unchanged\n # `* 1` because there is only one step-level scheduler\n nbe_sch_steps = stop_epoch + nbe_stepping_batches + has_leftover_accumulation_batches * 1\n # `0 +` for the epoch-level scheduler\n be_sch_steps = 0 + be_stepping_batches\n assert sch_progress.total.completed == nbe_sch_steps + be_sch_steps\n assert sch_progress.current.completed == be_sch_steps\n\n expected = {\n \"state_dict\": ANY,\n \"epoch_progress\": {\n \"total\": {\n \"ready\": stop_epoch + 1,\n \"started\": stop_epoch + 1,\n \"processed\": stop_epoch,\n \"completed\": stop_epoch,\n },\n \"current\": {\n \"ready\": stop_epoch + 1,\n \"started\": stop_epoch + 1,\n \"processed\": stop_epoch,\n \"completed\": stop_epoch,\n },\n },\n \"epoch_loop.state_dict\": ANY,\n \"epoch_loop.batch_progress\": {\n \"total\": {\n \"ready\": nbe_batches_completed + be_batches_completed + 1,\n \"started\": nbe_batches_completed + be_batches_completed + 1,\n \"processed\": nbe_batches_completed + be_batches_completed,\n \"completed\": nbe_batches_completed + be_batches_completed,\n },\n \"current\": {\n \"ready\": stop_batch + 1,\n \"started\": stop_batch + 1,\n \"processed\": stop_batch,\n \"completed\": stop_batch,\n },\n \"is_last_batch\": False,\n },\n \"epoch_loop.scheduler_progress\": {\n \"total\": {\"ready\": nbe_sch_steps + be_sch_steps, \"completed\": nbe_sch_steps + be_sch_steps},\n \"current\": {\"ready\": be_sch_steps, \"completed\": be_sch_steps},\n },\n \"epoch_loop.batch_loop.state_dict\": ANY,\n \"epoch_loop.batch_loop.manual_loop.state_dict\": ANY,\n \"epoch_loop.batch_loop.manual_loop.optim_step_progress\": {\n \"total\": {\"ready\": 0, \"completed\": 0},\n \"current\": {\"ready\": 0, \"completed\": 0},\n },\n \"epoch_loop.batch_loop.optimizer_loop.state_dict\": {},\n 
\"epoch_loop.batch_loop.optimizer_loop.optim_progress\": {\n \"optimizer_position\": stop_optimizer,\n \"optimizer\": {\n \"step\": {\n \"total\": {\n \"ready\": nbe_total_opt_steps + be_total_opt_steps + has_opt_stepped_in_be,\n \"completed\": nbe_total_opt_steps + be_total_opt_steps,\n },\n \"current\": {\"ready\": be_total_opt_steps + has_opt_stepped_in_be, \"completed\": be_total_opt_steps},\n },\n \"zero_grad\": {\n \"total\": {\n \"ready\": nbe_total_zero_grad + be_total_zero_grad,\n \"started\": nbe_total_zero_grad + be_total_zero_grad,\n \"completed\": nbe_total_zero_grad + be_total_zero_grad,\n },\n \"current\": {\n \"ready\": be_total_zero_grad,\n \"started\": be_total_zero_grad,\n \"completed\": be_total_zero_grad,\n },\n },\n },\n },\n \"epoch_loop.val_loop.state_dict\": ANY,\n \"epoch_loop.val_loop.dataloader_progress\": ANY,\n \"epoch_loop.val_loop.epoch_loop.state_dict\": ANY,\n \"epoch_loop.val_loop.epoch_loop.batch_progress\": ANY,\n \"epoch_loop.val_loop._results\": ANY,\n \"epoch_loop._results\": ANY,\n }\n assert checkpoint[\"loops\"][\"fit_loop\"] == expected\n\n trainer.fit_loop.load_state_dict(checkpoint[\"loops\"][\"fit_loop\"])\n state_dict = trainer.fit_loop.state_dict()\n\n # need to remove these elements for comparison; comparing with `fit_loop.state_dict()` would require the\n # fit loop to have an iterator, which is only available during training\n state_dict[\"epoch_loop.state_dict\"][\"dataloader_state_dict\"] = ANY\n checkpoint[\"loops\"][\"fit_loop\"][\"epoch_loop.state_dict\"][\"dataloader_state_dict\"] = ANY\n assert state_dict == checkpoint[\"loops\"][\"fit_loop\"]\n\n trainer.fit_loop.load_state_dict(checkpoint[\"loops\"][\"fit_loop\"])\n # test resetting manually, we expect all `ready` counters to be reset to `completed`\n trainer.fit_loop.reset()\n trainer.fit_loop.epoch_loop.reset()\n trainer.fit_loop.epoch_loop.batch_loop.reset()\n trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.reset()\n\n epoch_progress = trainer.fit_loop.epoch_progress\n assert epoch_progress.current.ready == stop_epoch\n assert epoch_progress.current.completed == stop_epoch\n\n batch_progress = trainer.fit_loop.epoch_loop.batch_progress\n assert batch_progress.current.ready == be_batches_completed\n assert batch_progress.current.completed == be_batches_completed\n\n optim_progress = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.optim_progress\n assert optim_progress.optimizer.step.current.ready == be_total_opt_steps\n assert optim_progress.optimizer.step.current.completed == be_total_opt_steps\n assert optim_progress.optimizer.zero_grad.current.ready == be_total_zero_grad\n assert optim_progress.optimizer.zero_grad.current.completed == be_total_zero_grad\n\n state_dict = trainer.fit_loop.state_dict()\n assert state_dict != checkpoint[\"loops\"][\"fit_loop\"]\n assert state_dict[\"epoch_progress\"][\"total\"][\"started\"] == stop_epoch + 1\n assert state_dict[\"epoch_progress\"][\"current\"][\"started\"] == stop_epoch\n\n\[email protected](os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": \"1\"})\[email protected](\"n_optimizers\", (1, 3, 5))\ndef test_loop_state_on_complete_run(n_optimizers, tmpdir):\n n_epochs = 3\n n_batches = 3\n accumulate_grad_batches = 1\n\n class TestModel(BoringModel):\n def __init__(self):\n super().__init__()\n if n_optimizers > 1:\n self.configure_optimizers = self.configure_optimizers_multiple\n\n def training_step(self, batch, batch_idx, optimizer_idx=0):\n return super().training_step(batch, batch_idx)\n\n def 
configure_optimizers_multiple(self):\n optimizers = [torch.optim.Adam(self.layer.parameters(), lr=0.1) for _ in range(n_optimizers)]\n\n lr_scheduler_0 = torch.optim.lr_scheduler.StepLR(optimizers[0], step_size=1)\n lr_scheduler_1 = torch.optim.lr_scheduler.StepLR(optimizers[1], step_size=1)\n # no scheduler for optimizer_2\n lr_schedulers = [lr_scheduler_0, {\"scheduler\": lr_scheduler_1, \"interval\": \"step\"}]\n\n return optimizers, lr_schedulers\n\n def train_dataloader(self):\n # override to test the `is_last_batch` value\n return DataLoader(RandomDataset(32, n_batches))\n\n model = TestModel()\n model.training_epoch_end = None\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=n_epochs,\n limit_val_batches=0,\n accumulate_grad_batches=accumulate_grad_batches,\n enable_progress_bar=False,\n logger=False,\n )\n trainer.fit(model)\n\n assert trainer.num_training_batches == n_batches\n\n ckpt_path = trainer.checkpoint_callback.best_model_path\n assert os.path.exists(ckpt_path)\n checkpoint = torch.load(ckpt_path)\n\n n_sch_steps_total = n_epochs\n n_sch_steps_current = 1\n if n_optimizers > 1:\n n_sch_steps_total = n_epochs + n_epochs * n_batches\n n_sch_steps_current = n_batches + 1\n\n expected = {\n \"state_dict\": ANY,\n \"epoch_progress\": {\n \"total\": {\n \"ready\": n_epochs,\n \"started\": n_epochs,\n \"processed\": n_epochs,\n \"completed\": n_epochs - 1,\n },\n \"current\": {\n \"ready\": n_epochs,\n \"started\": n_epochs,\n \"processed\": n_epochs,\n \"completed\": n_epochs - 1,\n },\n },\n \"epoch_loop.state_dict\": ANY,\n \"epoch_loop.batch_progress\": {\n \"total\": {\n \"ready\": n_epochs * n_batches,\n \"started\": n_epochs * n_batches,\n \"processed\": n_epochs * n_batches,\n \"completed\": n_epochs * n_batches,\n },\n \"current\": {\n \"ready\": n_batches,\n \"started\": n_batches,\n \"processed\": n_batches,\n \"completed\": n_batches,\n },\n \"is_last_batch\": True,\n },\n \"epoch_loop.scheduler_progress\": {\n \"total\": {\"ready\": n_sch_steps_total, \"completed\": n_sch_steps_total},\n \"current\": {\"ready\": n_sch_steps_current, \"completed\": n_sch_steps_current},\n },\n \"epoch_loop.batch_loop.state_dict\": ANY,\n \"epoch_loop.batch_loop.manual_loop.state_dict\": ANY,\n \"epoch_loop.batch_loop.manual_loop.optim_step_progress\": {\n \"total\": {\"ready\": 0, \"completed\": 0},\n \"current\": {\"ready\": 0, \"completed\": 0},\n },\n \"epoch_loop.batch_loop.optimizer_loop.state_dict\": {},\n \"epoch_loop.batch_loop.optimizer_loop.optim_progress\": {\n \"optimizer_position\": n_optimizers,\n \"optimizer\": {\n \"step\": {\n \"total\": {\n \"ready\": n_epochs * n_batches * n_optimizers,\n \"completed\": n_epochs * n_batches * n_optimizers,\n },\n \"current\": {\n \"ready\": n_batches * n_optimizers,\n \"completed\": n_batches * n_optimizers,\n },\n },\n \"zero_grad\": {\n \"total\": {\n \"ready\": n_epochs * n_batches * n_optimizers,\n \"started\": n_epochs * n_batches * n_optimizers,\n \"completed\": n_epochs * n_batches * n_optimizers,\n },\n \"current\": {\n \"ready\": n_batches * n_optimizers,\n \"started\": n_batches * n_optimizers,\n \"completed\": n_batches * n_optimizers,\n },\n },\n },\n },\n \"epoch_loop.val_loop.state_dict\": ANY,\n \"epoch_loop.val_loop.dataloader_progress\": ANY,\n \"epoch_loop.val_loop.epoch_loop.state_dict\": ANY,\n \"epoch_loop.val_loop.epoch_loop.batch_progress\": ANY,\n \"epoch_loop.val_loop._results\": ANY,\n \"epoch_loop._results\": ANY,\n }\n assert checkpoint[\"loops\"][\"fit_loop\"] == expected\n\n\ndef 
test_fit_loop_reset(tmpdir):\n \"\"\"Test that the reset logic in fit- and epoch loop is aware of whether the loop is restarting from a completed\n loop or from a mid-epoch checkpoint.\"\"\"\n\n # generate checkpoints at end of epoch and mid-epoch\n model = BoringModel()\n checkpoint_callback = ModelCheckpoint(\n dirpath=tmpdir,\n every_n_train_steps=2,\n save_top_k=-1,\n )\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=4,\n max_epochs=2,\n callbacks=[checkpoint_callback],\n logger=False,\n enable_model_summary=False,\n )\n trainer.fit(model)\n\n # reset state loaded from a checkpoint from mid-epoch\n mid_epoch_ckpt = torch.load(str(tmpdir / \"epoch=0-step=2.ckpt\"))\n fit_loop = trainer.fit_loop\n epoch_loop = fit_loop.epoch_loop\n optimizer_loop = epoch_loop.batch_loop.optimizer_loop\n assert not fit_loop.restarting\n assert not epoch_loop.restarting\n assert not optimizer_loop.restarting\n\n # we load exactly what was saved - no reset yet\n fit_loop.load_state_dict(mid_epoch_ckpt[\"loops\"][\"fit_loop\"])\n # resetting from a mid-of-epoch checkpoint SHOULD NOT reset the current counters to 0\n fit_loop.reset()\n epoch_loop.reset()\n optimizer_loop.reset()\n\n assert fit_loop.restarting\n assert fit_loop.epoch_progress.total.ready == 1\n assert fit_loop.epoch_progress.total.completed == 0 # the checkpoint was saved mid epoch\n assert fit_loop.epoch_progress.current.ready == 0\n assert fit_loop.epoch_progress.current.completed == 0\n\n assert epoch_loop.restarting\n assert epoch_loop.batch_progress.total.ready == 2\n assert epoch_loop.batch_progress.total.processed == 2\n assert epoch_loop.batch_progress.total.completed == 1 # the checkpoint was saved on train_batch_end\n assert epoch_loop.batch_progress.current.ready == 1 # currents get set to the completed value\n assert epoch_loop.batch_progress.current.processed == 1\n assert epoch_loop.batch_progress.current.completed == 1\n\n assert optimizer_loop.restarting\n assert optimizer_loop.optim_progress.optimizer_position == 1\n\n # reset state loaded from a checkpoint from the end of an epoch\n end_of_epoch_ckpt = torch.load(str(tmpdir / \"epoch=0-step=4.ckpt\"))\n fit_loop = trainer.fit_loop\n epoch_loop = fit_loop.epoch_loop\n fit_loop.restarting = False\n epoch_loop.restarting = False\n optimizer_loop.restarting = False\n\n # we load exactly what was saved - no reset yet\n fit_loop.load_state_dict(end_of_epoch_ckpt[\"loops\"][\"fit_loop\"])\n # resetting from a end-of-epoch checkpoint SHOULD reset the current counters to 0\n fit_loop.reset()\n epoch_loop.reset()\n optimizer_loop.reset()\n\n assert fit_loop.restarting\n assert fit_loop.epoch_progress.total.ready == 1\n assert fit_loop.epoch_progress.total.completed == 0 # the checkpoint saves before the epoch completes\n assert fit_loop.epoch_progress.current.ready == 0\n assert fit_loop.epoch_progress.current.completed == 0\n\n assert epoch_loop.restarting\n assert epoch_loop.batch_progress.total.ready == 4\n assert epoch_loop.batch_progress.total.processed == 4\n assert epoch_loop.batch_progress.total.completed == 3 # the checkpoint was saved on train_batch_end\n assert epoch_loop.batch_progress.current.ready == 3 # currents get set to the completed value\n assert epoch_loop.batch_progress.current.processed == 3\n assert epoch_loop.batch_progress.current.completed == 3\n\n assert optimizer_loop.optim_progress.optimizer_position == 1\n\n\[email protected](os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": \"1\"})\[email protected](\n [\"train_datasets\", 
\"val_datasets\"],\n [([RandomDataset], [RandomDataset]), ([RandomDataset], [RandomDataset, RandomDataset])],\n)\[email protected](\"val_check_interval\", [0.5, 1.0])\ndef test_fit_can_fail_during_validation(train_datasets, val_datasets, val_check_interval, tmpdir):\n size, n_batches = 2, 4\n stop_batch = 1\n n_val_dataloaders = len(val_datasets)\n stop_dataloader = n_val_dataloaders - 1\n\n class TestModel(LightningModule):\n def __init__(self, should_fail):\n super().__init__()\n self.layer = torch.nn.Linear(size, 2)\n self.should_fail = should_fail\n\n def step(self, batch):\n return sum(self.layer(b).sum() for b in batch)\n\n def training_step(self, batch, batch_idx):\n return self.step(batch)\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n if self.should_fail and dataloader_idx == stop_dataloader and batch_idx == stop_batch:\n raise CustomException\n return self.step(batch)\n\n def configure_optimizers(self):\n return torch.optim.SGD(self.layer.parameters(), lr=0.1)\n\n def train_dataloader(self):\n return [DataLoader(cls(size, n_batches)) for cls in train_datasets]\n\n def val_dataloader(self):\n return [DataLoader(cls(size, n_batches)) for cls in val_datasets]\n\n model = TestModel(False)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_check_interval=val_check_interval,\n num_sanity_val_steps=0,\n enable_progress_bar=False,\n )\n trainer.fit(model)\n\n ckpt_path = os.path.join(tmpdir, \".pl_auto_save.ckpt\")\n assert not os.path.exists(ckpt_path), \"Shouldn't have failed\"\n state_dict = trainer.fit_loop.state_dict()\n expected_global_step = trainer.global_step\n\n assert state_dict[\"epoch_loop.batch_progress\"] == {\n \"total\": {\"ready\": n_batches, \"started\": n_batches, \"processed\": n_batches, \"completed\": n_batches},\n \"current\": {\"ready\": n_batches, \"started\": n_batches, \"processed\": n_batches, \"completed\": n_batches},\n \"is_last_batch\": True,\n }\n\n val_per_epoch = int(1 // val_check_interval)\n assert state_dict[\"epoch_loop.val_loop.dataloader_progress\"] == {\n \"total\": {\"ready\": n_val_dataloaders * val_per_epoch, \"completed\": n_val_dataloaders * val_per_epoch},\n \"current\": {\"ready\": n_val_dataloaders, \"completed\": n_val_dataloaders},\n }\n\n assert state_dict[\"epoch_loop.val_loop.epoch_loop.batch_progress\"] == {\n \"total\": {\n \"ready\": n_val_dataloaders * val_per_epoch * n_batches,\n \"started\": n_val_dataloaders * val_per_epoch * n_batches,\n \"processed\": n_val_dataloaders * val_per_epoch * n_batches,\n \"completed\": n_val_dataloaders * val_per_epoch * n_batches,\n },\n \"current\": {\"ready\": n_batches, \"completed\": n_batches, \"started\": n_batches, \"processed\": n_batches},\n \"is_last_batch\": True,\n }\n\n model = TestModel(True)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_check_interval=val_check_interval,\n num_sanity_val_steps=0,\n enable_progress_bar=False,\n )\n with pytest.raises(CustomException):\n # will stop during validation\n trainer.fit(model)\n\n assert os.path.exists(ckpt_path)\n checkpoint = torch.load(ckpt_path)[\"loops\"][\"fit_loop\"]\n\n per_val_train_batches = int(n_batches * val_check_interval)\n assert checkpoint[\"epoch_loop.batch_progress\"] == {\n \"total\": {\n \"ready\": per_val_train_batches,\n \"started\": per_val_train_batches,\n \"processed\": per_val_train_batches,\n \"completed\": per_val_train_batches,\n },\n \"current\": {\n \"ready\": per_val_train_batches,\n \"started\": per_val_train_batches,\n \"processed\": 
per_val_train_batches,\n \"completed\": per_val_train_batches,\n },\n \"is_last_batch\": val_check_interval == 1,\n }\n\n val_batch_progress = \"epoch_loop.val_loop.epoch_loop.batch_progress\"\n # \"nb_\": non-breaking\n nb_total_val_batch = stop_dataloader * n_batches\n assert checkpoint[val_batch_progress] == {\n \"total\": {\n \"ready\": nb_total_val_batch + stop_batch + 1,\n \"started\": nb_total_val_batch + stop_batch + 1,\n \"processed\": nb_total_val_batch + stop_batch,\n \"completed\": nb_total_val_batch + stop_batch,\n },\n \"current\": {\n \"ready\": stop_batch + 1,\n \"started\": stop_batch + 1,\n \"processed\": stop_batch,\n \"completed\": stop_batch,\n },\n \"is_last_batch\": False,\n }\n\n model = TestModel(False)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_check_interval=val_check_interval,\n enable_progress_bar=False,\n )\n trainer.fit(model, ckpt_path=ckpt_path)\n\n assert trainer.global_step == expected_global_step\n\n state_dict_after_restart = trainer.fit_loop.state_dict()\n\n # should get the same values as in the run that did not fail\n # totals are increased by 1 (the failed batch which never completed)\n expected = state_dict.copy()\n\n assert state_dict_after_restart[\"epoch_loop.batch_progress\"] == expected[\"epoch_loop.batch_progress\"]\n\n val_dl_progress = \"epoch_loop.val_loop.dataloader_progress\"\n expected[val_dl_progress][\"total\"][\"ready\"] += 1\n assert state_dict_after_restart[val_dl_progress] == expected[val_dl_progress]\n\n expected[val_batch_progress][\"total\"][\"ready\"] += 1\n expected[val_batch_progress][\"total\"][\"started\"] += 1\n assert state_dict_after_restart[val_batch_progress] == expected[val_batch_progress]\n\n\[email protected](\"should_fail\", [False, True])\[email protected](\"persistent_workers\", [pytest.param(False, marks=RunIf(slow=True)), True])\ndef test_workers_are_shutdown(tmpdir, should_fail, persistent_workers):\n # `num_workers == 1` uses `_MultiProcessingDataLoaderIter`\n # `persistent_workers` makes sure `self._iterator` gets set on the `DataLoader` instance\n\n class _TestMultiProcessingDataLoaderIter(_MultiProcessingDataLoaderIter):\n def __init__(self, *args, dataloader, **kwargs):\n super().__init__(*args, **kwargs)\n self.dataloader = dataloader\n\n def _shutdown_workers(self):\n self.dataloader.count_shutdown_workers += 1\n super()._shutdown_workers()\n\n class TestDataLoader(DataLoader):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.count_shutdown_workers = 0\n\n def _get_iterator(self):\n if self.num_workers == 0:\n return super()._get_iterator()\n else:\n self.check_worker_number_rationality()\n return _TestMultiProcessingDataLoaderIter(self, dataloader=self)\n\n train_dataloader = TestDataLoader(RandomDataset(32, 64), num_workers=1, persistent_workers=persistent_workers)\n val_dataloader = TestDataLoader(RandomDataset(32, 64), num_workers=1, persistent_workers=persistent_workers)\n\n class TestCallback(Callback):\n def on_train_epoch_end(self, trainer, *_):\n if trainer.current_epoch == 1:\n raise CustomException\n\n max_epochs = 3\n\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=2,\n limit_val_batches=2,\n max_epochs=max_epochs,\n callbacks=TestCallback() if should_fail else None,\n )\n\n if should_fail:\n with pytest.raises(CustomException):\n trainer.fit(model, train_dataloader, val_dataloader)\n else:\n trainer.fit(model, train_dataloader, val_dataloader)\n\n assert 
train_dataloader.count_shutdown_workers == 2 if should_fail else (2 if persistent_workers else max_epochs)\n # on sanity checking end, the workers are being deleted too.\n assert val_dataloader.count_shutdown_workers == 2 if persistent_workers else (3 if should_fail else max_epochs + 1)\n assert train_dataloader._iterator is None\n assert val_dataloader._iterator is None\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities used for collections.\"\"\"\n\nimport dataclasses\nimport operator\nfrom abc import ABC\nfrom collections import defaultdict, OrderedDict\nfrom collections.abc import Mapping, Sequence\nfrom copy import copy, deepcopy\nfrom functools import partial\nfrom typing import Any, Callable, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _compare_version, _TORCHTEXT_LEGACY\nfrom pytorch_lightning.utilities.warnings import rank_zero_deprecation\n\nif _TORCHTEXT_LEGACY:\n if _compare_version(\"torchtext\", operator.ge, \"0.9.0\"):\n from torchtext.legacy.data import Batch\n else:\n from torchtext.data import Batch\nelse:\n Batch = type(None)\n\n\n_CPU_DEVICES = (\"cpu\", torch.device(\"cpu\"))\n\n\ndef to_dtype_tensor(\n value: Union[int, float, List[Union[int, float]]], dtype: torch.dtype, device: Union[str, torch.device]\n) -> torch.Tensor:\n return torch.tensor(value, dtype=dtype, device=device)\n\n\ndef from_numpy(value: np.ndarray, device: Union[str, torch.device]) -> torch.Tensor:\n return torch.from_numpy(value).to(device)\n\n\nCONVERSION_DTYPES: List[Tuple[Any, Callable[[Any, Any], torch.Tensor]]] = [\n # bool -> uint8 as bool -> torch.bool triggers RuntimeError: Unsupported data type for NCCL process group\n (bool, partial(to_dtype_tensor, dtype=torch.uint8)),\n (int, partial(to_dtype_tensor, dtype=torch.int)),\n (float, partial(to_dtype_tensor, dtype=torch.float)),\n (np.ndarray, from_numpy),\n]\n\n\ndef _is_namedtuple(obj: object) -> bool:\n # https://github.com/pytorch/pytorch/blob/v1.8.1/torch/nn/parallel/scatter_gather.py#L4-L8\n return isinstance(obj, tuple) and hasattr(obj, \"_asdict\") and hasattr(obj, \"_fields\")\n\n\ndef _is_dataclass_instance(obj: object) -> bool:\n # https://docs.python.org/3/library/dataclasses.html#module-level-decorators-classes-and-functions\n return dataclasses.is_dataclass(obj) and not isinstance(obj, type)\n\n\ndef apply_to_collection(\n data: Any,\n dtype: Union[type, Any, Tuple[Union[type, Any]]],\n function: Callable,\n *args: Any,\n wrong_dtype: Optional[Union[type, Tuple[type]]] = None,\n include_none: bool = True,\n **kwargs: Any,\n) -> Any:\n \"\"\"Recursively applies a function to all elements of a certain dtype.\n\n Args:\n data: the collection to apply the function to\n dtype: the given function will be applied to all elements of this dtype\n function: the function to apply\n *args: positional arguments (will be forwarded to 
calls of ``function``)\n wrong_dtype: the given function won't be applied if this type is specified and the given collections\n is of the ``wrong_dtype`` even if it is of type ``dtype``\n include_none: Whether to include an element if the output of ``function`` is ``None``.\n **kwargs: keyword arguments (will be forwarded to calls of ``function``)\n\n Returns:\n The resulting collection\n \"\"\"\n # Breaking condition\n if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):\n return function(data, *args, **kwargs)\n\n elem_type = type(data)\n\n # Recursively apply to collection items\n if isinstance(data, Mapping):\n out = []\n for k, v in data.items():\n v = apply_to_collection(\n v, dtype, function, *args, wrong_dtype=wrong_dtype, include_none=include_none, **kwargs\n )\n if include_none or v is not None:\n out.append((k, v))\n if isinstance(data, defaultdict):\n return elem_type(data.default_factory, OrderedDict(out))\n return elem_type(OrderedDict(out))\n\n is_namedtuple = _is_namedtuple(data)\n is_sequence = isinstance(data, Sequence) and not isinstance(data, str)\n if is_namedtuple or is_sequence:\n out = []\n for d in data:\n v = apply_to_collection(\n d, dtype, function, *args, wrong_dtype=wrong_dtype, include_none=include_none, **kwargs\n )\n if include_none or v is not None:\n out.append(v)\n return elem_type(*out) if is_namedtuple else elem_type(out)\n\n if _is_dataclass_instance(data):\n # make a deepcopy of the data,\n # but do not deepcopy mapped fields since the computation would\n # be wasted on values that likely get immediately overwritten\n fields = {}\n memo = {}\n for field in dataclasses.fields(data):\n field_value = getattr(data, field.name)\n fields[field.name] = (field_value, field.init)\n memo[id(field_value)] = field_value\n result = deepcopy(data, memo=memo)\n # apply function to each field\n for field_name, (field_value, field_init) in fields.items():\n v = None\n if field_init:\n v = apply_to_collection(\n field_value,\n dtype,\n function,\n *args,\n wrong_dtype=wrong_dtype,\n include_none=include_none,\n **kwargs,\n )\n if not field_init or (not include_none and v is None): # retain old value\n v = getattr(data, field_name)\n try:\n setattr(result, field_name, v)\n except dataclasses.FrozenInstanceError as e:\n raise MisconfigurationException(\n \"A frozen dataclass was passed to `apply_to_collection` but this is not allowed.\"\n \" HINT: is your batch a frozen dataclass?\"\n ) from e\n return result\n\n # data is neither of dtype, nor a collection\n return data\n\n\ndef apply_to_collections(\n data1: Optional[Any],\n data2: Optional[Any],\n dtype: Union[type, Any, Tuple[Union[type, Any]]],\n function: Callable,\n *args: Any,\n wrong_dtype: Optional[Union[type, Tuple[type]]] = None,\n **kwargs: Any,\n) -> Any:\n \"\"\"Zips two collections and applies a function to their items of a certain dtype.\n\n Args:\n data1: The first collection\n data2: The second collection\n dtype: the given function will be applied to all elements of this dtype\n function: the function to apply\n *args: positional arguments (will be forwarded to calls of ``function``)\n wrong_dtype: the given function won't be applied if this type is specified and the given collections\n is of the ``wrong_dtype`` even if it is of type ``dtype``\n **kwargs: keyword arguments (will be forwarded to calls of ``function``)\n\n Returns:\n The resulting collection\n\n Raises:\n AssertionError:\n If sequence collections have different data sizes.\n \"\"\"\n if data1 is 
None:\n if data2 is None:\n return\n # in case they were passed reversed\n data1, data2 = data2, None\n\n elem_type = type(data1)\n\n if isinstance(data1, dtype) and data2 is not None and (wrong_dtype is None or not isinstance(data1, wrong_dtype)):\n return function(data1, data2, *args, **kwargs)\n\n if isinstance(data1, Mapping) and data2 is not None:\n # use union because we want to fail if a key does not exist in both\n zipped = {k: (data1[k], data2[k]) for k in data1.keys() | data2.keys()}\n return elem_type(\n {\n k: apply_to_collections(*v, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs)\n for k, v in zipped.items()\n }\n )\n\n is_namedtuple = _is_namedtuple(data1)\n is_sequence = isinstance(data1, Sequence) and not isinstance(data1, str)\n if (is_namedtuple or is_sequence) and data2 is not None:\n assert len(data1) == len(data2), \"Sequence collections have different sizes.\"\n out = [\n apply_to_collections(v1, v2, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs)\n for v1, v2 in zip(data1, data2)\n ]\n return elem_type(*out) if is_namedtuple else elem_type(out)\n\n if _is_dataclass_instance(data1) and data2 is not None:\n if not _is_dataclass_instance(data2):\n raise TypeError(\n \"Expected inputs to be dataclasses of the same type or to have identical fields\"\n f\" but got input 1 of type {type(data1)} and input 2 of type {type(data2)}.\"\n )\n if not (\n len(dataclasses.fields(data1)) == len(dataclasses.fields(data2))\n and all(map(lambda f1, f2: isinstance(f1, type(f2)), dataclasses.fields(data1), dataclasses.fields(data2)))\n ):\n raise TypeError(\"Dataclasses fields do not match.\")\n # make a deepcopy of the data,\n # but do not deepcopy mapped fields since the computation would\n # be wasted on values that likely get immediately overwritten\n data = [data1, data2]\n fields: List[dict] = [{}, {}]\n memo: dict = {}\n for i in range(len(data)):\n for field in dataclasses.fields(data[i]):\n field_value = getattr(data[i], field.name)\n fields[i][field.name] = (field_value, field.init)\n if i == 0:\n memo[id(field_value)] = field_value\n\n result = deepcopy(data1, memo=memo)\n\n # apply function to each field\n for ((field_name, (field_value1, field_init1)), (_, (field_value2, field_init2))) in zip(\n fields[0].items(), fields[1].items()\n ):\n v = None\n if field_init1 and field_init2:\n v = apply_to_collections(\n field_value1,\n field_value2,\n dtype,\n function,\n *args,\n wrong_dtype=wrong_dtype,\n **kwargs,\n )\n if not field_init1 or not field_init2 or v is None: # retain old value\n return apply_to_collection(data1, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs)\n try:\n setattr(result, field_name, v)\n except dataclasses.FrozenInstanceError as e:\n raise MisconfigurationException(\n \"A frozen dataclass was passed to `apply_to_collections` but this is not allowed.\"\n \" HINT: is your batch a frozen dataclass?\"\n ) from e\n return result\n\n return apply_to_collection(data1, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs)\n\n\nclass TransferableDataType(ABC):\n \"\"\"A custom type for data that can be moved to a torch device via ``.to(...)``.\n\n Example:\n\n >>> isinstance(dict, TransferableDataType)\n False\n >>> isinstance(torch.rand(2, 3), TransferableDataType)\n True\n >>> class CustomObject:\n ... def __init__(self):\n ... self.x = torch.rand(2, 2)\n ... def to(self, device):\n ... self.x = self.x.to(device)\n ... 
return self\n >>> isinstance(CustomObject(), TransferableDataType)\n True\n \"\"\"\n\n @classmethod\n def __subclasshook__(cls, subclass: Any) -> Union[bool, Any]:\n if cls is TransferableDataType:\n to = getattr(subclass, \"to\", None)\n return callable(to)\n return NotImplemented\n\n\ndef move_data_to_device(batch: Any, device: Union[str, torch.device]) -> Any:\n \"\"\"Transfers a collection of data to the given device. Any object that defines a method ``to(device)`` will be\n moved and all other objects in the collection will be left untouched.\n\n Args:\n batch: A tensor or collection of tensors or anything that has a method ``.to(...)``.\n See :func:`apply_to_collection` for a list of supported collection types.\n device: The device to which the data should be moved\n\n Return:\n the same collection but with all contained tensors residing on the new device.\n\n See Also:\n - :meth:`torch.Tensor.to`\n - :class:`torch.device`\n \"\"\"\n\n def batch_to(data: Any) -> Any:\n # try to move torchtext data first\n if _TORCHTEXT_LEGACY and isinstance(data, Batch):\n # TODO: also remove the torchtext dependency with Lightning 1.8\n rank_zero_deprecation(\n \"The `torchtext.legacy.Batch` object is deprecated and Lightning will remove support for it in v1.8.\"\n \" We recommend you to migrate away from Batch by following the TorchText README:\"\n \" https://github.com/pytorch/text#bc-breaking-legacy\"\n )\n # Shallow copy because each Batch has a reference to Dataset which contains all examples\n device_data = copy(data)\n for field, field_value in data.dataset.fields.items():\n if field_value is None:\n continue\n device_field = move_data_to_device(getattr(data, field), device)\n setattr(device_data, field, device_field)\n return device_data\n\n kwargs = {}\n # Don't issue non-blocking transfers to CPU\n if isinstance(data, torch.Tensor) and device not in _CPU_DEVICES:\n kwargs[\"non_blocking\"] = True\n data_output = data.to(device, **kwargs)\n if data_output is not None:\n return data_output\n # user wrongly implemented the `TransferableDataType` and forgot to return `self`.\n return data\n\n dtype = (TransferableDataType, Batch) if _TORCHTEXT_LEGACY else TransferableDataType\n return apply_to_collection(batch, dtype=dtype, function=batch_to)\n\n\ndef convert_to_tensors(data: Any, device: Union[str, torch.device]) -> Any:\n for src_dtype, conversion_func in CONVERSION_DTYPES:\n data = apply_to_collection(data, src_dtype, conversion_func, device=device)\n\n def _move_to_device_and_make_contiguous(t: torch.Tensor, device: Union[str, torch.device]) -> torch.Tensor:\n return t.to(device).contiguous()\n\n data = apply_to_collection(data, torch.Tensor, _move_to_device_and_make_contiguous, device=device)\n return data\n" ]
[ [ "torch.nn.functional.softmax", "torch.load", "torch.randn", "torch.nn.functional.cross_entropy", "torch.nn.GRU", "torch.optim.lr_scheduler.ExponentialLR", "torch.equal", "torch.nn.Linear", "torch.device", "torch.nn.ReLU", "torch.optim.lr_scheduler.StepLR" ], [ "torch.nn.Linear", "torch.optim.lr_scheduler.StepLR", "torch.load" ], [ "torch.device", "torch.from_numpy", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
groussea/custom_colormap
[ "e92f88d1b5be18509fa6e6204387b10c74b4c8ad" ]
[ "custom_colormaps.py" ]
[ "\"\"\"\nNAME\n Custom Colormaps for Matplotlib\nPURPOSE\n This program shows how to implement make_cmap which is a function that\n generates a colorbar\nPROGRAMMER(S)\n Chris Slocum\n Gauthier Rousseau\nREVISION HISTORY\n 20130411 -- Initial version created\n 20140313 -- Small changes made and code posted online\n 20140320 -- Added the ability to set the position of each color\n 20150724 -- Attempted to make this more Pythonic\n 20180307 -- Changed license to BSD 3-clause\n 20190711 -- Added transprency (Alpha component) + different customized color maps + resolution of the colormap as an argument\n\"\"\"\nimport numpy as np\n\n\ndef create_colormap(colors, position=None, bit=False, reverse=False, res=256, name='custom_colormap'):\n \"\"\"\n returns a linear custom colormap\n\n Parameters\n ----------\n colors : array-like\n contain RGBA values. The RGBA values may either be in 8-bit [0 to 255]\n or arithmetic [0 to 1] (default).\n Arrange your tuples so that the first color is the lowest value for the\n colorbar and the last is the highest.\n position : array like\n contains values from 0 to 1 to dictate the location of each color.\n bit : Boolean\n 8-bit [0 to 255] (in which bit must be set to\n True when called) or arithmetic [0 to 1] (default)\n reverse : Boolean\n If you want to flip the scheme\n res : integer\n Resolution of the colormap\n name : string\n name of the scheme if you plan to save it\n\n Returns\n -------\n cmap : matplotlib.colors.LinearSegmentedColormap\n cmap with equally spaced colors\n \"\"\"\n from matplotlib.colors import LinearSegmentedColormap\n if not isinstance(colors, np.ndarray):\n colors = np.array(colors, dtype='f')\n if reverse:\n colors = colors[::-1]\n if position is not None and not isinstance(position, np.ndarray):\n position = np.array(position)\n elif position is None:\n position = np.linspace(0, 1, colors.shape[0])\n else:\n if position.size != colors.shape[0]:\n raise ValueError(\"position length must be the same as colors\")\n elif not np.isclose(position[0], 0) and not np.isclose(position[-1], 1):\n raise ValueError(\"position must start with 0 and end with 1\")\n if bit:\n colors[:] = [tuple(map(lambda x: x / 255., color)) for color in colors]\n cdict = {'red':[], 'green':[], 'blue':[], 'alpha':[]}\n for pos, color in zip(position, colors):\n cdict['red'].append((pos, color[0], color[0]))\n cdict['green'].append((pos, color[1], color[1]))\n cdict['blue'].append((pos, color[2], color[2]))\n cdict['alpha'].append((pos, color[3], color[3]))\n return LinearSegmentedColormap(name, cdict,res)\n\ndef make_cmap_customized(Palette='mountain',position=[0.0, 0.16, 0.2, 0.24, 0.4, 0.7, 0.8, 1],reverse=False,alpha=255):\n if Palette=='sunrise':\n couleur7=(0,0,0,alpha)\n couleur6=(64,50,79,alpha)\n couleur5=(107,64,110,alpha)\n couleur4=(141,76,125,alpha)\n couleur3=(172,85,122,alpha)\n couleur2=(210,124,124,alpha)\n couleur1=(240,206,125,alpha) \n couleur0=(255,255,255,alpha)\n elif Palette=='green':\n couleur7=(0,0,0,alpha)\n couleur6=(6,49,50,alpha)\n couleur5=(28,78,78,alpha)\n couleur4=(55,140,129,alpha)\n couleur3=(172,185,153,alpha)\n couleur2=(199,205,181,alpha)\n couleur1=(232,219,194,alpha) \n couleur0=(255,255,255,alpha)\n elif Palette=='mountain':\n couleur7=(0,0,0,alpha)\n couleur6=(45,52,70,alpha)\n couleur5=(89,76,96,alpha)\n couleur4=(145,101,118,alpha)\n couleur3=(212,119,127,alpha)\n couleur2=(212,153,154,alpha)\n couleur1=(238,189,184,alpha) \n couleur0=(255,255,255,alpha) \n elif Palette=='prune':\n couleur7=(0,0,0,alpha)\n 
couleur6=(66,37,67,alpha)\n couleur5=(125,58,91,alpha)\n couleur4=(107,77,131,alpha)\n couleur3=(205,179,214,alpha)\n couleur2=(164,173,154,alpha)\n couleur1=(207,213,199,alpha) \n couleur0=(255,255,255,alpha)\n elif Palette=='asym_mountain5':\n couleur7=(45,52,70,alpha)\n couleur6=(110,86,96,alpha)\n couleur5=(135,90,115,alpha)\n couleur4=(145,101,118,alpha) \n couleur3=(212,119,127,alpha)\n couleur2=(232,219,194,alpha) \n couleur1=(167,213,229,alpha) \n couleur0=(121,175,204,alpha)\n \n colors = [couleur0,couleur1,couleur2,couleur3,couleur4,couleur5,couleur6,couleur7]\n\n return create_colormap(colors, bit=True ,position=position,reverse=reverse,res=1000) \n\n\n\n\nif __name__ == \"__main__\":\n # An example of how to use make_cmap\n import matplotlib.pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(311)\n # Create a list of RGB tuples\n colors = [(255, 0, 0,10), (255, 255, 0,70), (255, 255, 255,80), (0, 157, 0,255), (0, 0, 255,255)] # This example uses the 8-bit RGB\n # Call the function make_cmap which returns your colormap\n my_cmap = create_colormap(colors, bit=True)\n # Use your colormap\n plt.plot([0,50],[0,25],color='k',zorder=0)\n\n plt.text(25,12.5,'colormaps',zorder=0,horizontalalignment='center',verticalalignment='center',fontsize=30)\n plt.pcolor(np.random.rand(25, 50), cmap=my_cmap)\n\n plt.colorbar()\n ax = fig.add_subplot(312)\n\n plt.plot([0,50],[0,25],color='k',zorder=0)\n plt.text(25,12.5,'with',zorder=0,horizontalalignment='center',verticalalignment='center',fontsize=30)\n plt.pcolor(np.random.rand(25, 50), cmap=make_cmap_customized(Palette='green',alpha=255/4))\n plt.colorbar()\n\n ax = fig.add_subplot(313)\n colors = [(0.4, 0.2, 0.0,0.5), (1, 1, 1,0.2), (0, 0.3, 0.4,0.8)]\n # Create an array or list of positions from 0 to 1.\n position = [0, 0.3, 1]\n plt.plot([0,50],[0,25],color='k',zorder=0)\n plt.text(25,12.5,'transparency',zorder=0,horizontalalignment='center',verticalalignment='center',fontsize=30)\n plt.pcolor(np.random.rand(25, 50), cmap=make_cmap_customized(Palette='mountain',alpha=255/2))\n plt.colorbar()\n plt.savefig(\"example_custom_colormap.png\")\n plt.show()\n" ]
[ [ "numpy.array", "numpy.linspace", "matplotlib.colors.LinearSegmentedColormap", "numpy.isclose", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.colorbar", "numpy.random.rand", "matplotlib.pyplot.text", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fedesigno/solar-panel-segmentation
[ "75856be3361bb4904387e6abc986627d1cc98ebb" ]
[ "solarnet/models/segmenter.py" ]
[ "import torch\nfrom torch import nn\n\nfrom typing import List\n\nfrom .base import ResnetBase\n\n\nclass Segmenter(ResnetBase):\n \"\"\"A ResNet34 U-Net model, as described in\n https://github.com/fastai/fastai/blob/master/courses/dl2/carvana-unet-lrg.ipynb\n\n Attributes:\n imagenet_base: boolean, default: False\n Whether or not to load weights pretrained on imagenet\n \"\"\"\n\n def __init__(self, imagenet_base: bool = False) -> None:\n super().__init__(imagenet_base=imagenet_base)\n\n self.target_modules = [str(x) for x in [2, 4, 5, 6]]\n self.hooks = self.add_hooks()\n\n self.relu = nn.ReLU()\n self.upsamples = nn.ModuleList([\n UpBlock(2048, 1024, 512),\n UpBlock(512, 512, 256),\n UpBlock(256, 256, 64),\n UpBlock(64, 64, 32),\n UpBlock(32, 3, 16),\n ])\n self.conv_transpose = nn.ConvTranspose2d(16, 1, 1)\n self.sigmoid = nn.Sigmoid()\n\n def add_hooks(self) -> List[torch.utils.hooks.RemovableHandle]:\n hooks = []\n for name, child in self.pretrained.named_children():\n if name in self.target_modules:\n hooks.append(child.register_forward_hook(self.save_output))\n return hooks\n\n def retrieve_hooked_outputs(self) -> List[torch.Tensor]:\n # to be called in the forward pass, this method returns the tensors\n # which were saved by the forward hooks\n outputs = []\n for name, child in self.pretrained.named_children():\n if name in self.target_modules:\n outputs.append(child.output)\n return outputs\n\n def cleanup(self) -> None:\n # removes the hooks, and the tensors which were added\n for name, child in self.pretrained.named_children():\n if name in self.target_modules:\n # allows the method to be safely called even if\n # the hooks aren't there\n try:\n del child.output\n except AttributeError:\n continue\n for hook in self.hooks:\n hook.remove()\n\n @staticmethod\n def save_output(module, input, output):\n # the hook to add to the target modules\n module.output = output\n\n def load_base(self, state_dict: dict) -> None:\n # This allows a model trained on the classifier to be loaded\n # into the model used for segmentation, even though their state_dicts\n # differ\n self.load_state_dict(state_dict, strict=False)\n\n def forward(self, x):\n org_input = x\n x = self.relu(self.pretrained(x))\n # we reverse the outputs so that the smallest output\n # is the first one we get, and the largest the last\n interim = self.retrieve_hooked_outputs()[::-1]\n\n for upsampler, interim_output in zip(self.upsamples[:-1], interim):\n x = upsampler(x, interim_output)\n x = self.upsamples[-1](x, org_input)\n return self.sigmoid(self.conv_transpose(x))\n\n\nclass UpBlock(nn.Module):\n\n def __init__(self, in_channels: int, across_channels: int, out_channels: int) -> None:\n super().__init__()\n up_out = across_out = out_channels // 2\n self.conv_across = nn.Conv2d(across_channels, across_out, 1)\n # alternative: ConvTranspose2d(in_channels, up_out, 2, stride=2)\n self.upsample = nn.Sequential(nn.Upsample(scale_factor=2, mode=\"bilinear\", align_corners=True),\n nn.Conv2d(in_channels, up_out, kernel_size=1))\n self.batchnorm = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU()\n\n def forward(self, x_up, x_across):\n upsampled = self.upsample(x_up)\n skipped = self.conv_across(x_across)\n joint = torch.cat((upsampled, skipped), dim=1)\n return self.batchnorm(self.relu(joint))\n" ]
[ [ "torch.nn.ConvTranspose2d", "torch.cat", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.Upsample", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rajat315315/pandas
[ "2eec4f7cfa1c45671b9875062343521a53ae8b28", "2eec4f7cfa1c45671b9875062343521a53ae8b28" ]
[ "pandas/core/series.py", "pandas/core/arrays/integer.py" ]
[ "\"\"\"\nData structure for 1-dimensional cross-sectional and time series data\n\"\"\"\nfrom __future__ import annotations\n\nfrom io import StringIO\nfrom shutil import get_terminal_size\nfrom textwrap import dedent\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n Callable,\n Hashable,\n Iterable,\n List,\n Optional,\n Tuple,\n Type,\n Union,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib, properties, reshape, tslibs\nfrom pandas._libs.lib import no_default\nfrom pandas._typing import (\n AggFuncType,\n ArrayLike,\n Axis,\n Dtype,\n DtypeObj,\n FrameOrSeriesUnion,\n IndexKeyFunc,\n NpDtype,\n StorageOptions,\n ValueKeyFunc,\n)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import Appender, Substitution, doc\nfrom pandas.util._validators import validate_bool_kwarg, validate_percentile\n\nfrom pandas.core.dtypes.cast import (\n convert_dtypes,\n maybe_cast_to_extension_array,\n validate_numeric_casting,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_bool,\n is_categorical_dtype,\n is_dict_like,\n is_extension_array_dtype,\n is_integer,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_scalar,\n validate_all_hashable,\n)\nfrom pandas.core.dtypes.generic import ABCDataFrame\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import (\n isna,\n na_value_for_dtype,\n notna,\n remove_na_arraylike,\n)\n\nfrom pandas.core import algorithms, base, generic, missing, nanops, ops\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.aggregation import transform\nfrom pandas.core.apply import series_apply\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.arrays.categorical import CategoricalAccessor\nfrom pandas.core.arrays.sparse import SparseAccessor\nimport pandas.core.common as com\nfrom pandas.core.construction import (\n create_series_with_explicit_dtype,\n extract_array,\n is_empty_data,\n sanitize_array,\n)\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.indexers import deprecate_ndim_indexing, unpack_1tuple\nfrom pandas.core.indexes.accessors import CombinedDatetimelikeProperties\nfrom pandas.core.indexes.api import (\n CategoricalIndex,\n Float64Index,\n Index,\n MultiIndex,\n ensure_index,\n)\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import PeriodIndex\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex\nfrom pandas.core.indexing import check_bool_indexer\nfrom pandas.core.internals import SingleBlockManager\nfrom pandas.core.internals.construction import sanitize_index\nfrom pandas.core.shared_docs import _shared_docs\nfrom pandas.core.sorting import ensure_key_mapped, nargsort\nfrom pandas.core.strings import StringMethods\nfrom pandas.core.tools.datetimes import to_datetime\n\nimport pandas.io.formats.format as fmt\nimport pandas.plotting\n\nif TYPE_CHECKING:\n from pandas._typing import TimedeltaConvertibleTypes, TimestampConvertibleTypes\n\n from pandas.core.frame import DataFrame\n from pandas.core.groupby.generic import SeriesGroupBy\n from pandas.core.resample import Resampler\n\n__all__ = [\"Series\"]\n\n_shared_doc_kwargs = {\n \"axes\": \"index\",\n \"klass\": \"Series\",\n \"axes_single_arg\": \"{0 or 'index'}\",\n \"axis\": \"\"\"axis : {0 or 'index'}\n Parameter needed for compatibility with DataFrame.\"\"\",\n \"inplace\": 
\"\"\"inplace : boolean, default False\n If True, performs operation inplace and returns None.\"\"\",\n \"unique\": \"np.ndarray\",\n \"duplicated\": \"Series\",\n \"optional_by\": \"\",\n \"optional_mapper\": \"\",\n \"optional_labels\": \"\",\n \"optional_axis\": \"\",\n \"replace_iloc\": \"\"\"\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\"\"\",\n}\n\n\ndef _coerce_method(converter):\n \"\"\"\n Install the scalar coercion methods.\n \"\"\"\n\n def wrapper(self):\n if len(self) == 1:\n return converter(self.iloc[0])\n raise TypeError(f\"cannot convert the series to {converter}\")\n\n wrapper.__name__ = f\"__{converter.__name__}__\"\n return wrapper\n\n\n# ----------------------------------------------------------------------\n# Series class\n\n\nclass Series(base.IndexOpsMixin, generic.NDFrame):\n \"\"\"\n One-dimensional ndarray with axis labels (including time series).\n\n Labels need not be unique but must be a hashable type. The object\n supports both integer- and label-based indexing and provides a host of\n methods for performing operations involving the index. Statistical\n methods from ndarray have been overridden to automatically exclude\n missing data (currently represented as NaN).\n\n Operations between Series (+, -, /, *, **) align values based on their\n associated index values-- they need not be the same length. The result\n index will be the sorted union of the two indexes.\n\n Parameters\n ----------\n data : array-like, Iterable, dict, or scalar value\n Contains data stored in Series. If data is a dict, argument order is\n maintained.\n index : array-like or Index (1d)\n Values must be hashable and have the same length as `data`.\n Non-unique index values are allowed. Will default to\n RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like\n and index is None, then the values in the index are used to\n reindex the Series after it is created using the keys in the data.\n dtype : str, numpy.dtype, or ExtensionDtype, optional\n Data type for the output Series. 
If not specified, this will be\n inferred from `data`.\n See the :ref:`user guide <basics.dtypes>` for more usages.\n name : str, optional\n The name to give to the Series.\n copy : bool, default False\n Copy input data.\n \"\"\"\n\n _typ = \"series\"\n _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)\n\n _name: Hashable\n _metadata: List[str] = [\"name\"]\n _internal_names_set = {\"index\"} | generic.NDFrame._internal_names_set\n _accessors = {\"dt\", \"cat\", \"str\", \"sparse\"}\n _hidden_attrs = (\n base.IndexOpsMixin._hidden_attrs\n | generic.NDFrame._hidden_attrs\n | frozenset([\"compress\", \"ptp\"])\n )\n\n # Override cache_readonly bc Series is mutable\n hasnans = property(\n base.IndexOpsMixin.hasnans.func, doc=base.IndexOpsMixin.hasnans.__doc__\n )\n __hash__ = generic.NDFrame.__hash__\n _mgr: SingleBlockManager\n div: Callable[[Series, Any], Series]\n rdiv: Callable[[Series, Any], Series]\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data=None,\n index=None,\n dtype: Optional[Dtype] = None,\n name=None,\n copy: bool = False,\n fastpath: bool = False,\n ):\n\n if (\n isinstance(data, SingleBlockManager)\n and index is None\n and dtype is None\n and copy is False\n ):\n # GH#33357 called with just the SingleBlockManager\n NDFrame.__init__(self, data)\n self.name = name\n return\n\n # we are called internally, so short-circuit\n if fastpath:\n\n # data is an ndarray, index is defined\n if not isinstance(data, SingleBlockManager):\n data = SingleBlockManager.from_array(data, index)\n if copy:\n data = data.copy()\n if index is None:\n index = data.index\n\n else:\n\n name = ibase.maybe_extract_name(name, data, type(self))\n\n if is_empty_data(data) and dtype is None:\n # gh-17261\n warnings.warn(\n \"The default dtype for empty Series will be 'object' instead \"\n \"of 'float64' in a future version. Specify a dtype explicitly \"\n \"to silence this warning.\",\n DeprecationWarning,\n stacklevel=2,\n )\n # uncomment the line below when removing the DeprecationWarning\n # dtype = np.dtype(object)\n\n if index is not None:\n index = ensure_index(index)\n\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, MultiIndex):\n raise NotImplementedError(\n \"initializing a Series from a MultiIndex is not supported\"\n )\n elif isinstance(data, Index):\n\n if dtype is not None:\n # astype copies\n data = data.astype(dtype)\n else:\n # GH#24096 we need to ensure the index remains immutable\n data = data._values.copy()\n copy = False\n\n elif isinstance(data, np.ndarray):\n if len(data.dtype):\n # GH#13296 we are dealing with a compound dtype, which\n # should be treated as 2D\n raise ValueError(\n \"Cannot construct a Series from an ndarray with \"\n \"compound dtype. Use DataFrame instead.\"\n )\n elif isinstance(data, Series):\n if index is None:\n index = data.index\n else:\n data = data.reindex(index, copy=copy)\n copy = False\n data = data._mgr\n elif is_dict_like(data):\n data, index = self._init_dict(data, index, dtype)\n dtype = None\n copy = False\n elif isinstance(data, SingleBlockManager):\n if index is None:\n index = data.index\n elif not data.index.equals(index) or copy:\n # GH#19275 SingleBlockManager input should only be called\n # internally\n raise AssertionError(\n \"Cannot pass both SingleBlockManager \"\n \"`data` argument and a different \"\n \"`index` argument. 
`copy` must be False.\"\n )\n\n elif is_extension_array_dtype(data):\n pass\n elif isinstance(data, (set, frozenset)):\n raise TypeError(f\"'{type(data).__name__}' type is unordered\")\n else:\n data = com.maybe_iterable_to_list(data)\n\n if index is None:\n if not is_list_like(data):\n data = [data]\n index = ibase.default_index(len(data))\n elif is_list_like(data):\n sanitize_index(data, index)\n\n # create/copy the manager\n if isinstance(data, SingleBlockManager):\n if dtype is not None:\n data = data.astype(dtype=dtype, errors=\"ignore\", copy=copy)\n elif copy:\n data = data.copy()\n else:\n data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)\n\n data = SingleBlockManager.from_array(data, index)\n\n generic.NDFrame.__init__(self, data)\n self.name = name\n self._set_axis(0, index, fastpath=True)\n\n def _init_dict(self, data, index=None, dtype: Optional[Dtype] = None):\n \"\"\"\n Derive the \"_mgr\" and \"index\" attributes of a new Series from a\n dictionary input.\n\n Parameters\n ----------\n data : dict or dict-like\n Data used to populate the new Series.\n index : Index or index-like, default None\n Index for the new Series: if None, use dict keys.\n dtype : dtype, default None\n The dtype for the new Series: if None, infer from data.\n\n Returns\n -------\n _data : BlockManager for the new Series\n index : index for the new Series\n \"\"\"\n # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]\n # raises KeyError), so we iterate the entire dict, and align\n if data:\n # GH:34717, issue was using zip to extract key and values from data.\n # using generators in effects the performance.\n # Below is the new way of extracting the keys and values\n\n keys = tuple(data.keys())\n values = list(data.values()) # Generating list of values- faster way\n elif index is not None:\n # fastpath for Series(data=None). Just use broadcasting a scalar\n # instead of reindexing.\n values = na_value_for_dtype(dtype)\n keys = index\n else:\n keys, values = (), []\n\n # Input is now list-like, so rely on \"standard\" construction:\n\n # TODO: passing np.float64 to not break anything yet. See GH-17261\n s = create_series_with_explicit_dtype(\n values, index=keys, dtype=dtype, dtype_if_empty=np.float64\n )\n\n # Now we just make sure the order is respected, if any\n if data and index is not None:\n s = s.reindex(index, copy=False)\n return s._mgr, s.index\n\n # ----------------------------------------------------------------------\n\n @property\n def _constructor(self) -> Type[Series]:\n return Series\n\n @property\n def _constructor_expanddim(self) -> Type[DataFrame]:\n from pandas.core.frame import DataFrame\n\n return DataFrame\n\n # types\n @property\n def _can_hold_na(self) -> bool:\n return self._mgr._can_hold_na\n\n _index = None\n\n def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None:\n \"\"\"\n Override generic, we want to set the _typ here.\n\n This is called from the cython code when we set the `index` attribute\n directly, e.g. 
`series.index = [1, 2, 3]`.\n \"\"\"\n if not fastpath:\n labels = ensure_index(labels)\n\n if labels._is_all_dates:\n deep_labels = labels\n if isinstance(labels, CategoricalIndex):\n deep_labels = labels.categories\n\n if not isinstance(\n deep_labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)\n ):\n try:\n labels = DatetimeIndex(labels)\n # need to set here because we changed the index\n if fastpath:\n self._mgr.set_axis(axis, labels)\n except (tslibs.OutOfBoundsDatetime, ValueError):\n # labels may exceeds datetime bounds,\n # or not be a DatetimeIndex\n pass\n\n object.__setattr__(self, \"_index\", labels)\n if not fastpath:\n # The ensure_index call above ensures we have an Index object\n self._mgr.set_axis(axis, labels)\n\n # ndarray compatibility\n @property\n def dtype(self) -> DtypeObj:\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n return self._mgr.dtype\n\n @property\n def dtypes(self) -> DtypeObj:\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n # DataFrame compatibility\n return self.dtype\n\n @property\n def name(self) -> Hashable:\n \"\"\"\n Return the name of the Series.\n\n The name of a Series becomes its index or column name if it is used\n to form a DataFrame. It is also used whenever displaying the Series\n using the interpreter.\n\n Returns\n -------\n label (hashable object)\n The name of the Series, also the column name if part of a DataFrame.\n\n See Also\n --------\n Series.rename : Sets the Series name when given a scalar input.\n Index.name : Corresponding Index property.\n\n Examples\n --------\n The Series name can be set initially when calling the constructor.\n\n >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')\n >>> s\n 0 1\n 1 2\n 2 3\n Name: Numbers, dtype: int64\n >>> s.name = \"Integers\"\n >>> s\n 0 1\n 1 2\n 2 3\n Name: Integers, dtype: int64\n\n The name of a Series within a DataFrame is its column name.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],\n ... columns=[\"Odd Numbers\", \"Even Numbers\"])\n >>> df\n Odd Numbers Even Numbers\n 0 1 2\n 1 3 4\n 2 5 6\n >>> df[\"Even Numbers\"].name\n 'Even Numbers'\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value: Hashable) -> None:\n validate_all_hashable(value, error_name=f\"{type(self).__name__}.name\")\n object.__setattr__(self, \"_name\", value)\n\n @property\n def values(self):\n \"\"\"\n Return Series as ndarray or ndarray-like depending on the dtype.\n\n .. warning::\n\n We recommend using :attr:`Series.array` or\n :meth:`Series.to_numpy`, depending on whether you need\n a reference to the underlying data or a NumPy array.\n\n Returns\n -------\n numpy.ndarray or ndarray-like\n\n See Also\n --------\n Series.array : Reference to the underlying data.\n Series.to_numpy : A NumPy array representing the underlying data.\n\n Examples\n --------\n >>> pd.Series([1, 2, 3]).values\n array([1, 2, 3])\n\n >>> pd.Series(list('aabc')).values\n array(['a', 'a', 'b', 'c'], dtype=object)\n\n >>> pd.Series(list('aabc')).astype('category').values\n ['a', 'a', 'b', 'c']\n Categories (3, object): ['a', 'b', 'c']\n\n Timezone aware datetime data is converted to UTC:\n\n >>> pd.Series(pd.date_range('20130101', periods=3,\n ... 
tz='US/Eastern')).values\n array(['2013-01-01T05:00:00.000000000',\n '2013-01-02T05:00:00.000000000',\n '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')\n \"\"\"\n return self._mgr.external_values()\n\n @property\n def _values(self):\n \"\"\"\n Return the internal repr of this data (defined by Block.interval_values).\n This are the values as stored in the Block (ndarray or ExtensionArray\n depending on the Block class), with datetime64[ns] and timedelta64[ns]\n wrapped in ExtensionArrays to match Index._values behavior.\n\n Differs from the public ``.values`` for certain data types, because of\n historical backwards compatibility of the public attribute (e.g. period\n returns object ndarray and datetimetz a datetime64[ns] ndarray for\n ``.values`` while it returns an ExtensionArray for ``._values`` in those\n cases).\n\n Differs from ``.array`` in that this still returns the numpy array if\n the Block is backed by a numpy array (except for datetime64 and\n timedelta64 dtypes), while ``.array`` ensures to always return an\n ExtensionArray.\n\n Overview:\n\n dtype | values | _values | array |\n ----------- | ------------- | ------------- | ------------- |\n Numeric | ndarray | ndarray | PandasArray |\n Category | Categorical | Categorical | Categorical |\n dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray |\n dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray |\n td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] |\n Period | ndarray[obj] | PeriodArray | PeriodArray |\n Nullable | EA | EA | EA |\n\n \"\"\"\n return self._mgr.internal_values()\n\n # error: Decorated property not supported\n @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc]\n @property\n def array(self) -> ExtensionArray:\n return self._mgr._block.array_values()\n\n # ops\n def ravel(self, order=\"C\"):\n \"\"\"\n Return the flattened underlying data as an ndarray.\n\n Returns\n -------\n numpy.ndarray or ndarray-like\n Flattened data of the Series.\n\n See Also\n --------\n numpy.ndarray.ravel : Return a flattened array.\n \"\"\"\n return self._values.ravel(order=order)\n\n def __len__(self) -> int:\n \"\"\"\n Return the length of the Series.\n \"\"\"\n return len(self._mgr)\n\n def view(self, dtype: Optional[Dtype] = None) -> Series:\n \"\"\"\n Create a new view of the Series.\n\n This function will return a new Series with a view of the same\n underlying values in memory, optionally reinterpreted with a new data\n type. The new data type must preserve the same size in bytes as to not\n cause index misalignment.\n\n Parameters\n ----------\n dtype : data type\n Data type object or one of their string representations.\n\n Returns\n -------\n Series\n A new Series object as a view of the same data in memory.\n\n See Also\n --------\n numpy.ndarray.view : Equivalent numpy function to create a new view of\n the same data in memory.\n\n Notes\n -----\n Series are instantiated with ``dtype=float64`` by default. 
While\n ``numpy.ndarray.view()`` will return a view with the same data type as\n the original array, ``Series.view()`` (without specified dtype)\n will try using ``float64`` and may fail if the original data type size\n in bytes is not the same.\n\n Examples\n --------\n >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')\n >>> s\n 0 -2\n 1 -1\n 2 0\n 3 1\n 4 2\n dtype: int8\n\n The 8 bit signed integer representation of `-1` is `0b11111111`, but\n the same bytes represent 255 if read as an 8 bit unsigned integer:\n\n >>> us = s.view('uint8')\n >>> us\n 0 254\n 1 255\n 2 0\n 3 1\n 4 2\n dtype: uint8\n\n The views share the same underlying values:\n\n >>> us[0] = 128\n >>> s\n 0 -128\n 1 -1\n 2 0\n 3 1\n 4 2\n dtype: int8\n \"\"\"\n return self._constructor(\n self._values.view(dtype), index=self.index\n ).__finalize__(self, method=\"view\")\n\n # ----------------------------------------------------------------------\n # NDArray Compat\n _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)\n\n def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:\n \"\"\"\n Return the values as a NumPy array.\n\n Users should not call this directly. Rather, it is invoked by\n :func:`numpy.array` and :func:`numpy.asarray`.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to use for the resulting NumPy array. By default,\n the dtype is inferred from the data.\n\n Returns\n -------\n numpy.ndarray\n The values in the series converted to a :class:`numpy.ndarray`\n with the specified `dtype`.\n\n See Also\n --------\n array : Create a new array from data.\n Series.array : Zero-copy view to the array backing the Series.\n Series.to_numpy : Series method for similar behavior.\n\n Examples\n --------\n >>> ser = pd.Series([1, 2, 3])\n >>> np.asarray(ser)\n array([1, 2, 3])\n\n For timezone-aware data, the timezones may be retained with\n ``dtype='object'``\n\n >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz=\"CET\"))\n >>> np.asarray(tzser, dtype=\"object\")\n array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),\n Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],\n dtype=object)\n\n Or the values may be localized to UTC and the tzinfo discarded with\n ``dtype='datetime64[ns]'``\n\n >>> np.asarray(tzser, dtype=\"datetime64[ns]\") # doctest: +ELLIPSIS\n array(['1999-12-31T23:00:00.000000000', ...],\n dtype='datetime64[ns]')\n \"\"\"\n return np.asarray(self.array, dtype)\n\n # ----------------------------------------------------------------------\n # Unary Methods\n\n # coercion\n __float__ = _coerce_method(float)\n __long__ = _coerce_method(int)\n __int__ = _coerce_method(int)\n\n # ----------------------------------------------------------------------\n\n # indexers\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return a list of the row axis labels.\n \"\"\"\n return [self.index]\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n @Appender(generic.NDFrame.take.__doc__)\n def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series:\n if is_copy is not None:\n warnings.warn(\n \"is_copy is deprecated and will be removed in a future version. 
\"\n \"'take' always returns a copy, so there is no need to specify this.\",\n FutureWarning,\n stacklevel=2,\n )\n nv.validate_take((), kwargs)\n\n indices = ensure_platform_int(indices)\n new_index = self.index.take(indices)\n new_values = self._values.take(indices)\n\n result = self._constructor(new_values, index=new_index, fastpath=True)\n return result.__finalize__(self, method=\"take\")\n\n def _take_with_is_copy(self, indices, axis=0):\n \"\"\"\n Internal version of the `take` method that sets the `_is_copy`\n attribute to keep track of the parent dataframe (using in indexing\n for the SettingWithCopyWarning). For Series this does the same\n as the public take (it never sets `_is_copy`).\n\n See the docstring of `take` for full explanation of the parameters.\n \"\"\"\n return self.take(indices=indices, axis=axis)\n\n def _ixs(self, i: int, axis: int = 0):\n \"\"\"\n Return the i-th value or values in the Series by location.\n\n Parameters\n ----------\n i : int\n\n Returns\n -------\n scalar (int) or Series (slice, sequence)\n \"\"\"\n return self._values[i]\n\n def _slice(self, slobj: slice, axis: int = 0) -> Series:\n # axis kwarg is retained for compat with NDFrame method\n # _slice is *always* positional\n return self._get_values(slobj)\n\n def __getitem__(self, key):\n key = com.apply_if_callable(key, self)\n\n if key is Ellipsis:\n return self\n\n key_is_scalar = is_scalar(key)\n if isinstance(key, (list, tuple)):\n key = unpack_1tuple(key)\n\n if is_integer(key) and self.index._should_fallback_to_positional():\n return self._values[key]\n\n elif key_is_scalar:\n return self._get_value(key)\n\n if is_hashable(key):\n # Otherwise index.get_value will raise InvalidIndexError\n try:\n # For labels that don't resolve as scalars like tuples and frozensets\n result = self._get_value(key)\n\n return result\n\n except (KeyError, TypeError):\n if isinstance(key, tuple) and isinstance(self.index, MultiIndex):\n # We still have the corner case where a tuple is a key\n # in the first level of our MultiIndex\n return self._get_values_tuple(key)\n\n if is_iterator(key):\n key = list(key)\n\n if com.is_bool_indexer(key):\n key = check_bool_indexer(self.index, key)\n key = np.asarray(key, dtype=bool)\n return self._get_values(key)\n\n return self._get_with(key)\n\n def _get_with(self, key):\n # other: fancy integer or otherwise\n if isinstance(key, slice):\n # _convert_slice_indexer to determine if this slice is positional\n # or label based, and if the latter, convert to positional\n slobj = self.index._convert_slice_indexer(key, kind=\"getitem\")\n return self._slice(slobj)\n elif isinstance(key, ABCDataFrame):\n raise TypeError(\n \"Indexing a Series with DataFrame is not \"\n \"supported, use the appropriate DataFrame column\"\n )\n elif isinstance(key, tuple):\n return self._get_values_tuple(key)\n\n elif not is_list_like(key):\n # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684\n return self.loc[key]\n\n if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)):\n key = list(key)\n\n if isinstance(key, Index):\n key_type = key.inferred_type\n else:\n key_type = lib.infer_dtype(key, skipna=False)\n\n # Note: The key_type == \"boolean\" case should be caught by the\n # com.is_bool_indexer check in __getitem__\n if key_type == \"integer\":\n # We need to decide whether to treat this as a positional indexer\n # (i.e. self.iloc) or label-based (i.e. 
self.loc)\n if not self.index._should_fallback_to_positional():\n return self.loc[key]\n else:\n return self.iloc[key]\n\n # handle the dup indexing case GH#4246\n return self.loc[key]\n\n def _get_values_tuple(self, key):\n # mpl hackaround\n if com.any_none(*key):\n result = self._get_values(key)\n deprecate_ndim_indexing(result, stacklevel=5)\n return result\n\n if not isinstance(self.index, MultiIndex):\n raise KeyError(\"key of type tuple not found and not a MultiIndex\")\n\n # If key is contained, would have returned by now\n indexer, new_index = self.index.get_loc_level(key)\n return self._constructor(self._values[indexer], index=new_index).__finalize__(\n self\n )\n\n def _get_values(self, indexer):\n try:\n return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self)\n except ValueError:\n # mpl compat if we look up e.g. ser[:, np.newaxis];\n # see tests.series.timeseries.test_mpl_compat_hack\n # the asarray is needed to avoid returning a 2D DatetimeArray\n return np.asarray(self._values[indexer])\n\n def _get_value(self, label, takeable: bool = False):\n \"\"\"\n Quickly retrieve single value at passed index label.\n\n Parameters\n ----------\n label : object\n takeable : interpret the index as indexers, default False\n\n Returns\n -------\n scalar value\n \"\"\"\n if takeable:\n return self._values[label]\n\n # Similar to Index.get_value, but we do not fall back to positional\n loc = self.index.get_loc(label)\n return self.index._get_values_for_loc(self, loc, label)\n\n def __setitem__(self, key, value):\n key = com.apply_if_callable(key, self)\n cacher_needs_updating = self._check_is_chained_assignment_possible()\n\n if key is Ellipsis:\n key = slice(None)\n\n try:\n self._set_with_engine(key, value)\n except (KeyError, ValueError):\n values = self._values\n if is_integer(key) and self.index.inferred_type != \"integer\":\n # positional setter\n values[key] = value\n else:\n # GH#12862 adding a new key to the Series\n self.loc[key] = value\n\n except TypeError as err:\n if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):\n raise KeyError(\n \"key of type tuple not found and not a MultiIndex\"\n ) from err\n\n if com.is_bool_indexer(key):\n key = check_bool_indexer(self.index, key)\n key = np.asarray(key, dtype=bool)\n try:\n self._where(~key, value, inplace=True)\n except InvalidIndexError:\n self.iloc[key] = value\n return\n\n else:\n self._set_with(key, value)\n\n if cacher_needs_updating:\n self._maybe_update_cacher()\n\n def _set_with_engine(self, key, value):\n # fails with AttributeError for IntervalIndex\n loc = self.index._engine.get_loc(key)\n validate_numeric_casting(self.dtype, value)\n self._values[loc] = value\n\n def _set_with(self, key, value):\n # other: fancy integer or otherwise\n if isinstance(key, slice):\n indexer = self.index._convert_slice_indexer(key, kind=\"getitem\")\n return self._set_values(indexer, value)\n\n else:\n assert not isinstance(key, tuple)\n\n if is_scalar(key):\n key = [key]\n\n if isinstance(key, Index):\n key_type = key.inferred_type\n key = key._values\n else:\n key_type = lib.infer_dtype(key, skipna=False)\n\n # Note: key_type == \"boolean\" should not occur because that\n # should be caught by the is_bool_indexer check in __setitem__\n if key_type == \"integer\":\n if not self.index._should_fallback_to_positional():\n self._set_labels(key, value)\n else:\n self._set_values(key, value)\n else:\n self.loc[key] = value\n\n def _set_labels(self, key, value):\n key = com.asarray_tuplesafe(key)\n indexer: 
np.ndarray = self.index.get_indexer(key)\n mask = indexer == -1\n if mask.any():\n raise KeyError(f\"{key[mask]} not in index\")\n self._set_values(indexer, value)\n\n def _set_values(self, key, value):\n if isinstance(key, Series):\n key = key._values\n self._mgr = self._mgr.setitem( # type: ignore[assignment]\n indexer=key, value=value\n )\n self._maybe_update_cacher()\n\n def _set_value(self, label, value, takeable: bool = False):\n \"\"\"\n Quickly set single value at passed label.\n\n If label is not contained, a new object is created with the label\n placed at the end of the result index.\n\n Parameters\n ----------\n label : object\n Partial indexing with MultiIndex not allowed.\n value : object\n Scalar value.\n takeable : interpret the index as indexers, default False\n \"\"\"\n try:\n if takeable:\n self._values[label] = value\n else:\n loc = self.index.get_loc(label)\n validate_numeric_casting(self.dtype, value)\n self._values[loc] = value\n except KeyError:\n\n # set using a non-recursive method\n self.loc[label] = value\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n @property\n def _is_mixed_type(self):\n return False\n\n def repeat(self, repeats, axis=None) -> Series:\n \"\"\"\n Repeat elements of a Series.\n\n Returns a new Series where each element of the current Series\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n Series.\n axis : None\n Must be ``None``. Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n Series\n Newly created Series with repeated elements.\n\n See Also\n --------\n Index.repeat : Equivalent function for Index.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'b', 'c'])\n >>> s\n 0 a\n 1 b\n 2 c\n dtype: object\n >>> s.repeat(2)\n 0 a\n 0 a\n 1 b\n 1 b\n 2 c\n 2 c\n dtype: object\n >>> s.repeat([1, 2, 3])\n 0 a\n 1 b\n 1 b\n 2 c\n 2 c\n 2 c\n dtype: object\n \"\"\"\n nv.validate_repeat((), {\"axis\": axis})\n new_index = self.index.repeat(repeats)\n new_values = self._values.repeat(repeats)\n return self._constructor(new_values, index=new_index).__finalize__(\n self, method=\"repeat\"\n )\n\n def reset_index(self, level=None, drop=False, name=None, inplace=False):\n \"\"\"\n Generate a new DataFrame or Series with the index reset.\n\n This is useful when the index needs to be treated as a column, or\n when the index is meaningless and needs to be reset to the default\n before another operation.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default optional\n For a Series with a MultiIndex, only remove the specified levels\n from the index. Removes all levels by default.\n drop : bool, default False\n Just reset the index, without inserting it as a column in\n the new DataFrame.\n name : object, optional\n The name to use for the column containing the original Series\n values. Uses ``self.name`` by default. 
This argument is ignored\n when `drop` is True.\n inplace : bool, default False\n Modify the Series in place (do not create a new object).\n\n Returns\n -------\n Series or DataFrame or None\n When `drop` is False (the default), a DataFrame is returned.\n The newly created columns will come first in the DataFrame,\n followed by the original Series values.\n When `drop` is True, a `Series` is returned.\n In either case, if ``inplace=True``, no value is returned.\n\n See Also\n --------\n DataFrame.reset_index: Analogous function for DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4], name='foo',\n ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))\n\n Generate a DataFrame with default index.\n\n >>> s.reset_index()\n idx foo\n 0 a 1\n 1 b 2\n 2 c 3\n 3 d 4\n\n To specify the name of the new column use `name`.\n\n >>> s.reset_index(name='values')\n idx values\n 0 a 1\n 1 b 2\n 2 c 3\n 3 d 4\n\n To generate a new Series with the default set `drop` to True.\n\n >>> s.reset_index(drop=True)\n 0 1\n 1 2\n 2 3\n 3 4\n Name: foo, dtype: int64\n\n To update the Series in place, without generating a new one\n set `inplace` to True. Note that it also requires ``drop=True``.\n\n >>> s.reset_index(inplace=True, drop=True)\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n Name: foo, dtype: int64\n\n The `level` parameter is interesting for Series with a multi-level\n index.\n\n >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),\n ... np.array(['one', 'two', 'one', 'two'])]\n >>> s2 = pd.Series(\n ... range(4), name='foo',\n ... index=pd.MultiIndex.from_arrays(arrays,\n ... names=['a', 'b']))\n\n To remove a specific level from the Index, use `level`.\n\n >>> s2.reset_index(level='a')\n a foo\n b\n one bar 0\n two bar 1\n one baz 2\n two baz 3\n\n If `level` is not set, all levels are removed from the Index.\n\n >>> s2.reset_index()\n a b foo\n 0 bar one 0\n 1 bar two 1\n 2 baz one 2\n 3 baz two 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if drop:\n new_index = ibase.default_index(len(self))\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < self.index.nlevels:\n new_index = self.index.droplevel(level)\n\n if inplace:\n self.index = new_index\n # set name if it was passed, otherwise, keep the previous name\n self.name = name or self.name\n else:\n return self._constructor(\n self._values.copy(), index=new_index\n ).__finalize__(self, method=\"reset_index\")\n elif inplace:\n raise TypeError(\n \"Cannot reset_index inplace on a Series to create a DataFrame\"\n )\n else:\n df = self.to_frame(name)\n return df.reset_index(level=level, drop=drop)\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __repr__(self) -> str:\n \"\"\"\n Return a string representation for a particular Series.\n \"\"\"\n buf = StringIO(\"\")\n width, height = get_terminal_size()\n max_rows = (\n height\n if get_option(\"display.max_rows\") == 0\n else get_option(\"display.max_rows\")\n )\n min_rows = (\n height\n if get_option(\"display.max_rows\") == 0\n else get_option(\"display.min_rows\")\n )\n show_dimensions = get_option(\"display.show_dimensions\")\n\n self.to_string(\n buf=buf,\n name=self.name,\n dtype=self.dtype,\n min_rows=min_rows,\n max_rows=max_rows,\n length=show_dimensions,\n )\n return buf.getvalue()\n\n def to_string(\n self,\n buf=None,\n na_rep=\"NaN\",\n float_format=None,\n header=True,\n index=True,\n 
length=False,\n dtype=False,\n name=False,\n max_rows=None,\n min_rows=None,\n ):\n \"\"\"\n Render a string representation of the Series.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n na_rep : str, optional\n String representation of NaN to use, default 'NaN'.\n float_format : one-parameter function, optional\n Formatter function to apply to columns' elements if they are\n floats, default None.\n header : bool, default True\n Add the Series header (index name).\n index : bool, optional\n Add index (row) labels, default True.\n length : bool, default False\n Add the Series length.\n dtype : bool, default False\n Add the Series dtype.\n name : bool, default False\n Add the Series name if not None.\n max_rows : int, optional\n Maximum number of rows to show before truncating. If None, show\n all.\n min_rows : int, optional\n The number of rows to display in a truncated repr (when number\n of rows is above `max_rows`).\n\n Returns\n -------\n str or None\n String representation of Series if ``buf=None``, otherwise None.\n \"\"\"\n formatter = fmt.SeriesFormatter(\n self,\n name=name,\n length=length,\n header=header,\n index=index,\n dtype=dtype,\n na_rep=na_rep,\n float_format=float_format,\n min_rows=min_rows,\n max_rows=max_rows,\n )\n result = formatter.to_string()\n\n # catch contract violations\n if not isinstance(result, str):\n raise AssertionError(\n \"result must be of type str, type \"\n f\"of result is {repr(type(result).__name__)}\"\n )\n\n if buf is None:\n return result\n else:\n try:\n buf.write(result)\n except AttributeError:\n with open(buf, \"w\") as f:\n f.write(result)\n\n @doc(\n klass=_shared_doc_kwargs[\"klass\"],\n storage_options=generic._shared_docs[\"storage_options\"],\n examples=dedent(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([\"elk\", \"pig\", \"dog\", \"quetzal\"], name=\"animal\")\n >>> print(s.to_markdown())\n | | animal |\n |---:|:---------|\n | 0 | elk |\n | 1 | pig |\n | 2 | dog |\n | 3 | quetzal |\n \"\"\"\n ),\n )\n def to_markdown(\n self,\n buf: Optional[IO[str]] = None,\n mode: str = \"wt\",\n index: bool = True,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[str]:\n \"\"\"\n Print {klass} in Markdown-friendly format.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n mode : str, optional\n Mode in which file is opened, \"wt\" by default.\n index : bool, optional, default True\n Add index (row) labels.\n\n .. versionadded:: 1.1.0\n {storage_options}\n\n .. 
versionadded:: 1.2.0\n\n **kwargs\n These parameters will be passed to `tabulate \\\n <https://pypi.org/project/tabulate>`_.\n\n Returns\n -------\n str\n {klass} in Markdown-friendly format.\n\n Notes\n -----\n Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.\n\n Examples\n --------\n >>> s = pd.Series([\"elk\", \"pig\", \"dog\", \"quetzal\"], name=\"animal\")\n >>> print(s.to_markdown())\n | | animal |\n |---:|:---------|\n | 0 | elk |\n | 1 | pig |\n | 2 | dog |\n | 3 | quetzal |\n\n Output markdown with a tabulate option.\n\n >>> print(s.to_markdown(tablefmt=\"grid\"))\n +----+----------+\n | | animal |\n +====+==========+\n | 0 | elk |\n +----+----------+\n | 1 | pig |\n +----+----------+\n | 2 | dog |\n +----+----------+\n | 3 | quetzal |\n +----+----------+\n \"\"\"\n return self.to_frame().to_markdown(\n buf, mode, index, storage_options=storage_options, **kwargs\n )\n\n # ----------------------------------------------------------------------\n\n def items(self) -> Iterable[Tuple[Hashable, Any]]:\n \"\"\"\n Lazily iterate over (index, value) tuples.\n\n This method returns an iterable tuple (index, value). This is\n convenient if you want to create a lazy iterator.\n\n Returns\n -------\n iterable\n Iterable of tuples containing the (index, value) pairs from a\n Series.\n\n See Also\n --------\n DataFrame.items : Iterate over (column name, Series) pairs.\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.\n\n Examples\n --------\n >>> s = pd.Series(['A', 'B', 'C'])\n >>> for index, value in s.items():\n ... print(f\"Index : {index}, Value : {value}\")\n Index : 0, Value : A\n Index : 1, Value : B\n Index : 2, Value : C\n \"\"\"\n return zip(iter(self.index), iter(self))\n\n @Appender(items.__doc__)\n def iteritems(self) -> Iterable[Tuple[Hashable, Any]]:\n return self.items()\n\n # ----------------------------------------------------------------------\n # Misc public methods\n\n def keys(self) -> Index:\n \"\"\"\n Return alias for index.\n\n Returns\n -------\n Index\n Index of the Series.\n \"\"\"\n return self.index\n\n def to_dict(self, into=dict):\n \"\"\"\n Convert Series to {label -> value} dict or dict-like object.\n\n Parameters\n ----------\n into : class, default dict\n The collections.abc.Mapping subclass to use as the return\n object. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n collections.abc.Mapping\n Key-value representation of Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_dict()\n {0: 1, 1: 2, 2: 3, 3: 4}\n >>> from collections import OrderedDict, defaultdict\n >>> s.to_dict(OrderedDict)\n OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])\n >>> dd = defaultdict(list)\n >>> s.to_dict(dd)\n defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})\n \"\"\"\n # GH16122\n into_c = com.standardize_mapping(into)\n return into_c(self.items())\n\n def to_frame(self, name=None) -> DataFrame:\n \"\"\"\n Convert Series to DataFrame.\n\n Parameters\n ----------\n name : object, default None\n The passed name should substitute for the series name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame representation of Series.\n\n Examples\n --------\n >>> s = pd.Series([\"a\", \"b\", \"c\"],\n ... 
name=\"vals\")\n >>> s.to_frame()\n vals\n 0 a\n 1 b\n 2 c\n \"\"\"\n if name is None:\n df = self._constructor_expanddim(self)\n else:\n df = self._constructor_expanddim({name: self})\n\n return df\n\n def _set_name(self, name, inplace=False) -> Series:\n \"\"\"\n Set the Series name.\n\n Parameters\n ----------\n name : str\n inplace : bool\n Whether to modify `self` directly or return a copy.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n ser = self if inplace else self.copy()\n ser.name = name\n return ser\n\n @Appender(\n \"\"\"\nExamples\n--------\n>>> ser = pd.Series([390., 350., 30., 20.],\n... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name=\"Max Speed\")\n>>> ser\nFalcon 390.0\nFalcon 350.0\nParrot 30.0\nParrot 20.0\nName: Max Speed, dtype: float64\n>>> ser.groupby([\"a\", \"b\", \"a\", \"b\"]).mean()\na 210.0\nb 185.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(level=0).mean()\nFalcon 370.0\nParrot 25.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(ser > 100).mean()\nMax Speed\nFalse 25.0\nTrue 370.0\nName: Max Speed, dtype: float64\n\n**Grouping by Indexes**\n\nWe can groupby different levels of a hierarchical index\nusing the `level` parameter:\n\n>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n... ['Captive', 'Wild', 'Captive', 'Wild']]\n>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n>>> ser = pd.Series([390., 350., 30., 20.], index=index, name=\"Max Speed\")\n>>> ser\nAnimal Type\nFalcon Captive 390.0\n Wild 350.0\nParrot Captive 30.0\n Wild 20.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(level=0).mean()\nAnimal\nFalcon 370.0\nParrot 25.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(level=\"Type\").mean()\nType\nCaptive 210.0\nWild 185.0\nName: Max Speed, dtype: float64\n\nWe can also choose to include `NA` in group keys or not by defining\n`dropna` parameter, the default setting is `True`:\n\n>>> ser = pd.Series([1, 2, 3, 3], index=[\"a\", 'a', 'b', np.nan])\n>>> ser.groupby(level=0).sum()\na 3\nb 3\ndtype: int64\n\n>>> ser.groupby(level=0, dropna=False).sum()\na 3\nb 3\nNaN 3\ndtype: int64\n\n>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']\n>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name=\"Max Speed\")\n>>> ser.groupby([\"a\", \"b\", \"a\", np.nan]).mean()\na 210.0\nb 350.0\nName: Max Speed, dtype: float64\n\n>>> ser.groupby([\"a\", \"b\", \"a\", np.nan], dropna=False).mean()\na 210.0\nb 350.0\nNaN 20.0\nName: Max Speed, dtype: float64\n\"\"\"\n )\n @Appender(generic._shared_docs[\"groupby\"] % _shared_doc_kwargs)\n def groupby(\n self,\n by=None,\n axis=0,\n level=None,\n as_index: bool = True,\n sort: bool = True,\n group_keys: bool = True,\n squeeze: bool = no_default,\n observed: bool = False,\n dropna: bool = True,\n ) -> SeriesGroupBy:\n from pandas.core.groupby.generic import SeriesGroupBy\n\n if squeeze is not no_default:\n warnings.warn(\n (\n \"The `squeeze` parameter is deprecated and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n else:\n squeeze = False\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n\n return SeriesGroupBy(\n obj=self,\n keys=by,\n axis=axis,\n level=level,\n as_index=as_index,\n sort=sort,\n group_keys=group_keys,\n squeeze=squeeze,\n observed=observed,\n dropna=dropna,\n )\n\n # ----------------------------------------------------------------------\n # Statistics, overridden ndarray methods\n\n # TODO: 
integrate bottleneck\n\n def count(self, level=None):\n \"\"\"\n Return number of non-NA/null observations in the Series.\n\n Parameters\n ----------\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a smaller Series.\n\n Returns\n -------\n int or Series (if level specified)\n Number of non-null values in the Series.\n\n See Also\n --------\n DataFrame.count : Count non-NA cells for each column or row.\n\n Examples\n --------\n >>> s = pd.Series([0.0, 1.0, np.nan])\n >>> s.count()\n 2\n \"\"\"\n if level is None:\n return notna(self.array).sum()\n elif not isinstance(self.index, MultiIndex):\n raise ValueError(\"Series.count level is only valid with a MultiIndex\")\n\n index = self.index\n assert isinstance(index, MultiIndex) # for mypy\n\n if isinstance(level, str):\n level = index._get_level_number(level)\n\n lev = index.levels[level]\n level_codes = np.array(index.codes[level], subok=False, copy=True)\n\n mask = level_codes == -1\n if mask.any():\n level_codes[mask] = cnt = len(lev)\n lev = lev.insert(cnt, lev._na_value)\n\n obs = level_codes[notna(self._values)]\n out = np.bincount(obs, minlength=len(lev) or None)\n return self._constructor(out, index=lev, dtype=\"int64\").__finalize__(\n self, method=\"count\"\n )\n\n def mode(self, dropna=True) -> Series:\n \"\"\"\n Return the mode(s) of the Series.\n\n The mode is the value that appears most often. There can be multiple modes.\n\n Always returns Series even if only one value is returned.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't consider counts of NaN/NaT.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series\n Modes of the Series in sorted order.\n \"\"\"\n # TODO: Add option for bins like value_counts()\n return algorithms.mode(self, dropna=dropna)\n\n def unique(self):\n \"\"\"\n Return unique values of Series object.\n\n Uniques are returned in order of appearance. Hash table-based unique,\n therefore does NOT sort.\n\n Returns\n -------\n ndarray or ExtensionArray\n The unique values returned as a NumPy array. See Notes.\n\n See Also\n --------\n unique : Top-level unique method for any 1-d array-like object.\n Index.unique : Return Index with unique values from an Index object.\n\n Notes\n -----\n Returns the unique values as a NumPy array. In case of an\n extension-array backed Series, a new\n :class:`~api.extensions.ExtensionArray` of that type with just\n the unique values is returned. This includes\n\n * Categorical\n * Period\n * Datetime with Timezone\n * Interval\n * Sparse\n * IntegerNA\n\n See Examples section.\n\n Examples\n --------\n >>> pd.Series([2, 1, 3, 3], name='A').unique()\n array([2, 1, 3])\n\n >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()\n array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')\n\n >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')\n ... for _ in range(3)]).unique()\n <DatetimeArray>\n ['2016-01-01 00:00:00-05:00']\n Length: 1, dtype: datetime64[ns, US/Eastern]\n\n An unordered Categorical will return categories in the order of\n appearance.\n\n >>> pd.Series(pd.Categorical(list('baabc'))).unique()\n ['b', 'a', 'c']\n Categories (3, object): ['b', 'a', 'c']\n\n An ordered Categorical preserves the category ordering.\n\n >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),\n ... 
ordered=True)).unique()\n ['b', 'a', 'c']\n Categories (3, object): ['a' < 'b' < 'c']\n \"\"\"\n return super().unique()\n\n def drop_duplicates(self, keep=\"first\", inplace=False) -> Optional[Series]:\n \"\"\"\n Return Series with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n Method to handle dropping duplicates:\n\n - 'first' : Drop duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n\n inplace : bool, default ``False``\n If ``True``, performs operation inplace and returns None.\n\n Returns\n -------\n Series or None\n Series with duplicates dropped or None if ``inplace=True``.\n\n See Also\n --------\n Index.drop_duplicates : Equivalent method on Index.\n DataFrame.drop_duplicates : Equivalent method on DataFrame.\n Series.duplicated : Related method on Series, indicating duplicate\n Series values.\n\n Examples\n --------\n Generate a Series with duplicated entries.\n\n >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],\n ... name='animal')\n >>> s\n 0 lama\n 1 cow\n 2 lama\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n With the 'keep' parameter, the selection behaviour of duplicated values\n can be changed. The value 'first' keeps the first occurrence for each\n set of duplicated entries. The default value of keep is 'first'.\n\n >>> s.drop_duplicates()\n 0 lama\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object\n\n The value 'last' for parameter 'keep' keeps the last occurrence for\n each set of duplicated entries.\n\n >>> s.drop_duplicates(keep='last')\n 1 cow\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n The value ``False`` for parameter 'keep' discards all sets of\n duplicated entries. Setting the value of 'inplace' to ``True`` performs\n the operation inplace and returns ``None``.\n\n >>> s.drop_duplicates(keep=False, inplace=True)\n >>> s\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n result = super().drop_duplicates(keep=keep)\n if inplace:\n self._update_inplace(result)\n return None\n else:\n return result\n\n def duplicated(self, keep=\"first\") -> Series:\n \"\"\"\n Indicate duplicate Series values.\n\n Duplicated values are indicated as ``True`` values in the resulting\n Series. 
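        As a quick sketch of a common use (relying only on the behaviour
        documented here), inverting the result reproduces
        :meth:`Series.drop_duplicates`:

        >>> s = pd.Series(['a', 'b', 'a'])
        >>> s[~s.duplicated()]
        0    a
        1    b
        dtype: object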
Either all duplicates, all except the first or all except the\n last occurrence of duplicates can be indicated.\n\n Parameters\n ----------\n keep : {'first', 'last', False}, default 'first'\n Method to handle dropping duplicates:\n\n - 'first' : Mark duplicates as ``True`` except for the first\n occurrence.\n - 'last' : Mark duplicates as ``True`` except for the last\n occurrence.\n - ``False`` : Mark all duplicates as ``True``.\n\n Returns\n -------\n Series\n Series indicating whether each value has occurred in the\n preceding values.\n\n See Also\n --------\n Index.duplicated : Equivalent method on pandas.Index.\n DataFrame.duplicated : Equivalent method on pandas.DataFrame.\n Series.drop_duplicates : Remove duplicate values from Series.\n\n Examples\n --------\n By default, for each set of duplicated values, the first occurrence is\n set on False and all others on True:\n\n >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])\n >>> animals.duplicated()\n 0 False\n 1 False\n 2 True\n 3 False\n 4 True\n dtype: bool\n\n which is equivalent to\n\n >>> animals.duplicated(keep='first')\n 0 False\n 1 False\n 2 True\n 3 False\n 4 True\n dtype: bool\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True:\n\n >>> animals.duplicated(keep='last')\n 0 True\n 1 False\n 2 True\n 3 False\n 4 False\n dtype: bool\n\n By setting keep on ``False``, all duplicates are True:\n\n >>> animals.duplicated(keep=False)\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n dtype: bool\n \"\"\"\n res = base.IndexOpsMixin.duplicated(self, keep=keep)\n result = self._constructor(res, index=self.index)\n return result.__finalize__(self, method=\"duplicated\")\n\n def idxmin(self, axis=0, skipna=True, *args, **kwargs):\n \"\"\"\n Return the row label of the minimum value.\n\n If multiple values equal the minimum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n axis : int, default 0\n For compatibility with DataFrame.idxmin. Redundant for application\n on Series.\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Index\n Label of the minimum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n numpy.argmin : Return indices of the minimum values\n along the given axis.\n DataFrame.idxmin : Return index of first occurrence of minimum\n over requested axis.\n Series.idxmax : Return index *label* of the first occurrence\n of maximum of values.\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmin``. This method\n returns the label of the minimum, while ``ndarray.argmin`` returns\n the position. To get the position, use ``series.values.argmin()``.\n\n Examples\n --------\n >>> s = pd.Series(data=[1, None, 4, 1],\n ... 
index=['A', 'B', 'C', 'D'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 1.0\n dtype: float64\n\n >>> s.idxmin()\n 'A'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmin(skipna=False)\n nan\n \"\"\"\n i = self.argmin(axis, skipna, *args, **kwargs)\n if i == -1:\n return np.nan\n return self.index[i]\n\n def idxmax(self, axis=0, skipna=True, *args, **kwargs):\n \"\"\"\n Return the row label of the maximum value.\n\n If multiple values equal the maximum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n axis : int, default 0\n For compatibility with DataFrame.idxmax. Redundant for application\n on Series.\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Index\n Label of the maximum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n numpy.argmax : Return indices of the maximum values\n along the given axis.\n DataFrame.idxmax : Return index of first occurrence of maximum\n over requested axis.\n Series.idxmin : Return index *label* of the first occurrence\n of minimum of values.\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmax``. This method\n returns the label of the maximum, while ``ndarray.argmax`` returns\n the position. To get the position, use ``series.values.argmax()``.\n\n Examples\n --------\n >>> s = pd.Series(data=[1, None, 4, 3, 4],\n ... index=['A', 'B', 'C', 'D', 'E'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 3.0\n E 4.0\n dtype: float64\n\n >>> s.idxmax()\n 'C'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmax(skipna=False)\n nan\n \"\"\"\n i = self.argmax(axis, skipna, *args, **kwargs)\n if i == -1:\n return np.nan\n return self.index[i]\n\n def round(self, decimals=0, *args, **kwargs) -> Series:\n \"\"\"\n Round each value in a Series to the given number of decimals.\n\n Parameters\n ----------\n decimals : int, default 0\n Number of decimal places to round to. 
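            For instance, a rough sketch of the negative case:

            >>> pd.Series([123.456]).round(-1)
            0    120.0
            dtype: float64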
If decimals is negative,\n it specifies the number of positions to the left of the decimal point.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Series\n Rounded values of the Series.\n\n See Also\n --------\n numpy.around : Round values of an np.array.\n DataFrame.round : Round values of a DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([0.1, 1.3, 2.7])\n >>> s.round()\n 0 0.0\n 1 1.0\n 2 3.0\n dtype: float64\n \"\"\"\n nv.validate_round(args, kwargs)\n result = self._values.round(decimals)\n result = self._constructor(result, index=self.index).__finalize__(\n self, method=\"round\"\n )\n\n return result\n\n def quantile(self, q=0.5, interpolation=\"linear\"):\n \"\"\"\n Return value at the given quantile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n The quantile(s) to compute, which can lie in range: 0 <= q <= 1.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n\n Returns\n -------\n float or Series\n If ``q`` is an array, a Series will be returned where the\n index is ``q`` and the values are the quantiles, otherwise\n a float will be returned.\n\n See Also\n --------\n core.window.Rolling.quantile : Calculate the rolling quantile.\n numpy.percentile : Returns the q-th percentile(s) of the array elements.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.quantile(.5)\n 2.5\n >>> s.quantile([.25, .5, .75])\n 0.25 1.75\n 0.50 2.50\n 0.75 3.25\n dtype: float64\n \"\"\"\n validate_percentile(q)\n\n # We dispatch to DataFrame so that core.internals only has to worry\n # about 2D cases.\n df = self.to_frame()\n\n result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)\n if result.ndim == 2:\n result = result.iloc[:, 0]\n\n if is_list_like(q):\n result.name = self.name\n return self._constructor(result, index=Float64Index(q), name=self.name)\n else:\n # scalar\n return result.iloc[0]\n\n def corr(self, other, method=\"pearson\", min_periods=None) -> float:\n \"\"\"\n Compute correlation with `other` Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n Series with which to compute the correlation.\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method used to compute correlation:\n\n - pearson : Standard correlation coefficient\n - kendall : Kendall Tau correlation coefficient\n - spearman : Spearman rank correlation\n - callable: Callable with input two 1d ndarrays and returning a float.\n\n .. versionadded:: 0.24.0\n Note that the returned matrix from corr will have 1 along the\n diagonals and will be symmetric regardless of the callable's\n behavior.\n min_periods : int, optional\n Minimum number of observations needed to have a valid result.\n\n Returns\n -------\n float\n Correlation with other.\n\n See Also\n --------\n DataFrame.corr : Compute pairwise correlation between columns.\n DataFrame.corrwith : Compute pairwise correlation with another\n DataFrame or Series.\n\n Examples\n --------\n >>> def histogram_intersection(a, b):\n ... 
v = np.minimum(a, b).sum().round(decimals=1)\n ... return v\n >>> s1 = pd.Series([.2, .0, .6, .2])\n >>> s2 = pd.Series([.3, .6, .0, .1])\n >>> s1.corr(s2, method=histogram_intersection)\n 0.3\n \"\"\"\n this, other = self.align(other, join=\"inner\", copy=False)\n if len(this) == 0:\n return np.nan\n\n if method in [\"pearson\", \"spearman\", \"kendall\"] or callable(method):\n return nanops.nancorr(\n this.values, other.values, method=method, min_periods=min_periods\n )\n\n raise ValueError(\n \"method must be either 'pearson', \"\n \"'spearman', 'kendall', or a callable, \"\n f\"'{method}' was supplied\"\n )\n\n def cov(\n self,\n other: Series,\n min_periods: Optional[int] = None,\n ddof: Optional[int] = 1,\n ) -> float:\n \"\"\"\n Compute covariance with Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n Series with which to compute the covariance.\n min_periods : int, optional\n Minimum number of observations needed to have a valid result.\n ddof : int, default 1\n Delta degrees of freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n float\n Covariance between Series and other normalized by N-1\n (unbiased estimator).\n\n See Also\n --------\n DataFrame.cov : Compute pairwise covariance of columns.\n\n Examples\n --------\n >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])\n >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])\n >>> s1.cov(s2)\n -0.01685762652715874\n \"\"\"\n this, other = self.align(other, join=\"inner\", copy=False)\n if len(this) == 0:\n return np.nan\n return nanops.nancov(\n this.values, other.values, min_periods=min_periods, ddof=ddof\n )\n\n @doc(\n klass=\"Series\",\n extra_params=\"\",\n other_klass=\"DataFrame\",\n examples=dedent(\n \"\"\"\n Difference with previous row\n\n >>> s = pd.Series([1, 1, 2, 3, 5, 8])\n >>> s.diff()\n 0 NaN\n 1 0.0\n 2 1.0\n 3 1.0\n 4 2.0\n 5 3.0\n dtype: float64\n\n Difference with 3rd previous row\n\n >>> s.diff(periods=3)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 2.0\n 4 4.0\n 5 6.0\n dtype: float64\n\n Difference with following row\n\n >>> s.diff(periods=-1)\n 0 0.0\n 1 -1.0\n 2 -1.0\n 3 -2.0\n 4 -3.0\n 5 NaN\n dtype: float64\n\n Overflow in input dtype\n\n >>> s = pd.Series([1, 0], dtype=np.uint8)\n >>> s.diff()\n 0 NaN\n 1 255.0\n dtype: float64\"\"\"\n ),\n )\n def diff(self, periods: int = 1) -> Series:\n \"\"\"\n First discrete difference of element.\n\n Calculates the difference of a {klass} element compared with another\n element in the {klass} (default is element in previous row).\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculating difference, accepts negative\n values.\n {extra_params}\n Returns\n -------\n {klass}\n First differences of the Series.\n\n See Also\n --------\n {klass}.pct_change: Percent change over given number of periods.\n {klass}.shift: Shift index by desired number of periods with an\n optional time freq.\n {other_klass}.diff: First discrete difference of object.\n\n Notes\n -----\n For boolean dtypes, this uses :meth:`operator.xor` rather than\n :meth:`operator.sub`.\n The result is calculated according to current dtype in {klass},\n however dtype of the result is always float64.\n\n Examples\n --------\n {examples}\n \"\"\"\n result = algorithms.diff(self.array, periods)\n return self._constructor(result, index=self.index).__finalize__(\n self, method=\"diff\"\n )\n\n def autocorr(self, lag=1) -> float:\n \"\"\"\n Compute the 
lag-N autocorrelation.\n\n This method computes the Pearson correlation between\n the Series and its shifted self.\n\n Parameters\n ----------\n lag : int, default 1\n Number of lags to apply before performing autocorrelation.\n\n Returns\n -------\n float\n The Pearson correlation between self and self.shift(lag).\n\n See Also\n --------\n Series.corr : Compute the correlation between two Series.\n Series.shift : Shift index by desired number of periods.\n DataFrame.corr : Compute pairwise correlation of columns.\n DataFrame.corrwith : Compute pairwise correlation between rows or\n columns of two DataFrame objects.\n\n Notes\n -----\n If the Pearson correlation is not well defined return 'NaN'.\n\n Examples\n --------\n >>> s = pd.Series([0.25, 0.5, 0.2, -0.05])\n >>> s.autocorr() # doctest: +ELLIPSIS\n 0.10355...\n >>> s.autocorr(lag=2) # doctest: +ELLIPSIS\n -0.99999...\n\n If the Pearson correlation is not well defined, then 'NaN' is returned.\n\n >>> s = pd.Series([1, 0, 0, 0])\n >>> s.autocorr()\n nan\n \"\"\"\n return self.corr(self.shift(lag))\n\n def dot(self, other):\n \"\"\"\n Compute the dot product between the Series and the columns of other.\n\n This method computes the dot product between the Series and another\n one, or the Series and each columns of a DataFrame, or the Series and\n each columns of an array.\n\n It can also be called using `self @ other` in Python >= 3.5.\n\n Parameters\n ----------\n other : Series, DataFrame or array-like\n The other object to compute the dot product with its columns.\n\n Returns\n -------\n scalar, Series or numpy.ndarray\n Return the dot product of the Series and other if other is a\n Series, the Series of the dot product of Series and each rows of\n other if other is a DataFrame or a numpy.ndarray between the Series\n and each columns of the numpy array.\n\n See Also\n --------\n DataFrame.dot: Compute the matrix product with the DataFrame.\n Series.mul: Multiplication of series and other, element-wise.\n\n Notes\n -----\n The Series and other has to share the same index if other is a Series\n or a DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([0, 1, 2, 3])\n >>> other = pd.Series([-1, 2, -3, 4])\n >>> s.dot(other)\n 8\n >>> s @ other\n 8\n >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])\n >>> s.dot(df)\n 0 24\n 1 14\n dtype: int64\n >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])\n >>> s.dot(arr)\n array([24, 14])\n \"\"\"\n if isinstance(other, (Series, ABCDataFrame)):\n common = self.index.union(other.index)\n if len(common) > len(self.index) or len(common) > len(other.index):\n raise ValueError(\"matrices are not aligned\")\n\n left = self.reindex(index=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right.values\n else:\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[0] != rvals.shape[0]:\n raise Exception(\n f\"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}\"\n )\n\n if isinstance(other, ABCDataFrame):\n return self._constructor(\n np.dot(lvals, rvals), index=other.columns\n ).__finalize__(self, method=\"dot\")\n elif isinstance(other, Series):\n return np.dot(lvals, rvals)\n elif isinstance(rvals, np.ndarray):\n return np.dot(lvals, rvals)\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def __matmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def __rmatmul__(self, other):\n \"\"\"\n Matrix 
multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(np.transpose(other))\n\n @doc(base.IndexOpsMixin.searchsorted, klass=\"Series\")\n def searchsorted(self, value, side=\"left\", sorter=None):\n return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)\n\n # -------------------------------------------------------------------\n # Combination\n\n def append(self, to_append, ignore_index=False, verify_integrity=False):\n \"\"\"\n Concatenate two or more Series.\n\n Parameters\n ----------\n to_append : Series or list/tuple of Series\n Series to append with self.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n verify_integrity : bool, default False\n If True, raise Exception on creating index with duplicates.\n\n Returns\n -------\n Series\n Concatenated Series.\n\n See Also\n --------\n concat : General function to concatenate DataFrame or Series objects.\n\n Notes\n -----\n Iteratively appending to a Series can be more computationally intensive\n than a single concatenate. A better solution is to append values to a\n list and then concatenate the list with the original Series all at\n once.\n\n Examples\n --------\n >>> s1 = pd.Series([1, 2, 3])\n >>> s2 = pd.Series([4, 5, 6])\n >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])\n >>> s1.append(s2)\n 0 1\n 1 2\n 2 3\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s1.append(s3)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n\n With `ignore_index` set to True:\n\n >>> s1.append(s2, ignore_index=True)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n\n With `verify_integrity` set to True:\n\n >>> s1.append(s2, verify_integrity=True)\n Traceback (most recent call last):\n ...\n ValueError: Indexes have overlapping values: [0, 1, 2]\n \"\"\"\n from pandas.core.reshape.concat import concat\n\n if isinstance(to_append, (list, tuple)):\n to_concat = [self]\n to_concat.extend(to_append)\n else:\n to_concat = [self, to_append]\n if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]):\n msg = \"to_append should be a Series or list/tuple of Series, got DataFrame\"\n raise TypeError(msg)\n return concat(\n to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity\n )\n\n def _binop(self, other, func, level=None, fill_value=None):\n \"\"\"\n Perform generic binary operation with optional fill value.\n\n Parameters\n ----------\n other : Series\n func : binary operator\n fill_value : float or object\n Value to substitute for NA/null values. 
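            A minimal sketch through :meth:`Series.add`, one public entry
            point assumed here to route through this helper:

            >>> a = pd.Series([1.0, np.nan], index=['x', 'y'])
            >>> b = pd.Series([10.0], index=['x'])
            >>> a.add(b, fill_value=0)
            x    11.0
            y     NaN
            dtype: float64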
If both Series are NA in a\n location, the result will be NA regardless of the passed fill value.\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n\n Returns\n -------\n Series\n \"\"\"\n if not isinstance(other, Series):\n raise AssertionError(\"Other operand must be Series\")\n\n this = self\n\n if not self.index.equals(other.index):\n this, other = self.align(other, level=level, join=\"outer\", copy=False)\n\n this_vals, other_vals = ops.fill_binop(this.values, other.values, fill_value)\n\n with np.errstate(all=\"ignore\"):\n result = func(this_vals, other_vals)\n\n name = ops.get_op_result_name(self, other)\n return this._construct_result(result, name)\n\n def _construct_result(\n self, result: Union[ArrayLike, Tuple[ArrayLike, ArrayLike]], name: Hashable\n ) -> Union[Series, Tuple[Series, Series]]:\n \"\"\"\n Construct an appropriately-labelled Series from the result of an op.\n\n Parameters\n ----------\n result : ndarray or ExtensionArray\n name : Label\n\n Returns\n -------\n Series\n In the case of __divmod__ or __rdivmod__, a 2-tuple of Series.\n \"\"\"\n if isinstance(result, tuple):\n # produced by divmod or rdivmod\n\n res1 = self._construct_result(result[0], name=name)\n res2 = self._construct_result(result[1], name=name)\n\n # GH#33427 assertions to keep mypy happy\n assert isinstance(res1, Series)\n assert isinstance(res2, Series)\n return (res1, res2)\n\n # We do not pass dtype to ensure that the Series constructor\n # does inference in the case where `result` has object-dtype.\n out = self._constructor(result, index=self.index)\n out = out.__finalize__(self)\n\n # Set the result's name after __finalize__ is called because __finalize__\n # would set it back to self.name\n out.name = name\n return out\n\n @doc(\n generic._shared_docs[\"compare\"],\n \"\"\"\nReturns\n-------\nSeries or DataFrame\n If axis is 0 or 'index' the result will be a Series.\n The resulting index will be a MultiIndex with 'self' and 'other'\n stacked alternately at the inner level.\n\n If axis is 1 or 'columns' the result will be a DataFrame.\n It will have two columns namely 'self' and 'other'.\n\nSee Also\n--------\nDataFrame.compare : Compare with another DataFrame and show differences.\n\nNotes\n-----\nMatching NaNs will not appear as a difference.\n\nExamples\n--------\n>>> s1 = pd.Series([\"a\", \"b\", \"c\", \"d\", \"e\"])\n>>> s2 = pd.Series([\"a\", \"a\", \"c\", \"b\", \"e\"])\n\nAlign the differences on columns\n\n>>> s1.compare(s2)\n self other\n1 b a\n3 d b\n\nStack the differences on indices\n\n>>> s1.compare(s2, align_axis=0)\n1 self b\n other a\n3 self d\n other b\ndtype: object\n\nKeep all original rows\n\n>>> s1.compare(s2, keep_shape=True)\n self other\n0 NaN NaN\n1 b a\n2 NaN NaN\n3 d b\n4 NaN NaN\n\nKeep all original rows and also all original values\n\n>>> s1.compare(s2, keep_shape=True, keep_equal=True)\n self other\n0 a a\n1 b a\n2 c c\n3 d b\n4 e e\n\"\"\",\n klass=_shared_doc_kwargs[\"klass\"],\n )\n def compare(\n self,\n other: Series,\n align_axis: Axis = 1,\n keep_shape: bool = False,\n keep_equal: bool = False,\n ) -> FrameOrSeriesUnion:\n return super().compare(\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n )\n\n def combine(self, other, func, fill_value=None) -> Series:\n \"\"\"\n Combine the Series with a Series or scalar according to `func`.\n\n Combine the Series and `other` using `func` to perform elementwise\n selection for combined Series.\n 
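        When ``other`` is a scalar, ``func`` is simply evaluated against that
        scalar at every position; a minimal sketch:

        >>> pd.Series([1, 5, 3]).combine(4, min)
        0    1
        1    4
        2    3
        dtype: int64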
`fill_value` is assumed when value is missing at some index\n from one of the two objects being combined.\n\n Parameters\n ----------\n other : Series or scalar\n The value(s) to be combined with the `Series`.\n func : function\n Function that takes two scalars as inputs and returns an element.\n fill_value : scalar, optional\n The value to assume when an index is missing from\n one Series or the other. The default specifies to use the\n appropriate NaN value for the underlying dtype of the Series.\n\n Returns\n -------\n Series\n The result of combining the Series with the other object.\n\n See Also\n --------\n Series.combine_first : Combine Series values, choosing the calling\n Series' values first.\n\n Examples\n --------\n Consider 2 Datasets ``s1`` and ``s2`` containing\n highest clocked speeds of different birds.\n\n >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})\n >>> s1\n falcon 330.0\n eagle 160.0\n dtype: float64\n >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})\n >>> s2\n falcon 345.0\n eagle 200.0\n duck 30.0\n dtype: float64\n\n Now, to combine the two datasets and view the highest speeds\n of the birds across the two datasets\n\n >>> s1.combine(s2, max)\n duck NaN\n eagle 200.0\n falcon 345.0\n dtype: float64\n\n In the previous example, the resulting value for duck is missing,\n because the maximum of a NaN and a float is a NaN.\n So, in the example, we set ``fill_value=0``,\n so the maximum value returned will be the value from some dataset.\n\n >>> s1.combine(s2, max, fill_value=0)\n duck 30.0\n eagle 200.0\n falcon 345.0\n dtype: float64\n \"\"\"\n if fill_value is None:\n fill_value = na_value_for_dtype(self.dtype, compat=False)\n\n if isinstance(other, Series):\n # If other is a Series, result is based on union of Series,\n # so do this element by element\n new_index = self.index.union(other.index)\n new_name = ops.get_op_result_name(self, other)\n new_values = []\n for idx in new_index:\n lv = self.get(idx, fill_value)\n rv = other.get(idx, fill_value)\n with np.errstate(all=\"ignore\"):\n new_values.append(func(lv, rv))\n else:\n # Assume that other is a scalar, so apply the function for\n # each element in the Series\n new_index = self.index\n with np.errstate(all=\"ignore\"):\n new_values = [func(lv, other) for lv in self._values]\n new_name = self.name\n\n if is_categorical_dtype(self.dtype):\n pass\n elif is_extension_array_dtype(self.dtype):\n # TODO: can we do this for only SparseDtype?\n # The function can return something of any type, so check\n # if the type is compatible with the calling EA.\n new_values = maybe_cast_to_extension_array(type(self._values), new_values)\n return self._constructor(new_values, index=new_index, name=new_name)\n\n def combine_first(self, other) -> Series:\n \"\"\"\n Combine Series values, choosing the calling Series's values first.\n\n Parameters\n ----------\n other : Series\n The value(s) to be combined with the `Series`.\n\n Returns\n -------\n Series\n The result of combining the Series with the other object.\n\n See Also\n --------\n Series.combine : Perform elementwise operation on two Series\n using a given function.\n\n Notes\n -----\n Result index will be the union of the two indexes.\n\n Examples\n --------\n >>> s1 = pd.Series([1, np.nan])\n >>> s2 = pd.Series([3, 4])\n >>> s1.combine_first(s2)\n 0 1.0\n 1 4.0\n dtype: float64\n \"\"\"\n new_index = self.index.union(other.index)\n this = self.reindex(new_index, copy=False)\n other = other.reindex(new_index, copy=False)\n if this.dtype.kind == 
\"M\" and other.dtype.kind != \"M\":\n other = to_datetime(other)\n\n return this.where(notna(this), other)\n\n def update(self, other) -> None:\n \"\"\"\n Modify Series in place using values from passed Series.\n\n Uses non-NA values from passed Series to make updates. Aligns\n on index.\n\n Parameters\n ----------\n other : Series, or object coercible into Series\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, 5, 6]))\n >>> s\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s = pd.Series(['a', 'b', 'c'])\n >>> s.update(pd.Series(['d', 'e'], index=[0, 2]))\n >>> s\n 0 d\n 1 b\n 2 e\n dtype: object\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, 5, 6, 7, 8]))\n >>> s\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n If ``other`` contains NaNs the corresponding values are not updated\n in the original Series.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, np.nan, 6]))\n >>> s\n 0 4\n 1 2\n 2 6\n dtype: int64\n\n ``other`` can also be a non-Series object type\n that is coercible into a Series\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update([4, np.nan, 6])\n >>> s\n 0 4\n 1 2\n 2 6\n dtype: int64\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update({1: 9})\n >>> s\n 0 1\n 1 9\n 2 3\n dtype: int64\n \"\"\"\n\n if not isinstance(other, Series):\n other = Series(other)\n\n other = other.reindex_like(self)\n mask = notna(other)\n\n self._mgr = self._mgr.putmask(mask=mask, new=other)\n self._maybe_update_cacher()\n\n # ----------------------------------------------------------------------\n # Reindexing, sorting\n\n def sort_values(\n self,\n axis=0,\n ascending=True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool = False,\n key: ValueKeyFunc = None,\n ):\n \"\"\"\n Sort by the values.\n\n Sort a Series in ascending or descending order by some\n criterion.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Axis to direct sorting. The value 'index' is accepted for\n compatibility with DataFrame.sort_values.\n ascending : bool, default True\n If True, sort values in ascending order, otherwise descending.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See also :func:`numpy.sort` for more\n information. 'mergesort' and 'stable' are the only stable algorithms.\n na_position : {'first' or 'last'}, default 'last'\n Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at\n the end.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the series values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect a\n ``Series`` and return an array-like.\n\n .. 
versionadded:: 1.1.0\n\n Returns\n -------\n Series or None\n Series ordered by values or None if ``inplace=True``.\n\n See Also\n --------\n Series.sort_index : Sort by the Series indices.\n DataFrame.sort_values : Sort DataFrame by the values along either axis.\n DataFrame.sort_index : Sort DataFrame by indices.\n\n Examples\n --------\n >>> s = pd.Series([np.nan, 1, 3, 10, 5])\n >>> s\n 0 NaN\n 1 1.0\n 2 3.0\n 3 10.0\n 4 5.0\n dtype: float64\n\n Sort values ascending order (default behaviour)\n\n >>> s.sort_values(ascending=True)\n 1 1.0\n 2 3.0\n 4 5.0\n 3 10.0\n 0 NaN\n dtype: float64\n\n Sort values descending order\n\n >>> s.sort_values(ascending=False)\n 3 10.0\n 4 5.0\n 2 3.0\n 1 1.0\n 0 NaN\n dtype: float64\n\n Sort values inplace\n\n >>> s.sort_values(ascending=False, inplace=True)\n >>> s\n 3 10.0\n 4 5.0\n 2 3.0\n 1 1.0\n 0 NaN\n dtype: float64\n\n Sort values putting NAs first\n\n >>> s.sort_values(na_position='first')\n 0 NaN\n 1 1.0\n 2 3.0\n 4 5.0\n 3 10.0\n dtype: float64\n\n Sort a series of strings\n\n >>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])\n >>> s\n 0 z\n 1 b\n 2 d\n 3 a\n 4 c\n dtype: object\n\n >>> s.sort_values()\n 3 a\n 1 b\n 4 c\n 2 d\n 0 z\n dtype: object\n\n Sort using a key function. Your `key` function will be\n given the ``Series`` of values and should return an array-like.\n\n >>> s = pd.Series(['a', 'B', 'c', 'D', 'e'])\n >>> s.sort_values()\n 1 B\n 3 D\n 0 a\n 2 c\n 4 e\n dtype: object\n >>> s.sort_values(key=lambda x: x.str.lower())\n 0 a\n 1 B\n 2 c\n 3 D\n 4 e\n dtype: object\n\n NumPy ufuncs work well here. For example, we can\n sort by the ``sin`` of the value\n\n >>> s = pd.Series([-4, -2, 0, 2, 4])\n >>> s.sort_values(key=np.sin)\n 1 -2\n 4 4\n 2 0\n 0 -4\n 3 2\n dtype: int64\n\n More complicated user-defined functions can be used,\n as long as they expect a Series and return an array-like\n\n >>> s.sort_values(key=lambda x: (np.tan(x.cumsum())))\n 0 -4\n 3 2\n 4 4\n 1 -2\n 2 0\n dtype: int64\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n # Validate the axis parameter\n self._get_axis_number(axis)\n\n # GH 5856/5853\n if inplace and self._is_cached:\n raise ValueError(\n \"This Series is a view of some other array, to \"\n \"sort in-place you must create a copy\"\n )\n\n if is_list_like(ascending):\n if len(ascending) != 1:\n raise ValueError(\n f\"Length of ascending ({len(ascending)}) must be 1 for Series\"\n )\n ascending = ascending[0]\n\n if not is_bool(ascending):\n raise ValueError(\"ascending must be boolean\")\n\n if na_position not in [\"first\", \"last\"]:\n raise ValueError(f\"invalid na_position: {na_position}\")\n\n # GH 35922. 
Make sorting stable by leveraging nargsort\n values_to_sort = ensure_key_mapped(self, key)._values if key else self._values\n sorted_index = nargsort(values_to_sort, kind, ascending, na_position)\n\n result = self._constructor(\n self._values[sorted_index], index=self.index[sorted_index]\n )\n\n if ignore_index:\n result.index = ibase.default_index(len(sorted_index))\n\n if inplace:\n self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"sort_values\")\n\n def sort_index(\n self,\n axis=0,\n level=None,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n sort_remaining: bool = True,\n ignore_index: bool = False,\n key: IndexKeyFunc = None,\n ):\n \"\"\"\n Sort Series by index labels.\n\n Returns a new Series sorted by label if `inplace` argument is\n ``False``, otherwise updates the original series and returns None.\n\n Parameters\n ----------\n axis : int, default 0\n Axis to direct sorting. This can only be 0 for Series.\n level : int, optional\n If not None, sort on values in specified index level(s).\n ascending : bool or list of bools, default True\n Sort ascending vs. descending. When the index is a MultiIndex the\n sort direction can be controlled for each level individually.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See also :func:`numpy.sort` for more\n information. 'mergesort' and 'stable' are the only stable algorithms. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the index values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect an\n ``Index`` and return an ``Index`` of the same shape.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n Series or None\n The original Series sorted by the labels or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.sort_index: Sort DataFrame by the index.\n DataFrame.sort_values: Sort DataFrame by the value.\n Series.sort_values : Sort Series by the value.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])\n >>> s.sort_index()\n 1 c\n 2 b\n 3 a\n 4 d\n dtype: object\n\n Sort Descending\n\n >>> s.sort_index(ascending=False)\n 4 d\n 3 a\n 2 b\n 1 c\n dtype: object\n\n Sort Inplace\n\n >>> s.sort_index(inplace=True)\n >>> s\n 1 c\n 2 b\n 3 a\n 4 d\n dtype: object\n\n By default NaNs are put at the end, but use `na_position` to place\n them at the beginning\n\n >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])\n >>> s.sort_index(na_position='first')\n NaN d\n 1.0 c\n 2.0 b\n 3.0 a\n dtype: object\n\n Specify index level to sort\n\n >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',\n ... 'baz', 'baz', 'bar', 'bar']),\n ... np.array(['two', 'one', 'two', 'one',\n ... 
'two', 'one', 'two', 'one'])]\n >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)\n >>> s.sort_index(level=1)\n bar one 8\n baz one 6\n foo one 4\n qux one 2\n bar two 7\n baz two 5\n foo two 3\n qux two 1\n dtype: int64\n\n Does not sort by remaining levels when sorting by levels\n\n >>> s.sort_index(level=1, sort_remaining=False)\n qux one 2\n foo one 4\n baz one 6\n bar one 8\n qux two 1\n foo two 3\n baz two 5\n bar two 7\n dtype: int64\n\n Apply a key function before sorting\n\n >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd'])\n >>> s.sort_index(key=lambda x : x.str.lower())\n A 1\n b 2\n C 3\n d 4\n dtype: int64\n \"\"\"\n\n return super().sort_index(\n axis,\n level,\n ascending,\n inplace,\n kind,\n na_position,\n sort_remaining,\n ignore_index,\n key,\n )\n\n def argsort(self, axis=0, kind=\"quicksort\", order=None) -> Series:\n \"\"\"\n Return the integer indices that would sort the Series values.\n\n Override ndarray.argsort. Argsorts the value, omitting NA/null values,\n and places the result in the same locations as the non-NA values.\n\n Parameters\n ----------\n axis : {0 or \"index\"}\n Has no effect but is accepted for compatibility with numpy.\n kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See :func:`numpy.sort` for more\n information. 'mergesort' and 'stable' are the only stable algorithms.\n order : None\n Has no effect but is accepted for compatibility with numpy.\n\n Returns\n -------\n Series\n Positions of values within the sort order with -1 indicating\n nan values.\n\n See Also\n --------\n numpy.ndarray.argsort : Returns the indices that would sort this array.\n \"\"\"\n values = self._values\n mask = isna(values)\n\n if mask.any():\n result = Series(-1, index=self.index, name=self.name, dtype=\"int64\")\n notmask = ~mask\n result[notmask] = np.argsort(values[notmask], kind=kind)\n return self._constructor(result, index=self.index).__finalize__(\n self, method=\"argsort\"\n )\n else:\n return self._constructor(\n np.argsort(values, kind=kind), index=self.index, dtype=\"int64\"\n ).__finalize__(self, method=\"argsort\")\n\n def nlargest(self, n=5, keep=\"first\") -> Series:\n \"\"\"\n Return the largest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n Return this many descending sorted values.\n keep : {'first', 'last', 'all'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Series of `n` elements:\n\n - ``first`` : return the first `n` occurrences in order\n of appearance.\n - ``last`` : return the last `n` occurrences in reverse\n order of appearance.\n - ``all`` : keep all occurrences. This can result in a Series of\n size larger than `n`.\n\n Returns\n -------\n Series\n The `n` largest values in the Series, sorted in decreasing order.\n\n See Also\n --------\n Series.nsmallest: Get the `n` smallest elements.\n Series.sort_values: Sort Series by values.\n Series.head: Return the first `n` rows.\n\n Notes\n -----\n Faster than ``.sort_values(ascending=False).head(n)`` for small `n`\n relative to the size of the ``Series`` object.\n\n Examples\n --------\n >>> countries_population = {\"Italy\": 59000000, \"France\": 65000000,\n ... \"Malta\": 434000, \"Maldives\": 434000,\n ... \"Brunei\": 434000, \"Iceland\": 337000,\n ... \"Nauru\": 11300, \"Tuvalu\": 11300,\n ... 
\"Anguilla\": 11300, \"Montserrat\": 5200}\n >>> s = pd.Series(countries_population)\n >>> s\n Italy 59000000\n France 65000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n Iceland 337000\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Montserrat 5200\n dtype: int64\n\n The `n` largest elements where ``n=5`` by default.\n\n >>> s.nlargest()\n France 65000000\n Italy 59000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n dtype: int64\n\n The `n` largest elements where ``n=3``. Default `keep` value is 'first'\n so Malta will be kept.\n\n >>> s.nlargest(3)\n France 65000000\n Italy 59000000\n Malta 434000\n dtype: int64\n\n The `n` largest elements where ``n=3`` and keeping the last duplicates.\n Brunei will be kept since it is the last with value 434000 based on\n the index order.\n\n >>> s.nlargest(3, keep='last')\n France 65000000\n Italy 59000000\n Brunei 434000\n dtype: int64\n\n The `n` largest elements where ``n=3`` with all duplicates kept. Note\n that the returned Series has five elements due to the three duplicates.\n\n >>> s.nlargest(3, keep='all')\n France 65000000\n Italy 59000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n dtype: int64\n \"\"\"\n return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()\n\n def nsmallest(self, n=5, keep=\"first\") -> Series:\n \"\"\"\n Return the smallest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n Return this many ascending sorted values.\n keep : {'first', 'last', 'all'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Series of `n` elements:\n\n - ``first`` : return the first `n` occurrences in order\n of appearance.\n - ``last`` : return the last `n` occurrences in reverse\n order of appearance.\n - ``all`` : keep all occurrences. This can result in a Series of\n size larger than `n`.\n\n Returns\n -------\n Series\n The `n` smallest values in the Series, sorted in increasing order.\n\n See Also\n --------\n Series.nlargest: Get the `n` largest elements.\n Series.sort_values: Sort Series by values.\n Series.head: Return the first `n` rows.\n\n Notes\n -----\n Faster than ``.sort_values().head(n)`` for small `n` relative to\n the size of the ``Series`` object.\n\n Examples\n --------\n >>> countries_population = {\"Italy\": 59000000, \"France\": 65000000,\n ... \"Brunei\": 434000, \"Malta\": 434000,\n ... \"Maldives\": 434000, \"Iceland\": 337000,\n ... \"Nauru\": 11300, \"Tuvalu\": 11300,\n ... \"Anguilla\": 11300, \"Montserrat\": 5200}\n >>> s = pd.Series(countries_population)\n >>> s\n Italy 59000000\n France 65000000\n Brunei 434000\n Malta 434000\n Maldives 434000\n Iceland 337000\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Montserrat 5200\n dtype: int64\n\n The `n` smallest elements where ``n=5`` by default.\n\n >>> s.nsmallest()\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Iceland 337000\n dtype: int64\n\n The `n` smallest elements where ``n=3``. Default `keep` value is\n 'first' so Nauru and Tuvalu will be kept.\n\n >>> s.nsmallest(3)\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n dtype: int64\n\n The `n` smallest elements where ``n=3`` and keeping the last\n duplicates. Anguilla and Tuvalu will be kept since they are the last\n with value 11300 based on the index order.\n\n >>> s.nsmallest(3, keep='last')\n Montserrat 5200\n Anguilla 11300\n Tuvalu 11300\n dtype: int64\n\n The `n` smallest elements where ``n=3`` with all duplicates kept. 
Note\n that the returned Series has four elements due to the three duplicates.\n\n >>> s.nsmallest(3, keep='all')\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n dtype: int64\n \"\"\"\n return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()\n\n def swaplevel(self, i=-2, j=-1, copy=True) -> Series:\n \"\"\"\n Swap levels i and j in a :class:`MultiIndex`.\n\n Default is to swap the two innermost levels of the index.\n\n Parameters\n ----------\n i, j : int, str\n Level of the indices to be swapped. Can pass level name as string.\n copy : bool, default True\n Whether to copy underlying data.\n\n Returns\n -------\n Series\n Series with levels swapped in MultiIndex.\n \"\"\"\n assert isinstance(self.index, MultiIndex)\n new_index = self.index.swaplevel(i, j)\n return self._constructor(self._values, index=new_index, copy=copy).__finalize__(\n self, method=\"swaplevel\"\n )\n\n def reorder_levels(self, order) -> Series:\n \"\"\"\n Rearrange index levels using input order.\n\n May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int representing new level order\n Reference level by number or key.\n\n Returns\n -------\n type of caller (new object)\n \"\"\"\n if not isinstance(self.index, MultiIndex): # pragma: no cover\n raise Exception(\"Can only reorder levels on a hierarchical axis.\")\n\n result = self.copy()\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.reorder_levels(order)\n return result\n\n def explode(self, ignore_index: bool = False) -> Series:\n \"\"\"\n Transform each element of a list-like to a row.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n Series\n Exploded lists to rows; index will be duplicated for these rows.\n\n See Also\n --------\n Series.str.split : Split string values on specified separator.\n Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex\n to produce DataFrame.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n DataFrame.explode : Explode a DataFrame from list-like\n columns to long format.\n\n Notes\n -----\n This routine will explode list-likes including lists, tuples, sets,\n Series, and np.ndarray. The result dtype of the subset rows will\n be object. Scalars will be returned unchanged, and empty list-likes will\n result in a np.nan for that row. 
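        A small sketch of the ``ignore_index`` option described above, which
        replaces the otherwise repeated index labels with a fresh range:

        >>> pd.Series([[1, 2], [3, 4]]).explode(ignore_index=True)
        0    1
        1    2
        2    3
        3    4
        dtype: object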
In addition, the ordering of elements in\n the output will be non-deterministic when exploding sets.\n\n Examples\n --------\n >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])\n >>> s\n 0 [1, 2, 3]\n 1 foo\n 2 []\n 3 [3, 4]\n dtype: object\n\n >>> s.explode()\n 0 1\n 0 2\n 0 3\n 1 foo\n 2 NaN\n 3 3\n 3 4\n dtype: object\n \"\"\"\n if not len(self) or not is_object_dtype(self):\n return self.copy()\n\n values, counts = reshape.explode(np.asarray(self.array))\n\n if ignore_index:\n index = ibase.default_index(len(values))\n else:\n index = self.index.repeat(counts)\n\n return self._constructor(values, index=index, name=self.name)\n\n def unstack(self, level=-1, fill_value=None):\n \"\"\"\n Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.\n\n Parameters\n ----------\n level : int, str, or list of these, default last level\n Level(s) to unstack, can pass level name.\n fill_value : scalar value, default None\n Value to use when replacing NaN values.\n\n Returns\n -------\n DataFrame\n Unstacked Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4],\n ... index=pd.MultiIndex.from_product([['one', 'two'],\n ... ['a', 'b']]))\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: int64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n \"\"\"\n from pandas.core.reshape.reshape import unstack\n\n return unstack(self, level, fill_value)\n\n # ----------------------------------------------------------------------\n # function application\n\n def map(self, arg, na_action=None) -> Series:\n \"\"\"\n Map values of Series according to input correspondence.\n\n Used for substituting each value in a Series with another value,\n that may be derived from a function, a ``dict`` or\n a :class:`Series`.\n\n Parameters\n ----------\n arg : function, collections.abc.Mapping subclass or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}, default None\n If 'ignore', propagate NaN values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n Series\n Same index as caller.\n\n See Also\n --------\n Series.apply : For applying more complex functions on a Series.\n DataFrame.apply : Apply a function row-/column-wise.\n DataFrame.applymap : Apply a function elementwise on a whole DataFrame.\n\n Notes\n -----\n When ``arg`` is a dictionary, values in Series that are not in the\n dictionary (as keys) are converted to ``NaN``. However, if the\n dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.\n provides a method for default values), then this default is used\n rather than ``NaN``.\n\n Examples\n --------\n >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])\n >>> s\n 0 cat\n 1 dog\n 2 NaN\n 3 rabbit\n dtype: object\n\n ``map`` accepts a ``dict`` or a ``Series``. Values that are not found\n in the ``dict`` are converted to ``NaN``, unless the dict has a default\n value (e.g. 
``defaultdict``):\n\n >>> s.map({'cat': 'kitten', 'dog': 'puppy'})\n 0 kitten\n 1 puppy\n 2 NaN\n 3 NaN\n dtype: object\n\n It also accepts a function:\n\n >>> s.map('I am a {}'.format)\n 0 I am a cat\n 1 I am a dog\n 2 I am a nan\n 3 I am a rabbit\n dtype: object\n\n To avoid applying the function to missing values (and keep them as\n ``NaN``) ``na_action='ignore'`` can be used:\n\n >>> s.map('I am a {}'.format, na_action='ignore')\n 0 I am a cat\n 1 I am a dog\n 2 NaN\n 3 I am a rabbit\n dtype: object\n \"\"\"\n new_values = super()._map_values(arg, na_action=na_action)\n return self._constructor(new_values, index=self.index).__finalize__(\n self, method=\"map\"\n )\n\n def _gotitem(self, key, ndim, subset=None) -> Series:\n \"\"\"\n Sub-classes to define. Return a sliced object.\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n Requested ndim of result.\n subset : object, default None\n Subset to act on.\n \"\"\"\n return self\n\n _agg_see_also_doc = dedent(\n \"\"\"\n See Also\n --------\n Series.apply : Invoke function on a Series.\n Series.transform : Transform function producing a Series with like indexes.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.agg('min')\n 1\n\n >>> s.agg(['min', 'max'])\n min 1\n max 4\n dtype: int64\n \"\"\"\n )\n\n @doc(\n generic._shared_docs[\"aggregate\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n see_also=_agg_see_also_doc,\n examples=_agg_examples_doc,\n )\n def aggregate(self, func=None, axis=0, *args, **kwargs):\n # Validate the axis parameter\n self._get_axis_number(axis)\n\n # if func is None, will switch to user-provided \"named aggregation\" kwargs\n if func is None:\n func = dict(kwargs.items())\n\n op = series_apply(self, func, args=args, kwds=kwargs)\n result, how = op.agg()\n if result is None:\n\n # we can be called from an inner function which\n # passes this meta-data\n kwargs.pop(\"_axis\", None)\n kwargs.pop(\"_level\", None)\n\n # try a regular apply, this evaluates lambdas\n # row-by-row; however if the lambda is expected a Series\n # expression, e.g.: lambda x: x-x.quantile(0.25)\n # this will fail, so we can try a vectorized evaluation\n\n # we cannot FIRST try the vectorized evaluation, because\n # then .agg and .apply would have different semantics if the\n # operation is actually defined on the Series, e.g. str\n try:\n result = self.apply(func, *args, **kwargs)\n except (ValueError, AttributeError, TypeError):\n result = func(self, *args, **kwargs)\n\n return result\n\n agg = aggregate\n\n @doc(\n _shared_docs[\"transform\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n )\n def transform(\n self, func: AggFuncType, axis: Axis = 0, *args, **kwargs\n ) -> FrameOrSeriesUnion:\n return transform(self, func, axis, *args, **kwargs)\n\n def apply(\n self,\n func: AggFuncType,\n convert_dtype: bool = True,\n args: Tuple[Any, ...] = (),\n **kwds,\n ) -> FrameOrSeriesUnion:\n \"\"\"\n Invoke function on values of Series.\n\n Can be ufunc (a NumPy function that applies to the entire Series)\n or a Python function that only works on single values.\n\n Parameters\n ----------\n func : function\n Python function or NumPy ufunc to apply.\n convert_dtype : bool, default True\n Try to find better dtype for elementwise function results. 
If\n False, leave as dtype=object.\n args : tuple\n Positional arguments passed to func after the series value.\n **kwds\n Additional keyword arguments passed to func.\n\n Returns\n -------\n Series or DataFrame\n If func returns a Series object the result will be a DataFrame.\n\n See Also\n --------\n Series.map: For element-wise operations.\n Series.agg: Only perform aggregating type operations.\n Series.transform: Only perform transforming type operations.\n\n Examples\n --------\n Create a series with typical summer temperatures for each city.\n\n >>> s = pd.Series([20, 21, 12],\n ... index=['London', 'New York', 'Helsinki'])\n >>> s\n London 20\n New York 21\n Helsinki 12\n dtype: int64\n\n Square the values by defining a function and passing it as an\n argument to ``apply()``.\n\n >>> def square(x):\n ... return x ** 2\n >>> s.apply(square)\n London 400\n New York 441\n Helsinki 144\n dtype: int64\n\n Square the values by passing an anonymous function as an\n argument to ``apply()``.\n\n >>> s.apply(lambda x: x ** 2)\n London 400\n New York 441\n Helsinki 144\n dtype: int64\n\n Define a custom function that needs additional positional\n arguments and pass these additional arguments using the\n ``args`` keyword.\n\n >>> def subtract_custom_value(x, custom_value):\n ... return x - custom_value\n\n >>> s.apply(subtract_custom_value, args=(5,))\n London 15\n New York 16\n Helsinki 7\n dtype: int64\n\n Define a custom function that takes keyword arguments\n and pass these arguments to ``apply``.\n\n >>> def add_custom_values(x, **kwargs):\n ... for month in kwargs:\n ... x += kwargs[month]\n ... return x\n\n >>> s.apply(add_custom_values, june=30, july=20, august=25)\n London 95\n New York 96\n Helsinki 87\n dtype: int64\n\n Use a function from the Numpy library.\n\n >>> s.apply(np.log)\n London 2.995732\n New York 3.044522\n Helsinki 2.484907\n dtype: float64\n \"\"\"\n op = series_apply(self, func, convert_dtype, args, kwds)\n return op.apply()\n\n def _reduce(\n self,\n op,\n name: str,\n *,\n axis=0,\n skipna=True,\n numeric_only=None,\n filter_type=None,\n **kwds,\n ):\n \"\"\"\n Perform a reduction operation.\n\n If we have an ndarray as a value, then simply perform the operation,\n otherwise delegate to the object.\n \"\"\"\n delegate = self._values\n\n if axis is not None:\n self._get_axis_number(axis)\n\n if isinstance(delegate, ExtensionArray):\n # dispatch to ExtensionArray interface\n return delegate._reduce(name, skipna=skipna, **kwds)\n\n else:\n # dispatch to numpy arrays\n if numeric_only:\n raise NotImplementedError(\n f\"Series.{name} does not implement numeric_only.\"\n )\n with np.errstate(all=\"ignore\"):\n return op(delegate, skipna=skipna, **kwds)\n\n def _reindex_indexer(self, new_index, indexer, copy):\n if indexer is None:\n if copy:\n return self.copy()\n return self\n\n new_values = algorithms.take_1d(\n self._values, indexer, allow_fill=True, fill_value=None\n )\n return self._constructor(new_values, index=new_index)\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\"\n Check if we do need a multi reindex; this is for compat with\n higher dims.\n \"\"\"\n return False\n\n @doc(\n NDFrame.align,\n klass=_shared_doc_kwargs[\"klass\"],\n axes_single_arg=_shared_doc_kwargs[\"axes_single_arg\"],\n )\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n return super().align(\n other,\n join=join,\n axis=axis,\n level=level,\n 
copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n broadcast_axis=broadcast_axis,\n )\n\n def rename(\n self,\n index=None,\n *,\n axis=None,\n copy=True,\n inplace=False,\n level=None,\n errors=\"ignore\",\n ):\n \"\"\"\n Alter Series index labels or name.\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don't throw an\n error.\n\n Alternatively, change ``Series.name`` with a scalar value.\n\n See the :ref:`user guide <basics.rename>` for more.\n\n Parameters\n ----------\n axis : {0 or \"index\"}\n Unused. Accepted for compatibility with DataFrame method only.\n index : scalar, hashable sequence, dict-like or function, optional\n Functions or dict-like are transformations to apply to\n the index.\n Scalar or hashable sequence-like will alter the ``Series.name``\n attribute.\n\n **kwargs\n Additional keyword arguments passed to the function. Only the\n \"inplace\" keyword is used.\n\n Returns\n -------\n Series or None\n Series with index labels or name altered or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.rename : Corresponding DataFrame method.\n Series.rename_axis : Set the name of the axis.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n \"\"\"\n if callable(index) or is_dict_like(index):\n return super().rename(\n index, copy=copy, inplace=inplace, level=level, errors=errors\n )\n else:\n return self._set_name(index, inplace=inplace)\n\n @Appender(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.set_axis(['a', 'b', 'c'], axis=0)\n a 1\n b 2\n c 3\n dtype: int64\n \"\"\"\n )\n @Substitution(\n **_shared_doc_kwargs,\n extended_summary_sub=\"\",\n axis_description_sub=\"\",\n see_also_sub=\"\",\n )\n @Appender(generic.NDFrame.set_axis.__doc__)\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n return super().set_axis(labels, axis=axis, inplace=inplace)\n\n @doc(\n NDFrame.reindex,\n klass=_shared_doc_kwargs[\"klass\"],\n axes=_shared_doc_kwargs[\"axes\"],\n optional_labels=_shared_doc_kwargs[\"optional_labels\"],\n optional_axis=_shared_doc_kwargs[\"optional_axis\"],\n )\n def reindex(self, index=None, **kwargs):\n return super().reindex(index=index, **kwargs)\n\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace=False,\n errors=\"raise\",\n ) -> Series:\n \"\"\"\n Return Series with specified index labels removed.\n\n Remove elements of a Series based on specifying the index labels.\n When using a multi-index, labels on different levels can be removed\n by specifying the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index labels to drop.\n axis : 0, default 0\n Redundant for application on Series.\n index : single label or list-like\n Redundant for application on Series, but 'index' can be used instead\n of 'labels'.\n columns : single label or list-like\n No change is made to the Series; use 'index' or 'labels' instead.\n level : int or level name, optional\n For MultiIndex, level for which the labels will be removed.\n inplace : bool, default False\n If 
True, do operation inplace and return None.\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and only existing labels are dropped.\n\n Returns\n -------\n Series or None\n Series with specified index labels removed or None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If none of the labels are found in the index.\n\n See Also\n --------\n Series.reindex : Return only specified index labels of Series.\n Series.dropna : Return series without null values.\n Series.drop_duplicates : Return Series with duplicate values removed.\n DataFrame.drop : Drop specified labels from rows or columns.\n\n Examples\n --------\n >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])\n >>> s\n A 0\n B 1\n C 2\n dtype: int64\n\n Drop labels B en C\n\n >>> s.drop(labels=['B', 'C'])\n A 0\n dtype: int64\n\n Drop 2nd level label in MultiIndex Series\n\n >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],\n ... index=midx)\n >>> s\n lama speed 45.0\n weight 200.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.drop(labels='weight', level=1)\n lama speed 45.0\n length 1.2\n cow speed 30.0\n length 1.5\n falcon speed 320.0\n length 0.3\n dtype: float64\n \"\"\"\n return super().drop(\n labels=labels,\n axis=axis,\n index=index,\n columns=columns,\n level=level,\n inplace=inplace,\n errors=errors,\n )\n\n @doc(NDFrame.fillna, **_shared_doc_kwargs)\n def fillna(\n self,\n value=None,\n method=None,\n axis=None,\n inplace=False,\n limit=None,\n downcast=None,\n ) -> Optional[Series]:\n return super().fillna(\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n\n def pop(self, item: Hashable) -> Any:\n \"\"\"\n Return item and drops from series. 
Raise KeyError if not found.\n\n Parameters\n ----------\n item : label\n Index of the element that needs to be removed.\n\n Returns\n -------\n Value that is popped from series.\n\n Examples\n --------\n >>> ser = pd.Series([1,2,3])\n\n >>> ser.pop(0)\n 1\n\n >>> ser\n 1 2\n 2 3\n dtype: int64\n \"\"\"\n return super().pop(item=item)\n\n @doc(\n NDFrame.replace,\n klass=_shared_doc_kwargs[\"klass\"],\n inplace=_shared_doc_kwargs[\"inplace\"],\n replace_iloc=_shared_doc_kwargs[\"replace_iloc\"],\n )\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ):\n return super().replace(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n limit=limit,\n regex=regex,\n method=method,\n )\n\n def _replace_single(self, to_replace, method: str, inplace: bool, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n\n orig_dtype = self.dtype\n result = self if inplace else self.copy()\n fill_f = missing.get_fill_func(method)\n\n mask = missing.mask_missing(result.values, to_replace)\n values = fill_f(result.values, limit=limit, mask=mask)\n\n if values.dtype == orig_dtype and inplace:\n return\n\n result = self._constructor(values, index=self.index, dtype=self.dtype)\n result = result.__finalize__(self)\n\n if inplace:\n self._update_inplace(result)\n return\n\n return result\n\n @doc(NDFrame.shift, klass=_shared_doc_kwargs[\"klass\"])\n def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> Series:\n return super().shift(\n periods=periods, freq=freq, axis=axis, fill_value=fill_value\n )\n\n def memory_usage(self, index=True, deep=False):\n \"\"\"\n Return the memory usage of the Series.\n\n The memory usage can optionally include the contribution of\n the index and of elements of `object` dtype.\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the Series index.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n `object` dtypes for system-level memory consumption, and include\n it in the returned value.\n\n Returns\n -------\n int\n Bytes of memory consumed.\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of the\n array.\n DataFrame.memory_usage : Bytes consumed by a DataFrame.\n\n Examples\n --------\n >>> s = pd.Series(range(3))\n >>> s.memory_usage()\n 152\n\n Not including the index gives the size of the rest of the data, which\n is necessarily smaller:\n\n >>> s.memory_usage(index=False)\n 24\n\n The memory footprint of `object` values is ignored by default:\n\n >>> s = pd.Series([\"a\", \"b\"])\n >>> s.values\n array(['a', 'b'], dtype=object)\n >>> s.memory_usage()\n 144\n >>> s.memory_usage(deep=True)\n 244\n \"\"\"\n v = super().memory_usage(deep=deep)\n if index:\n v += self.index.memory_usage(deep=deep)\n return v\n\n def isin(self, values) -> Series:\n \"\"\"\n Whether elements in Series are contained in `values`.\n\n Return a boolean Series showing whether each element in the Series\n matches an element in the passed sequence of `values` exactly.\n\n Parameters\n ----------\n values : set or list-like\n The sequence of values to test. Passing in a single string will\n raise a ``TypeError``. 
Instead, turn a single string into a\n list of one element.\n\n Returns\n -------\n Series\n Series of booleans indicating if each element is in values.\n\n Raises\n ------\n TypeError\n * If `values` is a string\n\n See Also\n --------\n DataFrame.isin : Equivalent method on DataFrame.\n\n Examples\n --------\n >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'], name='animal')\n >>> s.isin(['cow', 'lama'])\n 0 True\n 1 True\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Passing a single string as ``s.isin('lama')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Strings and integers are distinct and are therefore not comparable:\n\n >>> pd.Series([1]).isin(['1'])\n 0 False\n dtype: bool\n >>> pd.Series([1.1]).isin(['1.1'])\n 0 False\n dtype: bool\n \"\"\"\n result = algorithms.isin(self._values, values)\n return self._constructor(result, index=self.index).__finalize__(\n self, method=\"isin\"\n )\n\n def between(self, left, right, inclusive=True) -> Series:\n \"\"\"\n Return boolean Series equivalent to left <= series <= right.\n\n This function returns a boolean vector containing `True` wherever the\n corresponding Series element is between the boundary values `left` and\n `right`. NA values are treated as `False`.\n\n Parameters\n ----------\n left : scalar or list-like\n Left boundary.\n right : scalar or list-like\n Right boundary.\n inclusive : bool, default True\n Include boundaries.\n\n Returns\n -------\n Series\n Series representing whether each element is between left and\n right (inclusive).\n\n See Also\n --------\n Series.gt : Greater than of series and other.\n Series.lt : Less than of series and other.\n\n Notes\n -----\n This function is equivalent to ``(left <= ser) & (ser <= right)``\n\n Examples\n --------\n >>> s = pd.Series([2, 0, 4, 8, np.nan])\n\n Boundary values are included by default:\n\n >>> s.between(1, 4)\n 0 True\n 1 False\n 2 True\n 3 False\n 4 False\n dtype: bool\n\n With `inclusive` set to ``False`` boundary values are excluded:\n\n >>> s.between(1, 4, inclusive=False)\n 0 True\n 1 False\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n `left` and `right` can be any scalar value:\n\n >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])\n >>> s.between('Anna', 'Daniel')\n 0 False\n 1 True\n 2 True\n 3 False\n dtype: bool\n \"\"\"\n if inclusive:\n lmask = self >= left\n rmask = self <= right\n else:\n lmask = self > left\n rmask = self < right\n\n return lmask & rmask\n\n # ----------------------------------------------------------------------\n # Convert to types that support pd.NA\n\n def _convert_dtypes(\n self,\n infer_objects: bool = True,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n ) -> Series:\n input_series = self\n if infer_objects:\n input_series = input_series.infer_objects()\n if is_object_dtype(input_series):\n input_series = input_series.copy()\n\n if convert_string or convert_integer or convert_boolean or convert_floating:\n inferred_dtype = convert_dtypes(\n input_series._values,\n convert_string,\n convert_integer,\n convert_boolean,\n convert_floating,\n )\n try:\n result = input_series.astype(inferred_dtype)\n except TypeError:\n result = input_series.copy()\n else:\n result = input_series.copy()\n return result\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isna(self) 
-> Series:\n return generic.NDFrame.isna(self)\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isnull(self) -> Series:\n return super().isnull()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notna(self) -> Series:\n return super().notna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notnull(self) -> Series:\n return super().notnull()\n\n def dropna(self, axis=0, inplace=False, how=None):\n \"\"\"\n Return a new Series with missing values removed.\n\n See the :ref:`User Guide <missing_data>` for more on which values are\n considered missing, and how to work with missing data.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n There is only one axis to drop values from.\n inplace : bool, default False\n If True, do operation inplace and return None.\n how : str, optional\n Not in use. Kept for compatibility.\n\n Returns\n -------\n Series or None\n Series with NA entries dropped from it or None if ``inplace=True``.\n\n See Also\n --------\n Series.isna: Indicate missing values.\n Series.notna : Indicate existing (non-missing) values.\n Series.fillna : Replace missing values.\n DataFrame.dropna : Drop rows or columns which contain NA values.\n Index.dropna : Drop missing indices.\n\n Examples\n --------\n >>> ser = pd.Series([1., 2., np.nan])\n >>> ser\n 0 1.0\n 1 2.0\n 2 NaN\n dtype: float64\n\n Drop NA values from a Series.\n\n >>> ser.dropna()\n 0 1.0\n 1 2.0\n dtype: float64\n\n Keep the Series with valid entries in the same variable.\n\n >>> ser.dropna(inplace=True)\n >>> ser\n 0 1.0\n 1 2.0\n dtype: float64\n\n Empty strings are not considered NA values. ``None`` is considered an\n NA value.\n\n >>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])\n >>> ser\n 0 NaN\n 1 2\n 2 NaT\n 3\n 4 None\n 5 I stay\n dtype: object\n >>> ser.dropna()\n 1 2\n 3\n 5 I stay\n dtype: object\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n # Validate the axis parameter\n self._get_axis_number(axis or 0)\n\n if self._can_hold_na:\n result = remove_na_arraylike(self)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n else:\n if inplace:\n # do nothing\n pass\n else:\n return self.copy()\n\n # ----------------------------------------------------------------------\n # Time series-oriented methods\n\n @doc(NDFrame.asfreq, **_shared_doc_kwargs)\n def asfreq(\n self,\n freq,\n method=None,\n how: Optional[str] = None,\n normalize: bool = False,\n fill_value=None,\n ) -> Series:\n return super().asfreq(\n freq=freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n @doc(NDFrame.resample, **_shared_doc_kwargs)\n def resample(\n self,\n rule,\n axis=0,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n convention: str = \"start\",\n kind: Optional[str] = None,\n loffset=None,\n base: Optional[int] = None,\n on=None,\n level=None,\n origin: Union[str, TimestampConvertibleTypes] = \"start_day\",\n offset: Optional[TimedeltaConvertibleTypes] = None,\n ) -> Resampler:\n return super().resample(\n rule=rule,\n axis=axis,\n closed=closed,\n label=label,\n convention=convention,\n kind=kind,\n loffset=loffset,\n base=base,\n on=on,\n level=level,\n origin=origin,\n offset=offset,\n )\n\n def to_timestamp(self, freq=None, how=\"start\", copy=True) -> Series:\n \"\"\"\n Cast to DatetimeIndex of Timestamps, at *beginning* of period.\n\n Parameters\n ----------\n freq : str, default frequency of PeriodIndex\n Desired frequency.\n how : {'s', 'e', 'start', 
'end'}\n Convention for converting period to timestamp; start of period\n vs. end.\n copy : bool, default True\n Whether or not to return a copy.\n\n Returns\n -------\n Series with DatetimeIndex\n \"\"\"\n new_values = self._values\n if copy:\n new_values = new_values.copy()\n\n if not isinstance(self.index, PeriodIndex):\n raise TypeError(f\"unsupported Type {type(self.index).__name__}\")\n new_index = self.index.to_timestamp(freq=freq, how=how)\n return self._constructor(new_values, index=new_index).__finalize__(\n self, method=\"to_timestamp\"\n )\n\n def to_period(self, freq=None, copy=True) -> Series:\n \"\"\"\n Convert Series from DatetimeIndex to PeriodIndex.\n\n Parameters\n ----------\n freq : str, default None\n Frequency associated with the PeriodIndex.\n copy : bool, default True\n Whether or not to return a copy.\n\n Returns\n -------\n Series\n Series with index converted to PeriodIndex.\n \"\"\"\n new_values = self._values\n if copy:\n new_values = new_values.copy()\n\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(f\"unsupported Type {type(self.index).__name__}\")\n new_index = self.index.to_period(freq=freq)\n return self._constructor(new_values, index=new_index).__finalize__(\n self, method=\"to_period\"\n )\n\n # ----------------------------------------------------------------------\n # Add index\n _AXIS_ORDERS = [\"index\"]\n _AXIS_REVERSED = False\n _AXIS_LEN = len(_AXIS_ORDERS)\n _info_axis_number = 0\n _info_axis_name = \"index\"\n\n index: Index = properties.AxisProperty(\n axis=0, doc=\"The index (axis labels) of the Series.\"\n )\n\n # ----------------------------------------------------------------------\n # Accessor Methods\n # ----------------------------------------------------------------------\n str = CachedAccessor(\"str\", StringMethods)\n dt = CachedAccessor(\"dt\", CombinedDatetimelikeProperties)\n cat = CachedAccessor(\"cat\", CategoricalAccessor)\n plot = CachedAccessor(\"plot\", pandas.plotting.PlotAccessor)\n sparse = CachedAccessor(\"sparse\", SparseAccessor)\n\n # ----------------------------------------------------------------------\n # Add plotting methods to Series\n hist = pandas.plotting.hist_series\n\n # ----------------------------------------------------------------------\n # Template-Based Arithmetic/Comparison Methods\n\n def _cmp_method(self, other, op):\n res_name = ops.get_op_result_name(self, other)\n\n if isinstance(other, Series) and not self._indexed_same(other):\n raise ValueError(\"Can only compare identically-labeled Series objects\")\n\n lvalues = extract_array(self, extract_numpy=True)\n rvalues = extract_array(other, extract_numpy=True)\n\n res_values = ops.comparison_op(lvalues, rvalues, op)\n\n return self._construct_result(res_values, name=res_name)\n\n def _logical_method(self, other, op):\n res_name = ops.get_op_result_name(self, other)\n self, other = ops.align_method_SERIES(self, other, align_asobject=True)\n\n lvalues = extract_array(self, extract_numpy=True)\n rvalues = extract_array(other, extract_numpy=True)\n\n res_values = ops.logical_op(lvalues, rvalues, op)\n return self._construct_result(res_values, name=res_name)\n\n def _arith_method(self, other, op):\n res_name = ops.get_op_result_name(self, other)\n self, other = ops.align_method_SERIES(self, other)\n\n lvalues = extract_array(self, extract_numpy=True)\n rvalues = extract_array(other, extract_numpy=True)\n result = ops.arithmetic_op(lvalues, rvalues, op)\n\n return self._construct_result(result, 
name=res_name)\n\n\nSeries._add_numeric_operations()\n\n# Add arithmetic!\nops.add_flex_arithmetic_methods(Series)\n", "from __future__ import annotations\n\nfrom typing import Dict, List, Optional, Tuple, Type\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import iNaT, lib, missing as libmissing\nfrom pandas._typing import ArrayLike, Dtype, DtypeObj\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_datetime64_dtype,\n is_float,\n is_float_dtype,\n is_integer_dtype,\n is_list_like,\n is_object_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.ops import invalid_comparison\nfrom pandas.core.tools.numeric import to_numeric\n\nfrom .masked import BaseMaskedArray, BaseMaskedDtype\nfrom .numeric import NumericArray, NumericDtype\n\n\nclass _IntegerDtype(NumericDtype):\n \"\"\"\n An ExtensionDtype to hold a single size & kind of integer dtype.\n\n These specific implementations are subclasses of the non-public\n _IntegerDtype. For example we have Int8Dtype to represent signed int 8s.\n\n The attributes name & type are set when these subclasses are created.\n \"\"\"\n\n def __repr__(self) -> str:\n sign = \"U\" if self.is_unsigned_integer else \"\"\n return f\"{sign}Int{8 * self.itemsize}Dtype()\"\n\n @cache_readonly\n def is_signed_integer(self) -> bool:\n return self.kind == \"i\"\n\n @cache_readonly\n def is_unsigned_integer(self) -> bool:\n return self.kind == \"u\"\n\n @property\n def _is_numeric(self) -> bool:\n return True\n\n @classmethod\n def construct_array_type(cls) -> Type[IntegerArray]:\n \"\"\"\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n \"\"\"\n return IntegerArray\n\n def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:\n # we only handle nullable EA dtypes and numeric numpy dtypes\n if not all(\n isinstance(t, BaseMaskedDtype)\n or (\n isinstance(t, np.dtype)\n and (np.issubdtype(t, np.number) or np.issubdtype(t, np.bool_))\n )\n for t in dtypes\n ):\n return None\n np_dtype = np.find_common_type(\n [t.numpy_dtype if isinstance(t, BaseMaskedDtype) else t for t in dtypes], []\n )\n if np.issubdtype(np_dtype, np.integer):\n return INT_STR_TO_DTYPE[str(np_dtype)]\n elif np.issubdtype(np_dtype, np.floating):\n from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE\n\n return FLOAT_STR_TO_DTYPE[str(np_dtype)]\n return None\n\n\ndef safe_cast(values, dtype, copy: bool):\n \"\"\"\n Safely cast the values to the dtype if they\n are equivalent, meaning floats must be equivalent to the\n ints.\n\n \"\"\"\n try:\n return values.astype(dtype, casting=\"safe\", copy=copy)\n except TypeError as err:\n\n casted = values.astype(dtype, copy=copy)\n if (casted == values).all():\n return casted\n\n raise TypeError(\n f\"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}\"\n ) from err\n\n\ndef coerce_to_array(\n values, dtype, mask=None, copy: bool = False\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Coerce the input values array to numpy arrays with a mask\n\n Parameters\n ----------\n values : 1D list-like\n dtype : integer dtype\n mask : bool 1D array, optional\n copy : bool, default False\n if True, copy the input\n\n Returns\n -------\n tuple of (values, mask)\n \"\"\"\n # if values is integer numpy array, preserve its dtype\n if dtype is None and hasattr(values, 
\"dtype\"):\n if is_integer_dtype(values.dtype):\n dtype = values.dtype\n\n if dtype is not None:\n if isinstance(dtype, str) and (\n dtype.startswith(\"Int\") or dtype.startswith(\"UInt\")\n ):\n # Avoid DeprecationWarning from NumPy about np.dtype(\"Int64\")\n # https://github.com/numpy/numpy/pull/7476\n dtype = dtype.lower()\n\n if not issubclass(type(dtype), _IntegerDtype):\n try:\n dtype = INT_STR_TO_DTYPE[str(np.dtype(dtype))]\n except KeyError as err:\n raise ValueError(f\"invalid dtype specified {dtype}\") from err\n\n if isinstance(values, IntegerArray):\n values, mask = values._data, values._mask\n if dtype is not None:\n values = values.astype(dtype.numpy_dtype, copy=False)\n\n if copy:\n values = values.copy()\n mask = mask.copy()\n return values, mask\n\n values = np.array(values, copy=copy)\n if is_object_dtype(values):\n inferred_type = lib.infer_dtype(values, skipna=True)\n if inferred_type == \"empty\":\n values = np.empty(len(values))\n values.fill(np.nan)\n elif inferred_type not in [\n \"floating\",\n \"integer\",\n \"mixed-integer\",\n \"integer-na\",\n \"mixed-integer-float\",\n ]:\n raise TypeError(f\"{values.dtype} cannot be converted to an IntegerDtype\")\n\n elif is_bool_dtype(values) and is_integer_dtype(dtype):\n values = np.array(values, dtype=int, copy=copy)\n\n elif not (is_integer_dtype(values) or is_float_dtype(values)):\n raise TypeError(f\"{values.dtype} cannot be converted to an IntegerDtype\")\n\n if mask is None:\n mask = isna(values)\n else:\n assert len(mask) == len(values)\n\n if not values.ndim == 1:\n raise TypeError(\"values must be a 1D list-like\")\n if not mask.ndim == 1:\n raise TypeError(\"mask must be a 1D list-like\")\n\n # infer dtype if needed\n if dtype is None:\n dtype = np.dtype(\"int64\")\n else:\n dtype = dtype.type\n\n # if we are float, let's make sure that we can\n # safely cast\n\n # we copy as need to coerce here\n if mask.any():\n values = values.copy()\n values[mask] = 1\n values = safe_cast(values, dtype, copy=False)\n else:\n values = safe_cast(values, dtype, copy=False)\n\n return values, mask\n\n\nclass IntegerArray(NumericArray):\n \"\"\"\n Array of integer (optional missing) values.\n\n .. versionadded:: 0.24.0\n\n .. versionchanged:: 1.0.0\n\n Now uses :attr:`pandas.NA` as the missing value rather\n than :attr:`numpy.nan`.\n\n .. warning::\n\n IntegerArray is currently experimental, and its API or internal\n implementation may change without warning.\n\n We represent an IntegerArray with 2 numpy arrays:\n\n - data: contains a numpy integer array of the appropriate dtype\n - mask: a boolean array holding a mask on the data, True is missing\n\n To construct an IntegerArray from generic array-like input, use\n :func:`pandas.array` with one of the integer dtypes (see examples).\n\n See :ref:`integer_na` for more.\n\n Parameters\n ----------\n values : numpy.ndarray\n A 1-d integer-dtype array.\n mask : numpy.ndarray\n A 1-d boolean-dtype array indicating missing values.\n copy : bool, default False\n Whether to copy the `values` and `mask`.\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n\n Returns\n -------\n IntegerArray\n\n Examples\n --------\n Create an IntegerArray with :func:`pandas.array`.\n\n >>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())\n >>> int_array\n <IntegerArray>\n [1, <NA>, 3]\n Length: 3, dtype: Int32\n\n String aliases for the dtypes are also available. 
They are capitalized.\n\n >>> pd.array([1, None, 3], dtype='Int32')\n <IntegerArray>\n [1, <NA>, 3]\n Length: 3, dtype: Int32\n\n >>> pd.array([1, None, 3], dtype='UInt16')\n <IntegerArray>\n [1, <NA>, 3]\n Length: 3, dtype: UInt16\n \"\"\"\n\n # The value used to fill '_data' to avoid upcasting\n _internal_fill_value = 1\n\n @cache_readonly\n def dtype(self) -> _IntegerDtype:\n return INT_STR_TO_DTYPE[str(self._data.dtype)]\n\n def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):\n if not (isinstance(values, np.ndarray) and values.dtype.kind in [\"i\", \"u\"]):\n raise TypeError(\n \"values should be integer numpy array. Use \"\n \"the 'pd.array' function instead\"\n )\n super().__init__(values, mask, copy=copy)\n\n def __neg__(self):\n return type(self)(-self._data, self._mask)\n\n def __pos__(self):\n return self\n\n def __abs__(self):\n return type(self)(np.abs(self._data), self._mask)\n\n @classmethod\n def _from_sequence(\n cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False\n ) -> IntegerArray:\n values, mask = coerce_to_array(scalars, dtype=dtype, copy=copy)\n return IntegerArray(values, mask)\n\n @classmethod\n def _from_sequence_of_strings(\n cls, strings, *, dtype: Optional[Dtype] = None, copy: bool = False\n ) -> IntegerArray:\n scalars = to_numeric(strings, errors=\"raise\")\n return cls._from_sequence(scalars, dtype=dtype, copy=copy)\n\n def _coerce_to_array(self, value) -> Tuple[np.ndarray, np.ndarray]:\n return coerce_to_array(value, dtype=self.dtype)\n\n def astype(self, dtype, copy: bool = True) -> ArrayLike:\n \"\"\"\n Cast to a NumPy array or ExtensionArray with 'dtype'.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n copy : bool, default True\n Whether to copy the data, even if not necessary. 
If False,\n a copy is made only if the old dtype does not match the\n new dtype.\n\n Returns\n -------\n ndarray or ExtensionArray\n NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.\n\n Raises\n ------\n TypeError\n if incompatible type with an IntegerDtype, equivalent of same_kind\n casting\n \"\"\"\n dtype = pandas_dtype(dtype)\n\n if isinstance(dtype, ExtensionDtype):\n return super().astype(dtype, copy=copy)\n\n # coerce\n if is_float_dtype(dtype):\n # In astype, we consider dtype=float to also mean na_value=np.nan\n na_value = np.nan\n elif is_datetime64_dtype(dtype):\n na_value = np.datetime64(\"NaT\")\n else:\n na_value = lib.no_default\n\n return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)\n\n def _values_for_argsort(self) -> np.ndarray:\n \"\"\"\n Return values for sorting.\n\n Returns\n -------\n ndarray\n The transformed values should maintain the ordering between values\n within the array.\n\n See Also\n --------\n ExtensionArray.argsort : Return the indices that would sort this array.\n \"\"\"\n data = self._data.copy()\n if self._mask.any():\n data[self._mask] = data.min() - 1\n return data\n\n def _cmp_method(self, other, op):\n from pandas.core.arrays import BooleanArray\n\n mask = None\n\n if isinstance(other, BaseMaskedArray):\n other, mask = other._data, other._mask\n\n elif is_list_like(other):\n other = np.asarray(other)\n if other.ndim > 1:\n raise NotImplementedError(\"can only perform ops with 1-d structures\")\n if len(self) != len(other):\n raise ValueError(\"Lengths must match to compare\")\n\n if other is libmissing.NA:\n # numpy does not handle pd.NA well as \"other\" scalar (it returns\n # a scalar False instead of an array)\n # This may be fixed by NA.__array_ufunc__. Revisit this check\n # once that's implemented.\n result = np.zeros(self._data.shape, dtype=\"bool\")\n mask = np.ones(self._data.shape, dtype=\"bool\")\n else:\n with warnings.catch_warnings():\n # numpy may show a FutureWarning:\n # elementwise comparison failed; returning scalar instead,\n # but in the future will perform elementwise comparison\n # before returning NotImplemented. 
We fall back to the correct\n # behavior today, so that should be fine to ignore.\n warnings.filterwarnings(\"ignore\", \"elementwise\", FutureWarning)\n with np.errstate(all=\"ignore\"):\n method = getattr(self._data, f\"__{op.__name__}__\")\n result = method(other)\n\n if result is NotImplemented:\n result = invalid_comparison(self._data, other, op)\n\n # nans propagate\n if mask is None:\n mask = self._mask.copy()\n else:\n mask = self._mask | mask\n\n return BooleanArray(result, mask)\n\n def sum(self, *, skipna=True, min_count=0, **kwargs):\n nv.validate_sum((), kwargs)\n return super()._reduce(\"sum\", skipna=skipna, min_count=min_count)\n\n def prod(self, *, skipna=True, min_count=0, **kwargs):\n nv.validate_prod((), kwargs)\n return super()._reduce(\"prod\", skipna=skipna, min_count=min_count)\n\n def min(self, *, skipna=True, **kwargs):\n nv.validate_min((), kwargs)\n return super()._reduce(\"min\", skipna=skipna)\n\n def max(self, *, skipna=True, **kwargs):\n nv.validate_max((), kwargs)\n return super()._reduce(\"max\", skipna=skipna)\n\n def _maybe_mask_result(self, result, mask, other, op_name: str):\n \"\"\"\n Parameters\n ----------\n result : array-like\n mask : array-like bool\n other : scalar or array-like\n op_name : str\n \"\"\"\n # if we have a float operand we are by-definition\n # a float result\n # or our op is a divide\n if (is_float_dtype(other) or is_float(other)) or (\n op_name in [\"rtruediv\", \"truediv\"]\n ):\n from pandas.core.arrays import FloatingArray\n\n return FloatingArray(result, mask, copy=False)\n\n if result.dtype == \"timedelta64[ns]\":\n from pandas.core.arrays import TimedeltaArray\n\n result[mask] = iNaT\n return TimedeltaArray._simple_new(result)\n\n return type(self)(result, mask, copy=False)\n\n\n_dtype_docstring = \"\"\"\nAn ExtensionDtype for {dtype} integer data.\n\n.. 
versionchanged:: 1.0.0\n\n Now uses :attr:`pandas.NA` as its missing value,\n rather than :attr:`numpy.nan`.\n\nAttributes\n----------\nNone\n\nMethods\n-------\nNone\n\"\"\"\n\n# create the Dtype\n\n\n@register_extension_dtype\nclass Int8Dtype(_IntegerDtype):\n type = np.int8\n name = \"Int8\"\n __doc__ = _dtype_docstring.format(dtype=\"int8\")\n\n\n@register_extension_dtype\nclass Int16Dtype(_IntegerDtype):\n type = np.int16\n name = \"Int16\"\n __doc__ = _dtype_docstring.format(dtype=\"int16\")\n\n\n@register_extension_dtype\nclass Int32Dtype(_IntegerDtype):\n type = np.int32\n name = \"Int32\"\n __doc__ = _dtype_docstring.format(dtype=\"int32\")\n\n\n@register_extension_dtype\nclass Int64Dtype(_IntegerDtype):\n type = np.int64\n name = \"Int64\"\n __doc__ = _dtype_docstring.format(dtype=\"int64\")\n\n\n@register_extension_dtype\nclass UInt8Dtype(_IntegerDtype):\n type = np.uint8\n name = \"UInt8\"\n __doc__ = _dtype_docstring.format(dtype=\"uint8\")\n\n\n@register_extension_dtype\nclass UInt16Dtype(_IntegerDtype):\n type = np.uint16\n name = \"UInt16\"\n __doc__ = _dtype_docstring.format(dtype=\"uint16\")\n\n\n@register_extension_dtype\nclass UInt32Dtype(_IntegerDtype):\n type = np.uint32\n name = \"UInt32\"\n __doc__ = _dtype_docstring.format(dtype=\"uint32\")\n\n\n@register_extension_dtype\nclass UInt64Dtype(_IntegerDtype):\n type = np.uint64\n name = \"UInt64\"\n __doc__ = _dtype_docstring.format(dtype=\"uint64\")\n\n\nINT_STR_TO_DTYPE: Dict[str, _IntegerDtype] = {\n \"int8\": Int8Dtype(),\n \"int16\": Int16Dtype(),\n \"int32\": Int32Dtype(),\n \"int64\": Int64Dtype(),\n \"uint8\": UInt8Dtype(),\n \"uint16\": UInt16Dtype(),\n \"uint32\": UInt32Dtype(),\n \"uint64\": UInt64Dtype(),\n}\n" ]
[ [ "pandas.core.ops.logical_op", "pandas.core.nanops.nancov", "pandas.util._validators.validate_bool_kwarg", "pandas.core.ops.align_method_SERIES", "pandas.core.dtypes.inference.is_hashable", "pandas.core.aggregation.transform", "pandas.core.common.standardize_mapping", "pandas.core.construction.is_empty_data", "pandas.core.dtypes.common.is_iterator", "pandas.core.algorithms.searchsorted", "pandas.core.dtypes.common.is_categorical_dtype", "pandas.core.dtypes.common.is_list_like", "numpy.array", "pandas.core.ops.comparison_op", "pandas.core.ops.fill_binop", "pandas.core.groupby.generic.SeriesGroupBy", "pandas.core.dtypes.missing.na_value_for_dtype", "pandas.core.dtypes.missing.isna", "pandas.compat.numpy.function.validate_repeat", "pandas.core.base.IndexOpsMixin.duplicated", "pandas.core.common.maybe_iterable_to_list", "pandas.core.generic.NDFrame.__init__", "pandas.core.sorting.ensure_key_mapped", "pandas.io.formats.format.SeriesFormatter", "numpy.asarray", "pandas.compat.numpy.function.validate_round", "pandas.core.ops.arithmetic_op", "pandas._config.get_option", "pandas.core.indexers.unpack_1tuple", "pandas.core.generic.NDFrame.isna", "pandas.core.internals.SingleBlockManager.from_array", "pandas.core.internals.construction.sanitize_index", "pandas.compat.numpy.function.validate_take", "pandas.core.common.asarray_tuplesafe", "pandas.core.nanops.nancorr", "pandas.core.common.any_none", "pandas.core.construction.create_series_with_explicit_dtype", "pandas.core.algorithms.SelectNSeries", "numpy.errstate", "pandas.core.dtypes.cast.convert_dtypes", "pandas.core.indexers.deprecate_ndim_indexing", "pandas.core.algorithms.mode", "pandas.core.dtypes.common.is_integer", "pandas._libs.lib.infer_dtype", "pandas.util._decorators.doc", "pandas.core.indexes.datetimes.DatetimeIndex", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.dtypes.missing.notna", "pandas.core.indexes.api.Float64Index", "pandas.util._decorators.Substitution", "pandas.core.dtypes.common.is_dict_like", "pandas.util._decorators.Appender", "pandas.core.dtypes.cast.validate_numeric_casting", "numpy.transpose", "pandas.core.missing.mask_missing", "numpy.argsort", "pandas._libs.properties.AxisProperty", "pandas.core.missing.get_fill_func", "pandas.core.dtypes.common.is_bool", "pandas.core.ops.add_flex_arithmetic_methods", "pandas.core.sorting.nargsort", "pandas.core.common.is_bool_indexer", "pandas.core.algorithms.diff", "pandas.core.dtypes.common.is_scalar", "pandas.core.algorithms.isin", "pandas.core.accessor.CachedAccessor", "pandas.core.indexes.api.ensure_index", "pandas.util._validators.validate_percentile", "numpy.dot", "pandas.core.dtypes.missing.remove_na_arraylike", "pandas.core.indexing.check_bool_indexer", "pandas.core.ops.get_op_result_name", "pandas.core.apply.series_apply", "pandas.core.dtypes.common.ensure_platform_int", "pandas.core.reshape.concat.concat", "pandas.core.tools.datetimes.to_datetime", "pandas.core.reshape.reshape.unstack", "pandas.core.algorithms.take_1d", "pandas.core.construction.sanitize_array", "pandas.core.dtypes.common.is_object_dtype", "pandas.core.common.apply_if_callable", "pandas.core.construction.extract_array" ], [ "numpy.asarray", "numpy.issubdtype", "numpy.dtype", "pandas.compat.numpy.function.validate_prod", "pandas.core.dtypes.common.is_datetime64_dtype", "pandas.core.arrays.TimedeltaArray._simple_new", "pandas.core.tools.numeric.to_numeric", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_float", "numpy.zeros", 
"pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_list_like", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.arrays.BooleanArray", "pandas.core.arrays.FloatingArray", "numpy.errstate", "numpy.array", "pandas.core.ops.invalid_comparison", "pandas.compat.numpy.function.validate_sum", "pandas.core.dtypes.common.is_bool_dtype", "numpy.abs", "pandas.compat.numpy.function.validate_max", "numpy.ones", "numpy.datetime64", "pandas.compat.numpy.function.validate_min", "pandas.core.dtypes.common.is_object_dtype", "pandas.core.dtypes.missing.isna", "pandas._libs.lib.infer_dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "1.1", "1.0", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
krodyush/nncf
[ "476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a", "476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a", "476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a", "476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a", "476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a" ]
[ "nncf/quantization/metrics.py", "nncf/layer_utils.py", "nncf/quantization/precision_init/perturbations.py", "nncf/compression_method_api.py", "examples/object_detection/main.py" ]
[ "import numpy as np\nimport networkx as nx\nfrom copy import deepcopy\nfrom texttable import Texttable\nfrom collections import deque\n\nfrom nncf.quantization.layers import SymmetricQuantizer\nfrom nncf.nncf_network import NNCFNetwork, NNCFGraph\nfrom nncf.dynamic_graph.transform_graph import is_nncf_module\nfrom nncf.quantization.quantizer_propagation import DEFAULT_QUANT_TRAIT_TO_OP_DICT, QuantizationTrait\n\nclass BaseMetric:\n def __init__(self):\n pass\n\n def collect(self):\n pass\n\n def get_metric_table(self):\n pass\n\n\nclass NetworkQuantizationShareMetric(BaseMetric):\n \"\"\"\n This is a metric representing the share of the model that has been quantized.\n It includes the calculation of the following numbers:\n - Percentage of symmetric/asymmetric/per-channel/per-tensor weight quantizers relative\n to the number of placed weight quantizers\n - Percentage of symmetric/asymmetric/per-channel/per-tensor non weight quantizers relative\n to the number of placed non weight quantizers\n - Percentage of weight quantizers and non weight quantizers for each precision relative\n to the number potential* quantizers / placed quantizers\n Bitwidth distribution data is also collected.\n\n * The maximum possible number of potential quantizers depends on the presence of ignored\n scopes and the mode of quantizer setup that is used at the time of collecting the metric.\n\n \"\"\"\n NAME_STR = 'NetworkQuantizationShare'\n\n WEIGHTS_RATIO_STR = ' WQs / All placed WQs' # WQ - weight quantizer\n ACTIVATIONS_RATIO_STR = ' AQs / All placed AQs' # AQ - activation quantizer\n TOTAL_RATIO_STR = ' Qs (out of total placed)'\n\n PARAMS_STR = 'Quantizer parameter'\n SYMMETRIC_STR = 'Symmetric'\n ASYMMETRIC_STR = 'Asymmetric'\n PER_CHANNEL_STR = 'Per-channel'\n SIGNED_STR = 'Signed'\n PER_TENSOR_STR = 'Per-tensor'\n UNSIGNED_STR = 'Unsigned'\n SHARE_WEIGHT_QUANTIZERS_STR = 'Placed WQs / Potential WQs'\n SHARE_ACTIVATION_QUANTIZERS_STR = 'Placed AQs / Potential AQs'\n\n def __init__(self, compressed_model, weights_quantizers, non_weights_quantizers, quantizer_setup_type):\n super().__init__()\n self._compressed_model = compressed_model\n self._quantizer_setup_type = quantizer_setup_type # type: QuantizerSetupType\n self.non_weights_quantizers = {k: v.quantizer_module_ref for k, v in non_weights_quantizers.items()}\n self.weights_quantizers = weights_quantizers\n self._all_quantizations = {**self.weights_quantizers, **self.non_weights_quantizers}\n self.header = [self.PARAMS_STR, self.WEIGHTS_RATIO_STR, self.ACTIVATIONS_RATIO_STR, self.TOTAL_RATIO_STR]\n self.params = {self.PER_CHANNEL_STR, self.PER_TENSOR_STR, self.UNSIGNED_STR, self.SIGNED_STR,\n self.SYMMETRIC_STR, self.ASYMMETRIC_STR}\n self.params_bits_stat = set()\n self.num_potential_quantized_weights = len(compressed_model.get_nncf_modules())\n self.num_potential_quantized_activations = self._get_num_potential_quantized_activations()\n self.num_placed_weight_quantizers = len(self.weights_quantizers)\n self.num_placed_activation_quantizers = len(self.non_weights_quantizers)\n self.num_all_potential_quantizer = self.num_potential_quantized_weights +\\\n self.num_potential_quantized_activations\n self.stat = {}\n self._ratio = {\n self.WEIGHTS_RATIO_STR: len(self.weights_quantizers),\n self.ACTIVATIONS_RATIO_STR: len(self.non_weights_quantizers),\n self.TOTAL_RATIO_STR: len(self._all_quantizations)}\n\n def _get_num_potential_quantized_activations(self):\n from nncf.quantization.algo import QuantizerSetupType\n retval = 0\n if self._quantizer_setup_type 
== QuantizerSetupType.PATTERN_BASED:\n from nncf.quantization.algo import QuantizationBuilder\n # pylint: disable=protected-access\n default_pattern = QuantizationBuilder._make_default_quantizable_subgraph_pattern()\n retval = len(self._compressed_model.get_post_pattern_insertion_points(default_pattern))\n else:\n from nncf.quantization.algo import QuantizerPropagationSolver\n insertion_point_graph = self._compressed_model.get_insertion_point_graph()\n prop_graph_solver = QuantizerPropagationSolver()\n insertion_data = prop_graph_solver.run_on_ip_graph(insertion_point_graph)\n retval = len(insertion_data)\n return retval\n\n def collect(self):\n for quantizer in self._all_quantizations.values():\n self.params_bits_stat.add(quantizer.num_bits)\n\n for h in self.header:\n self.stat[h] = {}\n for p in self.params:\n self.stat[h][p] = 0\n for p in self.params_bits_stat:\n self.stat[h][p] = 0\n\n for quantizer in self._all_quantizations.values(): # type: BaseQuantizer\n num_bits = quantizer.num_bits\n self.stat[self.TOTAL_RATIO_STR][num_bits] += 1\n type_ = self.WEIGHTS_RATIO_STR if quantizer.is_weights else self.ACTIVATIONS_RATIO_STR\n self.stat[type_][num_bits] += 1\n if quantizer.per_channel:\n self.stat[type_][self.PER_CHANNEL_STR] += 1\n else:\n self.stat[type_][self.PER_TENSOR_STR] += 1\n if quantizer.signed:\n self.stat[type_][self.SIGNED_STR] += 1\n else:\n self.stat[type_][self.UNSIGNED_STR] += 1\n if isinstance(quantizer, SymmetricQuantizer):\n self.stat[type_][self.SYMMETRIC_STR] += 1\n else:\n self.stat[type_][self.ASYMMETRIC_STR] += 1\n\n def _get_copy_statistics(self):\n statistics = deepcopy(self.stat)\n for h in self.header[1:]:\n for key, _ in statistics[h].items():\n try:\n statistics[h][key] /= self._ratio[h]\n statistics[h][key] *= 100\n except ZeroDivisionError:\n statistics[h][key] = 0\n return statistics\n\n def get_metric_table(self):\n table_with_bits_stats = Texttable()\n table_with_other_stats = Texttable()\n data = [['Metric type', 'Value']]\n for h in (self.WEIGHTS_RATIO_STR, self.ACTIVATIONS_RATIO_STR):\n for p in self.params:\n try:\n row = ['{} '.format(p) + str(h), '{:.2f} % ({} / {}) '.format(\\\n self.stat[h][p] / self._ratio[h] * 100, self.stat[h][p], self._ratio[h])]\n except ZeroDivisionError:\n row = ['{} '.format(p) + h, 0]\n data.append(row)\n try:\n row = [self.SHARE_WEIGHT_QUANTIZERS_STR, '{:.2f} % ({} / {}) '.format(\\\n self.num_placed_weight_quantizers / self.num_potential_quantized_weights * 100,\n self.num_placed_weight_quantizers, self.num_potential_quantized_weights)]\n except ZeroDivisionError:\n row = [self.SHARE_WEIGHT_QUANTIZERS_STR, '{} % '.format(0)]\n\n data.append(row)\n try:\n row = [self.SHARE_ACTIVATION_QUANTIZERS_STR, '{:.2f} % ({} / {}) '.format(\\\n self.num_placed_activation_quantizers / self.num_potential_quantized_activations * 100,\n self.num_placed_activation_quantizers, self.num_potential_quantized_activations)]\n except ZeroDivisionError:\n row = [self.SHARE_ACTIVATION_QUANTIZERS_STR, '{} % '.format(0)]\n data.append(row)\n\n table_with_other_stats.add_rows(data)\n\n data = [['Num bits (N)', 'N-bits WQs / Placed WQs', 'N-bits AQs / Placed AQs', 'N-bits Qs / Placed Qs']]\n for p in self.params_bits_stat:\n row = [p]\n for h in (self.WEIGHTS_RATIO_STR, self.ACTIVATIONS_RATIO_STR, self.TOTAL_RATIO_STR):\n try:\n row.append('{:.2f} % ({} / {}) '.format(\\\n self.stat[h][p] / self._ratio[h] * 100, self.stat[h][p], self._ratio[h]))\n except ZeroDivisionError:\n row.append(0)\n data.append(row)\n 
table_with_bits_stats.add_rows(data)\n\n retval = {\n \"Share quantization statistics:\" : table_with_other_stats,\n \"Bitwidth distribution:\" : table_with_bits_stats\n }\n return retval\n\n def get_bits_stat(self):\n table = Texttable()\n data = [['Num bits (N)', 'N-bits WQs / Placed Qs', 'N-bits AQs / Placed Qs', 'N-bits Qs / Placed Qs']]\n for p in self.params_bits_stat:\n row = [p]\n for h in (self.WEIGHTS_RATIO_STR, self.ACTIVATIONS_RATIO_STR, self.TOTAL_RATIO_STR):\n try:\n row.append(self.stat[h][p] / self._ratio[self.TOTAL_RATIO_STR] * 100)\n except ZeroDivisionError:\n row.append(0)\n data.append(row)\n table.add_rows(data)\n return table\n\nclass MemoryCostMetric(BaseMetric):\n \"\"\"\n\n This metric considers:\n - how many times memory consumption for network weights will decrease.\n - how many times memory consumption* for activations tensor will decrease.\n\n * Reflects host memory consumption, assuming only the final low-precision output activation tensors are stored\n in host memory (i.e. assuming intermediate accumulation results are only stored in device memory)\n\n \"\"\"\n PARAMS_STR = 'params'\n NAME_STR = 'MemoryCost'\n\n EXPECTED_MEMORY_CONSUMPTION_DECREASE_STR = 'Memory consumption decrease for weights'\n SIZE_MEMORY_FP_WEIGHTS_STR = 'Memory consumption for full-precision weights'\n SIZE_MEMORY_COMPRESSED_WEIGHTS_STR = 'Memory consumption for quantized weights'\n MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_FP32_MODEL_STR =\\\n 'Max memory consumption for an activation tensor in FP32 model'\n MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_COMPRESSED_MODEL_STR =\\\n 'Max memory consumption for an activation tensor in compressed model'\n\n def __init__(self, compressed_model: NNCFNetwork, weights_quantizers, non_weight_quantizers):\n super().__init__()\n self._compressed_model = compressed_model\n self._weights_quantizers = weights_quantizers\n self._non_weight_quantizers = {k: v.quantizer_module_ref for k, v in non_weight_quantizers.items()}\n self.header = [self.EXPECTED_MEMORY_CONSUMPTION_DECREASE_STR, self.SIZE_MEMORY_FP_WEIGHTS_STR,\\\n self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR,\\\n self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_FP32_MODEL_STR,\\\n self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_COMPRESSED_MODEL_STR]\n self.stat = {}\n\n def collect(self):\n self.stat[self.SIZE_MEMORY_FP_WEIGHTS_STR] = 0\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR] = 0\n fp_num_bits = 32\n nncf_modules = self._compressed_model.get_nncf_modules()\n\n for scope_module, nncf_module in nncf_modules.items():\n count_el = np.prod(nncf_module.weight.shape)\n self.stat[self.SIZE_MEMORY_FP_WEIGHTS_STR] += count_el * fp_num_bits\n status, quantizer = self._get_quantizer_for_scope(scope_module, self._weights_quantizers)\n if status > 0:\n num_bits = quantizer.num_bits\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR] += count_el * num_bits\n else:\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR] += count_el * fp_num_bits\n try:\n self.stat[self.EXPECTED_MEMORY_CONSUMPTION_DECREASE_STR] = self.stat[self.SIZE_MEMORY_FP_WEIGHTS_STR] /\\\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR]\n except ZeroDivisionError:\n self.stat[self.EXPECTED_MEMORY_CONSUMPTION_DECREASE_STR] = 0\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR] /= 2**23\n self.stat[self.SIZE_MEMORY_FP_WEIGHTS_STR] /= 2**23\n\n original_graph = deepcopy(self._compressed_model.get_original_graph())\n\n memory_consumption_fp_model = {}\n memory_consumption_compressed_model = {}\n # pylint: disable=protected-access\n 
original_nx_graph = original_graph._nx_graph\n nx.set_edge_attributes(original_nx_graph, 32, \"precision\")\n input_nodes = original_graph.get_input_nodes()\n input_node_keys = []\n for input_node in input_nodes:\n input_node_key = original_graph.get_node_key_by_id(input_node.node_id)\n input_node_keys.append(input_node_key)\n next_nodes = original_graph.get_next_nodes(input_node)\n for next_node in next_nodes:\n scope = next_node.op_exec_context.scope_in_model\n status, quantizer = self._get_quantizer_for_scope(scope, self._non_weight_quantizers)\n if status:\n next_node_key = original_graph.get_node_key_by_id(next_node.node_id)\n num_bits = quantizer.num_bits\n original_nx_graph.edges[input_node_key, next_node_key]['precision'] = num_bits\n\n for u, v in original_nx_graph.edges:\n if u in input_node_keys:\n continue\n\n shape = original_nx_graph.edges[u, v][NNCFGraph.ACTIVATION_SHAPE_EDGE_ATTR]\n u_node_scope_str = str(original_nx_graph.nodes[u]['op_exec_context'].input_agnostic)\n num_bits = self.get_precision_for_activation_tensor(u, v, original_nx_graph)\n original_nx_graph.edges[u, v]['precision'] = num_bits\n memory_consumption_fp_model[u_node_scope_str] = np.prod(shape) * fp_num_bits\n memory_consumption_compressed_model[u_node_scope_str] = np.prod(shape) * num_bits\n try:\n self.stat[self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_FP32_MODEL_STR] =\\\n max(memory_consumption_fp_model.values()) / 2**23\n self.stat[self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_COMPRESSED_MODEL_STR] =\\\n max(memory_consumption_compressed_model.values()) / 2**23\n except ValueError:\n self.stat[self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_FP32_MODEL_STR] = 0\n self.stat[self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_COMPRESSED_MODEL_STR] = 0\n\n def get_precision_for_activation_tensor(self, u_node, v_node, original_nx_graph):\n scope_u_node = original_nx_graph.nodes[u_node][NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].scope_in_model\n # pylint: disable=protected-access\n pred_u_nodes = original_nx_graph._pred[u_node]\n precision_enter_activation_tensor =\\\n max([0] + [original_nx_graph.edges[pred_u_node, u_node]['precision'] for pred_u_node in pred_u_nodes])\n module = self._compressed_model.get_module_by_scope(scope_u_node)\n if is_nncf_module(module):\n status, quantizer = self._get_quantizer_for_scope(scope_u_node, self._weights_quantizers)\n if status:\n precision = max(quantizer.num_bits, precision_enter_activation_tensor)\n else:\n precision = 32\n return precision\n\n u_node_scope_str = str(original_nx_graph.nodes[u_node][NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].input_agnostic)\n if u_node_scope_str in self._compressed_model.activation_quantizers:\n precision = self._compressed_model.activation_quantizers[u_node_scope_str].num_bits\n else:\n precision = precision_enter_activation_tensor\n return precision\n\n def _get_quantizer_for_scope(self, scope, quatizers):\n for quantizer_id, quantizer in quatizers.items():\n if quantizer_id.get_scope() == scope:\n return True, quantizer\n return False, None\n\n def get_metric_table(self):\n table = Texttable()\n data = [['Metric type', 'Value']]\n data.append([self.header[0], self.stat[self.header[0]]])\n for h in self.header[1:]:\n data.append([h + ' (Mbyte)', self.stat[h]])\n table.add_rows(data)\n\n retval = {\"Memory consumption statistics:\": table}\n return retval\n\n\nclass ShareEdgesQuantizedDataPath(BaseMetric):\n \"\"\"\n\n This metric calculates the percentage of quantized edges relative to the total number of edges\n in the original network 
graph. \"Quantized edge\" is an edge representing a quantized activation tensor.\n\n \"\"\"\n NAME_STR = 'ShareEdgesQuantizedDataPath'\n COUNT_QUANTIZED_EDGES_STR = 'Share edges of the quantized data path'\n QUANTIZED_EDGES_ATTR = 'quantized'\n PASSED_EDGES_ATTR = 'passed'\n NODES_GRAPH_ATTR = 'nodes'\n IS_MERGED_GRAPH_ATTR = 'is_merged'\n\n\n def __init__(self, compressed_model: NNCFNetwork):\n super().__init__()\n self._compressed_model = compressed_model\n self.stat = {}\n\n def collect(self):\n # pylint: disable=too-many-branches\n merged_original_graph =\\\n self.get_merged_original_graph_with_patterns(self._compressed_model.get_original_graph())\n self.stat[self.COUNT_QUANTIZED_EDGES_STR] = 0\n self.header = [self.COUNT_QUANTIZED_EDGES_STR]\n nx.set_edge_attributes(merged_original_graph, False, self.QUANTIZED_EDGES_ATTR)\n nx.set_edge_attributes(merged_original_graph, False, self.PASSED_EDGES_ATTR)\n # pylint: disable=protected-access\n input_nodes = [node for node in merged_original_graph.nodes if len(merged_original_graph._pred[node]) == 0]\n queue = deque()\n for input_node in input_nodes:\n # pylint: disable=protected-access\n next_nodes = merged_original_graph._succ[input_node]\n for next_node_key in next_nodes:\n edge = merged_original_graph.edges[input_node, next_node_key]\n edge[self.PASSED_EDGES_ATTR] = True\n edge[self.QUANTIZED_EDGES_ATTR] = True\n self.stat[self.COUNT_QUANTIZED_EDGES_STR] += 1\n queue.appendleft(next_node_key)\n visited_nodes = {}\n #pylint: disable=too-many-nested-blocks\n while len(queue) != 0:\n node_key = queue.pop()\n if node_key in visited_nodes:\n continue\n if self._all_enter_edges_in_node_of_type(merged_original_graph, node_key, self.PASSED_EDGES_ATTR):\n visited_nodes[node_key] = True\n node = merged_original_graph.nodes[node_key]\n if node[self.IS_MERGED_GRAPH_ATTR]:\n last_node = node[self.NODES_GRAPH_ATTR][-1]\n scope_str = str(last_node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].input_agnostic)\n if scope_str in self._compressed_model.activation_quantizers:\n self._marking_edges(merged_original_graph, node_key, queue)\n else:\n self._marking_edges(merged_original_graph, node_key, queue, False)\n else:\n scope_str = str(node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].input_agnostic)\n if scope_str in self._compressed_model.activation_quantizers:\n self._marking_edges(merged_original_graph, node_key, queue)\n else:\n is_op_non_change_precision_activation_tensor = True\n node_op_name = node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].operator_name\n for op in DEFAULT_QUANT_TRAIT_TO_OP_DICT[QuantizationTrait.INPUTS_QUANTIZABLE]:\n op_names = [op.name]\n if op.torch_tensor_patch_spec is not None:\n op_names = op.torch_tensor_patch_spec.underlying_function_names\n if node_op_name in op_names:\n is_op_non_change_precision_activation_tensor = False\n break\n status = is_op_non_change_precision_activation_tensor and\\\n self._all_enter_edges_in_node_of_type(merged_original_graph,\\\n node_key, self.QUANTIZED_EDGES_ATTR)\n self._marking_edges(merged_original_graph, node_key, queue, status)\n else:\n queue.appendleft(node_key)\n self.num_merged_original_graph_edges = len(merged_original_graph.edges)\n\n def _get_copy_statistics(self):\n statistics = deepcopy(self.stat)\n try:\n statistics[self.COUNT_QUANTIZED_EDGES_STR] /= self.num_merged_original_graph_edges\n statistics[self.COUNT_QUANTIZED_EDGES_STR] *= 100\n except ZeroDivisionError:\n statistics[self.COUNT_QUANTIZED_EDGES_STR] = 0\n\n return statistics\n\n def _all_enter_edges_in_node_of_type(self, graph, node_key, 
type_edge):\n # pylint: disable=protected-access\n prev_nodes = graph._pred[node_key]\n retval = True\n for prev_node_key in prev_nodes:\n edge = graph.edges[prev_node_key, node_key]\n if not edge[type_edge]:\n retval = False\n break\n return retval\n\n def _marking_edges(self, graph, node_key, queue, mark=True):\n # pylint: disable=protected-access\n next_nodes = graph._succ[node_key]\n for next_node_key in next_nodes:\n edge = graph.edges[node_key, next_node_key]\n edge[self.QUANTIZED_EDGES_ATTR] = mark\n edge[self.PASSED_EDGES_ATTR] = True\n queue.appendleft(next_node_key)\n if mark:\n self.stat[self.COUNT_QUANTIZED_EDGES_STR] += 1\n\n def get_metric_table(self):\n table = Texttable()\n data = [['Metric type', 'Value']]\n try:\n data.append([self.header[0], '{:.2f} % ({} / {})'.format(\n self.stat[self.COUNT_QUANTIZED_EDGES_STR] / self.num_merged_original_graph_edges * 100,\n self.stat[self.COUNT_QUANTIZED_EDGES_STR], self.num_merged_original_graph_edges)])\n except ZeroDivisionError:\n data.append([self.header[0], '{} % '.format(0)])\n table.add_rows(data)\n\n retval = {\"Quantization configuration statistics:\" : table}\n return retval\n\n def get_merged_original_graph_with_patterns(self, original_graph: NNCFGraph):\n import nncf.dynamic_graph.patterns as p\n from nncf.dynamic_graph.graph_matching import search_all\n\n pattern = p.LINEAR_OPS + p.ANY_BN_ACT_COMBO | p.LINEAR_OPS + p.ELTWISE_UNIFORM_OPS\n # pylint: disable=protected-access\n matches = search_all(original_graph._nx_graph, pattern)\n merged_graph = deepcopy(original_graph._nx_graph)\n nx.set_node_attributes(merged_graph, False, self.IS_MERGED_GRAPH_ATTR)\n for match in matches:\n if len(match) == 1:\n continue\n\n input_node_key = match[0]\n output_node_key = match[-1]\n in_edges = list(merged_graph.in_edges(input_node_key))\n out_edges = list(merged_graph.out_edges(output_node_key))\n\n in_edge_copies_dict = {}\n for in_edge_key in in_edges:\n in_edge_copies_dict[in_edge_key] = deepcopy(merged_graph.edges[in_edge_key])\n out_edge_copies_dict = {}\n for out_edge_key in out_edges:\n out_edge_copies_dict[out_edge_key] = deepcopy(merged_graph.edges[out_edge_key])\n\n merged_node_key = \"\"\n merged_nodes = []\n for node_key in match:\n merged_node_key += node_key + '\\n'\n # pylint: disable=protected-access\n merged_nodes.append(original_graph._nx_graph.nodes[node_key])\n merged_graph.remove_node(node_key)\n merged_node_attrs = {\n NNCFGraph.KEY_NODE_ATTR: merged_node_key,\n self.NODES_GRAPH_ATTR: merged_nodes,\n self.IS_MERGED_GRAPH_ATTR: True\n }\n merged_graph.add_node(merged_node_key, **merged_node_attrs)\n for in_edge_key, in_edge_attrs in in_edge_copies_dict.items():\n merged_graph.add_edge(in_edge_key[0], merged_node_key, **in_edge_attrs)\n for out_edge_key, out_edge_attrs in out_edge_copies_dict.items():\n merged_graph.add_edge(merged_node_key, out_edge_key[1], **out_edge_attrs)\n\n return merged_graph\n\n @staticmethod\n def visualize_marked_graph(merged_original_graph):\n out_graph = nx.DiGraph()\n for node_key, _ in merged_original_graph.nodes.items():\n out_graph.add_node(node_key)\n for u, v in merged_original_graph.edges:\n edge = merged_original_graph.edges[u, v]\n if edge[ShareEdgesQuantizedDataPath.QUANTIZED_EDGES_ATTR]:\n attrs = {\"color\": \"blue\"}\n out_graph.add_edge(u, v, **attrs)\n return out_graph\n", "import torch.nn as nn\nfrom .registry import Registry\n\n\nCOMPRESSION_MODULES = Registry('compression modules')\n\n\nclass ProxyModule:\n def __init__(self, module):\n self._module = module\n\n def 
__getattr__(self, name):\n return getattr(self._module, name)\n\n\nclass _NNCFModuleMixin:\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n _NNCFModuleMixin.add_mixin_fields(self)\n\n @staticmethod\n def add_mixin_fields(obj):\n obj.pre_ops = nn.ModuleDict()\n obj.post_ops = nn.ModuleDict()\n\n def get_pre_op(self, key):\n return self.pre_ops[key]\n\n def get_post_op(self, key):\n return self.post_ops[key]\n\n def register_pre_forward_operation(self, op):\n key = str(len(self.pre_ops))\n self.pre_ops[key] = op\n return key\n\n def remove_pre_forward_operation(self, key):\n return self.pre_ops.pop(key)\n\n def register_post_forward_operation(self, op):\n key = str(len(self.post_ops))\n self.post_ops[key] = op\n return key\n\n def remove_post_forward_operation(self, key):\n return self.post_ops.pop(key)\n\n def forward(self, *args):\n proxy_module = ProxyModule(self)\n for op in self.pre_ops.values():\n op_args = op(proxy_module, args)\n if op_args is not None:\n if not isinstance(op_args, tuple):\n op_args = tuple([op_args])\n args = op_args\n results = super().forward.__func__(proxy_module, *args)\n for op in self.post_ops.values():\n op_results = op(proxy_module, results)\n if op_results is not None:\n results = op_results\n return results\n", "\"\"\"\n Copyright (c) 2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom typing import Dict\n\nimport torch\nfrom torch import Tensor\n\nfrom ...dynamic_graph.context import no_nncf_trace\n\n\nclass PerturbationObserver:\n def __init__(self, device):\n super().__init__()\n self.device = device\n self.perturbation = None\n self.numels = None\n\n def calc_perturbation(self, module, inputs: Tensor, output: Tensor):\n input_ = inputs[0] if isinstance(inputs, tuple) else inputs\n with no_nncf_trace():\n self.perturbation = torch.norm(input_ - output, p=2) ** 2\n self.numels = input_.size().numel()\n self.input_norm = torch.norm(input_, p=2) ** 2\n\n def reset(self):\n self.perturbation = None\n self.numels = None\n\n def get_observation(self):\n return self.perturbation\n\n def get_numels(self):\n return self.numels\n\n def get_input_norm(self):\n return self.input_norm\n\n\nclass Perturbations:\n def __init__(self):\n self._perturbations = {} # type: Dict[int, Dict[int, Tensor]]\n\n def add(self, layer_id: int, bitwidth: int, perturbation: Tensor):\n if layer_id in self._perturbations:\n self._perturbations[layer_id].update({bitwidth: perturbation})\n else:\n self._perturbations[layer_id] = {bitwidth: perturbation}\n\n def get(self, layer_id: int, bitwidth: int) -> Tensor:\n layer_perturbations = self._perturbations[layer_id]\n return layer_perturbations[bitwidth]\n\n def get_all(self) -> Dict[int, Dict[int, Tensor]]:\n return self._perturbations\n", "#\n# Copyright (c) 2019-2020 Intel Corporation\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# 
http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\n@package docstring\nThis package defines the API for the NNCF compression methods, so that the user could\nextend the existing algorithms.\n\"\"\"\nimport functools\nfrom copy import copy\nfrom enum import Enum\nfrom functools import partial\n\nimport torch\nfrom torch import nn\n\nfrom nncf.config import NNCFConfig\nfrom nncf.dynamic_graph.graph_builder import create_mock_tensor\nfrom nncf.initialization import DataLoaderBNAdaptationRunner\nfrom nncf.nncf_logger import logger as nncf_logger\nfrom nncf.nncf_network import NNCFNetwork\nfrom nncf.structures import BNAdaptationInitArgs\nfrom nncf.utils import should_consider_scope\n\n\nclass CompressionLoss(nn.Module):\n \"\"\"\n Used to calculate additional loss to be added to the base loss during the\n training process. It uses the model graph to measure variables and activations\n values of the layers during the loss construction. For example, the $L_0$-based\n sparsity algorithm calculates the number of non-zero weights in convolutional\n and fully-connected layers to construct the loss function.\n \"\"\"\n\n def forward(self):\n \"\"\"\n Returns the compression loss value.\n \"\"\"\n return torch.zeros([])\n\n def statistics(self):\n \"\"\"\n Returns a dictionary of printable statistics.\n \"\"\"\n return {}\n\n\nclass CompressionScheduler:\n \"\"\"\n Implements the logic of compression method control during the training process.\n May change the method hyperparameters in regards to the current training step or\n epoch. 
For example, the sparsity method can smoothly increase the sparsity rate\n over several epochs.\n \"\"\"\n\n def __init__(self):\n self.last_epoch = -1\n self.last_step = -1\n self._steps_in_current_epoch = 0\n\n def step(self, last=None):\n \"\"\"\n Should be called after each optimizer step during training.\n Arguments:\n `last` - specifies the initial \"previous\" step\n \"\"\"\n if last is None:\n last = self.last_step + 1\n self.last_step = last\n self._steps_in_current_epoch += 1\n\n def epoch_step(self, last=None):\n \"\"\"\n Should be called after each training epoch.\n Arguments:\n `last` - specifies the initial \"previous\" epoch\n \"\"\"\n if last is None:\n last = self.last_epoch + 1\n self.last_epoch = last\n self._steps_in_current_epoch = 0\n\n def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)\n\n def state_dict(self):\n default_keys = {'last_step', 'last_epoch'}\n return {key: val for key, val in self.__dict__.items() if key in default_keys}\n\n def initialize(self):\n pass\n\n\[email protected]_ordering\nclass CompressionLevel(Enum):\n NONE = 0\n PARTIAL = 1\n FULL = 2\n\n # pylint:disable=comparison-with-callable\n def __add__(self, other: 'CompressionLevel') -> 'CompressionLevel':\n \"\"\"\n Defines compression level of a composite compression controller, consist of two algorithms, where `self` is\n compression level of first algorithm and other - compression level of second one.\n NONE & NONE = NONE\n PARTIAL & PARTIAL = PARTIAL\n FULL & FULL = FULL\n NONE & PARTIAL = PARTIAL\n NONE & FULL = PARTIAL\n PARTIAL & FULL = PARTIAL\n Args:\n other: instance of another compression level\n Returns:\n common compression level of two algorithms\n \"\"\"\n if self.value == other.value:\n return self\n return CompressionLevel.PARTIAL\n\n def __lt__(self, other: 'CompressionLevel') -> bool:\n return self.value < other.value\n\n\nclass CompressionAlgorithmController:\n \"\"\"Serves as a handle to the additional modules, parameters and hooks inserted\n into the original uncompressed model in order to enable algorithm-specific compression.\n Hosts entities that are to be used during the training process, such as compression scheduler and\n compression loss.\"\"\"\n\n def __init__(self, target_model: NNCFNetwork):\n self._model = target_model\n self._loss = CompressionLoss()\n self._scheduler = CompressionScheduler()\n\n @property\n def loss(self):\n return self._loss\n\n @property\n def scheduler(self):\n return self._scheduler\n\n def distributed(self):\n \"\"\"\n Should be called when distributed training with multiple training processes\n is going to be used (i.e. after the model is wrapped with DistributedDataParallel).\n Any special preparations for the algorithm to properly support distributed training\n should be made inside this function.\n \"\"\"\n\n def compression_level(self) -> CompressionLevel:\n \"\"\"\n Returns level of compression. 
Should be used on saving best checkpoints to distinguish between\n uncompressed, partially compressed and fully compressed models.\n \"\"\"\n raise NotImplementedError()\n\n def statistics(self):\n \"\"\"\n Returns a dictionary of printable statistics.\n \"\"\"\n stats = self._loss.statistics()\n if hasattr(self._model, 'statistics'):\n stats.update(self._model.statistics())\n return stats\n\n def run_batchnorm_adaptation(self, config):\n initializer_params = config.get(\"initializer\", {})\n init_bn_adapt_config = initializer_params.get('batchnorm_adaptation', {})\n num_bn_adaptation_steps = init_bn_adapt_config.get('num_bn_adaptation_steps', 0)\n num_bn_forget_steps = init_bn_adapt_config.get('num_bn_forget_steps', 5)\n\n if num_bn_adaptation_steps < 0:\n raise AttributeError('Number of batch adaptation steps must be >= 0')\n if num_bn_adaptation_steps > 0:\n try:\n bn_adaptation_args = config.get_extra_struct(BNAdaptationInitArgs)\n except KeyError:\n nncf_logger.info(\n 'Could not run batchnorm adaptation '\n 'as the adaptation data loader is not provided as an extra struct. '\n 'Refer to `NNCFConfig.register_extra_structs` and the `BNAdaptationInitArgs` class')\n return\n\n bn_adaptation_runner = DataLoaderBNAdaptationRunner(self._model, bn_adaptation_args.device,\n num_bn_forget_steps)\n bn_adaptation_runner.run(bn_adaptation_args.data_loader, num_bn_adaptation_steps)\n\n def prepare_for_export(self):\n pass\n\n def export_model(self, filename, *args, **kwargs):\n \"\"\"\n Used to export the compressed model for inference into the ONNX format.\n Makes method-specific preparations of the model graph,\n (e.g. removing auxiliary layers that were used for the model compression),\n then exports the model and dumps it into the output file.\n Parameters:\n `filename` - a path to the file for the exported model to be saved into.\n *args, **kwargs - if the model's `forward` requires additional parameters\n during export, specify these here.\n \"\"\"\n self.prepare_for_export()\n model = self._model.eval().cpu()\n input_tensor_list = []\n for info in self._model.input_infos:\n single_batch_info = copy(info)\n input_shape = tuple([1] + list(info.shape)[1:])\n single_batch_info.shape = input_shape\n input_tensor_list.append(create_mock_tensor(single_batch_info, \"cpu\"))\n original_forward = model.forward\n model.forward = partial(model.forward, *args, **kwargs)\n # pylint:disable=unexpected-keyword-arg\n with torch.no_grad():\n torch.onnx.export(model, tuple(input_tensor_list),\n filename, verbose=True, enable_onnx_checker=False, opset_version=10)\n model.forward = original_forward\n\n\nclass CompressionAlgorithmBuilder:\n \"\"\"\n Determines which modifications should be made to the original FP32 model in\n order to enable algorithm-specific compression during fine-tuning. Operates\n on an NNCFNetwork object wrapping a target PyTorch model (torch.nn.Module).\n \"\"\"\n\n def __init__(self, config: NNCFConfig, should_init: bool = True):\n \"\"\"\n Arguments:\n `config` - a dictionary that contains parameters of compression method\n `should_init` - if False, trainable parameter initialization will be skipped during building\n \"\"\"\n self.config = config\n self.should_init = should_init\n if not isinstance(self.config, list):\n self.ignored_scopes = self.config.get('ignored_scopes')\n self.target_scopes = self.config.get('target_scopes')\n\n def apply_to(self, target_model: NNCFNetwork) -> NNCFNetwork:\n \"\"\"\n Applies algorithm-specific modifications to the model. 
Hooks to be executed during model\n forward operation may be registered using NNCFNetwork command insertion methods. Additional\n compression modules that are expected to be saved along with the network via torch.save should also be\n registered and added to the model here.\n :param target_model: An instance of NNCFNetwork for the algorithm to be applied to.\n :return: NNCFNetwork with algorithm-specific modifications applied\n \"\"\"\n self._model = target_model # type: NNCFNetwork\n return target_model\n\n def build_controller(self, target_model: NNCFNetwork) -> CompressionAlgorithmController:\n \"\"\"\n Should be called once the compressed model target_model is fully constructed (i.e. hooks are applied and\n modules are in place. Returns a CompressionAlgorithmController object containing information\n and references to the compressed model or specific modules thereof required for the corresponding compression\n scheduler operation or compression loss calculation.\n :param target_model: An instance of NNCFNetwork with current algorithm already applied\n :return: A CompressionAlgorithmController object.\n \"\"\"\n\n def _should_consider_scope(self, scope_str: str) -> bool:\n return should_consider_scope(scope_str, self.target_scopes, self.ignored_scopes)\n", "\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport os.path as osp\nimport sys\nimport time\nfrom pathlib import Path\n\nimport torch\nimport torch.utils.data as data\nfrom examples.common.sample_config import create_sample_config, SampleConfig\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nfrom examples.common.argparser import get_common_argument_parser\nfrom examples.common.distributed import DistributedSampler, configure_distributed\nfrom examples.common.example_logger import logger\nfrom examples.common.execution import ExecutionMode, get_device, get_execution_mode\nfrom examples.common.execution import prepare_model_for_execution, start_worker\nfrom nncf.compression_method_api import CompressionLevel\nfrom nncf.initialization import register_default_init_args\nfrom examples.common.optimizer import get_parameter_groups, make_optimizer\nfrom examples.common.utils import get_name, make_additional_checkpoints, print_statistics, configure_paths, \\\n create_code_snapshot, is_on_first_rank, configure_logging, print_args, is_pretrained_model_requested\nfrom examples.common.utils import write_metrics\nfrom examples.object_detection.dataset import detection_collate, get_testing_dataset, get_training_dataset\nfrom examples.object_detection.eval import test_net\nfrom examples.object_detection.layers.modules import MultiBoxLoss\nfrom examples.object_detection.model import build_ssd\nfrom nncf import create_compressed_model, load_state\nfrom nncf.dynamic_graph.graph_builder import create_input_infos\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\ndef get_option(args, config, key, default=None):\n \"\"\"Gets key option from args if it is provided, 
otherwise tries to get it from config\"\"\"\n if hasattr(args, key) and getattr(args, key) is not None:\n return getattr(args, key)\n return config.get(key, default)\n\n\ndef get_argument_parser():\n parser = get_common_argument_parser()\n\n parser.add_argument('--basenet', default='', help='pretrained base model, should be located in save_folder')\n parser.add_argument('--test-interval', default=5000, type=int, help='test interval')\n parser.add_argument(\"--dataset\", help=\"Dataset to use.\", choices=[\"voc\", \"coco\"], default=None)\n parser.add_argument('--train_imgs', help='path to training images or VOC root directory')\n parser.add_argument('--train_anno', help='path to training annotations or VOC root directory')\n parser.add_argument('--test_imgs', help='path to testing images or VOC root directory')\n parser.add_argument('--test_anno', help='path to testing annotations or VOC root directory')\n return parser\n\n\ndef main(argv):\n parser = get_argument_parser()\n args = parser.parse_args(args=argv)\n config = create_sample_config(args, parser)\n\n configure_paths(config)\n source_root = Path(__file__).absolute().parents[2] # nncf root\n create_code_snapshot(source_root, osp.join(config.log_dir, \"snapshot.tar.gz\"))\n\n config.execution_mode = get_execution_mode(config)\n\n if config.dataset_dir is not None:\n config.train_imgs = config.train_anno = config.test_imgs = config.test_anno = config.dataset_dir\n start_worker(main_worker, config)\n\n\ndef main_worker(current_gpu, config):\n #################################\n # Setup experiment environment\n #################################\n config.current_gpu = current_gpu\n config.distributed = config.execution_mode in (ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)\n if config.distributed:\n configure_distributed(config)\n if is_on_first_rank(config):\n configure_logging(logger, config)\n print_args(config)\n\n config.device = get_device(config)\n config.start_iter = 0\n nncf_config = config.nncf_config\n ##########################\n # Prepare metrics log file\n ##########################\n\n if config.metrics_dump is not None:\n write_metrics(0, config.metrics_dump)\n\n ###########################\n # Criterion\n ###########################\n\n criterion = MultiBoxLoss(\n config,\n config['num_classes'],\n overlap_thresh=0.5,\n prior_for_matching=True,\n bkg_label=0,\n neg_mining=True,\n neg_pos=3,\n neg_overlap=0.5,\n encode_target=False,\n device=config.device\n )\n\n train_data_loader = test_data_loader = None\n resuming_checkpoint_path = config.resuming_checkpoint_path\n\n ###########################\n # Prepare data\n ###########################\n\n pretrained = is_pretrained_model_requested(config)\n\n if config.to_onnx is not None:\n assert pretrained or (resuming_checkpoint_path is not None)\n else:\n test_data_loader, train_data_loader = create_dataloaders(config)\n nncf_config = register_default_init_args(nncf_config, train_data_loader, criterion, config.device)\n\n ##################\n # Prepare model\n ##################\n resuming_checkpoint_path = config.resuming_checkpoint_path\n resuming_checkpoint = None\n resuming_model_state_dict = None\n\n if resuming_checkpoint_path:\n logger.info('Resuming from checkpoint {}...'.format(resuming_checkpoint_path))\n resuming_checkpoint = torch.load(resuming_checkpoint_path, map_location='cpu')\n # use checkpoint itself in case only the state dict was saved,\n # i.e. 
the checkpoint was created with `torch.save(module.state_dict())`\n resuming_model_state_dict = resuming_checkpoint.get('state_dict', resuming_checkpoint)\n\n compression_ctrl, net = create_model(config, resuming_model_state_dict)\n if config.distributed:\n config.batch_size //= config.ngpus_per_node\n config.workers //= config.ngpus_per_node\n compression_ctrl.distributed()\n\n ###########################\n # Optimizer\n ###########################\n\n params_to_optimize = get_parameter_groups(net, config)\n optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)\n\n #################################\n # Load additional checkpoint data\n #################################\n\n if resuming_checkpoint is not None and config.mode.lower() == 'train' and config.to_onnx is None:\n compression_ctrl.scheduler.load_state_dict(resuming_checkpoint['scheduler'])\n optimizer.load_state_dict(resuming_checkpoint.get('optimizer', optimizer.state_dict()))\n config.start_iter = resuming_checkpoint.get('iter', 0) + 1\n\n if config.to_onnx:\n compression_ctrl.export_model(config.to_onnx)\n logger.info(\"Saved to {}\".format(config.to_onnx))\n return\n\n if config.mode.lower() == 'test':\n with torch.no_grad():\n print_statistics(compression_ctrl.statistics())\n net.eval()\n mAp = test_net(net, config.device, test_data_loader, distributed=config.distributed)\n if config.metrics_dump is not None:\n write_metrics(mAp, config.metrics_dump)\n return\n\n train(net, compression_ctrl, train_data_loader, test_data_loader, criterion, optimizer, config, lr_scheduler)\n\n\ndef create_dataloaders(config):\n logger.info('Loading Dataset...')\n train_dataset = get_training_dataset(config.dataset, config.train_anno, config.train_imgs, config)\n logger.info(\"Loaded {} training images\".format(len(train_dataset)))\n if config.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,\n num_replicas=config.ngpus_per_node,\n rank=config.rank)\n else:\n train_sampler = None\n train_data_loader = data.DataLoader(\n train_dataset, config.batch_size,\n num_workers=config.workers,\n shuffle=(train_sampler is None),\n collate_fn=detection_collate,\n pin_memory=True,\n sampler=train_sampler\n )\n test_dataset = get_testing_dataset(config.dataset, config.test_anno, config.test_imgs, config)\n logger.info(\"Loaded {} testing images\".format(len(test_dataset)))\n if config.distributed:\n test_sampler = DistributedSampler(test_dataset, config.rank, config.world_size)\n else:\n test_sampler = torch.utils.data.SequentialSampler(test_dataset)\n test_data_loader = data.DataLoader(\n test_dataset, config.batch_size,\n num_workers=config.workers,\n shuffle=False,\n collate_fn=detection_collate,\n pin_memory=True,\n drop_last=False,\n sampler=test_sampler\n )\n return test_data_loader, train_data_loader\n\n\ndef create_model(config: SampleConfig, resuming_model_sd: dict = None):\n input_info_list = create_input_infos(config.nncf_config)\n image_size = input_info_list[0].shape[-1]\n ssd_net = build_ssd(config.model, config.ssd_params, image_size, config.num_classes, config)\n weights = config.get('weights')\n if weights:\n sd = torch.load(weights, map_location='cpu')\n load_state(ssd_net, sd)\n\n ssd_net.to(config.device)\n\n compression_ctrl, compressed_model = create_compressed_model(ssd_net, config.nncf_config, resuming_model_sd)\n compressed_model, _ = prepare_model_for_execution(compressed_model, config)\n\n compressed_model.train()\n return compression_ctrl, compressed_model\n\n\ndef 
train_step(batch_iterator, compression_ctrl, config, criterion, net, train_data_loader):\n batch_loss_l = torch.tensor(0.).to(config.device)\n batch_loss_c = torch.tensor(0.).to(config.device)\n batch_loss = torch.tensor(0.).to(config.device)\n for _ in range(0, config.iter_size):\n # load train data\n try:\n images, targets = next(batch_iterator)\n except StopIteration:\n logger.debug(\"StopIteration: can not load batch\")\n batch_iterator = iter(train_data_loader)\n break\n\n images = images.to(config.device)\n targets = [anno.requires_grad_(False).to(config.device) for anno in targets]\n\n # forward\n out = net(images)\n # backprop\n loss_l, loss_c = criterion(out, targets)\n loss_comp = compression_ctrl.loss()\n loss = loss_l + loss_c + loss_comp\n batch_loss += loss\n loss.backward()\n batch_loss_l += loss_l\n batch_loss_c += loss_c\n return batch_iterator, batch_loss, batch_loss_c, batch_loss_l, loss_comp\n\n\n# pylint: disable=too-many-statements\ndef train(net, compression_ctrl, train_data_loader, test_data_loader, criterion, optimizer, config, lr_scheduler):\n net.train()\n # loss counters\n loc_loss = 0 # epoch\n conf_loss = 0\n\n epoch_size = len(train_data_loader)\n logger.info('Training {} on {} dataset...'.format(config.model, train_data_loader.dataset.name))\n batch_iterator = None\n\n t_start = time.time()\n print_statistics(compression_ctrl.statistics())\n\n best_mAp = 0\n best_compression_level = CompressionLevel.NONE\n test_freq_in_epochs = max(config.test_interval // epoch_size, 1)\n\n for iteration in range(config.start_iter, config['max_iter']):\n if (not batch_iterator) or (iteration % epoch_size == 0):\n # create batch iterator\n batch_iterator = iter(train_data_loader)\n\n epoch = iteration // epoch_size\n\n if (iteration + 1) % epoch_size == 0:\n compression_ctrl.scheduler.epoch_step(epoch)\n compression_level = compression_ctrl.compression_level()\n is_best = False\n\n if (epoch + 1) % test_freq_in_epochs == 0:\n if is_on_first_rank(config):\n print_statistics(compression_ctrl.statistics())\n with torch.no_grad():\n net.eval()\n mAP = test_net(net, config.device, test_data_loader, distributed=config.multiprocessing_distributed)\n is_best_by_mAP = mAP > best_mAp and compression_level == best_compression_level\n is_best = is_best_by_mAP or compression_level > best_compression_level\n if is_best:\n best_mAp = mAP\n best_compression_level = max(compression_level, best_compression_level)\n net.train()\n\n # Learning rate scheduling should be applied after optimizer’s update\n if not isinstance(lr_scheduler, ReduceLROnPlateau):\n lr_scheduler.step(epoch)\n else:\n lr_scheduler.step(mAP)\n\n if is_on_first_rank(config):\n logger.info('Saving state, iter: {}'.format(iteration))\n\n checkpoint_file_path = osp.join(config.checkpoint_save_dir, \"{}_last.pth\".format(get_name(config)))\n torch.save({\n 'state_dict': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'iter': config['max_iter'],\n 'scheduler': compression_ctrl.scheduler.state_dict(),\n 'compression_level': compression_level,\n }, str(checkpoint_file_path))\n make_additional_checkpoints(checkpoint_file_path,\n is_best=is_best,\n epoch=epoch + 1,\n config=config)\n\n compression_ctrl.scheduler.step(iteration - config.start_iter)\n\n optimizer.zero_grad()\n batch_iterator, batch_loss, batch_loss_c, batch_loss_l, loss_comp = train_step(\n batch_iterator, compression_ctrl, config, criterion, net, train_data_loader\n )\n optimizer.step()\n\n batch_loss_l = batch_loss_l / config.iter_size\n batch_loss_c = 
batch_loss_c / config.iter_size\n model_loss = (batch_loss_l + batch_loss_c) / config.iter_size\n batch_loss = batch_loss / config.iter_size\n\n loc_loss += batch_loss_l.item()\n conf_loss += batch_loss_c.item()\n\n ###########################\n # Logging\n ###########################\n\n if is_on_first_rank(config):\n config.tb.add_scalar(\"train/loss_l\", batch_loss_l.item(), iteration)\n config.tb.add_scalar(\"train/loss_c\", batch_loss_c.item(), iteration)\n config.tb.add_scalar(\"train/loss\", batch_loss.item(), iteration)\n\n if iteration % config.print_freq == 0:\n t_finish = time.time()\n t_elapsed = t_finish - t_start\n t_start = time.time()\n logger.info('{}: iter {} epoch {} || Loss: {:.4} || Time {:.4}s || lr: {} || CR loss: {}'.format(\n config.rank, iteration, epoch, model_loss.item(), t_elapsed, optimizer.param_groups[0]['lr'],\n loss_comp.item() if isinstance(loss_comp, torch.Tensor) else loss_comp\n ))\n\n if config.metrics_dump is not None:\n write_metrics(best_mAp, config.metrics_dump)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n" ]
[ [ "numpy.prod" ], [ "torch.nn.ModuleDict" ], [ "torch.norm" ], [ "torch.no_grad", "torch.zeros" ], [ "torch.utils.data.distributed.DistributedSampler", "torch.load", "torch.utils.data.SequentialSampler", "torch.utils.data.DataLoader", "torch.tensor", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nyw-pathfinder/Deep-Learning-Bootcamp-with-PyTorch
[ "5bf2efd3f921dc95461026df8f105ff7a5822fb5" ]
[ "generation/DCGAN/train.py" ]
[ "if __name__ == '__main__':\n import os\n from torchvision.transforms import Compose, Normalize, Resize, ToTensor\n from torch.utils.data import DataLoader\n from models import Discriminator, Generator, weights_init\n import torch\n import torch.nn as nn\n import matplotlib.pyplot as plt\n from time import time\n from tqdm import tqdm\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n BETA1, BETA2 = 0.5, 0.99\n BATCH_SIZE = 16\n DATASET_NAME = 'MNIST'\n DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0')\n EPOCHS = 1\n ITER_REPORT = 10\n LATENT_DIM = 100\n LR = 2e-4\n N_D_STEP = 1\n\n if DATASET_NAME == 'CIFAR10':\n from torchvision.datasets import CIFAR10\n transforms = Compose([ToTensor(), Normalize(mean=[0.5], std=[0.5])])\n dataset = CIFAR10(root='./datasets', train=True, transform=transforms, download=True)\n elif DATASET_NAME == 'LSUN':\n from torchvision.datasets import LSUN\n transforms = Compose([Resize(64), ToTensor(), Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])\n dataset = LSUN(root='./datasets/LSUN', classes=['bedroom_train'], transform=transforms)\n elif DATASET_NAME == 'MNIST':\n from torchvision.datasets import MNIST\n transforms = Compose([ToTensor(), Normalize(mean=[0.5], std=[0.5])])\n dataset = MNIST(root='./datasets', train=True, transform=transforms, download=True)\n else:\n raise NotImplementedError\n\n data_loader = DataLoader(dataset=dataset, batch_size=BATCH_SIZE, num_workers=0, shuffle=True)\n\n D = Discriminator(DATASET_NAME).apply(weights_init).to(DEVICE)\n G = Generator(DATASET_NAME).apply(weights_init).to(DEVICE)\n print(D, G)\n criterion = nn.BCELoss()\n\n optim_D = torch.optim.Adam(D.parameters(), lr=LR, betas=(BETA1, BETA2))\n optim_G = torch.optim.Adam(G.parameters(), lr=LR, betas=(BETA1, BETA2))\n\n list_D_loss = list()\n list_G_loss = list()\n total_step = 0\n\n st = time()\n for epoch in range(EPOCHS):\n for data in tqdm(data_loader):\n total_step += 1\n real, label = data[0].to(DEVICE), data[1].to(DEVICE)\n z = torch.randn(BATCH_SIZE, LATENT_DIM).to(DEVICE)\n\n fake = G(z)\n\n fake_score = D(fake.detach())\n real_score = D(real)\n\n D_loss = 0.5 * (criterion(fake_score, torch.zeros_like(fake_score).to(DEVICE))\n + criterion(real_score, torch.ones_like(real_score).to(DEVICE)))\n optim_D.zero_grad()\n D_loss.backward()\n optim_D.step()\n list_D_loss.append(D_loss.detach().cpu().item())\n\n if total_step % N_D_STEP == 0:\n fake_score = D(fake)\n G_loss = criterion(fake_score, torch.ones_like(fake_score))\n optim_G.zero_grad()\n G_loss.backward()\n optim_G.step()\n list_G_loss.append(G_loss.detach().cpu().item())\n\n if total_step % ITER_REPORT == 0:\n print(\"Epoch: {}, D_loss: {:.{prec}} G_loss: {:.{prec}}\"\n .format(epoch, D_loss.detach().cpu().item(), G_loss.detach().cpu().item(), prec=4))\n\n torch.save(D.state_dict(), '{}_D.pt'.format(DATASET_NAME))\n torch.save(G.state_dict(), '{}_G.pt'.format(DATASET_NAME))\n\n plt.figure()\n plt.plot(range(0, len(list_D_loss)), list_D_loss, linestyle='--', color='r', label='Discriminator loss')\n plt.plot(range(0, len(list_G_loss) * N_D_STEP, N_D_STEP), list_G_loss, linestyle='--', color='g',\n label='Generator loss')\n plt.xlabel('Iteration')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig('Loss.png')\n\n print(time() - st)\n" ]
[ [ "matplotlib.pyplot.legend", "torch.randn", "torch.utils.data.DataLoader", "torch.zeros_like", "matplotlib.pyplot.savefig", "torch.nn.BCELoss", "matplotlib.pyplot.ylabel", "torch.cuda.is_available", "matplotlib.pyplot.xlabel", "torch.ones_like", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rioyokotalab/RAFT
[ "d718fe86d11f8ab0d4d6d0b0b5c45fa029104444" ]
[ "core/utils/flow_viz.py" ]
[ "# Flow visualization code\n# used from https://github.com/tomrunia/OpticalFlow_Visualization\n\n# MIT License\n#\n# Copyright (c) 2018 Tom Runia\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to conditions.\n#\n# Author: Tom Runia\n# Date Created: 2018-08-03\n\nimport numpy as np\n\n\ndef make_colorwheel():\n \"\"\"\n Generates a color wheel for optical flow visualization as presented in:\n Baker et al. \"A Database and Evaluation Methodology for Optical Flow\"\n (ICCV, 2007)\n URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf\n\n Code follows the original C++ source code of Daniel Scharstein.\n Code follows the the Matlab source code of Deqing Sun.\n\n Returns:\n np.ndarray: Color wheel\n \"\"\"\n\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n colorwheel = np.zeros((ncols, 3))\n col = 0\n\n # RY\n colorwheel[0:RY, 0] = 255\n colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY)\n col = col + RY\n # YG\n colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG)\n colorwheel[col:col + YG, 1] = 255\n col = col + YG\n # GC\n colorwheel[col:col + GC, 1] = 255\n colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC)\n col = col + GC\n # CB\n colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB)\n colorwheel[col:col + CB, 2] = 255\n col = col + CB\n # BM\n colorwheel[col:col + BM, 2] = 255\n colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM)\n col = col + BM\n # MR\n colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR)\n colorwheel[col:col + MR, 0] = 255\n return colorwheel\n\n\ndef flow_uv_to_colors(u, v, convert_to_bgr=False):\n \"\"\"\n Applies the flow color wheel to (possibly clipped) flow components u and v.\n\n According to the C++ source code of Daniel Scharstein\n According to the Matlab source code of Deqing Sun\n\n Args:\n u (np.ndarray): Input horizontal flow of shape [H,W]\n v (np.ndarray): Input vertical flow of shape [H,W]\n convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.\n\n Returns:\n np.ndarray: Flow visualization image of shape [H,W,3]\n \"\"\"\n flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)\n colorwheel = make_colorwheel() # shape [55x3]\n ncols = colorwheel.shape[0]\n rad = np.sqrt(np.square(u) + np.square(v))\n a = np.arctan2(-v, -u) / np.pi\n fk = (a + 1) / 2 * (ncols - 1)\n k0 = np.floor(fk).astype(np.int32)\n k1 = k0 + 1\n k1[k1 == ncols] = 0\n f = fk - k0\n for i in range(colorwheel.shape[1]):\n tmp = colorwheel[:, i]\n col0 = tmp[k0] / 255.0\n col1 = tmp[k1] / 255.0\n col = (1 - f) * col0 + f * col1\n idx = (rad <= 1)\n col[idx] = 1 - rad[idx] * (1 - col[idx])\n col[~idx] = col[~idx] * 0.75 # out of range\n # Note the 2-i => BGR instead of RGB\n ch_idx = 2 - i if convert_to_bgr else i\n flow_image[:, :, ch_idx] = np.floor(255 * col)\n return flow_image\n\n\ndef flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):\n \"\"\"\n Expects a two dimensional flow image of shape.\n\n Args:\n flow_uv (np.ndarray): Flow UV image of shape [H,W,2]\n clip_flow (float, optional): Clip maximum of flow values. 
Defaults to None.\n convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.\n\n Returns:\n np.ndarray: Flow visualization image of shape [H,W,3]\n \"\"\"\n assert flow_uv.ndim == 3, 'input flow must have three dimensions'\n assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'\n if clip_flow is not None:\n flow_uv = np.clip(flow_uv, 0, clip_flow)\n u = flow_uv[:, :, 0]\n v = flow_uv[:, :, 1]\n rad = np.sqrt(np.square(u) + np.square(v))\n rad_max = np.max(rad)\n epsilon = 1e-5\n u = u / (rad_max + epsilon)\n v = v / (rad_max + epsilon)\n return flow_uv_to_colors(u, v, convert_to_bgr)\n" ]
[ [ "numpy.square", "numpy.clip", "numpy.arange", "numpy.arctan2", "numpy.max", "numpy.floor", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
magreiner/TractSeg
[ "5ac5278fc3a6d3262f9f06924dbdde01b399ccf6" ]
[ "tractseg/models/UNet_Pytorch_DeepSup.py" ]
[ "# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport glob\nfrom os.path import join\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import Adamax\nfrom torch.optim import Adam\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom torch.autograd import Variable\n\nfrom tractseg.libs.PytorchUtils import PytorchUtils\nfrom tractseg.libs.ExpUtils import ExpUtils\nfrom tractseg.models.BaseModel import BaseModel\nfrom tractseg.libs.PytorchUtils import conv2d\nfrom tractseg.libs.PytorchUtils import deconv2d\n\n\nclass UNet_Pytorch_DeepSup(torch.nn.Module):\n def __init__(self, n_input_channels=3, n_classes=7, n_filt=64, batchnorm=False, dropout=False):\n super(UNet_Pytorch_DeepSup, self).__init__()\n\n self.dropout = dropout\n\n self.in_channel = n_input_channels\n self.n_classes = n_classes\n\n self.contr_1_1 = conv2d(n_input_channels, n_filt)\n self.contr_1_2 = conv2d(n_filt, n_filt)\n self.pool_1 = nn.MaxPool2d((2, 2))\n\n self.contr_2_1 = conv2d(n_filt, n_filt * 2)\n self.contr_2_2 = conv2d(n_filt * 2, n_filt * 2)\n self.pool_2 = nn.MaxPool2d((2, 2))\n\n self.contr_3_1 = conv2d(n_filt * 2, n_filt * 4)\n self.contr_3_2 = conv2d(n_filt * 4, n_filt * 4)\n self.pool_3 = nn.MaxPool2d((2, 2))\n\n self.contr_4_1 = conv2d(n_filt * 4, n_filt * 8)\n self.contr_4_2 = conv2d(n_filt * 8, n_filt * 8)\n self.pool_4 = nn.MaxPool2d((2, 2))\n\n self.dropout = nn.Dropout(p=0.4)\n\n self.encode_1 = conv2d(n_filt * 8, n_filt * 16)\n self.encode_2 = conv2d(n_filt * 16, n_filt * 16)\n self.deconv_1 = deconv2d(n_filt * 16, n_filt * 16, kernel_size=2, stride=2)\n # self.deconv_1 = nn.Upsample(scale_factor=2) #does only upscale width and height #Similar results to deconv2d\n\n self.expand_1_1 = conv2d(n_filt * 8 + n_filt * 16, n_filt * 8)\n self.expand_1_2 = conv2d(n_filt * 8, n_filt * 8)\n self.deconv_2 = deconv2d(n_filt * 8, n_filt * 8, kernel_size=2, stride=2)\n # self.deconv_2 = nn.Upsample(scale_factor=2)\n\n self.expand_2_1 = conv2d(n_filt * 4 + n_filt * 8, n_filt * 4, stride=1)\n self.expand_2_2 = conv2d(n_filt * 4, n_filt * 4, stride=1)\n self.deconv_3 = deconv2d(n_filt * 4, n_filt * 4, kernel_size=2, stride=2)\n # self.deconv_3 = nn.Upsample(scale_factor=2)\n\n self.output_2 = nn.Conv2d(n_filt * 4 + n_filt * 8, n_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.output_2_up = nn.Upsample(scale_factor=2, mode='bilinear') # does only upscale width and height\n\n self.expand_3_1 = conv2d(n_filt * 2 + n_filt * 4, n_filt * 2, stride=1)\n self.expand_3_2 = conv2d(n_filt * 2, n_filt * 2, stride=1)\n self.deconv_4 = deconv2d(n_filt * 2, n_filt * 2, kernel_size=2, stride=2)\n # self.deconv_4 = nn.Upsample(scale_factor=2)\n\n self.output_3 = nn.Conv2d(n_filt * 2 + n_filt * 4, n_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.output_3_up = nn.Upsample(scale_factor=2, mode='bilinear') # does only upscale width and height\n\n 
self.expand_4_1 = conv2d(n_filt + n_filt * 2, n_filt, stride=1)\n self.expand_4_2 = conv2d(n_filt, n_filt, stride=1)\n\n self.conv_5 = nn.Conv2d(n_filt, n_classes, kernel_size=1, stride=1, padding=0, bias=True) # no activation function, because is in LossFunction (...WithLogits)\n\n def forward(self, inpt):\n contr_1_1 = self.contr_1_1(inpt)\n contr_1_2 = self.contr_1_2(contr_1_1)\n pool_1 = self.pool_1(contr_1_2)\n\n contr_2_1 = self.contr_2_1(pool_1)\n contr_2_2 = self.contr_2_2(contr_2_1)\n pool_2 = self.pool_2(contr_2_2)\n\n contr_3_1 = self.contr_3_1(pool_2)\n contr_3_2 = self.contr_3_2(contr_3_1)\n pool_3 = self.pool_3(contr_3_2)\n\n contr_4_1 = self.contr_4_1(pool_3)\n contr_4_2 = self.contr_4_2(contr_4_1)\n pool_4 = self.pool_4(contr_4_2)\n\n # pool_4 = self.dropout(pool_4)\n\n encode_1 = self.encode_1(pool_4)\n encode_2 = self.encode_2(encode_1)\n deconv_1 = self.deconv_1(encode_2)\n\n concat1 = torch.cat([deconv_1, contr_4_2], 1)\n expand_1_1 = self.expand_1_1(concat1)\n expand_1_2 = self.expand_1_2(expand_1_1)\n deconv_2 = self.deconv_2(expand_1_2)\n\n concat2 = torch.cat([deconv_2, contr_3_2], 1)\n expand_2_1 = self.expand_2_1(concat2)\n expand_2_2 = self.expand_2_2(expand_2_1)\n deconv_3 = self.deconv_3(expand_2_2)\n\n output_2 = self.output_2(concat2)\n output_2_up = self.output_2_up(output_2)\n\n concat3 = torch.cat([deconv_3, contr_2_2], 1)\n expand_3_1 = self.expand_3_1(concat3)\n expand_3_2 = self.expand_3_2(expand_3_1)\n deconv_4 = self.deconv_4(expand_3_2)\n\n output_3 = output_2_up + self.output_3(concat3)\n output_3_up = self.output_3_up(output_3)\n\n concat4 = torch.cat([deconv_4, contr_1_2], 1)\n expand_4_1 = self.expand_4_1(concat4)\n expand_4_2 = self.expand_4_2(expand_4_1)\n\n conv_5 = self.conv_5(expand_4_2)\n\n final = output_3_up + conv_5\n\n # return conv_51\n # return final\n return final, F.sigmoid(final)" ]
[ [ "torch.nn.Dropout", "torch.cat", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.functional.sigmoid", "torch.nn.Upsample" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ruinunca/data_tooling
[ "297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff", "297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff", "297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff", "297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff" ]
[ "kenlm_training/cc_net/flat_hash_set.py", "pii_processing/neuralcoref/train/dataset.py", "kenlm_training/tests/test_flat_hash_set.py", "pii_processing/neuralcoref/train/learn.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport sys\nimport time\nimport warnings\nfrom typing import Iterable, Iterator, Sequence, Sized, Tuple, Type\n\nimport numpy as np\n\nHASH_TYPE: Type[np.uint64] = np.uint64\n\nGETPY_WARNING = False\n\n\nclass AbstractDedupHashSet(Sized, Iterable[np.uint64]):\n \"\"\"A dict-like that returns `True` for keys that have been added more than once.\n\n The API is batched and expect np.array as input. This batching grants better\n perf when using the C++ implementation.\n \"\"\"\n\n dtype: Type[np.uint64] = HASH_TYPE\n\n def __repr__(self):\n implementation = type(self).__name__\n return f\"[{implementation}, len: {len(self)}\"\n\n def __len__(self) -> int:\n ...\n\n def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:\n ...\n\n def __getitem__(self, values) -> np.ndarray:\n ...\n\n def __setitem__(self, keys, values) -> None:\n ...\n\n def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:\n ...\n\n def keys(self) -> Iterable[np.uint64]:\n ...\n\n def __iter__(self) -> Iterator[np.uint64]:\n return iter(self.keys())\n\n def add(self, h, contains=None):\n \"\"\"Add the given keys. First time a key is added the value is set to 0,\n then it's set to one.\"\"\"\n if not isinstance(h, np.ndarray):\n h = np.array(h, dtype=HASH_TYPE)\n if contains is None:\n contains = self.__contains__(h)\n\n self.__setitem__(h, contains)\n return contains\n\n def merge(self, keys, values):\n contains = self.__contains__(keys)\n self.__setitem__(keys, contains | values)\n\n def dump(self, filename):\n return self.dump_np(filename)\n\n def load(self, filename):\n return self.load_np(filename)\n\n def dump_np(self, filename):\n kv_type = np.dtype([(\"k\", HASH_TYPE), (\"v\", np.uint8)])\n items = np.fromiter(self.items(), dtype=kv_type, count=len(self))\n with open(filename, \"wb\") as f:\n np.save(f, items)\n\n def load_np(self, filename):\n items = np.load(str(filename))\n keys = items[\"k\"].copy()\n values = items[\"v\"].copy()\n self.merge(keys, values)\n\n def dump_np2(self, filename):\n keys = np.fromiter(\n (k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)\n )\n with open(filename, \"wb\") as f:\n np.save(f, keys)\n\n values = np.fromiter(\n (v for (k, v) in self.items()), dtype=np.uint8, count=len(self)\n )\n with open(str(filename) + \".val\", \"wb\") as f:\n np.save(f, values)\n\n def load_np2(self, filename):\n keys = np.load(filename)\n values = np.load(str(filename) + \".val\")\n self.merge(keys, values)\n\n\nclass NaiveHashSet(dict, AbstractDedupHashSet):\n \"\"\"Pure python implementation of AbstractDedupHashSet.\n\n This implementation is quite fast, since Python dict are heavily optimized.\n \"\"\"\n\n def __init__(self, iterable=None):\n super().__init__()\n global GETPY_WARNING\n if GETPY_WARNING:\n warnings.warn(\n \"Module 'getpy' not found. 
Deduplication will take more RAM.\"\n \" Try `pip install cc_net[getpy]\"\n )\n GETPY_WARNING = False\n\n def __contains__(self, values):\n \"\"\"Returns `True` if the object has been added at list once.\"\"\"\n contains_point = super().__contains__\n return np.fromiter(\n map(contains_point, values), count=len(values), dtype=np.uint8\n )\n\n def __getitem__(self, values):\n \"\"\"Returns `True` if the object has been added at list twice.\"\"\"\n get_point = super().get\n return np.fromiter(\n map(lambda x: get_point(x, False), values),\n count=len(values),\n dtype=np.uint8,\n )\n\n def __setitem__(self, keys, values):\n assert len(keys) == len(values)\n for k, v in zip(keys, values):\n dict.__setitem__(self, k, v)\n\n\ntry:\n import getpy as gp # type: ignore\n\n class _FlatHashSet(gp.Dict, AbstractDedupHashSet):\n \"\"\"C++ backed implementation of AbstractDedupHashSet.\n\n This implementation is slightly slower than the Python one but uses\n 3x less RAM.\n See https://github.com/atom-moyer/getpy.\n \"\"\"\n\n def __init__(self):\n super().__init__(HASH_TYPE, np.uint8, default_value=False)\n\n def __contains__(self, h):\n \"\"\"Returns `True` if the object has been added at list once.\"\"\"\n if not isinstance(h, np.ndarray):\n h = np.array(h, dtype=HASH_TYPE)\n c = gp.Dict.__contains__(self, h)\n c.dtype = np.uint8\n return c\n\n def dump(self, filename):\n return self.dump_gp(filename)\n\n def load(self, filename):\n return self.load_gp(filename)\n\n def dump_gp(self, filename):\n return gp.Dict.dump(self, str(filename))\n\n def load_gp(self, filename):\n \"\"\"Override gp.Dict.load, to correctly merge values instead of overwriting.\"\"\"\n other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)\n other.load(str(filename))\n n = len(other)\n keys = np.fromiter(\n (k for (k, v) in other.items()), dtype=HASH_TYPE, count=n\n )\n values = np.fromiter(\n (v for (k, v) in other.items()), dtype=np.uint8, count=n\n )\n self.merge(keys, values)\n\n FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet\nexcept ImportError:\n GETPY_WARNING = True\n FlatHashSet = NaiveHashSet\n\n\ndef timeit(message, function, *args):\n start = time.time()\n function(*args)\n end = time.time()\n print(message, f\"took {end - start:.0f}s\")\n\n\ndef compare_load(*filenames):\n assert filenames, \"No file given\"\n\n def load_list():\n hashes = []\n for f in filenames:\n h = FlatHashSet()\n h.load(f)\n print(f\"Loaded {h} from {f}.\")\n hashes.append(h)\n return hashes\n\n def load_all(load, ext):\n hashes = FlatHashSet()\n for f in filenames:\n load(hashes, f + ext)\n\n def dump_all(hashes, dump, ext):\n for h, f in zip(hashes, filenames):\n dump(h, f + ext)\n\n hashes = load_list()\n dump_gp = getattr(FlatHashSet, \"dump_gp\")\n if dump_gp is not None:\n timeit(\"Dumping using gp.dump\", dump_all, hashes, dump_gp, \".gp.test\")\n timeit(\"Dumping using dump_np\", dump_all, hashes, FlatHashSet.dump_np, \".npy.test\")\n timeit(\n \"Dumping using dump_np2\", dump_all, hashes, FlatHashSet.dump_np2, \".npy2.test\"\n )\n\n load_gp = getattr(FlatHashSet, \"load_gp\")\n if load_gp is not None:\n timeit(\"Loading using gp.load\", load_all, load_gp, \".gp.test\")\n timeit(\"Loading using load_np\", load_all, FlatHashSet.load_np, \".npy.test\")\n timeit(\"Loading using load_np2\", load_all, FlatHashSet.load_np2, \".npy2.test\")\n\n # Loading 10 shards:\n # [dedup] Dumping using gp.dump took 52s\n # [dedup] Dumping using dump_np took 270s\n # [dedup] Dumping using dump_np2 took 483s\n #\n # [dedup] Loading using gp.load 
took 654s\n # [dedup] Loading using load_np took 82s\n # [dedup] Loading using load_np2 took 76s\n\n\nif __name__ == \"__main__\":\n compare_load(*sys.argv[1:])\n", "\"\"\"Conll training algorithm\"\"\"\n\nimport os\nimport io\nimport numpy as np\n\nimport torch\nimport torch.utils.data\n\nfrom torch.utils.data.sampler import Sampler\nfrom torch.utils.data import Dataset\n\nfrom neuralcoref.train.utils import (\n encode_distance,\n BATCH_SIZE_PATH,\n SIZE_FP,\n SIZE_FP_COMPRESSED,\n SIZE_FS,\n SIZE_FS_COMPRESSED,\n SIZE_GENRE,\n SIZE_PAIR_IN,\n SIZE_SINGLE_IN,\n)\nfrom neuralcoref.train.conllparser import FEATURES_NAMES\n\n\ndef load_embeddings_from_file(name):\n print(\"loading\", name + \"_embeddings.npy\")\n embed = torch.from_numpy(np.load(name + \"_embeddings.npy\")).float()\n print(embed.size())\n print(\"loading\", name + \"_vocabulary.txt\")\n with io.open(name + \"_vocabulary.txt\", \"r\", encoding=\"utf-8\") as f:\n voc = [line.strip() for line in f]\n return embed, voc\n\n\nclass _DictionaryDataLoader(object):\n def __init__(self, dict_object, order):\n self.dict_object = dict_object\n self.order = order\n\n def __len__(self):\n return len(self.dict_object[self.order[0]])\n\n def __getitem__(self, idx):\n if isinstance(idx, slice):\n data = []\n for i in range(\n idx.start, idx.stop, idx.step if idx.step is not None else 1\n ):\n temp_data = []\n for key in self.order:\n temp_data.append(self.dict_object[key][i])\n data.append(temp_data)\n\n else:\n data = []\n for key in self.order:\n data.append(self.dict_object[key][idx])\n\n return data\n\n\nclass NCDataset(Dataset):\n def __init__(self, data_path, params, no_targets=False):\n print(\"🏝 Loading Dataset at\", data_path)\n self.costs = params.costs\n self.no_targets = no_targets\n # Load files\n datas = {}\n if not os.listdir(data_path):\n raise ValueError(\"Empty data_path\")\n numpy_files_found = False\n print(\"Reading \", end=\"\")\n for file_name in os.listdir(data_path):\n if not \".npy\" in file_name:\n continue\n numpy_files_found = True\n print(file_name, end=\", \")\n datas[file_name.split(\".\")[0]] = np.load(\n data_path + file_name, mmap_mode=\"r\" if params.lazy else None\n )\n if not numpy_files_found:\n raise ValueError(f\"Can't find numpy files in {data_path}\")\n\n # Gather arrays in two lists of tuples for mention and pairs\n if not params.lazy:\n self.mentions = list(\n zip(\n *(\n arr\n for key, arr in sorted(datas.items())\n if key.startswith(\"mentions\")\n )\n )\n )\n self.pairs = list(\n zip(\n *(\n arr\n for key, arr in sorted(datas.items())\n if key.startswith(\"pairs\")\n )\n )\n )\n else:\n self.mentions = _DictionaryDataLoader(\n datas,\n order=(\n \"mentions_features\",\n \"mentions_labels\",\n \"mentions_pairs_length\",\n \"mentions_pairs_start_index\",\n \"mentions_spans\",\n \"mentions_words\",\n ),\n )\n self.pairs = _DictionaryDataLoader(\n datas, order=(\"pairs_ant_index\", \"pairs_features\", \"pairs_labels\")\n )\n\n self.mentions_pair_length = datas[FEATURES_NAMES[2]]\n assert [arr.shape[0] for arr in self.mentions[0]] == [\n 6,\n 1,\n 1,\n 1,\n 250,\n 8,\n ] # Cf order of FEATURES_NAMES in conllparser.py\n assert [arr.shape[0] for arr in self.pairs[0]] == [\n 1,\n 9,\n 1,\n ] # Cf order of FEATURES_NAMES in conllparser.py\n\n def __len__(self):\n return len(self.mentions)\n\n def __getitem__(self, mention_idx, debug=False):\n \"\"\"\n Return:\n Definitions:\n P is the number of antecedent per mention (number of pairs for the mention)\n S = 250 is the size of the span vector (averaged 
word embeddings)\n W = 8 is the number of words in a mention (tuned embeddings)\n Fp = 70 is the number of features for a pair of mention\n Fs = 24 is the number of features of a single mention\n\n if there are some pairs:\n inputs = (spans, words, features, ant_spans, ant_words, ana_spans, ana_words, pairs_features)\n targets = (labels, costs, true_ants, false_ants)\n else:\n inputs = (spans, words, features)\n targets = (labels, costs, true_ants)\n\n inputs: Tuple of\n spans => (S,)\n words => (W,)\n features => (Fs,)\n + if there are potential antecedents (P > 0):\n ant_spans => (P, S) or nothing if no pairs\n ant_words => (P, W) or nothing if no pairs\n ana_spans => (P, S) or nothing if no pairs\n ana_words => (P, W) or nothing if no pairs\n pair_features => (P, Fp) or nothing if no pairs\n\n targets: Tuple of\n labels => (P+1,)\n costs => (P+1,)\n true_ant => (P+1,)\n + if there are potential antecedents (P > 0):\n false_ant => (P+1,)\n\n \"\"\"\n features_raw, label, pairs_length, pairs_start_index, spans, words = self.mentions[\n mention_idx\n ]\n pairs_start_index = pairs_start_index.item()\n pairs_length = pairs_length.item()\n\n # Build features array (float) from raw features (int)\n assert features_raw.shape[0] == SIZE_FS_COMPRESSED\n features = np.zeros((SIZE_FS,))\n features[features_raw[0]] = 1\n features[4:15] = encode_distance(features_raw[1])\n features[15] = features_raw[2].astype(float) / features_raw[3].astype(float)\n features[16] = features_raw[4]\n features[features_raw[5] + 17] = 1\n\n if pairs_length == 0:\n spans = torch.from_numpy(spans).float()\n words = torch.from_numpy(words)\n features = torch.from_numpy(features).float()\n inputs = (spans, words, features)\n if self.no_targets:\n return inputs\n true_ant = torch.zeros(1).long() # zeros = indices of true ant\n costs = torch.from_numpy((1 - label) * self.costs[\"FN\"]).float()\n label = torch.from_numpy(label).float()\n targets = (label, costs, true_ant)\n if debug:\n print(\"inputs shapes: \", [a.size() for a in inputs])\n print(\"targets shapes: \", [a.size() for a in targets])\n return inputs, targets\n\n start = pairs_start_index\n end = pairs_start_index + pairs_length\n pairs = self.pairs[start:end]\n assert len(pairs) == pairs_length\n assert (\n len(pairs[0]) == 3\n ) # pair[i] = (pairs_ant_index, pairs_features, pairs_labels)\n pairs_ant_index, pairs_features_raw, pairs_labels = list(zip(*pairs))\n\n pairs_features_raw = np.stack(pairs_features_raw)\n pairs_labels = np.squeeze(np.stack(pairs_labels), axis=1)\n\n # Build pair features array (float) from raw features (int)\n assert pairs_features_raw[0, :].shape[0] == SIZE_FP_COMPRESSED\n pairs_features = np.zeros((len(pairs_ant_index), SIZE_FP))\n pairs_features[:, 0:6] = pairs_features_raw[:, 0:6]\n pairs_features[:, 6:17] = encode_distance(pairs_features_raw[:, 6])\n pairs_features[:, 17:28] = encode_distance(pairs_features_raw[:, 7])\n pairs_features[:, 28] = pairs_features_raw[:, 8]\n # prepare antecent features\n ant_features_raw = np.concatenate(\n [self.mentions[idx.item()][0][np.newaxis, :] for idx in pairs_ant_index]\n )\n ant_features = np.zeros((pairs_length, SIZE_FS - SIZE_GENRE))\n ant_features[:, ant_features_raw[:, 0]] = 1\n ant_features[:, 4:15] = encode_distance(ant_features_raw[:, 1])\n ant_features[:, 15] = ant_features_raw[:, 2].astype(float) / ant_features_raw[\n :, 3\n ].astype(float)\n ant_features[:, 16] = ant_features_raw[:, 4]\n pairs_features[:, 29:46] = ant_features\n # Here we keep the genre\n ana_features = 
np.tile(features, (pairs_length, 1))\n pairs_features[:, 46:] = ana_features\n\n ant_spans = np.concatenate(\n [self.mentions[idx.item()][4][np.newaxis, :] for idx in pairs_ant_index]\n )\n ant_words = np.concatenate(\n [self.mentions[idx.item()][5][np.newaxis, :] for idx in pairs_ant_index]\n )\n ana_spans = np.tile(spans, (pairs_length, 1))\n ana_words = np.tile(words, (pairs_length, 1))\n ant_spans = torch.from_numpy(ant_spans).float()\n ant_words = torch.from_numpy(ant_words)\n ana_spans = torch.from_numpy(ana_spans).float()\n ana_words = torch.from_numpy(ana_words)\n pairs_features = torch.from_numpy(pairs_features).float()\n\n labels_stack = np.concatenate((pairs_labels, label), axis=0)\n assert labels_stack.shape == (pairs_length + 1,)\n labels = torch.from_numpy(labels_stack).float()\n\n spans = torch.from_numpy(spans).float()\n words = torch.from_numpy(words)\n features = torch.from_numpy(features).float()\n\n inputs = (\n spans,\n words,\n features,\n ant_spans,\n ant_words,\n ana_spans,\n ana_words,\n pairs_features,\n )\n\n if self.no_targets:\n return inputs\n\n if label == 0:\n costs = np.concatenate(\n (self.costs[\"WL\"] * (1 - pairs_labels), [self.costs[\"FN\"]])\n ) # Inverse labels: 1=>0, 0=>1\n else:\n costs = np.concatenate((self.costs[\"FL\"] * np.ones_like(pairs_labels), [0]))\n assert costs.shape == (pairs_length + 1,)\n costs = torch.from_numpy(costs).float()\n\n true_ants_unpad = np.flatnonzero(labels_stack)\n if len(true_ants_unpad) == 0:\n raise ValueError(\"Error: no True antecedent for mention\")\n true_ants = np.pad(\n true_ants_unpad, (0, len(pairs_labels) + 1 - len(true_ants_unpad)), \"edge\"\n )\n assert true_ants.shape == (pairs_length + 1,)\n true_ants = torch.from_numpy(true_ants).long()\n\n false_ants_unpad = np.flatnonzero(1 - labels_stack)\n assert len(false_ants_unpad) != 0\n false_ants = np.pad(\n false_ants_unpad, (0, len(pairs_labels) + 1 - len(false_ants_unpad)), \"edge\"\n )\n assert false_ants.shape == (pairs_length + 1,)\n false_ants = torch.from_numpy(false_ants).long()\n\n targets = (labels, costs, true_ants, false_ants)\n if debug:\n print(\"Mention\", mention_idx)\n print(\"inputs shapes: \", [a.size() for a in inputs])\n print(\"targets shapes: \", [a.size() for a in targets])\n return inputs, targets\n\n\nclass NCBatchSampler(Sampler):\n \"\"\"A Batch sampler to group mentions in batches with close number of pairs to be padded together\n \"\"\"\n\n def __init__(\n self, mentions_pairs_length, batchsize=600, shuffle=False, debug=False\n ):\n \"\"\" Create and feed batches of mentions having close number of antecedents\n The batch are padded and collated by the padder_collate function\n\n # Arguments:\n mentions_pairs_length array of shape (N, 1): list/array of the number of pairs for each mention\n batchsize: Number of pairs of each batch will be capped at this\n \"\"\"\n self.shuffle = shuffle\n num_mentions = len(mentions_pairs_length)\n mentions_lengths = np.concatenate(\n [\n mentions_pairs_length,\n np.arange(0, num_mentions, 1, dtype=int)[:, np.newaxis],\n ],\n axis=1,\n )\n sorted_lengths = mentions_lengths[mentions_lengths[:, 0].argsort()]\n print(\"Preparing batches 📚\")\n\n self.batches = []\n self.batches_pairs = []\n self.batches_size = []\n batch = []\n n_pairs = []\n num = 0\n for length, mention_idx in sorted_lengths:\n if num > batchsize or (\n num == len(batch) and length != 0\n ): # We keep the no_pairs batches pure\n if debug:\n print(\n \"Added batch number\",\n len(self.batches),\n \"with\",\n len(batch),\n 
\"mentions and\",\n num,\n \"pairs\",\n )\n self.batches.append(batch)\n self.batches_size.append(\n num\n ) # We don't count the max 7 additional mentions that are repeated\n self.batches_pairs.append(n_pairs)\n\n # Start a new batch\n batch = [mention_idx]\n n_pairs = [length]\n num = (\n length + 1\n ) # +1 since we also have the single mention to add to the number of pairs\n else:\n num += length + 1\n batch.append(mention_idx)\n n_pairs.append(length)\n\n # Complete and store the last batch\n if debug:\n print(\n \"Added batch number\",\n len(self.batches),\n \"with\",\n len(batch),\n \"mentions and\",\n num,\n \"pairs\",\n )\n self.batches.append(batch)\n self.batches_size.append(num)\n self.batches_pairs.append(n_pairs)\n self.n_pairs = sum(sum(p) for p in self.batches_pairs)\n self.n_mentions = sum(len(b) for b in self.batches)\n self.n_batches = len(self.batches)\n self.pairs_per_batch = float(self.n_pairs) / self.n_batches\n self.mentions_per_batch = float(self.n_mentions) / self.n_batches\n print(\n \"Dataset has:\",\n self.n_batches,\n \"batches,\",\n self.n_mentions,\n \"mentions,\",\n self.n_pairs,\n \"pairs\",\n )\n\n def get_batch_info(self):\n return self.batches, self.batches_pairs\n\n def save_batch_sizes(self, save_file=BATCH_SIZE_PATH, debug=False):\n print(\"🌋 Saving sizes of batches\")\n with io.open(save_file, \"w\", encoding=\"utf-8\") as f:\n if debug:\n print(\"Batch sizes saved in\", save_file)\n for batch, size in zip(self.batches, self.batches_size):\n out_str = str(len(batch)) + \"\\t\" + str(size) + \"\\n\"\n f.write(out_str)\n\n def __iter__(self):\n if self.shuffle:\n np.random.shuffle(self.batches)\n for batch in self.batches:\n yield batch\n\n def __len__(self):\n return self.n_batches\n\n\ndef padder_collate(batch, debug=False):\n \"\"\" Puts each data field into a tensor with outer dimension batch size\n Pad variable length input tensors and add a weight tensor to the target\n \"\"\"\n transposed_inputs = tuple(zip(*batch))\n if len(transposed_inputs) == 2:\n inputs, targets = transposed_inputs\n transposed_inputs = tuple(zip(*inputs))\n transposed_targets = tuple(zip(*targets))\n else:\n transposed_targets = None\n\n max_pairs = (\n max(len(t) for t in transposed_inputs[3]) if len(transposed_inputs) == 8 else 0\n ) # Get max nb of pairs (batch are sorted by nb of pairs)\n if max_pairs > 0:\n out_inputs = []\n out_targets = []\n for t_inp in transposed_inputs:\n if len(t_inp[0].shape) == 2:\n out_inputs.append(\n torch.stack(\n [\n torch.cat([t, t.new(max_pairs - len(t), len(t[0])).zero_()])\n if len(t) != max_pairs\n else t\n for t in t_inp\n ],\n 0,\n )\n )\n else:\n out_inputs.append(torch.stack(t_inp, 0))\n if transposed_targets is not None:\n for i, t_targ in enumerate(\n transposed_targets\n ): # 0:labels, 1:costs, 2:true_ants, 3:false_ants\n if i == 2 or i == 3:\n if debug:\n print(\"collate before\", t_targ)\n # shift the antecedent index associated to single anaphores (last)\n t_targ = tuple(\n t.masked_fill_(torch.eq(t, len(t) - 1), max_pairs)\n for t in t_targ\n )\n if debug:\n print(\"collate after\", t_targ)\n out_targets.append(\n torch.stack(\n [\n torch.cat(\n [\n t[:-1] if len(t) > 2 else t.new(1).fill_(t[0]),\n t.new(max_pairs + 1 - len(t)).fill_(t[0]),\n t.new(1).fill_(t[-1]),\n ]\n )\n if len(t) != max_pairs + 1\n else t\n for t in t_targ\n ],\n 0,\n )\n )\n\n t_costs = transposed_targets[\n 1\n ] # We build the weights from the costs to have a float Tensor\n out_targets.append(\n torch.stack(\n [\n torch.cat(\n [\n t.new(len(t) - 
1).fill_(1),\n t.new(max_pairs + 1 - len(t)).zero_(),\n t.new(1).fill_(1),\n ]\n )\n if len(t) != max_pairs + 1\n else t.new(max_pairs + 1).fill_(1)\n for t in t_costs\n ],\n 0,\n )\n )\n else:\n # Remark this mask is the inverse of the weights in the above target (used for evaluation masking)\n t_base = transposed_inputs[3]\n out_targets = torch.stack(\n [\n torch.cat(\n [\n t.new(len(t) - 1).zero_().bool(),\n t.new(max_pairs + 1 - len(t)).fill_(1).bool(),\n t.new(1).zero_().bool(),\n ]\n )\n if len(t) != max_pairs + 1\n else t.new(max_pairs + 1).zero_().bool()\n for t in t_base\n ],\n 0,\n )\n else:\n out_inputs = [torch.stack(t_inp, 0) for t_inp in transposed_inputs]\n if transposed_targets is not None:\n out_targets = [torch.stack(t_targ, 0) for t_targ in transposed_targets]\n out_targets.append(out_targets[1].new(len(out_targets[1]), 1).fill_(1))\n else:\n out_targets = out_inputs[0].new(len(out_inputs[0]), 1).zero_().bool()\n return (out_inputs, out_targets)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport numpy as np\nimport pytest\n\nfrom cc_net.flat_hash_set import HASH_TYPE, FlatHashSet, NaiveHashSet\n\n\ndef as_dict(flat_hash_set) -> dict:\n return {k: v for (k, v) in flat_hash_set.items()}\n\n\nneed_getpy = pytest.mark.skipif(\n FlatHashSet == NaiveHashSet, reason=\"getpy isn't installed\"\n)\n\n\ndef same_behavior(test_case):\n def run_case():\n naive = as_dict(test_case(FlatHashSet))\n flat = as_dict(test_case(NaiveHashSet))\n assert naive == flat\n\n return need_getpy(run_case)\n\n\n@same_behavior\ndef test_setitem(hash_set_cls):\n h = hash_set_cls()\n h[np.arange(10, dtype=h.dtype)] = np.zeros(10, dtype=np.uint8)\n h[np.arange(5, dtype=h.dtype)] = np.ones(5, dtype=np.uint8)\n return h\n\n\n@same_behavior\ndef test_add_dup(hash_set_cls):\n h = hash_set_cls()\n h.add(np.arange(10, dtype=h.dtype))\n h.add(np.arange(5, dtype=h.dtype))\n\n expected = {i: i < 5 for i in range(10)}\n assert expected == as_dict(h), f\"add_dup with {hash_set_cls.__name__}\"\n return h\n\n\n@need_getpy\ndef test_gp_dict():\n import getpy as gp # type: ignore\n\n h = gp.Dict(HASH_TYPE, np.uint8)\n h[np.arange(10, dtype=HASH_TYPE)] = np.zeros(10, dtype=np.uint8)\n h[np.arange(5, dtype=HASH_TYPE)] = np.ones(5, dtype=np.uint8)\n expected = {i: i < 5 for i in range(10)}\n assert expected == as_dict(h)\n\n\ndef check_reload(h, dump, load, tmp_path):\n dump_path = tmp_path / dump.__name__\n dump(h, dump_path)\n h2 = type(h)()\n load(h2, dump_path)\n assert as_dict(h) == as_dict(h2)\n\n\[email protected](\"hash_set_cls\", [FlatHashSet, NaiveHashSet])\ndef test_loading(tmp_path, hash_set_cls):\n h = hash_set_cls()\n x = np.random.randint(0, 2**32, (100,), dtype=h.dtype)\n h.add(x)\n\n check_reload(h, hash_set_cls.dump, hash_set_cls.load, tmp_path)\n check_reload(h, hash_set_cls.dump_np, hash_set_cls.load_np, tmp_path)\n if hasattr(hash_set_cls, \"dump_gp\"):\n check_reload(h, hash_set_cls.dump_gp, hash_set_cls.load_gp, tmp_path)\n", "\"\"\"Conll training algorithm\"\"\"\n\nimport os\nimport time\nimport argparse\nimport socket\nfrom datetime import datetime\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.optim import RMSprop\nfrom torch.utils.data import DataLoader\nfrom tensorboardX import SummaryWriter\n\nfrom neuralcoref.train.model import Model\nfrom neuralcoref.train.dataset import (\n NCDataset,\n 
NCBatchSampler,\n load_embeddings_from_file,\n padder_collate,\n SIZE_PAIR_IN,\n SIZE_SINGLE_IN,\n)\nfrom neuralcoref.train.utils import SIZE_EMBEDDING\nfrom neuralcoref.train.evaluator import ConllEvaluator\n\nPACKAGE_DIRECTORY = os.path.dirname(os.path.abspath(__file__))\nSTAGES = [\"allpairs\", \"toppairs\", \"ranking\"]\n\n\ndef clipped_sigmoid(inputs):\n epsilon = 1.0e-7\n return torch.sigmoid(inputs).clamp(epsilon, 1.0 - epsilon)\n\n\ndef get_all_pairs_loss(n):\n def all_pair_loss(scores, targets):\n \"\"\" All pairs and single mentions probabilistic loss\n \"\"\"\n labels = targets[0]\n weights = targets[4].data if len(targets) == 5 else None\n loss_op = nn.BCEWithLogitsLoss(weight=weights, reduction=\"sum\")\n loss = loss_op(scores, labels)\n return loss / n\n\n return all_pair_loss\n\n\ndef get_top_pair_loss(n):\n def top_pair_loss(scores, targets, debug=False):\n \"\"\" Top pairs (best true and best mistaken) and single mention probabilistic loss\n \"\"\"\n true_ants = targets[2]\n false_ants = targets[3] if len(targets) == 5 else None\n s_scores = clipped_sigmoid(scores)\n true_pairs = torch.gather(s_scores, 1, true_ants)\n top_true, top_true_arg = torch.log(true_pairs).max(\n dim=1\n ) # max(log(p)), p=sigmoid(s)\n if debug:\n print(\"true_pairs\", true_pairs.data)\n print(\"top_true\", top_true.data)\n print(\"top_true_arg\", top_true_arg.data)\n out_score = torch.sum(top_true).neg()\n if (\n false_ants is not None\n ): # We have no false antecedents when there are no pairs\n false_pairs = torch.gather(s_scores, 1, false_ants)\n top_false, _ = torch.log(1 - false_pairs).min(\n dim=1\n ) # min(log(1-p)), p=sigmoid(s)\n out_score = out_score + torch.sum(top_false).neg()\n return out_score / n\n\n return top_pair_loss\n\n\ndef get_ranking_loss(n):\n def ranking_loss(scores, targets):\n \"\"\" Slack-rescaled max margin loss\n \"\"\"\n costs = targets[1]\n true_ants = targets[2]\n weights = targets[4] if len(targets) == 5 else None\n true_ant_score = torch.gather(scores, 1, true_ants)\n top_true, _ = true_ant_score.max(dim=1)\n tmp_loss = scores.add(1).add(\n top_true.unsqueeze(1).neg()\n ) # 1 + scores - top_true\n if weights is not None:\n tmp_loss = tmp_loss.mul(weights)\n tmp_loss = tmp_loss.mul(costs)\n loss, _ = tmp_loss.max(dim=1)\n out_score = torch.sum(loss)\n return out_score / n\n\n return ranking_loss\n\n\ndef decrease_lr(optim_func, factor=0.1, min_lrs=0, eps=0, verbose=True):\n for i, param_group in enumerate(optim_func.param_groups):\n old_lr = float(param_group[\"lr\"])\n new_lr = max(old_lr * factor, min_lrs)\n if old_lr - new_lr > eps:\n param_group[\"lr\"] = new_lr\n if verbose:\n print(f\"Reducing learning rate\" \" of group {i} to {new_lr:.4e}.\")\n return new_lr\n\n\ndef load_model(model, path):\n print(\"⛄️ Reloading model from\", path)\n model.load_state_dict(\n torch.load(path)\n if args.cuda\n else torch.load(path, map_location=lambda storage, loc: storage)\n )\n\n\ndef run_model(args):\n print(\n \"Training for\",\n args.all_pairs_epoch,\n args.top_pairs_epoch,\n args.ranking_epoch,\n \"epochs\",\n )\n # Tensorboard server\n writer = SummaryWriter()\n\n # Load datasets and embeddings\n embed_path = args.weights if args.weights is not None else args.train\n tensor_embeddings, voc = load_embeddings_from_file(embed_path + \"tuned_word\")\n dataset = NCDataset(args.train, args)\n eval_dataset = NCDataset(args.eval, args)\n print(\"Vocabulary:\", len(voc))\n\n # Construct model\n print(\"🏝 Build model\")\n model = Model(\n len(voc),\n SIZE_EMBEDDING,\n 
args.h1,\n args.h2,\n args.h3,\n SIZE_PAIR_IN,\n SIZE_SINGLE_IN,\n )\n model.load_embeddings(tensor_embeddings)\n if args.cuda:\n model.cuda()\n if args.weights is not None:\n print(\"🏝 Loading pre-trained weights\")\n model.load_weights(args.weights)\n if args.checkpoint_file is not None:\n print(\"⛄️ Loading model from\", args.checkpoint_file)\n model.load_state_dict(\n torch.load(args.checkpoint_file)\n if args.cuda\n else torch.load(\n args.checkpoint_file, map_location=lambda storage, loc: storage\n )\n )\n\n print(\"🏝 Loading conll evaluator\")\n eval_evaluator = ConllEvaluator(\n model, eval_dataset, args.eval, args.evalkey, embed_path, args\n )\n train_evaluator = ConllEvaluator(\n model, dataset, args.train, args.trainkey, embed_path, args\n )\n print(\"🏝 Testing evaluator and getting first eval score\")\n eval_evaluator.test_model()\n start_time = time.time()\n eval_evaluator.build_test_file()\n score, f1_conll, ident = eval_evaluator.get_score()\n elapsed = time.time() - start_time\n print(f\"|| s/evaluation {elapsed:5.2f}\")\n writer.add_scalar(\"eval/\" + \"F1_conll\", f1_conll, 0)\n\n # Preparing dataloader\n print(\"🏝 Preparing dataloader\")\n print(\n \"Dataloader parameters: batchsize\",\n args.batchsize,\n \"numworkers\",\n args.numworkers,\n )\n batch_sampler = NCBatchSampler(\n dataset.mentions_pair_length, shuffle=True, batchsize=args.batchsize\n )\n dataloader = DataLoader(\n dataset,\n collate_fn=padder_collate,\n batch_sampler=batch_sampler,\n num_workers=args.numworkers,\n pin_memory=args.cuda,\n )\n mentions_idx, n_pairs = batch_sampler.get_batch_info()\n\n print(\"🏝 Start training\")\n g_step = 0\n start_from = (\n args.startstep\n if args.startstep is not None and args.startstage is not None\n else 0\n )\n\n def run_epochs(\n start_epoch, end_epoch, loss_func, optim_func, save_name, lr, g_step, debug=None\n ):\n best_model_path = args.save_path + \"best_model\" + save_name\n start_time_all = time.time()\n best_f1_conll = 0\n lower_eval = 0\n for epoch in range(start_epoch, end_epoch):\n \"\"\" Run an epoch \"\"\"\n print(f\"🚘 {save_name} Epoch {epoch:d}\")\n model.train()\n start_time_log = time.time()\n start_time_epoch = time.time()\n epoch_loss = 0\n for batch_i, (m_idx, n_pairs_l, batch) in enumerate(\n zip(mentions_idx, n_pairs, dataloader)\n ):\n if debug is not None and (debug == -1 or debug in m_idx):\n l = list(dataset.flat_m_loc[m][2:] for m in m_idx)\n print(\n \"🏔 Batch\",\n batch_i,\n \"m_idx:\",\n \"|\".join(str(i) for i in m_idx),\n \"mentions:\",\n \"|\".join(dataset.docs[d][\"mentions\"][i] for u, i, d in l),\n )\n print(\"Batch n_pairs:\", \"|\".join(str(p) for p in n_pairs_l))\n inputs, targets = batch\n inputs = tuple(Variable(inp, requires_grad=False) for inp in inputs)\n targets = tuple(Variable(tar, requires_grad=False) for tar in targets)\n if args.cuda:\n inputs = tuple(i.cuda() for i in inputs)\n targets = tuple(t.cuda() for t in targets)\n scores = model(inputs)\n if debug is not None and (debug == -1 or debug in m_idx):\n print(\n \"Scores:\\n\"\n + \"\\n\".join(\n \"|\".join(str(s) for s in s_l)\n for s_l in scores.data.cpu().numpy()\n )\n )\n print(\n \"Labels:\\n\"\n + \"\\n\".join(\n \"|\".join(str(s) for s in s_l)\n for s_l in targets[0].data.cpu().numpy()\n )\n )\n loss = loss_func(scores, targets)\n if debug is not None and (debug == -1 or debug in m_idx):\n print(\"Loss\", loss.item())\n # Zero gradients, perform a backward pass, and update the weights.\n optim_func.zero_grad()\n loss.backward()\n epoch_loss += loss.item()\n 
optim_func.step()\n writer.add_scalar(\"train/\" + save_name + \"_loss\", loss.item(), g_step)\n writer.add_scalar(\"meta/\" + \"lr\", lr, g_step)\n writer.add_scalar(\"meta/\" + \"stage\", STAGES.index(save_name), g_step)\n g_step += 1\n if batch_i % args.log_interval == 0 and batch_i > 0:\n elapsed = time.time() - start_time_log\n lr = optim_func.param_groups[0][\"lr\"]\n ea = elapsed * 1000 / args.log_interval\n li = loss.item()\n print(\n f\"| epoch {epoch:3d} | {batch_i:5d}/{len(dataloader):5d} batches | \"\n f\"lr {lr:.2e} | ms/batch {ea:5.2f} | \"\n f\"loss {li:.2e}\"\n )\n start_time_log = time.time()\n elapsed_all = time.time() - start_time_all\n elapsed_epoch = time.time() - start_time_epoch\n ep = elapsed_epoch / 60\n ea = (\n elapsed_all\n / 3600\n * float(end_epoch - epoch)\n / float(epoch - start_epoch + 1)\n )\n print(\n f\"|| min/epoch {ep:5.2f} | est. remaining time (h) {ea:5.2f} | loss {epoch_loss:.2e}\"\n )\n writer.add_scalar(\"epoch/\" + \"loss\", epoch_loss, g_step)\n if epoch % args.conll_train_interval == 0:\n start_time = time.time()\n train_evaluator.build_test_file()\n score, f1_conll, ident = train_evaluator.get_score()\n elapsed = time.time() - start_time\n ep = elapsed_epoch / 60\n print(f\"|| min/train evaluation {ep:5.2f} | F1_conll {f1_conll:5.2f}\")\n writer.add_scalar(\"epoch/\" + \"F1_conll\", f1_conll, g_step)\n if epoch % args.conll_eval_interval == 0:\n start_time = time.time()\n eval_evaluator.build_test_file()\n score, f1_conll, ident = eval_evaluator.get_score()\n elapsed = time.time() - start_time\n ep = elapsed_epoch / 60\n print(f\"|| min/evaluation {ep:5.2f}\")\n writer.add_scalar(\"eval/\" + \"F1_conll\", f1_conll, g_step)\n g_step += 1\n save_path = args.save_path + save_name + \"_\" + str(epoch)\n torch.save(model.state_dict(), save_path)\n if f1_conll > best_f1_conll:\n best_f1_conll = f1_conll\n torch.save(model.state_dict(), best_model_path)\n lower_eval = 0\n elif args.on_eval_decrease != \"nothing\":\n print(\"Evaluation metric decreases\")\n lower_eval += 1\n if lower_eval >= args.patience:\n if (\n args.on_eval_decrease == \"divide_lr\"\n or args.on_eval_decrease == \"divide_then_next\"\n ):\n print(\"reload best model and decrease lr\")\n load_model(model, best_model_path)\n lr = decrease_lr(optim_func)\n if args.on_eval_decrease == \"next_stage\" or lr <= args.min_lr:\n print(\"Switch to next stage\")\n break\n # Save last step\n start_time = time.time()\n eval_evaluator.build_test_file()\n score, f1_conll, ident = eval_evaluator.get_score()\n elapsed = time.time() - start_time\n ep = elapsed / 60\n print(f\"|| min/evaluation {ep:5.2f}\")\n writer.add_scalar(\"eval/\" + \"F1_conll\", f1_conll, g_step)\n g_step += 1\n save_path = args.save_path + save_name + \"_\" + str(epoch)\n torch.save(model.state_dict(), save_path)\n load_model(model, best_model_path)\n return g_step\n\n if args.startstage is None or args.startstage == \"allpairs\":\n optimizer = RMSprop(\n model.parameters(), lr=args.all_pairs_lr, weight_decay=args.all_pairs_l2\n )\n loss_func = get_all_pairs_loss(batch_sampler.pairs_per_batch)\n g_step = run_epochs(\n start_from,\n args.all_pairs_epoch,\n loss_func,\n optimizer,\n \"allpairs\",\n args.all_pairs_lr,\n g_step,\n )\n start_from = 0\n\n if args.startstage is None or args.startstage in [\"allpairs\", \"toppairs\"]:\n optimizer = RMSprop(\n model.parameters(), lr=args.top_pairs_lr, weight_decay=args.top_pairs_l2\n )\n loss_func = get_top_pair_loss(10 * batch_sampler.mentions_per_batch)\n g_step = run_epochs(\n 
start_from,\n args.top_pairs_epoch,\n loss_func,\n optimizer,\n \"toppairs\",\n args.top_pairs_lr,\n g_step,\n )\n start_from = 0\n\n if args.startstage is None or args.startstage in [\n \"ranking\",\n \"allpairs\",\n \"toppairs\",\n ]:\n optimizer = RMSprop(\n model.parameters(), lr=args.ranking_lr, weight_decay=args.ranking_l2\n )\n loss_func = get_ranking_loss(batch_sampler.mentions_per_batch)\n g_step = run_epochs(\n start_from,\n args.ranking_epoch,\n loss_func,\n optimizer,\n \"ranking\",\n args.ranking_lr,\n g_step,\n )\n\n\nif __name__ == \"__main__\":\n DIR_PATH = os.path.dirname(os.path.realpath(__file__))\n parser = argparse.ArgumentParser(\n description=\"Training the neural coreference model\"\n )\n parser.add_argument(\n \"--train\",\n type=str,\n default=DIR_PATH + \"/data/\",\n help=\"Path to the train dataset\",\n )\n parser.add_argument(\n \"--eval\", type=str, default=DIR_PATH + \"/data/\", help=\"Path to the eval dataset\"\n )\n parser.add_argument(\n \"--evalkey\", type=str, help=\"Path to an optional key file for scoring\"\n )\n parser.add_argument(\n \"--weights\",\n type=str,\n help=\"Path to pre-trained weights (if you only want to test the scoring for e.g.)\",\n )\n parser.add_argument(\n \"--batchsize\",\n type=int,\n default=20000,\n help=\"Size of a batch in total number of pairs\",\n )\n parser.add_argument(\n \"--numworkers\",\n type=int,\n default=8,\n help=\"Number of workers for loading batches\",\n )\n parser.add_argument(\n \"--startstage\",\n type=str,\n help='Start from a specific stage (\"allpairs\", \"toppairs\", \"ranking\")',\n )\n parser.add_argument(\"--startstep\", type=int, help=\"Start from a specific step\")\n parser.add_argument(\n \"--checkpoint_file\",\n type=str,\n help=\"Start from a previously saved checkpoint file\",\n )\n parser.add_argument(\n \"--log_interval\", type=int, default=10, help=\"test every X mini-batches\"\n )\n parser.add_argument(\n \"--conll_eval_interval\",\n type=int,\n default=10,\n help=\"evaluate eval F1 conll every X epochs\",\n )\n parser.add_argument(\n \"--conll_train_interval\",\n type=int,\n default=20,\n help=\"evaluate train F1 conll every X epochs\",\n )\n parser.add_argument(\"--seed\", type=int, default=1111, help=\"random seed\")\n parser.add_argument(\"--costfn\", type=float, default=0.8, help=\"cost of false new\")\n parser.add_argument(\"--costfl\", type=float, default=0.4, help=\"cost of false link\")\n parser.add_argument(\"--costwl\", type=float, default=1.0, help=\"cost of wrong link\")\n parser.add_argument(\n \"--h1\", type=int, default=1000, help=\"number of hidden unit on layer 1\"\n )\n parser.add_argument(\n \"--h2\", type=int, default=500, help=\"number of hidden unit on layer 2\"\n )\n parser.add_argument(\n \"--h3\", type=int, default=500, help=\"number of hidden unit on layer 3\"\n )\n parser.add_argument(\n \"--all_pairs_epoch\",\n type=int,\n default=200,\n help=\"number of epochs for all-pairs pre-training\",\n )\n parser.add_argument(\n \"--top_pairs_epoch\",\n type=int,\n default=200,\n help=\"number of epochs for top-pairs pre-training\",\n )\n parser.add_argument(\n \"--ranking_epoch\",\n type=int,\n default=200,\n help=\"number of epochs for ranking training\",\n )\n parser.add_argument(\n \"--all_pairs_lr\",\n type=float,\n default=2e-4,\n help=\"all pairs pre-training learning rate\",\n )\n parser.add_argument(\n \"--top_pairs_lr\",\n type=float,\n default=2e-4,\n help=\"top pairs pre-training learning rate\",\n )\n parser.add_argument(\n \"--ranking_lr\", type=float, 
default=2e-6, help=\"ranking training learning rate\"\n )\n parser.add_argument(\n \"--all_pairs_l2\",\n type=float,\n default=1e-6,\n help=\"all pairs pre-training l2 regularization\",\n )\n parser.add_argument(\n \"--top_pairs_l2\",\n type=float,\n default=1e-5,\n help=\"top pairs pre-training l2 regularization\",\n )\n parser.add_argument(\n \"--ranking_l2\",\n type=float,\n default=1e-5,\n help=\"ranking training l2 regularization\",\n )\n parser.add_argument(\n \"--patience\",\n type=int,\n default=3,\n help=\"patience (epochs) before considering evaluation has decreased\",\n )\n parser.add_argument(\"--min_lr\", type=float, default=2e-8, help=\"min learning rate\")\n parser.add_argument(\n \"--on_eval_decrease\",\n type=str,\n default=\"nothing\",\n help='What to do when evaluation decreases (\"nothing\", \"divide_lr\", \"next_stage\", \"divide_then_next\")',\n )\n parser.add_argument(\n \"--lazy\",\n type=int,\n default=1,\n choices=(0, 1),\n help=\"Use lazy loading (1, default) or not (0) while loading the npy files\",\n )\n args = parser.parse_args()\n args.costs = {\"FN\": args.costfn, \"FL\": args.costfl, \"WL\": args.costwl}\n args.lazy = bool(args.lazy)\n current_time = datetime.now().strftime(\"%b%d_%H-%M-%S\")\n args.save_path = os.path.join(\n PACKAGE_DIRECTORY,\n \"checkpoints\",\n current_time + \"_\" + socket.gethostname() + \"_\",\n )\n\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n args.cuda = torch.cuda.is_available()\n if args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n args.evalkey = args.evalkey if args.evalkey is not None else args.eval + \"/key.txt\"\n args.trainkey = args.train + \"/key.txt\"\n args.train = args.train + \"/numpy/\"\n args.eval = args.eval + \"/numpy/\"\n print(args)\n run_model(args)\n" ]
[ [ "numpy.load", "numpy.array", "numpy.save", "numpy.dtype" ], [ "numpy.ones_like", "torch.zeros", "numpy.arange", "torch.from_numpy", "numpy.tile", "numpy.stack", "numpy.concatenate", "numpy.flatnonzero", "numpy.random.shuffle", "torch.stack", "numpy.load", "numpy.zeros" ], [ "numpy.random.randint", "numpy.arange", "numpy.zeros", "numpy.ones" ], [ "torch.sigmoid", "numpy.random.seed", "torch.cuda.manual_seed", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.sum", "torch.nn.BCEWithLogitsLoss", "torch.log", "torch.cuda.is_available", "torch.gather", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wdkwyf/mars
[ "3f750e360e64380eab779301a5103994d4886b6a", "3f750e360e64380eab779301a5103994d4886b6a", "3f750e360e64380eab779301a5103994d4886b6a", "3f750e360e64380eab779301a5103994d4886b6a" ]
[ "mars/tensor/merge/concatenate.py", "mars/tensor/linalg/inv.py", "mars/tensor/random/standard_cauchy.py", "mars/tensor/arithmetic/around.py" ]
[ "# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport operator\nfrom collections import Iterable\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...compat import six, lrange, lmap\nfrom ...serialize import AnyField\nfrom ..array_utils import device, as_same_device\nfrom ..utils import validate_axis, unify_chunks\nfrom ..datasource import tensor as astensor\nfrom ..operands import TensorOperand, TensorOperandMixin\nfrom ..indexing.slice import TensorSlice\n\n\ndef _get_index(chunk):\n try:\n return chunk.index\n except AttributeError:\n if isinstance(chunk.op, TensorSlice):\n return chunk.inputs[0].index\n raise\n\n\ndef _norm_axis(axis):\n if isinstance(axis, six.integer_types):\n return axis, True\n if isinstance(axis, Iterable):\n axis = sorted(tuple(axis))\n if len(axis) == 1:\n return axis[0], True\n return axis, False\n\n assert axis is None\n return None, False\n\n\nclass TensorConcatenate(TensorOperand, TensorOperandMixin):\n _op_type_ = OperandDef.CONCATENATE\n\n _axis = AnyField('axis')\n\n def __init__(self, axis=None, dtype=None, sparse=False, **kw):\n super(TensorConcatenate, self).__init__(_axis=axis, _dtype=dtype,\n _sparse=sparse, **kw)\n\n @property\n def axis(self):\n return getattr(self, '_axis', None)\n\n def __call__(self, tensors):\n if len(set(t.ndim for t in tensors)) != 1:\n raise ValueError('all the input tensors must have same number of dimensions')\n\n axis = self._axis\n shapes = [t.shape[:axis] + t.shape[axis + 1:] for t in tensors]\n if len(set(shapes)) != 1:\n raise ValueError('all the input tensor dimensions '\n 'except for the concatenation axis must match exactly')\n\n shape = [0 if i == axis else tensors[0].shape[i] for i in range(tensors[0].ndim)]\n shape[axis] = sum(t.shape[axis] for t in tensors)\n\n if any(np.isnan(s) for i, s in enumerate(shape) if i != axis):\n raise ValueError('cannot concatenate tensor with unknown shape')\n\n return self.new_tensor(tensors, shape=tuple(shape))\n\n @classmethod\n def tile(cls, op):\n from ..indexing.slice import TensorSlice\n\n inputs = op.inputs\n axis = op.axis\n\n c = itertools.count(inputs[0].ndim)\n tensor_axes = [(t, tuple(i if i != axis else next(c) for i in range(t.ndim)))\n for t in inputs]\n inputs = unify_chunks(*tensor_axes)\n\n out_chunk_shape = [0 if i == axis else inputs[0].chunk_shape[i]\n for i in range(inputs[0].ndim)]\n out_chunk_shape[axis] = sum(t.chunk_shape[axis] for t in inputs)\n out_nsplits = [None if i == axis else inputs[0].nsplits[i]\n for i in range(inputs[0].ndim)]\n out_nsplits[axis] = tuple(itertools.chain(*[t.nsplits[axis] for t in inputs]))\n\n out_chunks = []\n axis_cum_chunk_shape = np.cumsum([t.chunk_shape[axis] for t in inputs])\n for out_idx in itertools.product(*[range(s) for s in out_chunk_shape]):\n axis_index = np.searchsorted(axis_cum_chunk_shape, out_idx[axis], side='right')\n t = inputs[axis_index]\n axis_inner_index = out_idx[axis] - \\\n (0 if axis_index < 1 else 
axis_cum_chunk_shape[axis_index - 1])\n idx = out_idx[:axis] + (axis_inner_index,) + out_idx[axis + 1:]\n in_chunk = t.cix[idx]\n if idx == out_idx:\n # if index is the same, just use the input chunk\n out_chunks.append(in_chunk)\n else:\n chunk_op = TensorSlice(slices=[slice(None) for _ in range(in_chunk.ndim)],\n dtype=in_chunk.dtype, sparse=in_chunk.op.sparse)\n out_chunk = chunk_op.new_chunk([in_chunk], shape=in_chunk.shape, index=out_idx)\n\n out_chunks.append(out_chunk)\n\n new_op = op.copy()\n return new_op.new_tensors(op.inputs, op.outputs[0].shape,\n nsplits=out_nsplits, chunks=out_chunks)\n\n @classmethod\n def execute(cls, ctx, op):\n def _base_concatenate(chunk, inputs):\n inputs, device_id, xp = as_same_device(inputs, device=chunk.op.device, ret_extra=True)\n\n axis, single_axis = _norm_axis(chunk.op.axis)\n if single_axis:\n with device(device_id):\n res = xp.concatenate(tuple(inputs), axis=axis)\n else:\n axes = axis or lrange(chunk.ndim)\n chunks = [(_get_index(input), data) for input, data in zip(chunk.inputs, inputs)]\n with device(device_id):\n for i in range(len(axes) - 1):\n new_chunks = []\n for idx, cs in itertools.groupby(chunks, key=lambda t: t[0][:-1]):\n cs = lmap(operator.itemgetter(1), cs)\n new_chunks.append((idx, xp.concatenate(cs, axis=len(axes) - i - 1)))\n chunks = new_chunks\n res = xp.concatenate(lmap(operator.itemgetter(1), chunks), axis=axes[0])\n return res\n\n chunk = op.outputs[0]\n inputs = [ctx[input.key] for input in op.inputs]\n\n if isinstance(inputs[0], tuple):\n ctx[chunk.key] = tuple(_base_concatenate(chunk, [input[i] for input in inputs])\n for i in range(len(inputs[0])))\n else:\n ctx[chunk.key] = _base_concatenate(chunk, inputs)\n\n\ndef concatenate(tensors, axis=0):\n \"\"\"\n Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... : sequence of array_like\n The tensors must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the tensors will be joined. 
Default is 0.\n\n Returns\n -------\n res : Tensor\n The concatenated tensor.\n\n See Also\n --------\n array_split : Split a tensor into multiple sub-arrays of equal or\n near-equal size.\n split : Split tensor into a list of multiple sub-tensors of equal size.\n hsplit : Split tensor into multiple sub-tensors horizontally (column wise)\n vsplit : Split tensor into multiple sub-tensors vertically (row wise)\n dsplit : Split tensor into multiple sub-tensors along the 3rd axis (depth).\n stack : Stack a sequence of tensors along a new axis.\n hstack : Stack tensors in sequence horizontally (column wise)\n vstack : Stack tensors in sequence vertically (row wise)\n dstack : Stack tensors in sequence depth wise (along third dimension)\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> a = mt.array([[1, 2], [3, 4]])\n >>> b = mt.array([[5, 6]])\n >>> mt.concatenate((a, b), axis=0).execute()\n array([[1, 2],\n [3, 4],\n [5, 6]])\n >>> mt.concatenate((a, b.T), axis=1).execute()\n array([[1, 2, 5],\n [3, 4, 6]])\n\n \"\"\"\n tensors = [astensor(t) for t in tensors]\n\n axis = validate_axis(tensors[0].ndim, axis)\n dtype = np.result_type(*(t.dtype for t in tensors))\n sparse = all(t.issparse() for t in tensors)\n\n op = TensorConcatenate(axis=axis, dtype=dtype, sparse=sparse)\n return op(tensors)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.linalg import LinAlgError\n\nfrom ... 
import opcodes as OperandDef\nfrom ...serialize import KeyField\nfrom ..datasource import tensor as astensor\nfrom ..operands import TensorHasInput, TensorOperandMixin\n\n\nclass TensorInv(TensorHasInput, TensorOperandMixin):\n _op_type_ = OperandDef.INV\n\n _input = KeyField('input')\n\n def __init__(self, dtype=None, sparse=False, **kw):\n super(TensorInv, self).__init__(_dtype=dtype, _sparse=sparse, **kw)\n\n def __call__(self, a):\n a = astensor(a)\n return self.new_tensor([a], a.shape)\n\n @classmethod\n def tile(cls, op):\n \"\"\"\n Use LU decomposition to compute inverse of matrix.\n Given a square matrix A:\n P, L, U = lu(A)\n b_eye is an identity matrix with the same shape as matrix A, then,\n (P * L * U) * A_inv = b_eye\n L * (U * A_inv) = P.T * b_eye\n use `solve_triangular` twice to compute the inverse of matrix A.\n \"\"\"\n from .lu import lu\n from ..datasource import eye\n from ..base.transpose import TensorTranspose\n from .tensordot import tensordot\n from .solve_triangular import solve_triangular\n in_tensor = op.input\n is_sparse = in_tensor.is_sparse()\n\n b_eye = eye(in_tensor.shape[0], chunk_size=in_tensor.nsplits, sparse=is_sparse)\n b_eye.single_tiles()\n\n p, l, u = lu(in_tensor)\n p.single_tiles()\n\n # transposed p equals to inverse of p\n p_transpose = TensorTranspose(\n dtype=p.dtype, sparse=p.op.sparse, axes=list(range(in_tensor.ndim))[::-1]).new_tensor([p], p.shape)\n p_transpose.single_tiles()\n\n b = tensordot(p_transpose, b_eye, axes=((p_transpose.ndim - 1,), (b_eye.ndim - 2,)))\n b.single_tiles()\n\n # as `l` is a lower matrix, `lower=True` should be specified.\n uy = solve_triangular(l, b, lower=True, sparse=op.sparse)\n uy.single_tiles()\n\n a_inv = solve_triangular(u, uy, sparse=op.sparse)\n a_inv.single_tiles()\n return [a_inv]\n\n\ndef inv(a, sparse=None):\n \"\"\"\n Compute the (multiplicative) inverse of a matrix.\n Given a square matrix `a`, return the matrix `ainv` satisfying\n ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.\n Parameters\n ----------\n a : (..., M, M) array_like\n Matrix to be inverted.\n sparse: bool, optional\n Return sparse value or not.\n Returns\n -------\n ainv : (..., M, M) ndarray or matrix\n (Multiplicative) inverse of the matrix `a`.\n Raises\n ------\n LinAlgError\n If `a` is not square or inversion fails.\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> a = np.array([[1., 2.], [3., 4.]])\n >>> ainv = mt.linalg.inv(a)\n >>> mt.allclose(mt.dot(a, ainv), mt.eye(2)).execute()\n True\n >>> mt.allclose(mt.dot(ainv, a), mt.eye(2)).execute()\n True\n >>> ainv.execute()\n array([[ -2. , 1. ],\n [ 1.5, -0.5]])\n \"\"\"\n\n # TODO: using some parallel algorithm for matrix inversion.\n a = astensor(a)\n if a.ndim != 2:\n raise LinAlgError('{0}-dimensional array given. 
'\n 'Tensor must be two-dimensional'.format(a.ndim))\n if a.shape[0] != a.shape[1]:\n raise LinAlgError('Input must be square')\n\n tiny_inv = np.linalg.inv(np.array([[1, 2], [2, 5]], dtype=a.dtype))\n sparse = sparse if sparse is not None else a.issparse()\n op = TensorInv(dtype=tiny_inv.dtype, sparse=sparse)\n return op(a)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom .core import TensorRandomOperandMixin, TensorDistribution\n\n\nclass TensorStandardCauchy(TensorDistribution, TensorRandomOperandMixin):\n __slots__ = '_size',\n _op_type_ = OperandDef.RAND_STANDARD_CAUCHY\n _func_name = 'standard_cauchy'\n\n def __init__(self, size=None, state=None, dtype=None, gpu=None, **kw):\n dtype = np.dtype(dtype) if dtype is not None else dtype\n super(TensorStandardCauchy, self).__init__(_size=size, _state=state, _dtype=dtype,\n _gpu=gpu, **kw)\n\n def __call__(self, chunk_size=None):\n return self.new_tensor(None, None, raw_chunk_size=chunk_size)\n\n\ndef standard_cauchy(random_state, size=None, chunk_size=None, gpu=None, dtype=None):\n r\"\"\"\n Draw samples from a standard Cauchy distribution with mode = 0.\n\n Also known as the Lorentz distribution.\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n chunk_size : int or tuple of int or tuple of ints, optional\n Desired chunk size on each dimension\n gpu : bool, optional\n Allocate the tensor on GPU if True, False as default\n dtype : data-type, optional\n Data-type of the returned tensor.\n\n Returns\n -------\n samples : Tensor or scalar\n The drawn samples.\n\n Notes\n -----\n The probability density function for the full Cauchy distribution is\n\n .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+\n (\\frac{x-x_0}{\\gamma})^2 \\bigr] }\n\n and the Standard Cauchy distribution just sets :math:`x_0=0` and\n :math:`\\gamma=1`\n\n The Cauchy distribution arises in the solution to the driven harmonic\n oscillator problem, and also describes spectral line broadening. It\n also describes the distribution of values at which a line tilted at\n a random angle will cut the x axis.\n\n When studying hypothesis tests that assume normality, seeing how the\n tests perform on data from a Cauchy distribution is a good indicator of\n their sensitivity to a heavy-tailed distribution, since the Cauchy looks\n very much like a Gaussian distribution, but with heavier tails.\n\n References\n ----------\n .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, \"Cauchy\n Distribution\",\n http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm\n .. [2] Weisstein, Eric W. \"Cauchy Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/CauchyDistribution.html\n .. 
[3] Wikipedia, \"Cauchy distribution\"\n http://en.wikipedia.org/wiki/Cauchy_distribution\n\n Examples\n --------\n Draw samples and plot the distribution:\n\n >>> import mars.tensor as mt\n >>> import matplotlib.pyplot as plt\n\n >>> s = mt.random.standard_cauchy(1000000)\n >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well\n >>> plt.hist(s.execute(), bins=100)\n >>> plt.show()\n \"\"\"\n if dtype is None:\n dtype = np.random.RandomState().standard_cauchy(size=(0,)).dtype\n size = random_state._handle_size(size)\n op = TensorStandardCauchy(size=size, state=random_state._state, gpu=gpu, dtype=dtype)\n return op(chunk_size=chunk_size)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...serialize import Int32Field\nfrom ..array_utils import device, as_same_device\nfrom ..datasource import tensor as astensor\nfrom .core import TensorUnaryOp\nfrom .utils import arithmetic_operand\n\n\n@arithmetic_operand(init=False, sparse_mode='unary')\nclass TensorAround(TensorUnaryOp):\n _op_type_ = OperandDef.AROUND\n\n _decimals = Int32Field('decimals')\n _func_name = 'around'\n\n @property\n def decimals(self):\n return self._decimals\n\n def __init__(self, decimals=None, casting='same_kind', err=None, dtype=None, sparse=False, **kw):\n err = err if err is not None else np.geterr()\n super(TensorAround, self).__init__(_decimals=decimals, _casting=casting, _err=err,\n _dtype=dtype, _sparse=sparse, **kw)\n\n @classmethod\n def execute(cls, ctx, op):\n (a,), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)\n\n with device(device_id):\n ctx[op.outputs[0].key] = xp.around(a, decimals=op.decimals)\n\n\ndef around(a, decimals=0, out=None, **kwargs):\n \"\"\"\n Evenly round to the given number of decimals.\n\n Parameters\n ----------\n a : array_like\n Input data.\n decimals : int, optional\n Number of decimal places to round to (default: 0). If\n decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out : Tensor, optional\n Alternative output tensor in which to place the result. It must have\n the same shape as the expected output, but the type of the output\n values will be cast if necessary.\n\n Returns\n -------\n rounded_array : Tensor\n An tensor of the same type as `a`, containing the rounded values.\n Unless `out` was specified, a new tensor is created. A reference to\n the result is returned.\n\n The real and imaginary parts of complex numbers are rounded\n separately. The result of rounding a float is a float.\n\n See Also\n --------\n Tensor.round : equivalent method\n\n ceil, fix, floor, rint, trunc\n\n\n Notes\n -----\n For values exactly halfway between rounded decimal values, NumPy\n rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,\n -0.5 and 0.5 round to 0.0, etc. 
Results may also be surprising due\n to the inexact representation of decimal fractions in the IEEE\n floating point standard [1]_ and errors introduced when scaling\n by powers of ten.\n\n References\n ----------\n .. [1] \"Lecture Notes on the Status of IEEE 754\", William Kahan,\n http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF\n .. [2] \"How Futile are Mindless Assessments of\n Roundoff in Floating-Point Computation?\", William Kahan,\n http://www.cs.berkeley.edu/~wkahan/Mindless.pdf\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.around([0.37, 1.64]).execute()\n array([ 0., 2.])\n >>> mt.around([0.37, 1.64], decimals=1).execute()\n array([ 0.4, 1.6])\n >>> mt.around([.5, 1.5, 2.5, 3.5, 4.5]).execute() # rounds to nearest even value\n array([ 0., 2., 2., 4., 4.])\n >>> mt.around([1,2,3,11], decimals=1).execute() # tensor of ints is returned\n array([ 1, 2, 3, 11])\n >>> mt.around([1,2,3,11], decimals=-1).execute()\n array([ 0, 0, 0, 10])\n\n \"\"\"\n dtype = astensor(a).dtype\n op = TensorAround(decimals=decimals, dtype=dtype, **kwargs)\n return op(a, out=out)\n\n\nround_ = around\n" ]
[ [ "numpy.isnan", "numpy.result_type", "numpy.cumsum", "numpy.searchsorted" ], [ "numpy.array", "numpy.linalg.LinAlgError" ], [ "numpy.random.RandomState", "numpy.dtype" ], [ "numpy.geterr" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ash-vs/tensorflow
[ "303dc341a6300a4a2eee820679bca30547426aa6" ]
[ "tensorflow/contrib/learn/python/learn/estimators/estimator.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Base Estimator class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport os\nimport tempfile\nimport time\n\nimport six\n\nfrom tensorflow.contrib import framework as contrib_framework\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib import losses\nfrom tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn\nfrom tensorflow.contrib.learn.python.learn.estimators import run_config\nfrom tensorflow.contrib.learn.python.learn.estimators import tensor_signature\nfrom tensorflow.contrib.learn.python.learn.graph_actions import evaluate\nfrom tensorflow.contrib.learn.python.learn.graph_actions import infer\nfrom tensorflow.contrib.learn.python.learn.graph_actions import train\nfrom tensorflow.contrib.learn.python.learn.io import data_feeder\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import device_setter\nfrom tensorflow.python.training import saver\n\n\n# Default metrics for evaluation.\n_EVAL_METRICS = {\n 'regression': {\n 'mean_squared_error': losses.sum_of_squares,\n },\n 'classification': {\n 'logistic': losses.sigmoid_cross_entropy,\n },}\n\n\nclass ModeKeys(object):\n \"\"\"Standard names for model modes.\n\n The following standard keys are defined:\n\n * `TRAIN`: training mode.\n * `EVAL`: evaluation mode.\n * `INFER`: inference mode.\n \"\"\"\n\n TRAIN = 'train'\n EVAL = 'eval'\n INFER = 'infer'\n\n\ndef _get_input_fn(x, y, batch_size):\n # TODO(ipoloshukin): Remove this when refactor of data_feeder is done\n if hasattr(x, 'create_graph') and hasattr(y, 'create_graph'):\n def input_fn():\n return x.create_graph(), y.create_graph()\n return input_fn, None\n\n df = data_feeder.setup_train_data_feeder(x, y,\n n_classes=None,\n batch_size=batch_size)\n return df.input_builder, df.get_feed_dict_fn()\n\n\ndef _get_predict_input_fn(x, batch_size):\n # TODO(ipoloshukin): Remove this when refactor of data_feeder is done\n if hasattr(x, 'create_graph'):\n def input_fn():\n return x.create_graph()\n return input_fn, None\n\n df = data_feeder.setup_train_data_feeder(x, None,\n n_classes=None,\n batch_size=batch_size)\n return df.input_builder, df.get_feed_dict_fn()\n\n\nclass BaseEstimator(sklearn.BaseEstimator):\n \"\"\"Abstract BaseEstimator class to train and evaluate TensorFlow models.\n\n Concrete implementation of this class should provide following functions:\n * _get_train_ops\n * _get_eval_ops\n * _get_predict_ops\n It may override _get_default_metric_functions.\n\n `Estimator` implemented below is a good example of how to use this class.\n\n Parameters:\n model_dir: Directory to save model parameters, graph and etc.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n # TODO(wicke): Remove this once launcher takes over 
config functionality\n _Config = run_config.RunConfig # pylint: disable=invalid-name\n\n def __init__(self, model_dir=None):\n # Model directory.\n self._model_dir = model_dir\n if self._model_dir is None:\n self._model_dir = tempfile.mkdtemp()\n logging.info('Using temporary folder as model directory: %s',\n self._model_dir)\n\n # Create a run configuration\n self._config = BaseEstimator._Config()\n\n # Set device function depending if there are replicas or not.\n if self._config.num_ps_replicas > 0:\n ps_ops = ['Variable', 'AutoReloadVariable']\n self._device_fn = device_setter.replica_device_setter(\n ps_tasks=self._config.num_ps_replicas,\n merge_devices=False, ps_ops=ps_ops)\n else:\n self._device_fn = None\n\n # Features and targets TensorSingature objects.\n self._features_info = None\n self._targets_info = None\n\n @abc.abstractproperty\n def _get_train_ops(self, features, targets):\n \"\"\"Method that builds model graph and returns trainer ops.\n\n Expected to be overriden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n targets: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n Tuple of train `Operation` and loss `Tensor`.\n \"\"\"\n pass\n\n @abc.abstractproperty\n def _get_predict_ops(self, features):\n \"\"\"Method that builds model graph and returns prediction ops.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n predictions: `Tensor` or `dict` of `Tensor` objects.\n \"\"\"\n pass\n\n def _get_eval_ops(self, features, targets, metrics):\n \"\"\"Method that builds model graph and returns evaluation ops.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n targets: `Tensor` or `dict` of `Tensor` objects.\n metrics: `dict` of functions that take predictions and targets.\n\n Returns:\n metrics: `dict` of `Tensor` objects.\n \"\"\"\n predictions = self._get_predict_ops(features)\n result = {}\n for name, metric in six.iteritems(metrics):\n result[name] = metric(predictions, targets)\n return result\n\n def _get_feature_ops_from_example(self, examples_batch):\n \"\"\"Method that returns features given the batch of examples.\n\n This method will be used to export model into a server.\n\n Args:\n examples_batch: batch of tf.Example\n\n Returns:\n features: `Tensor` or `dict` of `Tensor` objects.\n \"\"\"\n raise NotImplementedError('_get_feature_ops_from_example not implemented '\n 'in BaseEstimator')\n\n def _get_default_metric_functions(self):\n \"\"\"Method that provides default metric operations.\n\n This functions is intented to be overridden by sub-classes.\n Returns:\n `dict` of functions that take predictions and targets `Tensor` objects and\n return `Tensor`.\n \"\"\"\n return {}\n\n def fit(self, x, y, steps, batch_size=32, monitor=None):\n \"\"\"Trains a model given training data X and y.\n\n Args:\n x: matrix or tensor of shape [n_samples, n_features...]. Can be\n iterator that returns arrays of features. The training input\n samples for fitting the model.\n y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be\n iterator that returns array of targets. 
The training target values\n (class labels in classification, real numbers in regression).\n steps: number of steps to train model for.\n batch_size: minibatch size to use on the input, defaults to 32.\n monitor: monitor object to print training progress and invoke\n early stopping.\n\n Returns:\n Returns self.\n \"\"\"\n input_fn, feed_fn = _get_input_fn(x, y, batch_size)\n return self._train_model(input_fn=input_fn,\n feed_fn=feed_fn,\n steps=steps,\n monitor=monitor)\n\n def train(self, input_fn, steps, monitor=None):\n \"\"\"Trains a model given input builder function.\n\n Args:\n input_fn: Input builder function, returns tuple of dicts or\n dict and Tensor.\n steps: number of steps to train model for.\n monitor: monitor object to print training progress and invoke\n early stopping.\n\n Returns:\n Returns self.\n \"\"\"\n return self._train_model(input_fn=input_fn, steps=steps, monitor=monitor)\n\n def partial_fit(self, x, y, steps=1, batch_size=32, monitor=None):\n \"\"\"Incremental fit on a batch of samples.\n\n This method is expected to be called several times consecutively\n on different or the same chunks of the dataset. This either can\n implement iterative training or out-of-core/online training.\n\n This is especially useful when the whole dataset is too big to\n fit in memory at the same time. Or when model is taking long time\n to converge, and you want to split up training into subparts.\n\n Args:\n x: matrix or tensor of shape [n_samples, n_features...]. Can be\n iterator that returns arrays of features. The training input\n samples for fitting the model.\n y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be\n iterator that returns array of targets. The training target values\n (class label in classification, real numbers in regression).\n steps: number of steps to train model for.\n batch_size: minibatch size to use on the input, defaults to 32.\n monitor: Monitor object to print training progress and invoke\n early stopping.\n\n Returns:\n Returns self.\n \"\"\"\n input_fn, feed_fn = _get_input_fn(x, y, batch_size)\n return self._train_model(input_fn=input_fn,\n feed_fn=feed_fn,\n steps=steps,\n monitor=monitor)\n\n def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,\n batch_size=32, steps=100, metrics=None):\n \"\"\"Evaluates given model with provided evaluation data.\n\n Args:\n x: features.\n y: targets.\n input_fn: Input function. If set, x and y must be None.\n feed_fn: Function creating a feed dict every time it is called. Called\n once per iteration.\n batch_size: minibatch size to use on the input, defaults to 32. Ignored\n if input_fn is set.\n steps: Number of steps to evalute for.\n metrics: Dict of metric ops to run.\n\n Returns:\n Returns self.\n\n Raises:\n ValueError: If x or y are not None while input_fn or feed_fn is not None.\n \"\"\"\n if (x is not None or y is not None) and input_fn is not None:\n raise ValueError('Either x and y or input_fn must be None.')\n if input_fn is None:\n assert x is not None\n input_fn, feed_fn = _get_input_fn(x, y, batch_size)\n return self._evaluate_model(input_fn=input_fn, feed_fn=feed_fn,\n steps=steps, metrics=metrics)\n\n def predict(self, x, axis=None, batch_size=None):\n \"\"\"Returns predictions for given features.\n\n Args:\n x: features.\n axis: Axis on which to argmax. 
(for classification).\n batch_size: Override default batch size.\n\n Returns:\n Numpy array of predicted classes or regression values.\n \"\"\"\n return self._infer_model(x=x, batch_size=batch_size, axis=axis)\n\n def predict_proba(self, x, batch_size=None):\n \"\"\"Returns prediction probabilities for given features (classification).\n\n Args:\n x: features.\n batch_size: OVerride default batch size.\n\n Returns:\n Numpy array of predicted probabilities.\n \"\"\"\n return self._infer_model(x=x, batch_size=batch_size, proba=True)\n\n def _check_inputs(self, features, targets):\n if self._features_info is not None:\n if not tensor_signature.tensors_compatible(features, self._features_info):\n raise ValueError('Features are incompatible with given information. '\n 'Given features: %s, required signatures: %s.' %\n (str(features), str(self._features_info)))\n else:\n self._features_info = tensor_signature.create_signatures(features)\n if self._targets_info is not None:\n if not tensor_signature.tensors_compatible(targets, self._targets_info):\n raise ValueError('Targets are incompatible with given information. '\n 'Given targets: %s, required signatures: %s.' %\n (str(targets), str(self._targets_info)))\n else:\n self._targets_info = tensor_signature.create_signatures(targets)\n\n def _train_model(self,\n input_fn,\n steps,\n feed_fn=None,\n device_fn=None,\n monitor=None,\n log_every_steps=100,\n fail_on_nan_loss=True):\n if self._config.execution_mode not in ('all', 'train'):\n return\n\n # Stagger startup of worker sessions based on task id.\n sleep_secs = min(self._config.training_worker_max_startup_secs,\n self._config.task *\n self._config.training_worker_session_startup_stagger_secs)\n if sleep_secs:\n logging.info('Waiting %d secs before starting task %d.', sleep_secs,\n self._config.task)\n time.sleep(sleep_secs)\n\n # Device allocation\n device_fn = device_fn or self._device_fn\n\n with ops.Graph().as_default() as g, g.device(device_fn):\n random_seed.set_random_seed(self._config.tf_random_seed)\n global_step = contrib_framework.create_global_step(g)\n features, targets = input_fn()\n self._check_inputs(features, targets)\n train_op, loss_op = self._get_train_ops(features, targets)\n return train(\n graph=g,\n output_dir=self._model_dir,\n train_op=train_op,\n loss_op=loss_op,\n global_step_tensor=global_step,\n log_every_steps=log_every_steps,\n supervisor_is_chief=(self._config.task == 0),\n supervisor_master=self._config.master,\n feed_fn=feed_fn,\n max_steps=steps,\n fail_on_nan_loss=fail_on_nan_loss)\n\n def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None):\n if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'):\n return\n\n checkpoint_path = saver.latest_checkpoint(self._model_dir)\n eval_dir = os.path.join(self._model_dir, 'eval')\n with ops.Graph().as_default() as g:\n random_seed.set_random_seed(self._config.tf_random_seed)\n global_step = contrib_framework.create_global_step(g)\n features, targets = input_fn()\n self._check_inputs(features, targets)\n eval_dict = self._get_eval_ops(features, targets, metrics or\n self._get_default_metric_functions())\n eval_results, _ = evaluate(\n graph=g,\n output_dir=eval_dir,\n checkpoint_path=checkpoint_path,\n eval_dict=eval_dict,\n global_step_tensor=global_step,\n supervisor_master=self._config.master,\n feed_fn=feed_fn,\n max_steps=steps)\n return eval_results\n\n def _infer_model(self, x, batch_size=None, axis=None, proba=False):\n # Converts inputs into tf.DataFrame / tf.Series.\n 
batch_size = -1 if batch_size is None else batch_size\n input_fn, feed_fn = _get_predict_input_fn(x, batch_size)\n\n checkpoint_path = saver.latest_checkpoint(self._model_dir)\n with ops.Graph().as_default() as g:\n random_seed.set_random_seed(self._config.tf_random_seed)\n contrib_framework.create_global_step(g)\n features, _ = input_fn()\n feed_dict = feed_fn() if feed_fn is not None else None\n predictions = self._get_predict_ops(features)\n if not isinstance(predictions, dict):\n predictions = {'predictions': predictions}\n # TODO(ipolosukhin): Support batching\n return infer(checkpoint_path, predictions, feed_dict=feed_dict)\n\n\nclass Estimator(BaseEstimator):\n \"\"\"Estimator class is the basic TensorFlow model trainer/evaluator.\n\n Parameters:\n model_fn: Model function, takes features and targets tensors or dicts of\n tensors and returns predictions and loss tensors.\n E.g. `(features, targets) -> (predictions, loss)`.\n model_dir: Directory to save model parameters, graph and etc.\n classification: boolean, true if classification problem.\n learning_rate: learning rate for the model.\n optimizer: optimizer for the model, can be:\n string: name of optimizer, like 'SGD', 'Adam', 'Adagrad', 'Ftl',\n 'Momentum', 'RMSProp', 'Momentum').\n Full list in contrib/layers/optimizers.py\n class: sub-class of Optimizer\n (like tf.train.GradientDescentOptimizer).\n clip_gradients: clip_norm value for call to `clip_by_global_norm`. None\n denotes no gradient clipping.\n \"\"\"\n\n def __init__(self,\n model_fn=None,\n model_dir=None,\n classification=True,\n learning_rate=0.01,\n optimizer='SGD',\n clip_gradients=None):\n super(Estimator, self).__init__(model_dir=model_dir)\n\n self._model_fn = model_fn\n self._classification = classification\n if isinstance(optimizer, six.string_types):\n if optimizer not in layers.OPTIMIZER_CLS_NAMES:\n raise ValueError(\n 'Optimizer name should be one of [%s], you provided %s.' 
%\n (', '.join(layers.OPTIMIZER_CLS_NAMES), optimizer))\n self.optimizer = optimizer\n self.learning_rate = learning_rate\n self.clip_gradients = clip_gradients\n\n def _get_train_ops(self, features, targets):\n \"\"\"Method that builds model graph and returns trainer ops.\n\n Expected to be overriden by sub-classes that require custom support.\n This implementation uses `model_fn` passed as parameter to constructor to\n build model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n targets: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n Tuple of train `Operation` and loss `Tensor`.\n \"\"\"\n _, loss = self._model_fn(features, targets, ModeKeys.TRAIN)\n train_op = layers.optimize_loss(\n loss,\n contrib_framework.get_global_step(),\n learning_rate=self.learning_rate,\n optimizer=self.optimizer,\n clip_gradients=self.clip_gradients)\n return train_op, loss\n\n def _get_eval_ops(self, features, targets, metrics):\n \"\"\"Method that builds model graph and returns evaluation ops.\n\n Expected to be overriden by sub-classes that require custom support.\n This implementation uses `model_fn` passed as parameter to constructor to\n build model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n targets: `Tensor` or `dict` of `Tensor` objects.\n metrics: `dict` of functions that take predictions and targets.\n\n Returns:\n metrics: `dict` of `Tensor` objects.\n \"\"\"\n predictions, loss = self._model_fn(features, targets, ModeKeys.EVAL)\n result = {'loss': loss}\n if isinstance(targets, dict) and len(targets) == 1:\n # Unpack single target into just tensor.\n targets = targets[targets.keys()[0]]\n for name, metric in six.iteritems(metrics):\n # TODO(ipolosukhin): Add support for multi-head metrics.\n result[name] = metric(predictions, targets)\n return result\n\n def _get_predict_ops(self, features):\n \"\"\"Method that builds model graph and returns prediction ops.\n\n Expected to be overriden by sub-classes that require custom support.\n This implementation uses `model_fn` passed as parameter to constructor to\n build model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n predictions: `Tensor` or `dict` of `Tensor` objects.\n \"\"\"\n targets = tensor_signature.create_placeholders_from_signatures(\n self._targets_info)\n predictions, _ = self._model_fn(features, targets, ModeKeys.INFER)\n return predictions\n\n def _get_default_metric_functions(self):\n \"\"\"Method that provides default metric operations.\n\n Returns:\n a dictionary of metric operations.\n \"\"\"\n return _EVAL_METRICS[\n 'classification' if self._classification else 'regression']\n\n def _get_feature_ops_from_example(self, examples_batch):\n \"\"\"Unimplemented.\n\n TODO(vihanjain): We need a way to parse tf.Example into features.\n\n Args:\n examples_batch: batch of tf.Example\n\n Returns:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Raises:\n Exception: Unimplemented\n \"\"\"\n raise NotImplementedError('_get_feature_ops_from_example not yet '\n 'implemented')\n" ]
[ [ "tensorflow.python.training.saver.latest_checkpoint", "tensorflow.contrib.learn.python.learn.estimators.tensor_signature.tensors_compatible", "tensorflow.python.training.device_setter.replica_device_setter", "tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_signatures", "tensorflow.contrib.learn.python.learn.graph_actions.infer", "tensorflow.python.platform.tf_logging.info", "tensorflow.contrib.learn.python.learn.graph_actions.evaluate", "tensorflow.contrib.framework.get_global_step", "tensorflow.python.framework.ops.Graph", "tensorflow.contrib.learn.python.learn.io.data_feeder.setup_train_data_feeder", "tensorflow.contrib.framework.create_global_step", "tensorflow.contrib.learn.python.learn.graph_actions.train", "tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_placeholders_from_signatures", "tensorflow.python.framework.random_seed.set_random_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
ThomsonTan/nnvm
[ "dab5ce8ab6adbf4edd8bd2fa89f1a99f343b6e38" ]
[ "tests/python/compiler/test_top_level2.py" ]
[ "import numpy as np\n\nimport tvm\nfrom tvm.contrib import graph_runtime\nimport topi\nimport topi.testing\nimport nnvm.symbol as sym\nimport nnvm.compiler\nfrom nnvm.testing.config import ctx_list\n\n\ndef test_conv2d():\n x = sym.Variable(\"x\")\n y = sym.conv2d(x, channels=10, kernel_size=(3,3),\n name=\"y\", padding=(1,1))\n dtype = \"float32\"\n dshape = (1, 3, 18, 18)\n kshape = (10, 3, 3, 3)\n oshape = (1, 10, 18, 18)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))\n bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))\n m.run(x=data, y_weight=kernel, y_bias=bias)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n c_np = topi.testing.conv2d_nchw_python(\n data.asnumpy(), kernel.asnumpy(), 1, 1)\n c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1)\n np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)\n\n\ndef test_dilated_conv2d():\n dilation = 3\n x = sym.Variable(\"x\")\n y = sym.conv2d(x, channels=10, kernel_size=(3, 3), dilation=(dilation, dilation),\n name=\"y\", padding=(1, 1))\n dtype = \"float32\"\n dshape = (1, 3, 18, 18)\n kshape = (10, 3, 3, 3)\n oshape = (1, 10, 14, 14)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))\n kernel_np = np.random.uniform(size=kshape).astype(dtype)\n kernel = tvm.nd.array(kernel_np)\n dkernel_np = topi.testing.dilate_python(kernel_np, (1, 1, dilation, dilation))\n m.run(x=data, y_weight=kernel, y_bias=bias)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n c_np = topi.testing.conv2d_nchw_python(\n data.asnumpy(), dkernel_np, 1, 1)\n c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1)\n np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)\n\n\ndef test_grouped_conv2d():\n x = sym.Variable(\"x\")\n y = sym.conv2d(x, channels=32, kernel_size=(3,3), groups=32,\n name=\"y\", padding=(1,1))\n dtype = \"float32\"\n dshape = (1, 32, 18, 18)\n kshape = (32, 1, 3, 3)\n oshape = (1, 32, 18, 18)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))\n bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))\n m.run(x=data, y_weight=kernel, y_bias=bias)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n c_np = topi.testing.depthwise_conv2d_python_nchw(\n data.asnumpy(), kernel.asnumpy(), (1,1), 'SAME')\n c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1)\n np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)\n\n\ndef test_conv2d_transpose():\n x = sym.Variable(\"x\")\n y = sym.conv2d_transpose(x, channels=10, kernel_size=(3,3), strides=(2,2),\n name=\"y\", padding=(1,1), output_padding=(2,2))\n dtype = \"float32\"\n dshape = (1, 3, 18, 18)\n kshape = (3, 10, 3, 3)\n oshape = (1, 10, 37, 37)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = 
graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))\n bias = tvm.nd.array(np.random.uniform(size=kshape[1]).astype(dtype))\n m.run(x=data, y_weight=kernel, y_bias=bias)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n c_np = topi.testing.conv2d_transpose_nchw_python(\n data.asnumpy(), kernel.asnumpy(), 2, 1)\n c_np = c_np + bias.asnumpy().reshape(kshape[1], 1, 1)\n d_np = np.zeros(shape=oshape)\n d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np\n np.testing.assert_allclose(out.asnumpy(), d_np, rtol=1e-5)\n\n\ndef test_max_pool2d():\n x = sym.Variable(\"x\")\n y = sym.max_pool2d(x, pool_size=(2,2), strides=(2,2),\n padding=(0,0), name=\"y\", ceil_mode=True)\n dtype = \"float32\"\n dshape = (1, 3, 28, 28)\n oshape = (1, 3, 14, 14)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = np.max(data.asnumpy().reshape(1,3,14,2,14,2), axis=(3,5))\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_avg_pool2d():\n x = sym.Variable(\"x\")\n y = sym.avg_pool2d(x, pool_size=(2,2), strides=(2,2), padding=(0,0), name=\"y\")\n dtype = \"float32\"\n dshape = (1, 3, 28, 28)\n oshape = (1, 3, 14, 14)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = np.mean(data.asnumpy().reshape(1,3,14,2,14,2), axis=(3,5))\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_avg_pool2d_no_count_pad():\n kh, kw = (4, 4)\n sh, sw = (2, 2)\n ph, pw = (2, 2)\n \n x = sym.Variable(\"x\")\n y = sym.avg_pool2d(x, pool_size=(kh, kw), strides=(sw, sw), padding=(ph, pw),\n name=\"y\", count_include_pad=False)\n dtype = \"float32\"\n n = 1\n (ic, ih, iw) = (3, 28, 28)\n (oc, oh, ow) = (3, 15, 15)\n\n a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)\n pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)\n no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))\n pad_np[np.ix_(*no_zero)] = a_np\n b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)\n \n for i in range(oh):\n for j in range(ow):\n pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))\n b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw],\n axis=(2,3)) / np.maximum(pad_count, 1)\n b_np = np.maximum(b_np, 0.0)\n shape_dict = {\"x\": (n, ic, ih, iw)}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(a_np)\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty((n, oc, oh, ow), dtype))\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_global_max_pool2d():\n x = sym.Variable(\"x\")\n y = sym.global_max_pool2d(x, name=\"y\")\n dtype = \"float32\"\n dshape = (1, 1024, 7, 7)\n oshape = (1, 1024, 1, 1)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = 
tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = np.max(data.asnumpy(), axis=(2,3), keepdims=True)\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_global_avg_pool2d():\n x = sym.Variable(\"x\")\n y = sym.global_avg_pool2d(x, name=\"y\")\n dtype = \"float32\"\n dshape = (1, 1024, 7, 7)\n oshape = (1, 1024, 1, 1)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = np.mean(data.asnumpy(), axis=(2,3), keepdims=True)\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_upsampling():\n x = sym.Variable(\"x\")\n scale = 2\n y = sym.upsampling(x, scale=scale, name=\"y\")\n dtype = \"float32\"\n dshape = (1, 16, 32, 32)\n oshape = (1, 16, 32*scale, 32*scale)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n a_np = np.random.uniform(size=dshape).astype(dtype)\n data = tvm.nd.array(a_np)\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = topi.testing.upsampling_python(a_np, scale)\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\nif __name__ == \"__main__\":\n test_conv2d()\n test_dilated_conv2d()\n test_grouped_conv2d()\n test_conv2d_transpose()\n test_max_pool2d()\n test_avg_pool2d()\n test_avg_pool2d_no_count_pad()\n test_global_max_pool2d()\n test_global_avg_pool2d()\n test_upsampling()\n" ]
[ [ "numpy.ix_", "numpy.maximum", "numpy.random.uniform", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
weidan-wd/numpy
[ "b7c27bd2a3817f59c84b004b87bba5db57d9a9b0", "b7c27bd2a3817f59c84b004b87bba5db57d9a9b0" ]
[ "numpy/testing/tests/test_utils.py", "numpy/core/defchararray.py" ]
[ "import warnings\nimport sys\nimport os\nimport itertools\nimport textwrap\nimport pytest\nimport weakref\n\nimport numpy as np\nfrom numpy.testing import (\n assert_equal, assert_array_equal, assert_almost_equal,\n assert_array_almost_equal, assert_array_less, build_err_msg, raises,\n assert_raises, assert_warns, assert_no_warnings, assert_allclose,\n assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,\n clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,\n tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT\n )\nfrom numpy.core.overrides import ARRAY_FUNCTION_ENABLED\n\n\nclass _GenericTest(object):\n\n def _test_equal(self, a, b):\n self._assert_func(a, b)\n\n def _test_not_equal(self, a, b):\n with assert_raises(AssertionError):\n self._assert_func(a, b)\n\n def test_array_rank1_eq(self):\n \"\"\"Test two equal array of rank 1 are found equal.\"\"\"\n a = np.array([1, 2])\n b = np.array([1, 2])\n\n self._test_equal(a, b)\n\n def test_array_rank1_noteq(self):\n \"\"\"Test two different array of rank 1 are found not equal.\"\"\"\n a = np.array([1, 2])\n b = np.array([2, 2])\n\n self._test_not_equal(a, b)\n\n def test_array_rank2_eq(self):\n \"\"\"Test two equal array of rank 2 are found equal.\"\"\"\n a = np.array([[1, 2], [3, 4]])\n b = np.array([[1, 2], [3, 4]])\n\n self._test_equal(a, b)\n\n def test_array_diffshape(self):\n \"\"\"Test two arrays with different shapes are found not equal.\"\"\"\n a = np.array([1, 2])\n b = np.array([[1, 2], [1, 2]])\n\n self._test_not_equal(a, b)\n\n def test_objarray(self):\n \"\"\"Test object arrays.\"\"\"\n a = np.array([1, 1], dtype=object)\n self._test_equal(a, 1)\n\n def test_array_likes(self):\n self._test_equal([1, 2, 3], (1, 2, 3))\n\n\nclass TestArrayEqual(_GenericTest):\n\n def setup(self):\n self._assert_func = assert_array_equal\n\n def test_generic_rank1(self):\n \"\"\"Test rank 1 array for all dtypes.\"\"\"\n def foo(t):\n a = np.empty(2, t)\n a.fill(1)\n b = a.copy()\n c = a.copy()\n c.fill(0)\n self._test_equal(a, b)\n self._test_not_equal(c, b)\n\n # Test numeric types and object\n for t in '?bhilqpBHILQPfdgFDG':\n foo(t)\n\n # Test strings\n for t in ['S1', 'U1']:\n foo(t)\n\n def test_0_ndim_array(self):\n x = np.array(473963742225900817127911193656584771)\n y = np.array(18535119325151578301457182298393896)\n assert_raises(AssertionError, self._assert_func, x, y)\n\n y = x\n self._assert_func(x, y)\n\n x = np.array(43)\n y = np.array(10)\n assert_raises(AssertionError, self._assert_func, x, y)\n\n y = x\n self._assert_func(x, y)\n\n def test_generic_rank3(self):\n \"\"\"Test rank 3 array for all dtypes.\"\"\"\n def foo(t):\n a = np.empty((4, 2, 3), t)\n a.fill(1)\n b = a.copy()\n c = a.copy()\n c.fill(0)\n self._test_equal(a, b)\n self._test_not_equal(c, b)\n\n # Test numeric types and object\n for t in '?bhilqpBHILQPfdgFDG':\n foo(t)\n\n # Test strings\n for t in ['S1', 'U1']:\n foo(t)\n\n def test_nan_array(self):\n \"\"\"Test arrays with nan values in them.\"\"\"\n a = np.array([1, 2, np.nan])\n b = np.array([1, 2, np.nan])\n\n self._test_equal(a, b)\n\n c = np.array([1, 2, 3])\n self._test_not_equal(c, b)\n\n def test_string_arrays(self):\n \"\"\"Test two arrays with different shapes are found not equal.\"\"\"\n a = np.array(['floupi', 'floupa'])\n b = np.array(['floupi', 'floupa'])\n\n self._test_equal(a, b)\n\n c = np.array(['floupipi', 'floupa'])\n\n self._test_not_equal(c, b)\n\n def test_recarrays(self):\n \"\"\"Test record arrays.\"\"\"\n a = np.empty(2, [('floupi', float), 
('floupa', float)])\n a['floupi'] = [1, 2]\n a['floupa'] = [1, 2]\n b = a.copy()\n\n self._test_equal(a, b)\n\n c = np.empty(2, [('floupipi', float), ('floupa', float)])\n c['floupipi'] = a['floupi'].copy()\n c['floupa'] = a['floupa'].copy()\n\n with suppress_warnings() as sup:\n l = sup.record(FutureWarning, message=\"elementwise == \")\n self._test_not_equal(c, b)\n assert_equal(len(l), 1)\n\n def test_masked_nan_inf(self):\n # Regression test for gh-11121\n a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])\n b = np.array([3., np.nan, 6.5])\n self._test_equal(a, b)\n self._test_equal(b, a)\n a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])\n b = np.array([np.inf, 4., 6.5])\n self._test_equal(a, b)\n self._test_equal(b, a)\n\n def test_subclass_that_overrides_eq(self):\n # While we cannot guarantee testing functions will always work for\n # subclasses, the tests should ideally rely only on subclasses having\n # comparison operators, not on them being able to store booleans\n # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.\n class MyArray(np.ndarray):\n def __eq__(self, other):\n return bool(np.equal(self, other).all())\n\n def __ne__(self, other):\n return not self == other\n\n a = np.array([1., 2.]).view(MyArray)\n b = np.array([2., 3.]).view(MyArray)\n assert_(type(a == a), bool)\n assert_(a == a)\n assert_(a != b)\n self._test_equal(a, a)\n self._test_not_equal(a, b)\n self._test_not_equal(b, a)\n\n @pytest.mark.skipif(\n not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__')\n def test_subclass_that_does_not_implement_npall(self):\n class MyArray(np.ndarray):\n def __array_function__(self, *args, **kwargs):\n return NotImplemented\n\n a = np.array([1., 2.]).view(MyArray)\n b = np.array([2., 3.]).view(MyArray)\n with assert_raises(TypeError):\n np.all(a)\n self._test_equal(a, a)\n self._test_not_equal(a, b)\n self._test_not_equal(b, a)\n\n\nclass TestBuildErrorMessage(object):\n\n def test_build_err_msg_defaults(self):\n x = np.array([1.00001, 2.00002, 3.00003])\n y = np.array([1.00002, 2.00003, 3.00004])\n err_msg = 'There is a mismatch'\n\n a = build_err_msg([x, y], err_msg)\n b = ('\\nItems are not equal: There is a mismatch\\n ACTUAL: array(['\n '1.00001, 2.00002, 3.00003])\\n DESIRED: array([1.00002, '\n '2.00003, 3.00004])')\n assert_equal(a, b)\n\n def test_build_err_msg_no_verbose(self):\n x = np.array([1.00001, 2.00002, 3.00003])\n y = np.array([1.00002, 2.00003, 3.00004])\n err_msg = 'There is a mismatch'\n\n a = build_err_msg([x, y], err_msg, verbose=False)\n b = '\\nItems are not equal: There is a mismatch'\n assert_equal(a, b)\n\n def test_build_err_msg_custom_names(self):\n x = np.array([1.00001, 2.00002, 3.00003])\n y = np.array([1.00002, 2.00003, 3.00004])\n err_msg = 'There is a mismatch'\n\n a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))\n b = ('\\nItems are not equal: There is a mismatch\\n FOO: array(['\n '1.00001, 2.00002, 3.00003])\\n BAR: array([1.00002, 2.00003, '\n '3.00004])')\n assert_equal(a, b)\n\n def test_build_err_msg_custom_precision(self):\n x = np.array([1.000000001, 2.00002, 3.00003])\n y = np.array([1.000000002, 2.00003, 3.00004])\n err_msg = 'There is a mismatch'\n\n a = build_err_msg([x, y], err_msg, precision=10)\n b = ('\\nItems are not equal: There is a mismatch\\n ACTUAL: array(['\n '1.000000001, 2.00002 , 3.00003 ])\\n DESIRED: array(['\n '1.000000002, 2.00003 , 3.00004 ])')\n assert_equal(a, b)\n\n\nclass TestEqual(TestArrayEqual):\n\n def setup(self):\n self._assert_func = 
assert_equal\n\n def test_nan_items(self):\n self._assert_func(np.nan, np.nan)\n self._assert_func([np.nan], [np.nan])\n self._test_not_equal(np.nan, [np.nan])\n self._test_not_equal(np.nan, 1)\n\n def test_inf_items(self):\n self._assert_func(np.inf, np.inf)\n self._assert_func([np.inf], [np.inf])\n self._test_not_equal(np.inf, [np.inf])\n\n def test_datetime(self):\n self._test_equal(\n np.datetime64(\"2017-01-01\", \"s\"),\n np.datetime64(\"2017-01-01\", \"s\")\n )\n self._test_equal(\n np.datetime64(\"2017-01-01\", \"s\"),\n np.datetime64(\"2017-01-01\", \"m\")\n )\n\n # gh-10081\n self._test_not_equal(\n np.datetime64(\"2017-01-01\", \"s\"),\n np.datetime64(\"2017-01-02\", \"s\")\n )\n self._test_not_equal(\n np.datetime64(\"2017-01-01\", \"s\"),\n np.datetime64(\"2017-01-02\", \"m\")\n )\n\n def test_nat_items(self):\n # not a datetime\n nadt_no_unit = np.datetime64(\"NaT\")\n nadt_s = np.datetime64(\"NaT\", \"s\")\n nadt_d = np.datetime64(\"NaT\", \"ns\")\n # not a timedelta\n natd_no_unit = np.timedelta64(\"NaT\")\n natd_s = np.timedelta64(\"NaT\", \"s\")\n natd_d = np.timedelta64(\"NaT\", \"ns\")\n\n dts = [nadt_no_unit, nadt_s, nadt_d]\n tds = [natd_no_unit, natd_s, natd_d]\n for a, b in itertools.product(dts, dts):\n self._assert_func(a, b)\n self._assert_func([a], [b])\n self._test_not_equal([a], b)\n\n for a, b in itertools.product(tds, tds):\n self._assert_func(a, b)\n self._assert_func([a], [b])\n self._test_not_equal([a], b)\n\n for a, b in itertools.product(tds, dts):\n self._test_not_equal(a, b)\n self._test_not_equal(a, [b])\n self._test_not_equal([a], [b])\n self._test_not_equal([a], np.datetime64(\"2017-01-01\", \"s\"))\n self._test_not_equal([b], np.datetime64(\"2017-01-01\", \"s\"))\n self._test_not_equal([a], np.timedelta64(123, \"s\"))\n self._test_not_equal([b], np.timedelta64(123, \"s\"))\n\n def test_non_numeric(self):\n self._assert_func('ab', 'ab')\n self._test_not_equal('ab', 'abb')\n\n def test_complex_item(self):\n self._assert_func(complex(1, 2), complex(1, 2))\n self._assert_func(complex(1, np.nan), complex(1, np.nan))\n self._test_not_equal(complex(1, np.nan), complex(1, 2))\n self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))\n self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))\n\n def test_negative_zero(self):\n self._test_not_equal(np.PZERO, np.NZERO)\n\n def test_complex(self):\n x = np.array([complex(1, 2), complex(1, np.nan)])\n y = np.array([complex(1, 2), complex(1, 2)])\n self._assert_func(x, x)\n self._test_not_equal(x, y)\n\n def test_error_message(self):\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(np.array([1, 2]), np.array([[1, 2]]))\n msg = str(exc_info.value)\n msg2 = msg.replace(\"shapes (2L,), (1L, 2L)\", \"shapes (2,), (1, 2)\")\n msg_reference = textwrap.dedent(\"\"\"\\\n\n Arrays are not equal\n\n (shapes (2,), (1, 2) mismatch)\n x: array([1, 2])\n y: array([[1, 2]])\"\"\")\n\n try:\n assert_equal(msg, msg_reference)\n except AssertionError:\n assert_equal(msg2, msg_reference)\n\n def test_object(self):\n #gh-12942\n import datetime\n a = np.array([datetime.datetime(2000, 1, 1),\n datetime.datetime(2000, 1, 2)])\n self._test_not_equal(a, a[::-1])\n\n\nclass TestArrayAlmostEqual(_GenericTest):\n\n def setup(self):\n self._assert_func = assert_array_almost_equal\n\n def test_closeness(self):\n # Note that in the course of time we ended up with\n # `abs(x - y) < 1.5 * 10**(-decimal)`\n # instead of the previously documented\n # `abs(x - y) < 0.5 * 10**(-decimal)`\n # so this check serves 
to preserve the wrongness.\n\n # test scalars\n self._assert_func(1.499999, 0.0, decimal=0)\n assert_raises(AssertionError,\n lambda: self._assert_func(1.5, 0.0, decimal=0))\n\n # test arrays\n self._assert_func([1.499999], [0.0], decimal=0)\n assert_raises(AssertionError,\n lambda: self._assert_func([1.5], [0.0], decimal=0))\n\n def test_simple(self):\n x = np.array([1234.2222])\n y = np.array([1234.2223])\n\n self._assert_func(x, y, decimal=3)\n self._assert_func(x, y, decimal=4)\n assert_raises(AssertionError,\n lambda: self._assert_func(x, y, decimal=5))\n\n def test_nan(self):\n anan = np.array([np.nan])\n aone = np.array([1])\n ainf = np.array([np.inf])\n self._assert_func(anan, anan)\n assert_raises(AssertionError,\n lambda: self._assert_func(anan, aone))\n assert_raises(AssertionError,\n lambda: self._assert_func(anan, ainf))\n assert_raises(AssertionError,\n lambda: self._assert_func(ainf, anan))\n\n def test_inf(self):\n a = np.array([[1., 2.], [3., 4.]])\n b = a.copy()\n a[0, 0] = np.inf\n assert_raises(AssertionError,\n lambda: self._assert_func(a, b))\n b[0, 0] = -np.inf\n assert_raises(AssertionError,\n lambda: self._assert_func(a, b))\n\n def test_subclass(self):\n a = np.array([[1., 2.], [3., 4.]])\n b = np.ma.masked_array([[1., 2.], [0., 4.]],\n [[False, False], [True, False]])\n self._assert_func(a, b)\n self._assert_func(b, a)\n self._assert_func(b, b)\n\n # Test fully masked as well (see gh-11123).\n a = np.ma.MaskedArray(3.5, mask=True)\n b = np.array([3., 4., 6.5])\n self._test_equal(a, b)\n self._test_equal(b, a)\n a = np.ma.masked\n b = np.array([3., 4., 6.5])\n self._test_equal(a, b)\n self._test_equal(b, a)\n a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])\n b = np.array([1., 2., 3.])\n self._test_equal(a, b)\n self._test_equal(b, a)\n a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])\n b = np.array(1.)\n self._test_equal(a, b)\n self._test_equal(b, a)\n\n def test_subclass_that_cannot_be_bool(self):\n # While we cannot guarantee testing functions will always work for\n # subclasses, the tests should ideally rely only on subclasses having\n # comparison operators, not on them being able to store booleans\n # (which, e.g., astropy Quantity cannot usefully do). 
See gh-8452.\n class MyArray(np.ndarray):\n def __eq__(self, other):\n return super(MyArray, self).__eq__(other).view(np.ndarray)\n\n def __lt__(self, other):\n return super(MyArray, self).__lt__(other).view(np.ndarray)\n\n def all(self, *args, **kwargs):\n raise NotImplementedError\n\n a = np.array([1., 2.]).view(MyArray)\n self._assert_func(a, a)\n\n\nclass TestAlmostEqual(_GenericTest):\n\n def setup(self):\n self._assert_func = assert_almost_equal\n\n def test_closeness(self):\n # Note that in the course of time we ended up with\n # `abs(x - y) < 1.5 * 10**(-decimal)`\n # instead of the previously documented\n # `abs(x - y) < 0.5 * 10**(-decimal)`\n # so this check serves to preserve the wrongness.\n\n # test scalars\n self._assert_func(1.499999, 0.0, decimal=0)\n assert_raises(AssertionError,\n lambda: self._assert_func(1.5, 0.0, decimal=0))\n\n # test arrays\n self._assert_func([1.499999], [0.0], decimal=0)\n assert_raises(AssertionError,\n lambda: self._assert_func([1.5], [0.0], decimal=0))\n\n def test_nan_item(self):\n self._assert_func(np.nan, np.nan)\n assert_raises(AssertionError,\n lambda: self._assert_func(np.nan, 1))\n assert_raises(AssertionError,\n lambda: self._assert_func(np.nan, np.inf))\n assert_raises(AssertionError,\n lambda: self._assert_func(np.inf, np.nan))\n\n def test_inf_item(self):\n self._assert_func(np.inf, np.inf)\n self._assert_func(-np.inf, -np.inf)\n assert_raises(AssertionError,\n lambda: self._assert_func(np.inf, 1))\n assert_raises(AssertionError,\n lambda: self._assert_func(-np.inf, np.inf))\n\n def test_simple_item(self):\n self._test_not_equal(1, 2)\n\n def test_complex_item(self):\n self._assert_func(complex(1, 2), complex(1, 2))\n self._assert_func(complex(1, np.nan), complex(1, np.nan))\n self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))\n self._test_not_equal(complex(1, np.nan), complex(1, 2))\n self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))\n self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))\n\n def test_complex(self):\n x = np.array([complex(1, 2), complex(1, np.nan)])\n z = np.array([complex(1, 2), complex(np.nan, 1)])\n y = np.array([complex(1, 2), complex(1, 2)])\n self._assert_func(x, x)\n self._test_not_equal(x, y)\n self._test_not_equal(x, z)\n\n def test_error_message(self):\n \"\"\"Check the message is formatted correctly for the decimal value.\n Also check the message when input includes inf or nan (gh12200)\"\"\"\n x = np.array([1.00000000001, 2.00000000002, 3.00003])\n y = np.array([1.00000000002, 2.00000000003, 3.00004])\n\n # Test with a different amount of decimal digits\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y, decimal=12)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.e-05')\n assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')\n assert_equal(\n msgs[6],\n ' x: array([1.00000000001, 2.00000000002, 3.00003 ])')\n assert_equal(\n msgs[7],\n ' y: array([1.00000000002, 2.00000000003, 3.00004 ])')\n\n # With the default value of decimal digits, only the 3rd element\n # differs. 
Note that we only check for the formatting of the arrays\n # themselves.\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.e-05')\n assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')\n assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')\n assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])')\n\n # Check the error message when input includes inf\n x = np.array([np.inf, 0])\n y = np.array([np.inf, 1])\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.')\n assert_equal(msgs[5], 'Max relative difference: 1.')\n assert_equal(msgs[6], ' x: array([inf, 0.])')\n assert_equal(msgs[7], ' y: array([inf, 1.])')\n\n # Check the error message when dividing by zero\n x = np.array([1, 2])\n y = np.array([0, 0])\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')\n assert_equal(msgs[4], 'Max absolute difference: 2')\n assert_equal(msgs[5], 'Max relative difference: inf')\n\n def test_error_message_2(self):\n \"\"\"Check the message is formatted correctly when either x or y is a scalar.\"\"\"\n x = 2\n y = np.ones(20)\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.')\n assert_equal(msgs[5], 'Max relative difference: 1.')\n\n y = 2\n x = np.ones(20)\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.')\n assert_equal(msgs[5], 'Max relative difference: 0.5')\n\n def test_subclass_that_cannot_be_bool(self):\n # While we cannot guarantee testing functions will always work for\n # subclasses, the tests should ideally rely only on subclasses having\n # comparison operators, not on them being able to store booleans\n # (which, e.g., astropy Quantity cannot usefully do). 
See gh-8452.\n class MyArray(np.ndarray):\n def __eq__(self, other):\n return super(MyArray, self).__eq__(other).view(np.ndarray)\n\n def __lt__(self, other):\n return super(MyArray, self).__lt__(other).view(np.ndarray)\n\n def all(self, *args, **kwargs):\n raise NotImplementedError\n\n a = np.array([1., 2.]).view(MyArray)\n self._assert_func(a, a)\n\n\nclass TestApproxEqual(object):\n\n def setup(self):\n self._assert_func = assert_approx_equal\n\n def test_simple_0d_arrays(self):\n x = np.array(1234.22)\n y = np.array(1234.23)\n\n self._assert_func(x, y, significant=5)\n self._assert_func(x, y, significant=6)\n assert_raises(AssertionError,\n lambda: self._assert_func(x, y, significant=7))\n\n def test_simple_items(self):\n x = 1234.22\n y = 1234.23\n\n self._assert_func(x, y, significant=4)\n self._assert_func(x, y, significant=5)\n self._assert_func(x, y, significant=6)\n assert_raises(AssertionError,\n lambda: self._assert_func(x, y, significant=7))\n\n def test_nan_array(self):\n anan = np.array(np.nan)\n aone = np.array(1)\n ainf = np.array(np.inf)\n self._assert_func(anan, anan)\n assert_raises(AssertionError, lambda: self._assert_func(anan, aone))\n assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))\n\n def test_nan_items(self):\n anan = np.array(np.nan)\n aone = np.array(1)\n ainf = np.array(np.inf)\n self._assert_func(anan, anan)\n assert_raises(AssertionError, lambda: self._assert_func(anan, aone))\n assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))\n\n\nclass TestArrayAssertLess(object):\n\n def setup(self):\n self._assert_func = assert_array_less\n\n def test_simple_arrays(self):\n x = np.array([1.1, 2.2])\n y = np.array([1.2, 2.3])\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y = np.array([1.0, 2.3])\n\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n def test_rank2(self):\n x = np.array([[1.1, 2.2], [3.3, 4.4]])\n y = np.array([[1.2, 2.3], [3.4, 4.5]])\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y = np.array([[1.0, 2.3], [3.4, 4.5]])\n\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n def test_rank3(self):\n x = np.ones(shape=(2, 2, 2))\n y = np.ones(shape=(2, 2, 2))+1\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y[0, 0, 0] = 0\n\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n def test_simple_items(self):\n x = 1.1\n y = 2.2\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y = np.array([2.2, 3.3])\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y = np.array([1.0, 3.3])\n\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n\n def test_nan_noncompare(self):\n anan = np.array(np.nan)\n aone = np.array(1)\n ainf = np.array(np.inf)\n self._assert_func(anan, anan)\n assert_raises(AssertionError, lambda: self._assert_func(aone, anan))\n assert_raises(AssertionError, lambda: self._assert_func(anan, aone))\n assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))\n 
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))\n\n def test_nan_noncompare_array(self):\n x = np.array([1.1, 2.2, 3.3])\n anan = np.array(np.nan)\n\n assert_raises(AssertionError, lambda: self._assert_func(x, anan))\n assert_raises(AssertionError, lambda: self._assert_func(anan, x))\n\n x = np.array([1.1, 2.2, np.nan])\n\n assert_raises(AssertionError, lambda: self._assert_func(x, anan))\n assert_raises(AssertionError, lambda: self._assert_func(anan, x))\n\n y = np.array([1.0, 2.0, np.nan])\n\n self._assert_func(y, x)\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n\n def test_inf_compare(self):\n aone = np.array(1)\n ainf = np.array(np.inf)\n\n self._assert_func(aone, ainf)\n self._assert_func(-ainf, aone)\n self._assert_func(-ainf, ainf)\n assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))\n assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))\n assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))\n\n def test_inf_compare_array(self):\n x = np.array([1.1, 2.2, np.inf])\n ainf = np.array(np.inf)\n\n assert_raises(AssertionError, lambda: self._assert_func(x, ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, x))\n assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))\n assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))\n assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))\n self._assert_func(-ainf, x)\n\n\[email protected](reason=\"The raises decorator depends on Nose\")\nclass TestRaises(object):\n\n def setup(self):\n class MyException(Exception):\n pass\n\n self.e = MyException\n\n def raises_exception(self, e):\n raise e\n\n def does_not_raise_exception(self):\n pass\n\n def test_correct_catch(self):\n raises(self.e)(self.raises_exception)(self.e) # raises?\n\n def test_wrong_exception(self):\n try:\n raises(self.e)(self.raises_exception)(RuntimeError) # raises?\n except RuntimeError:\n return\n else:\n raise AssertionError(\"should have caught RuntimeError\")\n\n def test_catch_no_raise(self):\n try:\n raises(self.e)(self.does_not_raise_exception)() # raises?\n except AssertionError:\n return\n else:\n raise AssertionError(\"should have raised an AssertionError\")\n\n\nclass TestWarns(object):\n\n def test_warn(self):\n def f():\n warnings.warn(\"yo\")\n return 3\n\n before_filters = sys.modules['warnings'].filters[:]\n assert_equal(assert_warns(UserWarning, f), 3)\n after_filters = sys.modules['warnings'].filters\n\n assert_raises(AssertionError, assert_no_warnings, f)\n assert_equal(assert_no_warnings(lambda x: x, 1), 1)\n\n # Check that the warnings state is unchanged\n assert_equal(before_filters, after_filters,\n \"assert_warns does not preserver warnings state\")\n\n def test_context_manager(self):\n\n before_filters = sys.modules['warnings'].filters[:]\n with assert_warns(UserWarning):\n warnings.warn(\"yo\")\n after_filters = sys.modules['warnings'].filters\n\n def no_warnings():\n with assert_no_warnings():\n warnings.warn(\"yo\")\n\n assert_raises(AssertionError, no_warnings)\n assert_equal(before_filters, after_filters,\n \"assert_warns does not preserver warnings state\")\n\n def test_warn_wrong_warning(self):\n def f():\n warnings.warn(\"yo\", DeprecationWarning)\n\n failed = False\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", DeprecationWarning)\n 
try:\n # Should raise a DeprecationWarning\n assert_warns(UserWarning, f)\n failed = True\n except DeprecationWarning:\n pass\n\n if failed:\n raise AssertionError(\"wrong warning caught by assert_warn\")\n\n\nclass TestAssertAllclose(object):\n\n def test_simple(self):\n x = 1e-3\n y = 1e-9\n\n assert_allclose(x, y, atol=1)\n assert_raises(AssertionError, assert_allclose, x, y)\n\n a = np.array([x, y, x, y])\n b = np.array([x, y, x, x])\n\n assert_allclose(a, b, atol=1)\n assert_raises(AssertionError, assert_allclose, a, b)\n\n b[-1] = y * (1 + 1e-8)\n assert_allclose(a, b)\n assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)\n\n assert_allclose(6, 10, rtol=0.5)\n assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)\n\n def test_min_int(self):\n a = np.array([np.iinfo(np.int_).min], dtype=np.int_)\n # Should not raise:\n assert_allclose(a, a)\n\n def test_report_fail_percentage(self):\n a = np.array([1, 1, 1, 1])\n b = np.array([1, 1, 1, 2])\n\n with pytest.raises(AssertionError) as exc_info:\n assert_allclose(a, b)\n msg = str(exc_info.value)\n assert_('Mismatched elements: 1 / 4 (25%)\\n'\n 'Max absolute difference: 1\\n'\n 'Max relative difference: 0.5' in msg)\n\n def test_equal_nan(self):\n a = np.array([np.nan])\n b = np.array([np.nan])\n # Should not raise:\n assert_allclose(a, b, equal_nan=True)\n\n def test_not_equal_nan(self):\n a = np.array([np.nan])\n b = np.array([np.nan])\n assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)\n\n def test_equal_nan_default(self):\n # Make sure equal_nan default behavior remains unchanged. (All\n # of these functions use assert_array_compare under the hood.)\n # None of these should raise.\n a = np.array([np.nan])\n b = np.array([np.nan])\n assert_array_equal(a, b)\n assert_array_almost_equal(a, b)\n assert_array_less(a, b)\n assert_allclose(a, b)\n\n def test_report_max_relative_error(self):\n a = np.array([0, 1])\n b = np.array([0, 2])\n\n with pytest.raises(AssertionError) as exc_info:\n assert_allclose(a, b)\n msg = str(exc_info.value)\n assert_('Max relative difference: 0.5' in msg)\n\n\nclass TestArrayAlmostEqualNulp(object):\n\n def test_float64_pass(self):\n # The number of units of least precision\n # In this case, use a few places above the lowest level (ie nulp=1)\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float64)\n x = 10**x\n x = np.r_[-x, x]\n\n # Addition\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n # Subtraction\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n def test_float64_fail(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float64)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n def test_float32_pass(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float32)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n def test_float32_fail(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float32)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n 
assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n def test_float16_pass(self):\n nulp = 5\n x = np.linspace(-4, 4, 10, dtype=np.float16)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n def test_float16_fail(self):\n nulp = 5\n x = np.linspace(-4, 4, 10, dtype=np.float16)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n def test_complex128_pass(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float64)\n x = 10**x\n x = np.r_[-x, x]\n xi = x + x*1j\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(xi, x + y*1j, nulp)\n assert_array_almost_equal_nulp(xi, y + x*1j, nulp)\n # The test condition needs to be at least a factor of sqrt(2) smaller\n # because the real and imaginary parts both change\n y = x + x*eps*nulp/4.\n assert_array_almost_equal_nulp(xi, y + y*1j, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(xi, x + y*1j, nulp)\n assert_array_almost_equal_nulp(xi, y + x*1j, nulp)\n y = x - x*epsneg*nulp/4.\n assert_array_almost_equal_nulp(xi, y + y*1j, nulp)\n\n def test_complex128_fail(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float64)\n x = 10**x\n x = np.r_[-x, x]\n xi = x + x*1j\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, x + y*1j, nulp)\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + x*1j, nulp)\n # The test condition needs to be at least a factor of sqrt(2) smaller\n # because the real and imaginary parts both change\n y = x + x*eps*nulp\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + y*1j, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, x + y*1j, nulp)\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + x*1j, nulp)\n y = x - x*epsneg*nulp\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + y*1j, nulp)\n\n def test_complex64_pass(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float32)\n x = 10**x\n x = np.r_[-x, x]\n xi = x + x*1j\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(xi, x + y*1j, nulp)\n assert_array_almost_equal_nulp(xi, y + x*1j, nulp)\n y = x + x*eps*nulp/4.\n assert_array_almost_equal_nulp(xi, y + y*1j, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(xi, x + y*1j, nulp)\n assert_array_almost_equal_nulp(xi, y + x*1j, nulp)\n y = x - x*epsneg*nulp/4.\n assert_array_almost_equal_nulp(xi, y + y*1j, nulp)\n\n def test_complex64_fail(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float32)\n x = 10**x\n x = np.r_[-x, x]\n xi = x + x*1j\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n 
xi, x + y*1j, nulp)\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + x*1j, nulp)\n y = x + x*eps*nulp\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + y*1j, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, x + y*1j, nulp)\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + x*1j, nulp)\n y = x - x*epsneg*nulp\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + y*1j, nulp)\n\n\nclass TestULP(object):\n\n def test_equal(self):\n x = np.random.randn(10)\n assert_array_max_ulp(x, x, maxulp=0)\n\n def test_single(self):\n # Generate 1 + small deviation, check that adding eps gives a few UNL\n x = np.ones(10).astype(np.float32)\n x += 0.01 * np.random.randn(10).astype(np.float32)\n eps = np.finfo(np.float32).eps\n assert_array_max_ulp(x, x+eps, maxulp=20)\n\n def test_double(self):\n # Generate 1 + small deviation, check that adding eps gives a few UNL\n x = np.ones(10).astype(np.float64)\n x += 0.01 * np.random.randn(10).astype(np.float64)\n eps = np.finfo(np.float64).eps\n assert_array_max_ulp(x, x+eps, maxulp=200)\n\n def test_inf(self):\n for dt in [np.float32, np.float64]:\n inf = np.array([np.inf]).astype(dt)\n big = np.array([np.finfo(dt).max])\n assert_array_max_ulp(inf, big, maxulp=200)\n\n def test_nan(self):\n # Test that nan is 'far' from small, tiny, inf, max and min\n for dt in [np.float32, np.float64]:\n if dt == np.float32:\n maxulp = 1e6\n else:\n maxulp = 1e12\n inf = np.array([np.inf]).astype(dt)\n nan = np.array([np.nan]).astype(dt)\n big = np.array([np.finfo(dt).max])\n tiny = np.array([np.finfo(dt).tiny])\n zero = np.array([np.PZERO]).astype(dt)\n nzero = np.array([np.NZERO]).astype(dt)\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, inf,\n maxulp=maxulp))\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, big,\n maxulp=maxulp))\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, tiny,\n maxulp=maxulp))\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, zero,\n maxulp=maxulp))\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, nzero,\n maxulp=maxulp))\n\n\nclass TestStringEqual(object):\n def test_simple(self):\n assert_string_equal(\"hello\", \"hello\")\n assert_string_equal(\"hello\\nmultiline\", \"hello\\nmultiline\")\n\n with pytest.raises(AssertionError) as exc_info:\n assert_string_equal(\"foo\\nbar\", \"hello\\nbar\")\n msg = str(exc_info.value)\n assert_equal(msg, \"Differences in strings:\\n- foo\\n+ hello\")\n\n assert_raises(AssertionError,\n lambda: assert_string_equal(\"foo\", \"hello\"))\n\n def test_regex(self):\n assert_string_equal(\"a+*b\", \"a+*b\")\n\n assert_raises(AssertionError,\n lambda: assert_string_equal(\"aaa\", \"a+b\"))\n\n\ndef assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):\n try:\n mod_warns = mod.__warningregistry__\n except AttributeError:\n # the lack of a __warningregistry__\n # attribute means that no warning has\n # occurred; this can be triggered in\n # a parallel test scenario, while in\n # a serial test scenario an initial\n # warning (and therefore the attribute)\n # are always created first\n mod_warns = {}\n\n num_warns = len(mod_warns)\n # Python 3.4 appears to clear any pre-existing warnings of the same type,\n # when raising warnings inside a catch_warnings block. 
So, there is a\n # warning generated by the tests within the context manager, but no\n # previous warnings.\n if 'version' in mod_warns:\n # Python 3 adds a 'version' entry to the registry,\n # do not count it.\n num_warns -= 1\n\n # Behavior of warnings is Python version dependent. Adjust the\n # expected result to compensate. In particular, Python 3.7 does\n # not make an entry for ignored warnings.\n if sys.version_info[:2] >= (3, 7):\n if py37 is not None:\n n_in_context = py37\n elif sys.version_info[:2] >= (3, 4):\n if py34 is not None:\n n_in_context = py34\n assert_equal(num_warns, n_in_context)\n\ndef test_warn_len_equal_call_scenarios():\n # assert_warn_len_equal is called under\n # varying circumstances depending on serial\n # vs. parallel test scenarios; this test\n # simply aims to probe both code paths and\n # check that no assertion is uncaught\n\n # parallel scenario -- no warning issued yet\n class mod(object):\n pass\n\n mod_inst = mod()\n\n assert_warn_len_equal(mod=mod_inst,\n n_in_context=0)\n\n # serial test scenario -- the __warningregistry__\n # attribute should be present\n class mod(object):\n def __init__(self):\n self.__warningregistry__ = {'warning1':1,\n 'warning2':2}\n\n mod_inst = mod()\n assert_warn_len_equal(mod=mod_inst,\n n_in_context=2)\n\n\ndef _get_fresh_mod():\n # Get this module, with warning registry empty\n my_mod = sys.modules[__name__]\n try:\n my_mod.__warningregistry__.clear()\n except AttributeError:\n # will not have a __warningregistry__ unless warning has been\n # raised in the module at some point\n pass\n return my_mod\n\n\ndef test_clear_and_catch_warnings():\n # Initial state of module, no warnings\n my_mod = _get_fresh_mod()\n assert_equal(getattr(my_mod, '__warningregistry__', {}), {})\n with clear_and_catch_warnings(modules=[my_mod]):\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_equal(my_mod.__warningregistry__, {})\n # Without specified modules, don't clear warnings during context\n # Python 3.7 catch_warnings doesn't make an entry for 'ignore'.\n with clear_and_catch_warnings():\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 1, py37=0)\n # Confirm that specifying module keeps old warning, does not add new\n with clear_and_catch_warnings(modules=[my_mod]):\n warnings.simplefilter('ignore')\n warnings.warn('Another warning')\n assert_warn_len_equal(my_mod, 1, py37=0)\n # Another warning, no module spec does add to warnings dict, except on\n # Python 3.4 (see comments in `assert_warn_len_equal`)\n # Python 3.7 catch_warnings doesn't make an entry for 'ignore'.\n with clear_and_catch_warnings():\n warnings.simplefilter('ignore')\n warnings.warn('Another warning')\n assert_warn_len_equal(my_mod, 2, py34=1, py37=0)\n\n\ndef test_suppress_warnings_module():\n # Initial state of module, no warnings\n my_mod = _get_fresh_mod()\n assert_equal(getattr(my_mod, '__warningregistry__', {}), {})\n\n def warn_other_module():\n # Apply along axis is implemented in python; stacklevel=2 means\n # we end up inside its module, not ours.\n def warn(arr):\n warnings.warn(\"Some warning 2\", stacklevel=2)\n return arr\n np.apply_along_axis(warn, 0, [0])\n\n # Test module based warning suppression:\n assert_warn_len_equal(my_mod, 0)\n with suppress_warnings() as sup:\n sup.record(UserWarning)\n # suppress warning from other module (may have .pyc ending),\n # if apply_along_axis is moved, had to be changed.\n sup.filter(module=np.lib.shape_base)\n warnings.warn(\"Some 
warning\")\n warn_other_module()\n # Check that the suppression did test the file correctly (this module\n # got filtered)\n assert_equal(len(sup.log), 1)\n assert_equal(sup.log[0].message.args[0], \"Some warning\")\n assert_warn_len_equal(my_mod, 0, py37=0)\n sup = suppress_warnings()\n # Will have to be changed if apply_along_axis is moved:\n sup.filter(module=my_mod)\n with sup:\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n # And test repeat works:\n sup.filter(module=my_mod)\n with sup:\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n\n # Without specified modules, don't clear warnings during context\n # Python 3.7 does not add ignored warnings.\n with suppress_warnings():\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 1, py37=0)\n\ndef test_suppress_warnings_type():\n # Initial state of module, no warnings\n my_mod = _get_fresh_mod()\n assert_equal(getattr(my_mod, '__warningregistry__', {}), {})\n\n # Test module based warning suppression:\n with suppress_warnings() as sup:\n sup.filter(UserWarning)\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n sup = suppress_warnings()\n sup.filter(UserWarning)\n with sup:\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n # And test repeat works:\n sup.filter(module=my_mod)\n with sup:\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n\n # Without specified modules, don't clear warnings during context\n # Python 3.7 does not add ignored warnings.\n with suppress_warnings():\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 1, py37=0)\n\n\ndef test_suppress_warnings_decorate_no_record():\n sup = suppress_warnings()\n sup.filter(UserWarning)\n\n @sup\n def warn(category):\n warnings.warn('Some warning', category)\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n warn(UserWarning) # should be supppressed\n warn(RuntimeWarning)\n assert_equal(len(w), 1)\n\n\ndef test_suppress_warnings_record():\n sup = suppress_warnings()\n log1 = sup.record()\n\n with sup:\n log2 = sup.record(message='Some other warning 2')\n sup.filter(message='Some warning')\n warnings.warn('Some warning')\n warnings.warn('Some other warning')\n warnings.warn('Some other warning 2')\n\n assert_equal(len(sup.log), 2)\n assert_equal(len(log1), 1)\n assert_equal(len(log2),1)\n assert_equal(log2[0].message.args[0], 'Some other warning 2')\n\n # Do it again, with the same context to see if some warnings survived:\n with sup:\n log2 = sup.record(message='Some other warning 2')\n sup.filter(message='Some warning')\n warnings.warn('Some warning')\n warnings.warn('Some other warning')\n warnings.warn('Some other warning 2')\n\n assert_equal(len(sup.log), 2)\n assert_equal(len(log1), 1)\n assert_equal(len(log2), 1)\n assert_equal(log2[0].message.args[0], 'Some other warning 2')\n\n # Test nested:\n with suppress_warnings() as sup:\n sup.record()\n with suppress_warnings() as sup2:\n sup2.record(message='Some warning')\n warnings.warn('Some warning')\n warnings.warn('Some other warning')\n assert_equal(len(sup2.log), 1)\n assert_equal(len(sup.log), 1)\n\n\ndef test_suppress_warnings_forwarding():\n def warn_other_module():\n # Apply along axis is implemented in python; stacklevel=2 means\n # we end up inside its module, not ours.\n def warn(arr):\n warnings.warn(\"Some warning\", stacklevel=2)\n return arr\n np.apply_along_axis(warn, 0, [0])\n\n with 
suppress_warnings() as sup:\n sup.record()\n with suppress_warnings(\"always\"):\n for i in range(2):\n warnings.warn(\"Some warning\")\n\n assert_equal(len(sup.log), 2)\n\n with suppress_warnings() as sup:\n sup.record()\n with suppress_warnings(\"location\"):\n for i in range(2):\n warnings.warn(\"Some warning\")\n warnings.warn(\"Some warning\")\n\n assert_equal(len(sup.log), 2)\n\n with suppress_warnings() as sup:\n sup.record()\n with suppress_warnings(\"module\"):\n for i in range(2):\n warnings.warn(\"Some warning\")\n warnings.warn(\"Some warning\")\n warn_other_module()\n\n assert_equal(len(sup.log), 2)\n\n with suppress_warnings() as sup:\n sup.record()\n with suppress_warnings(\"once\"):\n for i in range(2):\n warnings.warn(\"Some warning\")\n warnings.warn(\"Some other warning\")\n warn_other_module()\n\n assert_equal(len(sup.log), 2)\n\n\ndef test_tempdir():\n with tempdir() as tdir:\n fpath = os.path.join(tdir, 'tmp')\n with open(fpath, 'w'):\n pass\n assert_(not os.path.isdir(tdir))\n\n raised = False\n try:\n with tempdir() as tdir:\n raise ValueError()\n except ValueError:\n raised = True\n assert_(raised)\n assert_(not os.path.isdir(tdir))\n\n\ndef test_temppath():\n with temppath() as fpath:\n with open(fpath, 'w'):\n pass\n assert_(not os.path.isfile(fpath))\n\n raised = False\n try:\n with temppath() as fpath:\n raise ValueError()\n except ValueError:\n raised = True\n assert_(raised)\n assert_(not os.path.isfile(fpath))\n\n\nclass my_cacw(clear_and_catch_warnings):\n\n class_modules = (sys.modules[__name__],)\n\n\ndef test_clear_and_catch_warnings_inherit():\n # Test can subclass and add default modules\n my_mod = _get_fresh_mod()\n with my_cacw():\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_equal(my_mod.__warningregistry__, {})\n\n\[email protected](not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\nclass TestAssertNoGcCycles(object):\n \"\"\" Test assert_no_gc_cycles \"\"\"\n def test_passes(self):\n def no_cycle():\n b = []\n b.append([])\n return b\n\n with assert_no_gc_cycles():\n no_cycle()\n\n assert_no_gc_cycles(no_cycle)\n\n def test_asserts(self):\n def make_cycle():\n a = []\n a.append(a)\n a.append(a)\n return a\n\n with assert_raises(AssertionError):\n with assert_no_gc_cycles():\n make_cycle()\n\n with assert_raises(AssertionError):\n assert_no_gc_cycles(make_cycle)\n\n @pytest.mark.slow\n def test_fails(self):\n \"\"\"\n Test that in cases where the garbage cannot be collected, we raise an\n error, instead of hanging forever trying to clear it.\n \"\"\"\n\n class ReferenceCycleInDel(object):\n \"\"\"\n An object that not only contains a reference cycle, but creates new\n cycles whenever it's garbage-collected and its __del__ runs\n \"\"\"\n make_cycle = True\n\n def __init__(self):\n self.cycle = self\n\n def __del__(self):\n # break the current cycle so that `self` can be freed\n self.cycle = None\n\n if ReferenceCycleInDel.make_cycle:\n # but create a new one so that the garbage collector has more\n # work to do.\n ReferenceCycleInDel()\n\n try:\n w = weakref.ref(ReferenceCycleInDel())\n try:\n with assert_raises(RuntimeError):\n # this will be unable to get a baseline empty garbage\n assert_no_gc_cycles(lambda: None)\n except AssertionError:\n # the above test is only necessary if the GC actually tried to free\n # our object anyway, which python 2.7 does not.\n if w() is not None:\n pytest.skip(\"GC does not call __del__ on cyclic objects\")\n raise\n\n finally:\n # make sure that we stop creating reference cycles\n 
ReferenceCycleInDel.make_cycle = False\n", "\"\"\"\nThis module contains a set of functions for vectorized string\noperations and methods.\n\n.. note::\n The `chararray` class exists for backwards compatibility with\n Numarray, it is not recommended for new development. Starting from numpy\n 1.4, if one needs arrays of strings, it is recommended to use arrays of\n `dtype` `object_`, `string_` or `unicode_`, and use the free functions\n in the `numpy.char` module for fast vectorized string operations.\n\nSome methods will only be available if the corresponding string method is\navailable in your version of Python.\n\nThe preferred alias for `defchararray` is `numpy.char`.\n\n\"\"\"\nimport functools\nimport sys\nfrom .numerictypes import string_, unicode_, integer, object_, bool_, character\nfrom .numeric import ndarray, compare_chararrays\nfrom .numeric import array as narray\nfrom numpy.core.multiarray import _vec_string\nfrom numpy.core.overrides import set_module\nfrom numpy.core import overrides\nfrom numpy.compat import asbytes, long\nimport numpy\n\n__all__ = [\n 'equal', 'not_equal', 'greater_equal', 'less_equal',\n 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',\n 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',\n 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',\n 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',\n 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',\n 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',\n 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',\n 'array', 'asarray'\n ]\n\n\n_globalvar = 0\nif sys.version_info[0] >= 3:\n _unicode = str\n _bytes = bytes\nelse:\n _unicode = unicode\n _bytes = str\n_len = len\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy.char')\n\n\ndef _use_unicode(*args):\n \"\"\"\n Helper function for determining the output type of some string\n operations.\n\n For an operation on two ndarrays, if at least one is unicode, the\n result should be unicode.\n \"\"\"\n for x in args:\n if (isinstance(x, _unicode) or\n issubclass(numpy.asarray(x).dtype.type, unicode_)):\n return unicode_\n return string_\n\ndef _to_string_or_unicode_array(result):\n \"\"\"\n Helper function to cast a result back into a string or unicode array\n if an object array must be used as an intermediary.\n \"\"\"\n return numpy.asarray(result.tolist())\n\ndef _clean_args(*args):\n \"\"\"\n Helper function for delegating arguments to Python string\n functions.\n\n Many of the Python string operations that have optional arguments\n do not use 'None' to indicate a default value. In these cases,\n we need to remove all None arguments, and those following them.\n \"\"\"\n newargs = []\n for chk in args:\n if chk is None:\n break\n newargs.append(chk)\n return newargs\n\ndef _get_num_chars(a):\n \"\"\"\n Helper function that returns the number of characters per field in\n a string or unicode array. This is to abstract out the fact that\n for a unicode array this is itemsize / 4.\n \"\"\"\n if issubclass(a.dtype.type, unicode_):\n return a.itemsize // 4\n return a.itemsize\n\n\ndef _binary_op_dispatcher(x1, x2):\n return (x1, x2)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef equal(x1, x2):\n \"\"\"\n Return (x1 == x2) element-wise.\n\n Unlike `numpy.equal`, this comparison is performed by first\n stripping whitespace characters from the end of the string. 
This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray or bool\n Output array of bools, or a single bool if x1 and x2 are scalars.\n\n See Also\n --------\n not_equal, greater_equal, less_equal, greater, less\n \"\"\"\n return compare_chararrays(x1, x2, '==', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef not_equal(x1, x2):\n \"\"\"\n Return (x1 != x2) element-wise.\n\n Unlike `numpy.not_equal`, this comparison is performed by first\n stripping whitespace characters from the end of the string. This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray or bool\n Output array of bools, or a single bool if x1 and x2 are scalars.\n\n See Also\n --------\n equal, greater_equal, less_equal, greater, less\n \"\"\"\n return compare_chararrays(x1, x2, '!=', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef greater_equal(x1, x2):\n \"\"\"\n Return (x1 >= x2) element-wise.\n\n Unlike `numpy.greater_equal`, this comparison is performed by\n first stripping whitespace characters from the end of the string.\n This behavior is provided for backward-compatibility with\n numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray or bool\n Output array of bools, or a single bool if x1 and x2 are scalars.\n\n See Also\n --------\n equal, not_equal, less_equal, greater, less\n \"\"\"\n return compare_chararrays(x1, x2, '>=', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef less_equal(x1, x2):\n \"\"\"\n Return (x1 <= x2) element-wise.\n\n Unlike `numpy.less_equal`, this comparison is performed by first\n stripping whitespace characters from the end of the string. This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray or bool\n Output array of bools, or a single bool if x1 and x2 are scalars.\n\n See Also\n --------\n equal, not_equal, greater_equal, greater, less\n \"\"\"\n return compare_chararrays(x1, x2, '<=', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef greater(x1, x2):\n \"\"\"\n Return (x1 > x2) element-wise.\n\n Unlike `numpy.greater`, this comparison is performed by first\n stripping whitespace characters from the end of the string. This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray or bool\n Output array of bools, or a single bool if x1 and x2 are scalars.\n\n See Also\n --------\n equal, not_equal, greater_equal, less_equal, less\n \"\"\"\n return compare_chararrays(x1, x2, '>', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef less(x1, x2):\n \"\"\"\n Return (x1 < x2) element-wise.\n\n Unlike `numpy.greater`, this comparison is performed by first\n stripping whitespace characters from the end of the string. 
This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray or bool\n Output array of bools, or a single bool if x1 and x2 are scalars.\n\n See Also\n --------\n equal, not_equal, greater_equal, less_equal, greater\n \"\"\"\n return compare_chararrays(x1, x2, '<', True)\n\n\ndef _unary_op_dispatcher(a):\n return (a,)\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef str_len(a):\n \"\"\"\n Return len(a) element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n Returns\n -------\n out : ndarray\n Output array of integers\n\n See also\n --------\n __builtin__.len\n \"\"\"\n return _vec_string(a, integer, '__len__')\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef add(x1, x2):\n \"\"\"\n Return element-wise string concatenation for two arrays of str or unicode.\n\n Arrays `x1` and `x2` must have the same shape.\n\n Parameters\n ----------\n x1 : array_like of str or unicode\n Input array.\n x2 : array_like of str or unicode\n Input array.\n\n Returns\n -------\n add : ndarray\n Output array of `string_` or `unicode_`, depending on input types\n of the same shape as `x1` and `x2`.\n\n \"\"\"\n arr1 = numpy.asarray(x1)\n arr2 = numpy.asarray(x2)\n out_size = _get_num_chars(arr1) + _get_num_chars(arr2)\n dtype = _use_unicode(arr1, arr2)\n return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))\n\n\ndef _multiply_dispatcher(a, i):\n return (a,)\n\n\n@array_function_dispatch(_multiply_dispatcher)\ndef multiply(a, i):\n \"\"\"\n Return (a * i), that is string multiple concatenation,\n element-wise.\n\n Values in `i` of less than 0 are treated as 0 (which yields an\n empty string).\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n i : array_like of ints\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input types\n\n \"\"\"\n a_arr = numpy.asarray(a)\n i_arr = numpy.asarray(i)\n if not issubclass(i_arr.dtype.type, integer):\n raise ValueError(\"Can only multiply by integers\")\n out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0)\n return _vec_string(\n a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))\n\n\ndef _mod_dispatcher(a, values):\n return (a, values)\n\n\n@array_function_dispatch(_mod_dispatcher)\ndef mod(a, values):\n \"\"\"\n Return (a % i), that is pre-Python 2.6 string formatting\n (interpolation), element-wise for a pair of array_likes of str\n or unicode.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n values : array_like of values\n These values will be element-wise interpolated into the string.\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input types\n\n See also\n --------\n str.__mod__\n\n \"\"\"\n return _to_string_or_unicode_array(\n _vec_string(a, object_, '__mod__', (values,)))\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef capitalize(a):\n \"\"\"\n Return a copy of `a` with only the first character of each element\n capitalized.\n\n Calls `str.capitalize` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like of str or unicode\n Input array of strings to capitalize.\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input\n types\n\n See also\n --------\n str.capitalize\n\n Examples\n --------\n >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c\n 
array(['a1b2', '1b2a', 'b2a1', '2a1b'],\n dtype='|S4')\n >>> np.char.capitalize(c)\n array(['A1b2', '1b2a', 'B2a1', '2a1b'],\n dtype='|S4')\n\n \"\"\"\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'capitalize')\n\n\ndef _center_dispatcher(a, width, fillchar=None):\n return (a,)\n\n\n@array_function_dispatch(_center_dispatcher)\ndef center(a, width, fillchar=' '):\n \"\"\"\n Return a copy of `a` with its elements centered in a string of\n length `width`.\n\n Calls `str.center` element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n width : int\n The length of the resulting strings\n fillchar : str or unicode, optional\n The padding character to use (default is space).\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input\n types\n\n See also\n --------\n str.center\n\n \"\"\"\n a_arr = numpy.asarray(a)\n width_arr = numpy.asarray(width)\n size = long(numpy.max(width_arr.flat))\n if numpy.issubdtype(a_arr.dtype, numpy.string_):\n fillchar = asbytes(fillchar)\n return _vec_string(\n a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))\n\n\ndef _count_dispatcher(a, sub, start=None, end=None):\n return (a,)\n\n\n@array_function_dispatch(_count_dispatcher)\ndef count(a, sub, start=0, end=None):\n \"\"\"\n Returns an array with the number of non-overlapping occurrences of\n substring `sub` in the range [`start`, `end`].\n\n Calls `str.count` element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n sub : str or unicode\n The substring to search for.\n\n start, end : int, optional\n Optional arguments `start` and `end` are interpreted as slice\n notation to specify the range in which to count.\n\n Returns\n -------\n out : ndarray\n Output array of ints.\n\n See also\n --------\n str.count\n\n Examples\n --------\n >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> c\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n >>> np.char.count(c, 'A')\n array([3, 1, 1])\n >>> np.char.count(c, 'aA')\n array([3, 1, 0])\n >>> np.char.count(c, 'A', start=1, end=4)\n array([2, 1, 1])\n >>> np.char.count(c, 'A', start=1, end=3)\n array([1, 0, 0])\n\n \"\"\"\n return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end))\n\n\ndef _code_dispatcher(a, encoding=None, errors=None):\n return (a,)\n\n\n@array_function_dispatch(_code_dispatcher)\ndef decode(a, encoding=None, errors=None):\n \"\"\"\n Calls `str.decode` element-wise.\n\n The set of available codecs comes from the Python standard library,\n and may be extended at runtime. 
For more information, see the\n :mod:`codecs` module.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n encoding : str, optional\n The name of an encoding\n\n errors : str, optional\n Specifies how to handle encoding errors\n\n Returns\n -------\n out : ndarray\n\n See also\n --------\n str.decode\n\n Notes\n -----\n The type of the result will depend on the encoding specified.\n\n Examples\n --------\n >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> c\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n >>> np.char.encode(c, encoding='cp037')\n array(['\\\\x81\\\\xc1\\\\x81\\\\xc1\\\\x81\\\\xc1', '@@\\\\x81\\\\xc1@@',\n '\\\\x81\\\\x82\\\\xc2\\\\xc1\\\\xc2\\\\x82\\\\x81'],\n dtype='|S7')\n\n \"\"\"\n return _to_string_or_unicode_array(\n _vec_string(a, object_, 'decode', _clean_args(encoding, errors)))\n\n\n@array_function_dispatch(_code_dispatcher)\ndef encode(a, encoding=None, errors=None):\n \"\"\"\n Calls `str.encode` element-wise.\n\n The set of available codecs comes from the Python standard library,\n and may be extended at runtime. For more information, see the codecs\n module.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n encoding : str, optional\n The name of an encoding\n\n errors : str, optional\n Specifies how to handle encoding errors\n\n Returns\n -------\n out : ndarray\n\n See also\n --------\n str.encode\n\n Notes\n -----\n The type of the result will depend on the encoding specified.\n\n \"\"\"\n return _to_string_or_unicode_array(\n _vec_string(a, object_, 'encode', _clean_args(encoding, errors)))\n\n\ndef _endswith_dispatcher(a, suffix, start=None, end=None):\n return (a,)\n\n\n@array_function_dispatch(_endswith_dispatcher)\ndef endswith(a, suffix, start=0, end=None):\n \"\"\"\n Returns a boolean array which is `True` where the string element\n in `a` ends with `suffix`, otherwise `False`.\n\n Calls `str.endswith` element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n suffix : str\n\n start, end : int, optional\n With optional `start`, test beginning at that position. With\n optional `end`, stop comparing at that position.\n\n Returns\n -------\n out : ndarray\n Outputs an array of bools.\n\n See also\n --------\n str.endswith\n\n Examples\n --------\n >>> s = np.array(['foo', 'bar'])\n >>> s[0] = 'foo'\n >>> s[1] = 'bar'\n >>> s\n array(['foo', 'bar'], dtype='<U3')\n >>> np.char.endswith(s, 'ar')\n array([False, True])\n >>> np.char.endswith(s, 'a', start=1, end=2)\n array([False, True])\n\n \"\"\"\n return _vec_string(\n a, bool_, 'endswith', [suffix, start] + _clean_args(end))\n\n\ndef _expandtabs_dispatcher(a, tabsize=None):\n return (a,)\n\n\n@array_function_dispatch(_expandtabs_dispatcher)\ndef expandtabs(a, tabsize=8):\n \"\"\"\n Return a copy of each string element where all tab characters are\n replaced by one or more spaces.\n\n Calls `str.expandtabs` element-wise.\n\n Return a copy of each string element where all tab characters are\n replaced by one or more spaces, depending on the current column\n and the given `tabsize`. The column number is reset to zero after\n each newline occurring in the string. This doesn't understand other\n non-printing characters or escape sequences.\n\n Parameters\n ----------\n a : array_like of str or unicode\n Input array\n tabsize : int, optional\n Replace tabs with `tabsize` number of spaces. 
If not given defaults\n to 8 spaces.\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.expandtabs\n\n \"\"\"\n return _to_string_or_unicode_array(\n _vec_string(a, object_, 'expandtabs', (tabsize,)))\n\n\n@array_function_dispatch(_count_dispatcher)\ndef find(a, sub, start=0, end=None):\n \"\"\"\n For each element, return the lowest index in the string where\n substring `sub` is found.\n\n Calls `str.find` element-wise.\n\n For each element, return the lowest index in the string where\n substring `sub` is found, such that `sub` is contained in the\n range [`start`, `end`].\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n sub : str or unicode\n\n start, end : int, optional\n Optional arguments `start` and `end` are interpreted as in\n slice notation.\n\n Returns\n -------\n out : ndarray or int\n Output array of ints. Returns -1 if `sub` is not found.\n\n See also\n --------\n str.find\n\n \"\"\"\n return _vec_string(\n a, integer, 'find', [sub, start] + _clean_args(end))\n\n\n@array_function_dispatch(_count_dispatcher)\ndef index(a, sub, start=0, end=None):\n \"\"\"\n Like `find`, but raises `ValueError` when the substring is not found.\n\n Calls `str.index` element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n sub : str or unicode\n\n start, end : int, optional\n\n Returns\n -------\n out : ndarray\n Output array of ints. Returns -1 if `sub` is not found.\n\n See also\n --------\n find, str.find\n\n \"\"\"\n return _vec_string(\n a, integer, 'index', [sub, start] + _clean_args(end))\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef isalnum(a):\n \"\"\"\n Returns true for each element if all characters in the string are\n alphanumeric and there is at least one character, false otherwise.\n\n Calls `str.isalnum` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.isalnum\n \"\"\"\n return _vec_string(a, bool_, 'isalnum')\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef isalpha(a):\n \"\"\"\n Returns true for each element if all characters in the string are\n alphabetic and there is at least one character, false otherwise.\n\n Calls `str.isalpha` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n Returns\n -------\n out : ndarray\n Output array of bools\n\n See also\n --------\n str.isalpha\n \"\"\"\n return _vec_string(a, bool_, 'isalpha')\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef isdigit(a):\n \"\"\"\n Returns true for each element if all characters in the string are\n digits and there is at least one character, false otherwise.\n\n Calls `str.isdigit` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n Returns\n -------\n out : ndarray\n Output array of bools\n\n See also\n --------\n str.isdigit\n \"\"\"\n return _vec_string(a, bool_, 'isdigit')\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef islower(a):\n \"\"\"\n Returns true for each element if all cased characters in the\n string are lowercase and there is at least one cased character,\n false otherwise.\n\n Calls `str.islower` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n 
Parameters\n ----------\n a : array_like of str or unicode\n\n Returns\n -------\n out : ndarray\n Output array of bools\n\n See also\n --------\n str.islower\n \"\"\"\n return _vec_string(a, bool_, 'islower')\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef isspace(a):\n \"\"\"\n Returns true for each element if there are only whitespace\n characters in the string and there is at least one character,\n false otherwise.\n\n Calls `str.isspace` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n Returns\n -------\n out : ndarray\n Output array of bools\n\n See also\n --------\n str.isspace\n \"\"\"\n return _vec_string(a, bool_, 'isspace')\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef istitle(a):\n \"\"\"\n Returns true for each element if the element is a titlecased\n string and there is at least one character, false otherwise.\n\n Call `str.istitle` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n Returns\n -------\n out : ndarray\n Output array of bools\n\n See also\n --------\n str.istitle\n \"\"\"\n return _vec_string(a, bool_, 'istitle')\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef isupper(a):\n \"\"\"\n Returns true for each element if all cased characters in the\n string are uppercase and there is at least one character, false\n otherwise.\n\n Call `str.isupper` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n Returns\n -------\n out : ndarray\n Output array of bools\n\n See also\n --------\n str.isupper\n \"\"\"\n return _vec_string(a, bool_, 'isupper')\n\n\ndef _join_dispatcher(sep, seq):\n return (sep, seq)\n\n\n@array_function_dispatch(_join_dispatcher)\ndef join(sep, seq):\n \"\"\"\n Return a string which is the concatenation of the strings in the\n sequence `seq`.\n\n Calls `str.join` element-wise.\n\n Parameters\n ----------\n sep : array_like of str or unicode\n seq : array_like of str or unicode\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input types\n\n See also\n --------\n str.join\n \"\"\"\n return _to_string_or_unicode_array(\n _vec_string(sep, object_, 'join', (seq,)))\n\n\n\ndef _just_dispatcher(a, width, fillchar=None):\n return (a,)\n\n\n@array_function_dispatch(_just_dispatcher)\ndef ljust(a, width, fillchar=' '):\n \"\"\"\n Return an array with the elements of `a` left-justified in a\n string of length `width`.\n\n Calls `str.ljust` element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n width : int\n The length of the resulting strings\n fillchar : str or unicode, optional\n The character to use for padding\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.ljust\n\n \"\"\"\n a_arr = numpy.asarray(a)\n width_arr = numpy.asarray(width)\n size = long(numpy.max(width_arr.flat))\n if numpy.issubdtype(a_arr.dtype, numpy.string_):\n fillchar = asbytes(fillchar)\n return _vec_string(\n a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef lower(a):\n \"\"\"\n Return an array with the elements converted to lowercase.\n\n Call `str.lower` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like, {str, unicode}\n Input 
array.\n\n Returns\n -------\n out : ndarray, {str, unicode}\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.lower\n\n Examples\n --------\n >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c\n array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')\n >>> np.char.lower(c)\n array(['a1b c', '1bca', 'bca1'], dtype='<U5')\n\n \"\"\"\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'lower')\n\n\ndef _strip_dispatcher(a, chars=None):\n return (a,)\n\n\n@array_function_dispatch(_strip_dispatcher)\ndef lstrip(a, chars=None):\n \"\"\"\n For each element in `a`, return a copy with the leading characters\n removed.\n\n Calls `str.lstrip` element-wise.\n\n Parameters\n ----------\n a : array-like, {str, unicode}\n Input array.\n\n chars : {str, unicode}, optional\n The `chars` argument is a string specifying the set of\n characters to be removed. If omitted or None, the `chars`\n argument defaults to removing whitespace. The `chars` argument\n is not a prefix; rather, all combinations of its values are\n stripped.\n\n Returns\n -------\n out : ndarray, {str, unicode}\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.lstrip\n\n Examples\n --------\n >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> c\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n\n The 'a' variable is unstripped from c[1] because whitespace leading.\n\n >>> np.char.lstrip(c, 'a')\n array(['AaAaA', ' aA ', 'bBABba'], dtype='<U7')\n\n\n >>> np.char.lstrip(c, 'A') # leaves c unchanged\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()\n ... # XXX: is this a regression? This used to return True\n ... # np.char.lstrip(c,'') does not modify c at all.\n False\n >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()\n True\n\n \"\"\"\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))\n\n\ndef _partition_dispatcher(a, sep):\n return (a,)\n\n\n@array_function_dispatch(_partition_dispatcher)\ndef partition(a, sep):\n \"\"\"\n Partition each element in `a` around `sep`.\n\n Calls `str.partition` element-wise.\n\n For each element in `a`, split the element as the first\n occurrence of `sep`, and return 3 strings containing the part\n before the separator, the separator itself, and the part after\n the separator. 
If the separator is not found, return 3 strings\n containing the string itself, followed by two empty strings.\n\n Parameters\n ----------\n a : array_like, {str, unicode}\n Input array\n sep : {str, unicode}\n Separator to split each string element in `a`.\n\n Returns\n -------\n out : ndarray, {str, unicode}\n Output array of str or unicode, depending on input type.\n The output array will have an extra dimension with 3\n elements per input element.\n\n See also\n --------\n str.partition\n\n \"\"\"\n return _to_string_or_unicode_array(\n _vec_string(a, object_, 'partition', (sep,)))\n\n\ndef _replace_dispatcher(a, old, new, count=None):\n return (a,)\n\n\n@array_function_dispatch(_replace_dispatcher)\ndef replace(a, old, new, count=None):\n \"\"\"\n For each element in `a`, return a copy of the string with all\n occurrences of substring `old` replaced by `new`.\n\n Calls `str.replace` element-wise.\n\n Parameters\n ----------\n a : array-like of str or unicode\n\n old, new : str or unicode\n\n count : int, optional\n If the optional argument `count` is given, only the first\n `count` occurrences are replaced.\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.replace\n\n \"\"\"\n return _to_string_or_unicode_array(\n _vec_string(\n a, object_, 'replace', [old, new] + _clean_args(count)))\n\n\n@array_function_dispatch(_count_dispatcher)\ndef rfind(a, sub, start=0, end=None):\n \"\"\"\n For each element in `a`, return the highest index in the string\n where substring `sub` is found, such that `sub` is contained\n within [`start`, `end`].\n\n Calls `str.rfind` element-wise.\n\n Parameters\n ----------\n a : array-like of str or unicode\n\n sub : str or unicode\n\n start, end : int, optional\n Optional arguments `start` and `end` are interpreted as in\n slice notation.\n\n Returns\n -------\n out : ndarray\n Output array of ints. 
Return -1 on failure.\n\n See also\n --------\n str.rfind\n\n \"\"\"\n return _vec_string(\n a, integer, 'rfind', [sub, start] + _clean_args(end))\n\n\n@array_function_dispatch(_count_dispatcher)\ndef rindex(a, sub, start=0, end=None):\n \"\"\"\n Like `rfind`, but raises `ValueError` when the substring `sub` is\n not found.\n\n Calls `str.rindex` element-wise.\n\n Parameters\n ----------\n a : array-like of str or unicode\n\n sub : str or unicode\n\n start, end : int, optional\n\n Returns\n -------\n out : ndarray\n Output array of ints.\n\n See also\n --------\n rfind, str.rindex\n\n \"\"\"\n return _vec_string(\n a, integer, 'rindex', [sub, start] + _clean_args(end))\n\n\n@array_function_dispatch(_just_dispatcher)\ndef rjust(a, width, fillchar=' '):\n \"\"\"\n Return an array with the elements of `a` right-justified in a\n string of length `width`.\n\n Calls `str.rjust` element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n width : int\n The length of the resulting strings\n fillchar : str or unicode, optional\n The character to use for padding\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.rjust\n\n \"\"\"\n a_arr = numpy.asarray(a)\n width_arr = numpy.asarray(width)\n size = long(numpy.max(width_arr.flat))\n if numpy.issubdtype(a_arr.dtype, numpy.string_):\n fillchar = asbytes(fillchar)\n return _vec_string(\n a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))\n\n\n@array_function_dispatch(_partition_dispatcher)\ndef rpartition(a, sep):\n \"\"\"\n Partition (split) each element around the right-most separator.\n\n Calls `str.rpartition` element-wise.\n\n For each element in `a`, split the element as the last\n occurrence of `sep`, and return 3 strings containing the part\n before the separator, the separator itself, and the part after\n the separator. If the separator is not found, return 3 strings\n containing the string itself, followed by two empty strings.\n\n Parameters\n ----------\n a : array_like of str or unicode\n Input array\n sep : str or unicode\n Right-most separator to split each element in array.\n\n Returns\n -------\n out : ndarray\n Output array of string or unicode, depending on input\n type. 
The output array will have an extra dimension with\n 3 elements per input element.\n\n See also\n --------\n str.rpartition\n\n \"\"\"\n return _to_string_or_unicode_array(\n _vec_string(a, object_, 'rpartition', (sep,)))\n\n\ndef _split_dispatcher(a, sep=None, maxsplit=None):\n return (a,)\n\n\n@array_function_dispatch(_split_dispatcher)\ndef rsplit(a, sep=None, maxsplit=None):\n \"\"\"\n For each element in `a`, return a list of the words in the\n string, using `sep` as the delimiter string.\n\n Calls `str.rsplit` element-wise.\n\n Except for splitting from the right, `rsplit`\n behaves like `split`.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n sep : str or unicode, optional\n If `sep` is not specified or None, any whitespace string\n is a separator.\n maxsplit : int, optional\n If `maxsplit` is given, at most `maxsplit` splits are done,\n the rightmost ones.\n\n Returns\n -------\n out : ndarray\n Array of list objects\n\n See also\n --------\n str.rsplit, split\n\n \"\"\"\n # This will return an array of lists of different sizes, so we\n # leave it as an object array\n return _vec_string(\n a, object_, 'rsplit', [sep] + _clean_args(maxsplit))\n\n\ndef _strip_dispatcher(a, chars=None):\n return (a,)\n\n\n@array_function_dispatch(_strip_dispatcher)\ndef rstrip(a, chars=None):\n \"\"\"\n For each element in `a`, return a copy with the trailing\n characters removed.\n\n Calls `str.rstrip` element-wise.\n\n Parameters\n ----------\n a : array-like of str or unicode\n\n chars : str or unicode, optional\n The `chars` argument is a string specifying the set of\n characters to be removed. If omitted or None, the `chars`\n argument defaults to removing whitespace. The `chars` argument\n is not a suffix; rather, all combinations of its values are\n stripped.\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.rstrip\n\n Examples\n --------\n >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c\n array(['aAaAaA', 'abBABba'],\n dtype='|S7')\n >>> np.char.rstrip(c, b'a')\n array(['aAaAaA', 'abBABb'],\n dtype='|S7')\n >>> np.char.rstrip(c, b'A')\n array(['aAaAa', 'abBABba'],\n dtype='|S7')\n\n \"\"\"\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))\n\n\n@array_function_dispatch(_split_dispatcher)\ndef split(a, sep=None, maxsplit=None):\n \"\"\"\n For each element in `a`, return a list of the words in the\n string, using `sep` as the delimiter string.\n\n Calls `str.split` element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n sep : str or unicode, optional\n If `sep` is not specified or None, any whitespace string is a\n separator.\n\n maxsplit : int, optional\n If `maxsplit` is given, at most `maxsplit` splits are done.\n\n Returns\n -------\n out : ndarray\n Array of list objects\n\n See also\n --------\n str.split, rsplit\n\n \"\"\"\n # This will return an array of lists of different sizes, so we\n # leave it as an object array\n return _vec_string(\n a, object_, 'split', [sep] + _clean_args(maxsplit))\n\n\ndef _splitlines_dispatcher(a, keepends=None):\n return (a,)\n\n\n@array_function_dispatch(_splitlines_dispatcher)\ndef splitlines(a, keepends=None):\n \"\"\"\n For each element in `a`, return a list of the lines in the\n element, breaking at line boundaries.\n\n Calls `str.splitlines` element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n keepends : bool, optional\n Line breaks are not included in the 
resulting list unless\n keepends is given and true.\n\n Returns\n -------\n out : ndarray\n Array of list objects\n\n See also\n --------\n str.splitlines\n\n \"\"\"\n return _vec_string(\n a, object_, 'splitlines', _clean_args(keepends))\n\n\ndef _startswith_dispatcher(a, prefix, start=None, end=None):\n return (a,)\n\n\n@array_function_dispatch(_startswith_dispatcher)\ndef startswith(a, prefix, start=0, end=None):\n \"\"\"\n Returns a boolean array which is `True` where the string element\n in `a` starts with `prefix`, otherwise `False`.\n\n Calls `str.startswith` element-wise.\n\n Parameters\n ----------\n a : array_like of str or unicode\n\n prefix : str\n\n start, end : int, optional\n With optional `start`, test beginning at that position. With\n optional `end`, stop comparing at that position.\n\n Returns\n -------\n out : ndarray\n Array of booleans\n\n See also\n --------\n str.startswith\n\n \"\"\"\n return _vec_string(\n a, bool_, 'startswith', [prefix, start] + _clean_args(end))\n\n\n@array_function_dispatch(_strip_dispatcher)\ndef strip(a, chars=None):\n \"\"\"\n For each element in `a`, return a copy with the leading and\n trailing characters removed.\n\n Calls `str.strip` element-wise.\n\n Parameters\n ----------\n a : array-like of str or unicode\n\n chars : str or unicode, optional\n The `chars` argument is a string specifying the set of\n characters to be removed. If omitted or None, the `chars`\n argument defaults to removing whitespace. The `chars` argument\n is not a prefix or suffix; rather, all combinations of its\n values are stripped.\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.strip\n\n Examples\n --------\n >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> c\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n >>> np.char.strip(c)\n array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')\n >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads\n array(['AaAaA', ' aA ', 'bBABb'], dtype='<U7')\n >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails\n array(['aAaAa', ' aA ', 'abBABba'], dtype='<U7')\n\n \"\"\"\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef swapcase(a):\n \"\"\"\n Return element-wise a copy of the string with\n uppercase characters converted to lowercase and vice versa.\n\n Calls `str.swapcase` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like, {str, unicode}\n Input array.\n\n Returns\n -------\n out : ndarray, {str, unicode}\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.swapcase\n\n Examples\n --------\n >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c\n array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],\n dtype='|S5')\n >>> np.char.swapcase(c)\n array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],\n dtype='|S5')\n\n \"\"\"\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'swapcase')\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef title(a):\n \"\"\"\n Return element-wise title cased version of string or unicode.\n\n Title case words start with uppercase characters, all remaining cased\n characters are lowercase.\n\n Calls `str.title` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like, {str, unicode}\n Input array.\n\n Returns\n 
-------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.title\n\n Examples\n --------\n >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c\n array(['a1b c', '1b ca', 'b ca1', 'ca1b'],\n dtype='|S5')\n >>> np.char.title(c)\n array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],\n dtype='|S5')\n\n \"\"\"\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'title')\n\n\ndef _translate_dispatcher(a, table, deletechars=None):\n return (a,)\n\n\n@array_function_dispatch(_translate_dispatcher)\ndef translate(a, table, deletechars=None):\n \"\"\"\n For each element in `a`, return a copy of the string where all\n characters occurring in the optional argument `deletechars` are\n removed, and the remaining characters have been mapped through the\n given translation table.\n\n Calls `str.translate` element-wise.\n\n Parameters\n ----------\n a : array-like of str or unicode\n\n table : str of length 256\n\n deletechars : str\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.translate\n\n \"\"\"\n a_arr = numpy.asarray(a)\n if issubclass(a_arr.dtype.type, unicode_):\n return _vec_string(\n a_arr, a_arr.dtype, 'translate', (table,))\n else:\n return _vec_string(\n a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef upper(a):\n \"\"\"\n Return an array with the elements converted to uppercase.\n\n Calls `str.upper` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array_like, {str, unicode}\n Input array.\n\n Returns\n -------\n out : ndarray, {str, unicode}\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.upper\n\n Examples\n --------\n >>> c = np.array(['a1b c', '1bca', 'bca1']); c\n array(['a1b c', '1bca', 'bca1'], dtype='<U5')\n >>> np.char.upper(c)\n array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')\n\n \"\"\"\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'upper')\n\n\ndef _zfill_dispatcher(a, width):\n return (a,)\n\n\n@array_function_dispatch(_zfill_dispatcher)\ndef zfill(a, width):\n \"\"\"\n Return the numeric string left-filled with zeros\n\n Calls `str.zfill` element-wise.\n\n Parameters\n ----------\n a : array_like, {str, unicode}\n Input array.\n width : int\n Width of string to left-fill elements in `a`.\n\n Returns\n -------\n out : ndarray, {str, unicode}\n Output array of str or unicode, depending on input type\n\n See also\n --------\n str.zfill\n\n \"\"\"\n a_arr = numpy.asarray(a)\n width_arr = numpy.asarray(width)\n size = long(numpy.max(width_arr.flat))\n return _vec_string(\n a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef isnumeric(a):\n \"\"\"\n For each element, return True if there are only numeric\n characters in the element.\n\n Calls `unicode.isnumeric` element-wise.\n\n Numeric characters include digit characters, and all characters\n that have the Unicode numeric value property, e.g. 
``U+2155,\n VULGAR FRACTION ONE FIFTH``.\n\n Parameters\n ----------\n a : array_like, unicode\n Input array.\n\n Returns\n -------\n out : ndarray, bool\n Array of booleans of same shape as `a`.\n\n See also\n --------\n unicode.isnumeric\n\n \"\"\"\n if _use_unicode(a) != unicode_:\n raise TypeError(\"isnumeric is only available for Unicode strings and arrays\")\n return _vec_string(a, bool_, 'isnumeric')\n\n\n@array_function_dispatch(_unary_op_dispatcher)\ndef isdecimal(a):\n \"\"\"\n For each element, return True if there are only decimal\n characters in the element.\n\n Calls `unicode.isdecimal` element-wise.\n\n Decimal characters include digit characters, and all characters\n that that can be used to form decimal-radix numbers,\n e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.\n\n Parameters\n ----------\n a : array_like, unicode\n Input array.\n\n Returns\n -------\n out : ndarray, bool\n Array of booleans identical in shape to `a`.\n\n See also\n --------\n unicode.isdecimal\n\n \"\"\"\n if _use_unicode(a) != unicode_:\n raise TypeError(\"isnumeric is only available for Unicode strings and arrays\")\n return _vec_string(a, bool_, 'isdecimal')\n\n\n@set_module('numpy')\nclass chararray(ndarray):\n \"\"\"\n chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,\n strides=None, order=None)\n\n Provides a convenient view on arrays of string and unicode values.\n\n .. note::\n The `chararray` class exists for backwards compatibility with\n Numarray, it is not recommended for new development. Starting from numpy\n 1.4, if one needs arrays of strings, it is recommended to use arrays of\n `dtype` `object_`, `string_` or `unicode_`, and use the free functions\n in the `numpy.char` module for fast vectorized string operations.\n\n Versus a regular NumPy array of type `str` or `unicode`, this\n class adds the following functionality:\n\n 1) values automatically have whitespace removed from the end\n when indexed\n\n 2) comparison operators automatically remove whitespace from the\n end when comparing values\n\n 3) vectorized string operations are provided as methods\n (e.g. `.endswith`) and infix operators (e.g. ``\"+\", \"*\", \"%\"``)\n\n chararrays should be created using `numpy.char.array` or\n `numpy.char.asarray`, rather than this constructor directly.\n\n This constructor creates the array, using `buffer` (with `offset`\n and `strides`) if it is not ``None``. If `buffer` is ``None``, then\n constructs a new array with `strides` in \"C order\", unless both\n ``len(shape) >= 2`` and ``order='F'``, in which case `strides`\n is in \"Fortran order\".\n\n Methods\n -------\n astype\n argsort\n copy\n count\n decode\n dump\n dumps\n encode\n endswith\n expandtabs\n fill\n find\n flatten\n getfield\n index\n isalnum\n isalpha\n isdecimal\n isdigit\n islower\n isnumeric\n isspace\n istitle\n isupper\n item\n join\n ljust\n lower\n lstrip\n nonzero\n put\n ravel\n repeat\n replace\n reshape\n resize\n rfind\n rindex\n rjust\n rsplit\n rstrip\n searchsorted\n setfield\n setflags\n sort\n split\n splitlines\n squeeze\n startswith\n strip\n swapaxes\n swapcase\n take\n title\n tofile\n tolist\n tostring\n translate\n transpose\n upper\n view\n zfill\n\n Parameters\n ----------\n shape : tuple\n Shape of the array.\n itemsize : int, optional\n Length of each array element, in number of characters. 
Default is 1.\n unicode : bool, optional\n Are the array elements of type unicode (True) or string (False).\n Default is False.\n buffer : int, optional\n Memory address of the start of the array data. Default is None,\n in which case a new array is created.\n offset : int, optional\n Fixed stride displacement from the beginning of an axis?\n Default is 0. Needs to be >=0.\n strides : array_like of ints, optional\n Strides for the array (see `ndarray.strides` for full description).\n Default is None.\n order : {'C', 'F'}, optional\n The order in which the array data is stored in memory: 'C' ->\n \"row major\" order (the default), 'F' -> \"column major\"\n (Fortran) order.\n\n Examples\n --------\n >>> charar = np.chararray((3, 3))\n >>> charar[:] = 'a'\n >>> charar\n chararray([[b'a', b'a', b'a'],\n [b'a', b'a', b'a'],\n [b'a', b'a', b'a']], dtype='|S1')\n\n >>> charar = np.chararray(charar.shape, itemsize=5)\n >>> charar[:] = 'abc'\n >>> charar\n chararray([[b'abc', b'abc', b'abc'],\n [b'abc', b'abc', b'abc'],\n [b'abc', b'abc', b'abc']], dtype='|S5')\n\n \"\"\"\n def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,\n offset=0, strides=None, order='C'):\n global _globalvar\n\n if unicode:\n dtype = unicode_\n else:\n dtype = string_\n\n # force itemsize to be a Python long, since using NumPy integer\n # types results in itemsize.itemsize being used as the size of\n # strings in the new array.\n itemsize = long(itemsize)\n\n if sys.version_info[0] >= 3 and isinstance(buffer, _unicode):\n # On Py3, unicode objects do not have the buffer interface\n filler = buffer\n buffer = None\n else:\n filler = None\n\n _globalvar = 1\n if buffer is None:\n self = ndarray.__new__(subtype, shape, (dtype, itemsize),\n order=order)\n else:\n self = ndarray.__new__(subtype, shape, (dtype, itemsize),\n buffer=buffer,\n offset=offset, strides=strides,\n order=order)\n if filler is not None:\n self[...] 
= filler\n _globalvar = 0\n return self\n\n def __array_finalize__(self, obj):\n # The b is a special case because it is used for reconstructing.\n if not _globalvar and self.dtype.char not in 'SUbc':\n raise ValueError(\"Can only create a chararray from string data.\")\n\n def __getitem__(self, obj):\n val = ndarray.__getitem__(self, obj)\n\n if isinstance(val, character):\n temp = val.rstrip()\n if _len(temp) == 0:\n val = ''\n else:\n val = temp\n\n return val\n\n # IMPLEMENTATION NOTE: Most of the methods of this class are\n # direct delegations to the free functions in this module.\n # However, those that return an array of strings should instead\n # return a chararray, so some extra wrapping is required.\n\n def __eq__(self, other):\n \"\"\"\n Return (self == other) element-wise.\n\n See also\n --------\n equal\n \"\"\"\n return equal(self, other)\n\n def __ne__(self, other):\n \"\"\"\n Return (self != other) element-wise.\n\n See also\n --------\n not_equal\n \"\"\"\n return not_equal(self, other)\n\n def __ge__(self, other):\n \"\"\"\n Return (self >= other) element-wise.\n\n See also\n --------\n greater_equal\n \"\"\"\n return greater_equal(self, other)\n\n def __le__(self, other):\n \"\"\"\n Return (self <= other) element-wise.\n\n See also\n --------\n less_equal\n \"\"\"\n return less_equal(self, other)\n\n def __gt__(self, other):\n \"\"\"\n Return (self > other) element-wise.\n\n See also\n --------\n greater\n \"\"\"\n return greater(self, other)\n\n def __lt__(self, other):\n \"\"\"\n Return (self < other) element-wise.\n\n See also\n --------\n less\n \"\"\"\n return less(self, other)\n\n def __add__(self, other):\n \"\"\"\n Return (self + other), that is string concatenation,\n element-wise for a pair of array_likes of str or unicode.\n\n See also\n --------\n add\n \"\"\"\n return asarray(add(self, other))\n\n def __radd__(self, other):\n \"\"\"\n Return (other + self), that is string concatenation,\n element-wise for a pair of array_likes of `string_` or `unicode_`.\n\n See also\n --------\n add\n \"\"\"\n return asarray(add(numpy.asarray(other), self))\n\n def __mul__(self, i):\n \"\"\"\n Return (self * i), that is string multiple concatenation,\n element-wise.\n\n See also\n --------\n multiply\n \"\"\"\n return asarray(multiply(self, i))\n\n def __rmul__(self, i):\n \"\"\"\n Return (self * i), that is string multiple concatenation,\n element-wise.\n\n See also\n --------\n multiply\n \"\"\"\n return asarray(multiply(self, i))\n\n def __mod__(self, i):\n \"\"\"\n Return (self % i), that is pre-Python 2.6 string formatting\n (interpolation), element-wise for a pair of array_likes of `string_`\n or `unicode_`.\n\n See also\n --------\n mod\n \"\"\"\n return asarray(mod(self, i))\n\n def __rmod__(self, other):\n return NotImplemented\n\n def argsort(self, axis=-1, kind=None, order=None):\n \"\"\"\n Return the indices that sort the array lexicographically.\n\n For full documentation see `numpy.argsort`, for which this method is\n in fact merely a \"thin wrapper.\"\n\n Examples\n --------\n >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')\n >>> c = c.view(np.chararray); c\n chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],\n dtype='|S5')\n >>> c[c.argsort()]\n chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],\n dtype='|S5')\n\n \"\"\"\n return self.__array__().argsort(axis, kind, order)\n argsort.__doc__ = ndarray.argsort.__doc__\n\n def capitalize(self):\n \"\"\"\n Return a copy of `self` with only the first character of each element\n capitalized.\n\n See also\n 
--------\n char.capitalize\n\n \"\"\"\n return asarray(capitalize(self))\n\n def center(self, width, fillchar=' '):\n \"\"\"\n Return a copy of `self` with its elements centered in a\n string of length `width`.\n\n See also\n --------\n center\n \"\"\"\n return asarray(center(self, width, fillchar))\n\n def count(self, sub, start=0, end=None):\n \"\"\"\n Returns an array with the number of non-overlapping occurrences of\n substring `sub` in the range [`start`, `end`].\n\n See also\n --------\n char.count\n\n \"\"\"\n return count(self, sub, start, end)\n\n def decode(self, encoding=None, errors=None):\n \"\"\"\n Calls `str.decode` element-wise.\n\n See also\n --------\n char.decode\n\n \"\"\"\n return decode(self, encoding, errors)\n\n def encode(self, encoding=None, errors=None):\n \"\"\"\n Calls `str.encode` element-wise.\n\n See also\n --------\n char.encode\n\n \"\"\"\n return encode(self, encoding, errors)\n\n def endswith(self, suffix, start=0, end=None):\n \"\"\"\n Returns a boolean array which is `True` where the string element\n in `self` ends with `suffix`, otherwise `False`.\n\n See also\n --------\n char.endswith\n\n \"\"\"\n return endswith(self, suffix, start, end)\n\n def expandtabs(self, tabsize=8):\n \"\"\"\n Return a copy of each string element where all tab characters are\n replaced by one or more spaces.\n\n See also\n --------\n char.expandtabs\n\n \"\"\"\n return asarray(expandtabs(self, tabsize))\n\n def find(self, sub, start=0, end=None):\n \"\"\"\n For each element, return the lowest index in the string where\n substring `sub` is found.\n\n See also\n --------\n char.find\n\n \"\"\"\n return find(self, sub, start, end)\n\n def index(self, sub, start=0, end=None):\n \"\"\"\n Like `find`, but raises `ValueError` when the substring is not found.\n\n See also\n --------\n char.index\n\n \"\"\"\n return index(self, sub, start, end)\n\n def isalnum(self):\n \"\"\"\n Returns true for each element if all characters in the string\n are alphanumeric and there is at least one character, false\n otherwise.\n\n See also\n --------\n char.isalnum\n\n \"\"\"\n return isalnum(self)\n\n def isalpha(self):\n \"\"\"\n Returns true for each element if all characters in the string\n are alphabetic and there is at least one character, false\n otherwise.\n\n See also\n --------\n char.isalpha\n\n \"\"\"\n return isalpha(self)\n\n def isdigit(self):\n \"\"\"\n Returns true for each element if all characters in the string are\n digits and there is at least one character, false otherwise.\n\n See also\n --------\n char.isdigit\n\n \"\"\"\n return isdigit(self)\n\n def islower(self):\n \"\"\"\n Returns true for each element if all cased characters in the\n string are lowercase and there is at least one cased character,\n false otherwise.\n\n See also\n --------\n char.islower\n\n \"\"\"\n return islower(self)\n\n def isspace(self):\n \"\"\"\n Returns true for each element if there are only whitespace\n characters in the string and there is at least one character,\n false otherwise.\n\n See also\n --------\n char.isspace\n\n \"\"\"\n return isspace(self)\n\n def istitle(self):\n \"\"\"\n Returns true for each element if the element is a titlecased\n string and there is at least one character, false otherwise.\n\n See also\n --------\n char.istitle\n\n \"\"\"\n return istitle(self)\n\n def isupper(self):\n \"\"\"\n Returns true for each element if all cased characters in the\n string are uppercase and there is at least one character, false\n otherwise.\n\n See also\n --------\n 
char.isupper\n\n \"\"\"\n return isupper(self)\n\n def join(self, seq):\n \"\"\"\n Return a string which is the concatenation of the strings in the\n sequence `seq`.\n\n See also\n --------\n char.join\n\n \"\"\"\n return join(self, seq)\n\n def ljust(self, width, fillchar=' '):\n \"\"\"\n Return an array with the elements of `self` left-justified in a\n string of length `width`.\n\n See also\n --------\n char.ljust\n\n \"\"\"\n return asarray(ljust(self, width, fillchar))\n\n def lower(self):\n \"\"\"\n Return an array with the elements of `self` converted to\n lowercase.\n\n See also\n --------\n char.lower\n\n \"\"\"\n return asarray(lower(self))\n\n def lstrip(self, chars=None):\n \"\"\"\n For each element in `self`, return a copy with the leading characters\n removed.\n\n See also\n --------\n char.lstrip\n\n \"\"\"\n return asarray(lstrip(self, chars))\n\n def partition(self, sep):\n \"\"\"\n Partition each element in `self` around `sep`.\n\n See also\n --------\n partition\n \"\"\"\n return asarray(partition(self, sep))\n\n def replace(self, old, new, count=None):\n \"\"\"\n For each element in `self`, return a copy of the string with all\n occurrences of substring `old` replaced by `new`.\n\n See also\n --------\n char.replace\n\n \"\"\"\n return asarray(replace(self, old, new, count))\n\n def rfind(self, sub, start=0, end=None):\n \"\"\"\n For each element in `self`, return the highest index in the string\n where substring `sub` is found, such that `sub` is contained\n within [`start`, `end`].\n\n See also\n --------\n char.rfind\n\n \"\"\"\n return rfind(self, sub, start, end)\n\n def rindex(self, sub, start=0, end=None):\n \"\"\"\n Like `rfind`, but raises `ValueError` when the substring `sub` is\n not found.\n\n See also\n --------\n char.rindex\n\n \"\"\"\n return rindex(self, sub, start, end)\n\n def rjust(self, width, fillchar=' '):\n \"\"\"\n Return an array with the elements of `self`\n right-justified in a string of length `width`.\n\n See also\n --------\n char.rjust\n\n \"\"\"\n return asarray(rjust(self, width, fillchar))\n\n def rpartition(self, sep):\n \"\"\"\n Partition each element in `self` around `sep`.\n\n See also\n --------\n rpartition\n \"\"\"\n return asarray(rpartition(self, sep))\n\n def rsplit(self, sep=None, maxsplit=None):\n \"\"\"\n For each element in `self`, return a list of the words in\n the string, using `sep` as the delimiter string.\n\n See also\n --------\n char.rsplit\n\n \"\"\"\n return rsplit(self, sep, maxsplit)\n\n def rstrip(self, chars=None):\n \"\"\"\n For each element in `self`, return a copy with the trailing\n characters removed.\n\n See also\n --------\n char.rstrip\n\n \"\"\"\n return asarray(rstrip(self, chars))\n\n def split(self, sep=None, maxsplit=None):\n \"\"\"\n For each element in `self`, return a list of the words in the\n string, using `sep` as the delimiter string.\n\n See also\n --------\n char.split\n\n \"\"\"\n return split(self, sep, maxsplit)\n\n def splitlines(self, keepends=None):\n \"\"\"\n For each element in `self`, return a list of the lines in the\n element, breaking at line boundaries.\n\n See also\n --------\n char.splitlines\n\n \"\"\"\n return splitlines(self, keepends)\n\n def startswith(self, prefix, start=0, end=None):\n \"\"\"\n Returns a boolean array which is `True` where the string element\n in `self` starts with `prefix`, otherwise `False`.\n\n See also\n --------\n char.startswith\n\n \"\"\"\n return startswith(self, prefix, start, end)\n\n def strip(self, chars=None):\n \"\"\"\n For each 
element in `self`, return a copy with the leading and\n trailing characters removed.\n\n See also\n --------\n char.strip\n\n \"\"\"\n return asarray(strip(self, chars))\n\n def swapcase(self):\n \"\"\"\n For each element in `self`, return a copy of the string with\n uppercase characters converted to lowercase and vice versa.\n\n See also\n --------\n char.swapcase\n\n \"\"\"\n return asarray(swapcase(self))\n\n def title(self):\n \"\"\"\n For each element in `self`, return a titlecased version of the\n string: words start with uppercase characters, all remaining cased\n characters are lowercase.\n\n See also\n --------\n char.title\n\n \"\"\"\n return asarray(title(self))\n\n def translate(self, table, deletechars=None):\n \"\"\"\n For each element in `self`, return a copy of the string where\n all characters occurring in the optional argument\n `deletechars` are removed, and the remaining characters have\n been mapped through the given translation table.\n\n See also\n --------\n char.translate\n\n \"\"\"\n return asarray(translate(self, table, deletechars))\n\n def upper(self):\n \"\"\"\n Return an array with the elements of `self` converted to\n uppercase.\n\n See also\n --------\n char.upper\n\n \"\"\"\n return asarray(upper(self))\n\n def zfill(self, width):\n \"\"\"\n Return the numeric string left-filled with zeros in a string of\n length `width`.\n\n See also\n --------\n char.zfill\n\n \"\"\"\n return asarray(zfill(self, width))\n\n def isnumeric(self):\n \"\"\"\n For each element in `self`, return True if there are only\n numeric characters in the element.\n\n See also\n --------\n char.isnumeric\n\n \"\"\"\n return isnumeric(self)\n\n def isdecimal(self):\n \"\"\"\n For each element in `self`, return True if there are only\n decimal characters in the element.\n\n See also\n --------\n char.isdecimal\n\n \"\"\"\n return isdecimal(self)\n\n\ndef array(obj, itemsize=None, copy=True, unicode=None, order=None):\n \"\"\"\n Create a `chararray`.\n\n .. note::\n This class is provided for numarray backward-compatibility.\n New code (not concerned with numarray compatibility) should use\n arrays of type `string_` or `unicode_` and use the free functions\n in :mod:`numpy.char <numpy.core.defchararray>` for fast\n vectorized string operations instead.\n\n Versus a regular NumPy array of type `str` or `unicode`, this\n class adds the following functionality:\n\n 1) values automatically have whitespace removed from the end\n when indexed\n\n 2) comparison operators automatically remove whitespace from the\n end when comparing values\n\n 3) vectorized string operations are provided as methods\n (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)\n\n Parameters\n ----------\n obj : array of str or unicode-like\n\n itemsize : int, optional\n `itemsize` is the number of characters per scalar in the\n resulting array. If `itemsize` is None, and `obj` is an\n object array or a Python list, the `itemsize` will be\n automatically determined. If `itemsize` is provided and `obj`\n is of type str or unicode, then the `obj` string will be\n chunked into `itemsize` pieces.\n\n copy : bool, optional\n If true (default), then the object is copied. Otherwise, a copy\n will only be made if __array__ returns a copy, if obj is a\n nested sequence, or if a copy is needed to satisfy any of the other\n requirements (`itemsize`, unicode, `order`, etc.).\n\n unicode : bool, optional\n When true, the resulting `chararray` can contain Unicode\n characters, when false only 8-bit characters. 
If unicode is\n None and `obj` is one of the following:\n\n - a `chararray`,\n - an ndarray of type `str` or `unicode`\n - a Python str or unicode object,\n\n then the unicode setting of the output array will be\n automatically determined.\n\n order : {'C', 'F', 'A'}, optional\n Specify the order of the array. If order is 'C' (default), then the\n array will be in C-contiguous order (last-index varies the\n fastest). If order is 'F', then the returned array\n will be in Fortran-contiguous order (first-index varies the\n fastest). If order is 'A', then the returned array may\n be in any order (either C-, Fortran-contiguous, or even\n discontiguous).\n \"\"\"\n if isinstance(obj, (_bytes, _unicode)):\n if unicode is None:\n if isinstance(obj, _unicode):\n unicode = True\n else:\n unicode = False\n\n if itemsize is None:\n itemsize = _len(obj)\n shape = _len(obj) // itemsize\n\n if unicode:\n if sys.maxunicode == 0xffff:\n # On a narrow Python build, the buffer for Unicode\n # strings is UCS2, which doesn't match the buffer for\n # NumPy Unicode types, which is ALWAYS UCS4.\n # Therefore, we need to convert the buffer. On Python\n # 2.6 and later, we can use the utf_32 codec. Earlier\n # versions don't have that codec, so we convert to a\n # numerical array that matches the input buffer, and\n # then use NumPy to convert it to UCS4. All of this\n # should happen in native endianness.\n obj = obj.encode('utf_32')\n else:\n obj = _unicode(obj)\n else:\n # Let the default Unicode -> string encoding (if any) take\n # precedence.\n obj = _bytes(obj)\n\n return chararray(shape, itemsize=itemsize, unicode=unicode,\n buffer=obj, order=order)\n\n if isinstance(obj, (list, tuple)):\n obj = numpy.asarray(obj)\n\n if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):\n # If we just have a vanilla chararray, create a chararray\n # view around it.\n if not isinstance(obj, chararray):\n obj = obj.view(chararray)\n\n if itemsize is None:\n itemsize = obj.itemsize\n # itemsize is in 8-bit chars, so for Unicode, we need\n # to divide by the size of a single Unicode character,\n # which for NumPy is always 4\n if issubclass(obj.dtype.type, unicode_):\n itemsize //= 4\n\n if unicode is None:\n if issubclass(obj.dtype.type, unicode_):\n unicode = True\n else:\n unicode = False\n\n if unicode:\n dtype = unicode_\n else:\n dtype = string_\n\n if order is not None:\n obj = numpy.asarray(obj, order=order)\n if (copy or\n (itemsize != obj.itemsize) or\n (not unicode and isinstance(obj, unicode_)) or\n (unicode and isinstance(obj, string_))):\n obj = obj.astype((dtype, long(itemsize)))\n return obj\n\n if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):\n if itemsize is None:\n # Since no itemsize was specified, convert the input array to\n # a list so the ndarray constructor will automatically\n # determine the itemsize for us.\n obj = obj.tolist()\n # Fall through to the default case\n\n if unicode:\n dtype = unicode_\n else:\n dtype = string_\n\n if itemsize is None:\n val = narray(obj, dtype=dtype, order=order, subok=True)\n else:\n val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)\n return val.view(chararray)\n\n\ndef asarray(obj, itemsize=None, unicode=None, order=None):\n \"\"\"\n Convert the input to a `chararray`, copying the data only if\n necessary.\n\n Versus a regular NumPy array of type `str` or `unicode`, this\n class adds the following functionality:\n\n 1) values automatically have whitespace removed from the end\n when indexed\n\n 2) comparison 
operators automatically remove whitespace from the\n end when comparing values\n\n 3) vectorized string operations are provided as methods\n (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``,``%``)\n\n Parameters\n ----------\n obj : array of str or unicode-like\n\n itemsize : int, optional\n `itemsize` is the number of characters per scalar in the\n resulting array. If `itemsize` is None, and `obj` is an\n object array or a Python list, the `itemsize` will be\n automatically determined. If `itemsize` is provided and `obj`\n is of type str or unicode, then the `obj` string will be\n chunked into `itemsize` pieces.\n\n unicode : bool, optional\n When true, the resulting `chararray` can contain Unicode\n characters, when false only 8-bit characters. If unicode is\n None and `obj` is one of the following:\n\n - a `chararray`,\n - an ndarray of type `str` or 'unicode`\n - a Python str or unicode object,\n\n then the unicode setting of the output array will be\n automatically determined.\n\n order : {'C', 'F'}, optional\n Specify the order of the array. If order is 'C' (default), then the\n array will be in C-contiguous order (last-index varies the\n fastest). If order is 'F', then the returned array\n will be in Fortran-contiguous order (first-index varies the\n fastest).\n \"\"\"\n return array(obj, itemsize, copy=False,\n unicode=unicode, order=order)\n" ]
[ [ "numpy.linspace", "numpy.testing.assert_array_max_ulp", "numpy.testing.assert_no_warnings", "numpy.testing.assert_string_equal", "numpy.all", "numpy.random.randn", "numpy.iinfo", "numpy.testing.assert_equal", "numpy.testing.suppress_warnings", "numpy.testing.assert_no_gc_cycles", "numpy.finfo", "numpy.apply_along_axis", "numpy.testing.tempdir", "numpy.testing.assert_array_almost_equal", "numpy.testing.assert_array_almost_equal_nulp", "numpy.timedelta64", "numpy.testing.temppath", "numpy.testing.assert_raises", "numpy.equal", "numpy.testing.assert_", "numpy.testing.assert_allclose", "numpy.testing.build_err_msg", "numpy.array", "numpy.ma.MaskedArray", "numpy.testing.assert_warns", "numpy.testing.raises", "numpy.testing.clear_and_catch_warnings", "numpy.datetime64", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.testing.assert_array_less", "numpy.ma.masked_array", "numpy.empty" ], [ "numpy.asarray", "numpy.core.overrides.set_module", "numpy.issubdtype", "numpy.compat.long", "numpy.compat.asbytes", "numpy.max", "numpy.core.multiarray._vec_string" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
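
The entry above serializes numpy.core.defchararray, whose array/asarray constructors build chararray objects with vectorized string methods. A minimal, hypothetical usage sketch through the public numpy.char aliases; it assumes only that NumPy is installed and is not part of the serialized file:

import numpy as np

# np.char.array wraps the defchararray.array constructor shown in the entry.
a = np.char.array(['hello', 'world'])
print(a.upper())    # vectorized str method, gives chararray(['HELLO', 'WORLD'])
print(a + '!')      # infix + concatenates element-wise: ['hello!', 'world!']
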
ISTE-NITK/istesleep
[ "caff596dfc02c67fc4fc11f4d386ba04b6e1a0d7" ]
[ "sleepAnalytics/Sleep Analysis.py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib.dates import date2num\nimport datetime\nimport matplotlib.dates as mdates\nimport time\nimport numpy as np\nfrom datetime import date\nimport matplotlib.lines as mlines\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.patches as mpatches\nimport category_encoders as ce\nimport glob\nfrom sklearn.cluster import KMeans\n\n\n# In[2]:\n\n#Getting all the data\ndef get_all_data(file_path):\n \n data = pd.DataFrame()\n quality = pd.DataFrame()\n \n avg_illuminance = pd.DataFrame()\n avg_airquaility = pd.DataFrame()\n avg_temp = pd.DataFrame()\n avg_humididty = pd.DataFrame()\n avg_quality = pd.DataFrame()\n \n sleep_quality_data = get_data('/home/prajwal/Desktop/istesleep/data/sleepdata.csv',delim=';')\n \n for filename in glob.glob(file_path): \n \n df = pd.read_csv(filename)\n \n avg_illuminance = avg_illuminance.append([np.mean(df[' illuminance'])])\n avg_airquaility = avg_airquaility.append([np.mean(df['airquality'])])\n avg_temp = avg_temp.append([np.mean(df[' temperature'])])\n avg_humididty = avg_humididty.append([np.mean(df[' humidity'])])\n \n date = df[' timestamp'].astype(str).str[:-22][0]\n date = date[1:11]\n \n sleep_quality = get_sleep_quality(sleep_quality_data,date)\n avg_quality = avg_quality.append(sleep_quality)\n sleep_quality = sleep_quality*df.shape[0]\n \n quality_date = pd.DataFrame(sleep_quality)\n quality = pd.concat([quality,quality_date],axis = 0,ignore_index=True)\n \n data = pd.concat([data,df],axis = 0,ignore_index=True)\n \n avg_data = pd.concat([avg_illuminance,avg_airquaility,avg_temp,avg_humididty,avg_quality],axis = 1, ignore_index=True)\n data = pd.concat([data,quality],axis = 1)\n \n return [data, avg_data]\n\n\n# In[53]:\n\ndef split_data(data):\n \n data_light = data[data['illuminance']>50]\n data_dark = data[data['illuminance']<50]\n #data_dark = data_dark[data_dark['illuminance']>5]\n \n return [data_light, data_dark]\n\n\n# In[ ]:\n\ndef get_sleep_quality(data,date):\n \n x = data['Sleep quality'][data['End'].astype(str)==date].tolist()\n \n return x\n\n\n# In[ ]:\n\ndef get_data(filepath,delim=','):\n \n data = pd.read_csv(filepath,sep=delim)\n \n return data\n\n\n# In[ ]:\n\ndef data_sample(data):\n\n data = data.iloc[::5, :]\n \n return data\n\n\n# In[ ]:\n\ndef data_to_csv(data,file_name):\n \n data.to_csv(file_name, sep=',')\n\n\n# In[ ]:\n\ndef convert_date(data):\n \n data[' timestamp'] = data[' timestamp'].astype(str).str[:-13]\n data[' timestamp'] = pd.to_datetime(data[' timestamp'])\n data['airquality'] = data['airquality'].astype(float)\n \n return data\n\n\n# In[ ]:\n\ndef plot_two(data, x,y):\n \n plt.scatter(data[x], data[y])\n plt.axis('tight')\n plt.ylabel(y)\n plt.xlabel(x)\n plt.show()\n\n\n# In[ ]:\n\ndef plot_simple(data,x,y,c='r',s = 40):\n \n plt.scatter(data[x], data[y], c = c,s=s, alpha=0.5)\n plt.axis('tight')\n plt.ylabel(y)\n plt.xlabel(x)\n plt.show()\n\n\n# In[ ]:\n\ndef plot(data,x,y,c='quality',s = 40):\n \n plt.scatter(data[x], data[y], c = data[c], s=s, alpha=0.5,cmap='viridis')\n plt.axis('tight')\n plt.colorbar()\n plt.ylabel(y)\n plt.xlabel(x)\n plt.show()\n\n\n# In[54]:\n\n#data, avg_data = get_all_data('/home/prajwal/Desktop/istesleep/data/Data/*.csv')\ndata = pd.read_csv('/home/prajwal/Desktop/Data/Data.csv')\navg_data = pd.read_csv('/home/prajwal/Desktop/Data/Avg_Data.csv')\n\n\n# In[55]:\n\n#Splitting data into two components, day and night data.\ndata_light,data_dark = 
split_data(data)\n\n\n# In[56]:\n\n#Plot - pass parameters, data and the column names you want to plot. Color indicates sleep quality\nplot(data_dark,'illuminance','airquality')\n\n\n# In[15]:\n\n#plot(avg_data,'avg_illuminance','avg_airquaility')\n\n\n# In[16]:\n\n#plot(avg_data,'avg_illuminance','avg_airquaility')\n\n\n# In[17]:\n\n#Plot - pass parameters, data and the column names you want to plot. \n#plot_two(data,'steps','quality')\n\n\n# In[18]:\n\n#x = np.mean(data['steps'][data['quality']==70])\n\n\n# In[19]:\n\n#y = np.mean(data['steps'][data['quality']==68])\n\n\n# In[20]:\n\n#z = np.mean(data['steps'][data['quality']==75])\n\n\n# In[21]:\n\n#v = np.mean(data['steps'][data['quality']==77])\n\n\n# In[22]:\n\n#t = x*70 + y*68 + z*75 + v*77\n\n\n# In[23]:\n\n#t/(70+71+75+77)\n\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.to_datetime", "matplotlib.pyplot.scatter", "pandas.DataFrame", "matplotlib.pyplot.colorbar", "numpy.mean", "matplotlib.pyplot.axis", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
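
The Sleep Analysis.py entry above reads per-night sensor CSVs and colours scatter plots by sleep quality through its plot() helper. A small stand-alone sketch of that scatter-with-colorbar pattern; the column values here are invented placeholders, since the original hard-codes local file paths:

import pandas as pd
import matplotlib.pyplot as plt

# Toy frame mimicking the columns used by plot(data, 'illuminance', 'airquality').
data = pd.DataFrame({
    'illuminance': [3, 12, 45, 80],
    'airquality': [410, 395, 520, 600],
    'quality': [70, 68, 75, 77],     # sleep quality drives the colour scale
})

plt.scatter(data['illuminance'], data['airquality'],
            c=data['quality'], s=40, alpha=0.5, cmap='viridis')
plt.colorbar()
plt.xlabel('illuminance')
plt.ylabel('airquality')
plt.show()
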
entrepreneur-interet-general/predisauvetage
[ "4d985ee79355652709da322db48daffb3e5a895a" ]
[ "collecte/postes_plage_snsm/convert.py" ]
[ "# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n# Likely coming from\n# https://www.google.com/maps/d/viewer?mid=151Itl_57S7UlpC7P-TdfvT2Pz7Y\n\n\nclass KMLConverter(object):\n def __init__(self, filepath):\n self.filepath = filepath\n self.postes = []\n self.parse()\n\n def clean_key(self, key):\n return {\n u'DÉPARTEMENT': 'departement',\n 'NB DE SAUVETEURS SNSM': 'nb_sauveteurs',\n 'CP': 'code_postal',\n 'VILLE': 'ville',\n }[key]\n\n def parse_coordinates(self, value):\n if value is None:\n return None, None\n parts = map(float, value.text.split(','))\n latitude, longitude = parts[1], parts[0]\n return latitude, longitude\n\n def parse(self):\n with open(self.filepath, 'r') as f:\n soup = BeautifulSoup(f, 'xml')\n for placemark in soup.kml.Document.Folder.find_all('Placemark'):\n poste = {}\n poste['nom'] = placemark.find('name').text.strip()\n poste['latitude'], poste['longitude'] = self.parse_coordinates(\n placemark.find('coordinates')\n )\n for data in placemark.ExtendedData.find_all('Data'):\n key, value = data['name'], data.text.strip()\n if key != 'gx_media_links':\n cleaned_key = self.clean_key(key)\n if cleaned_key == 'nb_sauveteurs':\n poste[cleaned_key] = int(float(value))\n else:\n poste[cleaned_key] = value\n self.postes.append(poste)\n\n def to_csv(self, filepath):\n df = pd.DataFrame(self.postes)\n df = df.sort_values(by='code_postal').reset_index(drop=True)\n df.index += 1\n df.to_csv(filepath, encoding='utf-8', index=True, index_label='id')\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
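
The convert.py entry above defines KMLConverter, which parses SNSM beach-post Placemarks with BeautifulSoup and writes them out with pandas. A hypothetical driver sketch; the import path and file names are placeholders, and bs4 needs an XML parser such as lxml installed:

from convert import KMLConverter  # module name assumed from collecte/postes_plage_snsm/convert.py

converter = KMLConverter('postes_plage.kml')  # parsing happens in __init__
print(len(converter.postes))                  # list of dicts: nom, latitude, longitude, ...
converter.to_csv('postes_plage.csv')          # rows sorted by code_postal, 1-based 'id' column
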
padmec-reservoir/PRESTO
[ "71525a8dece2bcc4f16ff4a2120d7627e9ecd776" ]
[ "presto/Preprocessors/Upscale/Structured/StructuredUpscalingMethods.py" ]
[ "import numpy as np\nimport collections\nimport time\nfrom pymoab import types\nfrom pymoab import topo_util\nfrom PyTrilinos import Epetra, AztecOO, ML\n\n\nclass StructuredUpscalingMethods:\n \"\"\"Defines a structured upscaling mesh representation\n Parameters\n ----------\n coarse_ratio: List or array of integers\n List or array containing three values indicating the coarsening ratio\n of the mesh in x, y and z directions.\n mesh_size: List or array of integers\n List or array containing three values indicating the mesh size\n (number of fine elements) of the mesh in x, y and z.\n block_size List o array of floats\n List or array containing three values indicating the constant\n increments of vertex coordinates in x, y and z.\n \"\"\"\n def __init__(self, coarse_ratio, mesh_size, block_size, method, moab):\n\n self.coarse_ratio = coarse_ratio\n self.mesh_size = mesh_size\n self.block_size = block_size\n self.method = method\n\n self.verts = None # Array containing MOAB vertex entities\n self.elems = [] # List containing MOAB volume entities\n\n self.coarse_verts = None # Array containing MOAB vertex entities for\n # the coarse mesh\n self.coarse_elems = [] # List containig MOAB volume entities for the\n # coarse mesh\n\n self.primals = {} # Mapping from tuples (idx, dy, idz) to Coarse\n # volumes\n self.primal_ids = []\n\n self.primals_adj = []\n\n self.perm = []\n\n # MOAB boilerplate\n self.mb = moab\n self.root_set = self.mb.get_root_set()\n self.mesh_topo_util = topo_util.MeshTopoUtil(self.mb)\n\n # Pytrilinos boilerplate\n self.comm = Epetra.PyComm()\n self.mlList = {\"max levels\": 3,\n \"output\": 10,\n \"smoother: type\": \"symmetric Gauss-Seidel\",\n \"aggregation: type\": \"Uncoupled\"\n }\n\n def create_tags(self):\n # TODO: - Should go on Common (?)\n\n self.gid_tag = self.mb.tag_get_handle(\n \"GLOBAL_ID\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_DENSE, True)\n\n self.coarse_gid_tag = self.mb.tag_get_handle(\n \"GLOBAL_ID_COARSE\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_DENSE, True)\n\n # this will gide through the meshsets corresponding to coarse scale\n # volumes\n self.primal_id_tag = self.mb.tag_get_handle(\n \"PRIMAL_ID\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_SPARSE, True)\n\n self.phi_tag = self.mb.tag_get_handle(\n \"PHI\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n self.boundary_x_tag, self.boundary_y_tag, self.boundary_z_tag = (\n self.mb.tag_get_handle(\n \"LOCAL BOUNDARY CONDITIONS - X Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True),\n self.mb.tag_get_handle(\n \"LOCAL BOUNDARY CONDITIONS - y Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True),\n self.mb.tag_get_handle(\n \"LOCAL BOUNDARY CONDITIONS - z Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n )\n\n (self.primal_perm_x_tag,\n self.primal_perm_y_tag,\n self.primal_perm_z_tag) = (\n self.mb.tag_get_handle(\n \"COARSE PERMEABILITY - X Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True),\n self.mb.tag_get_handle(\n \"COARSE PERMEABILITY - y Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True),\n self.mb.tag_get_handle(\n \"COARSE PERMEABILITY - z Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n )\n\n # tag handle for upscaling operation\n self.primal_phi_tag = self.mb.tag_get_handle(\n \"PRIMAL_PHI\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n self.perm_tag = self.mb.tag_get_handle(\n \"PERM\", 9, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n # tag handle for upscaling operation\n 
self.primal_perm_tag = self.mb.tag_get_handle(\n \"PRIMAL_PERM\", 9, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n # either shoud go or put other directions..., I...\n\n self.abs_perm_x_tag = self.mb.tag_get_handle(\n \"ABS_PERM_X\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n self.abs_perm_fine_x_tag = self.mb.tag_get_handle(\n \"ABS_PERM_X_FINE\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n self.fine_to_primal_tag = self.mb.tag_get_handle(\n \"FINE_TO_PRIMAL\", 1, types.MB_TYPE_HANDLE,\n types.MB_TAG_SPARSE, True)\n\n self.primal_adj_tag = self.mb.tag_get_handle(\n \"PRIMAL_ADJ\", 1, types.MB_TYPE_HANDLE,\n types.MB_TAG_SPARSE, True)\n\n self.coarse_injection_tag = self.mb.tag_get_handle(\n \"injection_well_coarse\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_SPARSE, True)\n\n self.coarse_production_tag = self.mb.tag_get_handle(\n \"production_well_coarse\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_SPARSE, True)\n\n def get_block_size_coarse(self):\n block_size_coarse = []\n total_size = (np.asarray(self.mesh_size, dtype='int32')) * np.asarray(\n self.block_size, dtype='float64')\n\n for dim in range(0, 3):\n block_size_coarse.append([self.coarse_ratio[dim] * np.asarray(\n self.block_size[dim], dtype='float64') * coarse_dim\n for coarse_dim in np.arange(self._coarse_dims()[dim],\n dtype='int32')])\n block_size_coarse[dim].append(total_size[dim])\n return block_size_coarse\n\n def create_coarse_vertices(self):\n # TODO: - Should go on Common\n\n block_size_coarse = self.get_block_size_coarse()\n\n coarse_coords = np.array([\n (i, j, k)\n for k in (np.array(block_size_coarse[2], dtype='float64'))\n for j in (np.array(block_size_coarse[1], dtype='float64'))\n for i in (np.array(block_size_coarse[0], dtype='float64'))\n ])\n return self.mb.create_vertices(coarse_coords.flatten())\n\n def _coarse_dims(self,):\n # TODO: - Should go on Common\n\n mesh_size_coarse = np.asarray(\n self.mesh_size, dtype='int32') // np.asarray(\n self.coarse_ratio, dtype='int32')\n return mesh_size_coarse\n\n def calculate_primal_ids(self):\n # TODO: - Should go on Common\n for dim in range(0, 3):\n self.primal_ids.append(\n [i // (self.coarse_ratio[dim]) for i in xrange(\n self.mesh_size[dim])])\n\n new_primal = []\n for dim in range(0, 3):\n new_primal.append(\n self.primal_ids[dim][(\n self.mesh_size[dim] // self.coarse_ratio[dim]) *\n self.coarse_ratio[dim]:])\n\n if len(new_primal[dim]) < (self.mesh_size[dim] // 2):\n new_primal[dim] = np.repeat(\n max(self.primal_ids[dim]) - 1,\n len(new_primal[dim])).tolist()\n self.primal_ids[dim] = (self.primal_ids[dim][:self.mesh_size[\n dim] // self.coarse_ratio[dim] * self.coarse_ratio[dim]] +\n new_primal[dim])\n\n def create_fine_vertices(self):\n # TODO: - Should go on Common\n\n coords = np.array([\n (i, j, k) for k in (np.arange(\n self.mesh_size[2] + 1, dtype='float64') *\n self.block_size[2])\n for j in (np.arange(\n self.mesh_size[1] + 1, dtype='float64') *\n self.block_size[1])\n for i in (np.arange(\n self.mesh_size[0] + 1, dtype='float64') *\n self.block_size[0])\n ], dtype='float64')\n return self.mb.create_vertices(coords.flatten())\n\n def _create_hexa(self, i, j, k, verts, mesh):\n # TODO: - Should go on Common\n # - Refactor this (????????)\n # (i, j, k)\n hexa = [verts[i + (j * (mesh[0] + 1)) +\n (k * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i+1, j, k)\n verts[(i + 1) + (j * (mesh[0] + 1)) +\n (k * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i+1, j+1, k)\n verts[(i + 1) + (j + 1) * (mesh[0]) +\n (j + 1) + (k * ((mesh[0] + 
1)*(mesh[1] + 1)))],\n # (i, j+1, k)\n verts[i + (j + 1) * (mesh[0]) + (j + 1) +\n (k * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i, j, k+1)\n verts[i + (j * (mesh[0] + 1)) +\n ((k + 1) * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i+1, j, k+1)\n verts[(i + 1) + (j * (mesh[0] + 1)) +\n ((k + 1) * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i+1, j+1, k+1)\n verts[(i + 1) + (j + 1) * (mesh[0]) +\n (j + 1) + ((k + 1) * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i, j+1, k+1)\n verts[i + (j + 1) * (mesh[0]) +\n (j + 1) + ((k + 1) * ((mesh[0] + 1) * (mesh[1] + 1)))]]\n\n return hexa\n\n def _coarsening_ratio(self, dim):\n coarsening = (collections.Counter(self.primal_ids[dim]))\n return coarsening.values()\n\n def create_fine_blocks_and_primal(self):\n # TODO: - Should go on Common\n fine_vertices = self.create_fine_vertices()\n cur_id = 0\n # Create fine grid\n for k, idz in zip(xrange(self.mesh_size[2]),\n self.primal_ids[2]):\n # Flake8 bug\n print(\"{0} / {1}\".format(k + 1, self.mesh_size[2]))\n for j, idy in zip(xrange(self.mesh_size[1]),\n self.primal_ids[1]):\n for i, idx in zip(xrange(self.mesh_size[0]),\n self.primal_ids[0]):\n\n hexa = self._create_hexa(i, j, k,\n fine_vertices,\n self.mesh_size)\n el = self.mb.create_element(types.MBHEX, hexa)\n\n self.mb.tag_set_data(self.gid_tag, el, cur_id)\n # Fine Global ID\n self.mb.tag_set_data(self.gid_tag, el, cur_id)\n # Fine Porosity\n self.mb.tag_set_data(self.phi_tag, el, self.phi_values[\n cur_id])\n # Fine Permeability tensor\n self.mb.tag_set_data(self.perm_tag, el, [\n self.perm_values[cur_id], 0, 0,\n 0, self.perm_values[cur_id + self.mesh_size[0] *\n self.mesh_size[1] *\n self.mesh_size[2]], 0,\n 0, 0, self.perm_values[cur_id + 2*self.mesh_size[0] *\n self.mesh_size[1] *\n self.mesh_size[2]]])\n self.mb.tag_set_data(self.abs_perm_fine_x_tag, el,\n self.perm_values[cur_id])\n self.elems.append(el)\n cur_id += 1\n\n # Create primal coarse grid\n try:\n primal = self.primals[(idx, idy, idz)]\n self.mb.add_entities(primal, [el])\n self.mb.tag_set_data(\n self.fine_to_primal_tag, el, primal)\n except KeyError:\n primal = self.mb.create_meshset()\n self.primals[(idx, idy, idz)] = primal\n self.mb.add_entities(primal, [el])\n self.mb.tag_set_data(\n self.fine_to_primal_tag, el, primal)\n\n # do a 'if flow based generate mesh bc over here'\n\n primal_id = 0\n for primal in self.primals.values():\n self.mb.tag_set_data(self.primal_id_tag, primal, primal_id)\n primal_id += 1\n\n def store_primal_adj(self):\n # TODO: - Should go on Common\n min_coarse_ids = np.array([0, 0, 0])\n max_coarse_ids = np.array([max(self.primal_ids[0]),\n max(self.primal_ids[1]),\n max(self.primal_ids[2])])\n\n for primal_id, primal in self.primals.iteritems():\n adj = self.mb.create_meshset()\n adj_ids = []\n\n for i in np.arange(-1, 2):\n for j in np.arange(-1, 2):\n for k in np.arange(-1, 2):\n coord_inc = np.array([i, j, k])\n adj_id = primal_id + coord_inc\n if any(adj_id != primal_id) and \\\n (sum(coord_inc == [0, 0, 0]) == 2) and \\\n all(adj_id >= min_coarse_ids) and \\\n all(adj_id <= max_coarse_ids):\n\n self.mb.add_entities(\n adj, [self.primals[tuple(adj_id)]])\n adj_ids.append(tuple(adj_id))\n\n self.mb.tag_set_data(self.primal_adj_tag, primal, adj)\n\n self.primal_adj[primal_id] = adj_ids\n\n def _get_block_by_ijk(self, i, j, k):\n # TODO: - Should go on Common\n # - Should reformulate to get self.mesh_size instead of input\n\n \"\"\"\n Track down the block from its (i,j,k) position.\n \"\"\"\n block = (k) * self.mesh_size[0] * self.mesh_size[1]+(\n (i)+(j) * 
self.mesh_size[0])\n return block\n\n def _get_elem_by_ijk(self, ijk):\n # TODO Should go on Common\n\n block_id = self._get_block_by_ijk(\n ijk[0], ijk[1], ijk[2])\n elem = self.elems[block_id]\n return elem # Why not \"return self.elems[block_id]\" ?????\n\n def read_phi(self):\n # TODO: - Should go on Common\n # - This should go on .cfg\n # - It should have a if option for reading or for generating\n phi_values = []\n with open('spe_phi.dat') as phi:\n for line in phi:\n phi_values.extend(line.rstrip().split(' \t'))\n self.phi_values = [float(val) for val in phi_values]\n\n def read_perm(self):\n # TODO: - Should go on Common\n # - This should go on .cfg\n # - It should have a if option for reading or for generating\n\n perm_values = []\n with open('spe_perm.dat') as perm:\n for line in perm:\n line_list = line.rstrip().split(' \t')\n if len(line_list) > 1:\n perm_values.extend(line_list)\n self.perm_values = [float(val) for val in perm_values]\n\n def upscale_phi(self):\n for _, primal in self.primals.iteritems():\n # Calculate mean phi on primal\n fine_elems_in_primal = self.mb.get_entities_by_type(\n primal, types.MBHEX)\n fine_elems_phi_values = self.mb.tag_get_data(self.phi_tag,\n fine_elems_in_primal)\n primal_mean_phi = fine_elems_phi_values.mean()\n # Store mean phi on the primal meshset and internal elements\n self.mb.tag_set_data(self.primal_phi_tag, primal, primal_mean_phi)\n\n def upscale_perm_mean(self, average_method):\n self.primal_perm = (self.primal_perm_x_tag,\n self.primal_perm_y_tag,\n self.primal_perm_z_tag)\n self.average_method = average_method\n basis = ((1, 0, 0), (0, 1, 0), (0, 0, 1))\n perm = []\n for primal_id, primal in self.primals.iteritems():\n\n fine_elems_in_primal = self.mb.get_entities_by_type(\n primal, types.MBHEX)\n fine_perm_values = self.mb.tag_get_data(self.perm_tag,\n fine_elems_in_primal)\n primal_perm = [tensor.reshape(3, 3) for tensor in fine_perm_values]\n for dim in range(0, 3):\n perm = [(np.dot(np.dot(tensor, basis[dim]), basis[dim]))\n for tensor in primal_perm]\n if average_method == 'Arithmetic':\n primal_perm[dim] = np.mean(perm)\n elif average_method == 'Geometric':\n primal_perm[dim] = np.prod(np.asarray(\n perm)) ** len(1 / np.asarray(perm))\n elif average_method == 'Harmonic':\n primal_perm[dim] = len(np.asarray(\n perm)) / sum(1/np.asarray(perm))\n else:\n print(\"Choose either Arithmetic, Geometric or Harmonic.\")\n exit()\n\n perm = primal_perm[dim]\n self.mb.tag_set_data(self.primal_perm[dim], primal, perm)\n\n self.mb.tag_set_data(self.primal_perm_tag, primal,\n [primal_perm[0], 0, 0,\n 0, primal_perm[1], 0,\n 0, 0, primal_perm[2]])\n\n def _primal_centroid(self, setid):\n coarse_sums = np.array(\n [[0, 0, 0],\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 1]]\n )\n primal_centroid = (\n (np.asarray(setid) + coarse_sums[0]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[1]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[2]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[3]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[4]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[5]) *\n np.array([self.coarse_ratio[0],\n 
self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[6]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[7]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]))\n\n primal_centroid = primal_centroid // 8\n return primal_centroid\n\n def get_boundary_meshsets(self):\n\n self.boundary_dir = (self.boundary_x_tag,\n self.boundary_y_tag,\n self.boundary_z_tag\n )\n self.boundary_meshsets = {}\n for dim in range(0, 3):\n for k, idz in zip(xrange(self.mesh_size[2]),\n self.primal_ids[2]):\n for j, idy in zip(xrange(self.mesh_size[1]),\n self.primal_ids[1]):\n for i, idx in zip(xrange(self.mesh_size[0]),\n self.primal_ids[0]):\n el = self._get_elem_by_ijk((i, j, k))\n if (i, j, k)[dim] == (self.coarse_ratio[dim] *\n self.primal_ids[dim][(i, j,\n k)[dim]]):\n self.mb.tag_set_data(self.boundary_dir[dim],\n el, 1.0)\n try:\n boundary_meshset = self.boundary_meshsets[\n (idx, idy, idz), dim]\n self.mb.add_entities(boundary_meshset, [el])\n\n except KeyError:\n boundary_meshset = self.mb.create_meshset()\n self.boundary_meshsets[\n (idx, idy, idz), dim] = boundary_meshset\n self.mb.add_entities(boundary_meshset, [el])\n\n if (i, j, k)[dim] == (self.coarse_ratio[dim] *\n self.primal_ids[dim][\n (i, j, k)[dim]] +\n self._coarsening_ratio(dim)[\n self.primal_ids[dim][\n (i, j, k)[dim]]] - 1):\n self.mb.tag_set_data(\n self.boundary_dir[dim], el, 0.0)\n\n try:\n boundary_meshset = self.boundary_meshsets[\n (idx, idy, idz), dim]\n self.mb.add_entities(boundary_meshset, [el])\n\n except KeyError:\n boundary_meshset = self.mb.create_meshset()\n self.boundary_meshsets[\n (idx, idy, idz), dim] = boundary_meshset\n self.mb.add_entities(boundary_meshset, [el])\n\n def set_global_problem(self):\n pass\n\n def upscale_perm_flow_based(self, domain, dim, boundary_meshset):\n self.average_method = 'flow-based'\n area = (self.block_size[1] * self.block_size[2],\n self.block_size[0] * self.block_size[2],\n self.block_size[0] * self.block_size[1],\n )\n pres_tag = self.mb.tag_get_handle(\n \"Pressure\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n std_map = Epetra.Map(len(domain), 0, self.comm)\n linear_vals = np.arange(0, len(domain))\n id_map = dict(zip(domain, linear_vals))\n boundary_elms = set()\n\n b = Epetra.Vector(std_map)\n x = Epetra.Vector(std_map)\n\n A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)\n\n t0 = time.time()\n for elem in boundary_meshset:\n if elem in boundary_elms:\n continue\n boundary_elms.add(elem)\n idx = id_map[elem]\n A.InsertGlobalValues(idx, [1], [idx])\n b[idx] = self.mb.tag_get_data(self.boundary_dir[dim], elem,\n flat=True)\n\n self.mb.tag_set_data(pres_tag, domain, np.repeat(0.0, len(domain)))\n t1 = time.time()\n for elem in (set(domain) ^ boundary_elms):\n\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(\n np.asarray([elem]), 2, 3, 0)\n adj_volumes = [elems for elems in adj_volumes if elems in domain]\n adj_volumes_set = set(adj_volumes)\n\n elem_center = self.mesh_topo_util.get_average_position(\n np.asarray([elem]))\n K1 = self.mb.tag_get_data(self.perm_tag, [elem], flat=True)\n adj_perms = []\n for adjacencies in range(len(adj_volumes)):\n adj_perms.append(self.mb.tag_get_data(\n self.perm_tag, adj_volumes, flat=True)[\n adjacencies*9:(adjacencies+1)*9])\n values = []\n ids = []\n for K2, adj in zip(adj_perms, adj_volumes_set):\n adj_center = self.mesh_topo_util.get_average_position(\n np.asarray([adj]))\n N = elem_center - 
adj_center\n N = N / np.sqrt(N[0] ** 2 + N[1] ** 2 + N[2] ** 2)\n K1proj = np.dot(np.dot(N, K1.reshape([3, 3])), N)\n K2proj = np.dot(np.dot(N, K2.reshape([3, 3])), N)\n dl = np.linalg.norm((elem_center - adj_center)/2)\n K_eq = (2 * K1proj * K2proj) / (K1proj * dl + K2proj * dl)\n values.append(- K_eq)\n if adj in id_map:\n ids.append(id_map[adj])\n values.append(-sum(values))\n idx = id_map[elem]\n ids.append(idx)\n A.InsertGlobalValues(idx, values, ids)\n A.FillComplete()\n t2 = time.time()\n\n linearProblem = Epetra.LinearProblem(A, x, b)\n solver = AztecOO.AztecOO(linearProblem)\n solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_warnings)\n solver.Iterate(300, 1e-9)\n # \"\"\"\n self.mb.tag_set_data(pres_tag, domain, np.asarray(x))\n print(\"took {0} seconds to solve.\".format(time.time() - t2))\n # Get the flux - should break down in another part\n flow_rate = 0.0\n total_area = 0.0\n for elem in boundary_meshset:\n elem_center = self.mesh_topo_util.get_average_position(\n np.asarray([elem]))\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(\n np.asarray([elem]), 2, 3)\n adj_volumes_set = set(adj_volumes).intersection(set(domain))\n adj_to_boundary_volumes = set()\n for el in adj_volumes_set:\n if el in boundary_meshset:\n adj_to_boundary_volumes.add(el)\n adj_volumes_set = adj_volumes_set - adj_to_boundary_volumes\n for adj in adj_volumes_set:\n adj_center = self.mesh_topo_util.get_average_position(\n np.asarray([adj]))\n N = elem_center - adj_center\n N = N / np.sqrt(N[0] ** 2 + N[1] ** 2 + N[2] ** 2)\n adj_pres = self.mb.tag_get_data(pres_tag, adj)\n adj_perm = np.dot(N, np.dot(self.mb.tag_get_data(\n self.perm_tag, adj).reshape(\n [3, 3]), N))\n elem_perm = np.dot(N, np.dot(self.mb.tag_get_data(\n self.perm_tag, elem).reshape(\n [3, 3]), N))\n dl = np.linalg.norm((elem_center - adj_center)/2)\n K_equiv = (2 * adj_perm * elem_perm) / (adj_perm * dl +\n elem_perm * dl)\n\n flow_rate = flow_rate + area[dim] * K_equiv * adj_pres / dl\n total_area = total_area + area[dim]\n perm = flow_rate * dl / total_area\n return perm\n\n def flow_based_coarse_perm(self):\n\n self.primal_perm = (self.primal_perm_x_tag,\n self.primal_perm_y_tag,\n self.primal_perm_z_tag)\n self.get_boundary_meshsets()\n\n for primal_id, primal in self.primals.iteritems():\n print(\"iterating over meshset {0}\".format(primal_id))\n fine_elems_in_primal = self.mb.get_entities_by_type(\n primal, types.MBHEX)\n # The A matrix should be called here\n for dim in range(0, 3):\n self.mb.add_child_meshset(self.primals[(primal_id)],\n self.boundary_meshsets[\n primal_id, dim])\n boundary = self.mb.get_entities_by_handle(np.asarray(\n self.boundary_meshsets[primal_id, dim]))\n perm = self.upscale_perm_flow_based(fine_elems_in_primal, dim,\n boundary)\n self.mb.tag_set_data(self.primal_perm[dim], primal, perm)\n\n def coarse_grid(self):\n # We should include a switch for either printing coarse grid or fine\n # grid here that is fedy by the .cfg file.\n \"\"\"\n This will not delete primal grid information prevously calculated,\n since it is only looking for elements within the root_set that are\n MBHEX, whilst all props from primal grid are stored as meshsets\n \"\"\"\n fine_grid = self.mb.get_entities_by_type(self.root_set, types.MBHEX)\n self.mb.delete_entities(fine_grid)\n coarse_vertices = self.create_coarse_vertices()\n coarse_dims = self._coarse_dims()\n cur_id = 0\n for k in xrange(coarse_dims[2]):\n print(\"{0} / {1}\".format(k + 1, coarse_dims[2]))\n for j in xrange(coarse_dims[1]):\n for i in 
xrange(coarse_dims[0]):\n\n hexa = self._create_hexa(i, j, k,\n coarse_vertices,\n coarse_dims)\n el = self.mb.create_element(types.MBHEX, hexa)\n\n # Assign coarse scale properties previously calculated\n\n self.mb.tag_set_data(\n self.coarse_gid_tag, el, cur_id)\n self.mb.tag_set_data(self.primal_phi_tag, el,\n self.mb.tag_get_data(\n self.primal_phi_tag,\n self.primals[(i, j, k)]))\n self.mb.tag_set_data(self.primal_perm_tag, el, [\n self.mb.tag_get_data(self.primal_perm[0],\n self.primals[(i, j, k)]), 0, 0,\n 0, self.mb.tag_get_data(self.primal_perm[1],\n self.primals[(i, j, k)]), 0, 0,\n 0, self.mb.tag_get_data(self.primal_perm[2],\n self.primals[(i, j, k)])])\n self.mb.tag_set_data(self.abs_perm_x_tag, el,\n self.mb.tag_get_data(self.primal_perm[\n 0], self.primals[(i, j, k)]))\n self.coarse_elems.append(el)\n cur_id += 1\n\n def _get_block_by_ijk_coarse(self, i, j, k):\n # TODO: - Should go on Common\n # - Should reformulate to get self.mesh_size instead of input\n mesh_size_coarse = self._coarse_dims()\n \"\"\"\n Track down the block from its (i,j,k) position.\n \"\"\"\n block = (k) * mesh_size_coarse[0] * mesh_size_coarse[1]+(\n (i)+(j) * mesh_size_coarse[0])\n return block\n\n def _get_elem_by_ijk_coarse(self, ijk):\n # TODO Should go on Common\n\n block_id = self._get_block_by_ijk_coarse(\n ijk[0], ijk[1], ijk[2])\n elem = self.coarse_elems[block_id]\n return elem\n\n def create_wells(self):\n mesh_size_coarse = self._coarse_dims()\n \"\"\"(self.mesh_size[0],\n self.mesh_size[1],\n self.mesh_size[2]) \"\"\" # ,self._coarse_dims()\n self.injection_wells_coarse = {}\n self.production_wells_coarse = {}\n\n self.injection_wells_coarse[1] = self.mb.create_meshset()\n\n self.production_wells_coarse[1] = self.mb.create_meshset()\n self.production_wells_coarse[2] = self.mb.create_meshset()\n self.production_wells_coarse[3] = self.mb.create_meshset()\n self.production_wells_coarse[4] = self.mb.create_meshset()\n\n well = [self._get_elem_by_ijk_coarse((0, mesh_size_coarse[1] - 1, z))\n for z in range(0, mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.production_wells_coarse[1], [well_el])\n self.mb.tag_set_data(self.coarse_production_tag,\n self.production_wells_coarse[1], 1)\n\n well = [self._get_elem_by_ijk_coarse((0, 1, z))\n for z in range(0, mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.production_wells_coarse[2], [well_el])\n self.mb.tag_set_data(self.coarse_production_tag,\n self.production_wells_coarse[2], 1)\n\n well = [self._get_elem_by_ijk_coarse((mesh_size_coarse[0] - 1,\n mesh_size_coarse[1] - 1, z)) for z in range(0,\n mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.production_wells_coarse[3], [well_el])\n self.mb.tag_set_data(self.coarse_production_tag,\n self.production_wells_coarse[3], 1)\n\n well = [self._get_elem_by_ijk_coarse((mesh_size_coarse[0] - 1,\n mesh_size_coarse[1] - 1, z))\n for z in range(0, mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.production_wells_coarse[4], [well_el])\n self.mb.tag_set_data(self.coarse_production_tag,\n self.production_wells_coarse[4], 1)\n\n well = [self._get_elem_by_ijk_coarse((0, 0, z)) for z in range(0,\n mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.injection_wells_coarse[1], [well_el])\n self.mb.tag_set_data(self.coarse_injection_tag,\n self.injection_wells_coarse[1], 1)\n # def solve_it():\n\n def export_data(self):\n writedir = ('I', 'J', 'K')\n mesh_size_coarse = self._coarse_dims()\n with 
open('coarse_phi{0}_{1}.dat'.format(\n self.coarse_ratio, self.average_method), 'w') as coarse_phi:\n coarse_phi.write('*POR *ALL')\n coarse_phi.write('\\n')\n for k in xrange(mesh_size_coarse[2]):\n # coarse_phi.write('-- LAYER {0}'.format(k+1))\n coarse_phi.write('\\n')\n for j in xrange(mesh_size_coarse[1]):\n\n # coarse_phi.write('-- ROW {0}'.format(j+1))\n coarse_phi.write('\\n')\n for i in xrange(mesh_size_coarse[0]):\n if i < mesh_size_coarse[0] - 1:\n coarse_phi.write('%f' % (self.mb.tag_get_data(\n self.primal_phi_tag,\n self.primals[(i, j, k)])\n )\n )\n coarse_phi.write(' \t')\n else:\n coarse_phi.write('%f' % (self.mb.tag_get_data(\n self.primal_phi_tag,\n self.primals[(i, j, k)])\n )\n )\n coarse_phi.write('\\n')\n coarse_phi.close()\n with open('coarse_perm{0}_{1}.dat'.format(\n self.coarse_ratio, self.average_method), 'w') as coarse_perm:\n for dim in range(0, 3):\n coarse_perm.write('*PERM{0} *ALL'.format(writedir[dim]))\n coarse_perm.write('\\n')\n for k in xrange(mesh_size_coarse[2]):\n # coarse_perm.write('-- LAYER {0}'.format(k+1))\n coarse_perm.write('\\n')\n for j in xrange(mesh_size_coarse[1]):\n # coarse_perm.write('-- ROW {0}'.format(j+1))\n coarse_perm.write('\\n')\n for i in xrange(mesh_size_coarse[0]):\n if i < mesh_size_coarse[0] - 1:\n\n coarse_perm.write(\n '%f' % (self.mb.tag_get_data(\n self.primal_perm[dim],\n self.primals[(i, j, k)])))\n coarse_perm.write(' \t')\n else:\n coarse_perm.write(\n '%f' % (self.mb.tag_get_data(\n self.primal_perm[dim],\n self.primals[(i, j, k)])))\n coarse_perm.write('\\n')\n coarse_perm.close()\n\n def export(self, outfile):\n self.mb.write_file(outfile)\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.asarray", "numpy.arange", "numpy.linalg.norm", "numpy.mean", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
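
In the StructuredUpscalingMethods entry above, upscale_perm_mean collapses each coarse block's fine-cell permeabilities to one value per axis using an arithmetic, geometric, or harmonic average. A NumPy-only sketch of those three reductions on made-up values; note that the geometric branch in the serialized code raises the product to len(...) where the textbook geometric mean uses 1/len(...), so the sketch follows the standard definition rather than that code path:

import numpy as np

perm = np.array([100.0, 250.0, 80.0, 400.0])  # fine-cell permeabilities along one axis

arithmetic = perm.mean()
geometric = np.prod(perm) ** (1.0 / perm.size)   # standard geometric mean
harmonic = perm.size / np.sum(1.0 / perm)
print(arithmetic, geometric, harmonic)           # harmonic <= geometric <= arithmetic
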
satsumas/tensorflow
[ "3fe3f2b1984aab6f159b89aa3ab0069988925689", "3fe3f2b1984aab6f159b89aa3ab0069988925689" ]
[ "tensorflow/python/module/module.py", "tensorflow/python/keras/mixed_precision/experimental/loss_scale.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Modules encapsulate building stateful components.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"Module\")\nclass Module(tracking.AutoTrackable):\n \"\"\"Base neural network module class.\n\n A module is a named container for `tf.Variable`s, other `tf.Module`s and\n functions which apply to user input. For example a dense layer in a neural\n network might be implemented as a `tf.Module`:\n\n ```python\n >>> class Dense(tf.Module):\n ... def __init__(self, in_features, output_features, name=None):\n ... super(Dense, self).__init__(name=name)\n ... self.w = tf.Variable(\n ... tf.random_normal([input_features, output_features]), name='w')\n ... self.b = tf.Variable(tf.zeros([output_features]), name='b')\n ...\n ... def __call__(self, x):\n ... y = tf.matmul(x, self.w) + self.b\n ... return tf.nn.relu(y)\n ```\n\n You can use the Dense layer as you would expect:\n\n ```python\n >>> d = Dense(input_features=64, output_features=10)\n >>> d(tf.ones([100, 64]))\n <tf.Tensor: ...>\n ```\n\n By subclassing `tf.Module` instead of `object` any `tf.Variable` or\n `tf.Module` instances assigned to object properties can be collected using\n the `variables`, `trainable_variables` or `submodules` property:\n\n ```python\n >>> d.variables\n (<tf.Variable 'b:0' ...>, <tf.Variable 'w:0' ...>)\n ```\n\n Subclasses of `tf.Module` can also take advantage of the `_flatten` method\n which can be used to implement tracking of any other types.\n\n All `tf.Module` classes have an associated `tf.name_scope` which can be used\n to group operations in TensorBoard and create hierarchies for variable names\n which can help with debugging. We suggest using the name scope when creating\n nested submodules/parameters or for forward methods whose graph you might want\n to inspect in TensorBoard. You can enter the name scope explicitly using\n `with self.name_scope:` or you can annotate methods (apart from `__init__`)\n with `@tf.Module.with_name_scope`.\n\n ```python\n >>> class MLP(tf.Module):\n ... def __init__(self, input_size, sizes, name=None):\n ... super(MLP, self).__init__(name=name)\n ... self.layers = []\n ... with self.name_scope:\n ... for size in sizes:\n ... self.layers.append(Dense(input_size=input_size, output_size=size))\n ... input_size = size\n ...\n ... @tf.Module.with_name_scope\n ... def __call__(self, x):\n ... for layer in self.layers:\n ... 
x = layer(x)\n ... return x\n ```\n \"\"\"\n\n def __init__(self, name=None):\n if name is None:\n name = camel_to_snake(type(self).__name__)\n else:\n if not valid_identifier(name):\n raise ValueError(\n \"%r is not a valid module name. Module names must be valid Python \"\n \"identifiers (e.g. a valid class name).\" % name)\n\n self._name = name\n with ops.name_scope(name) as scope_name:\n self._scope_name = scope_name\n\n @property\n def name(self):\n \"\"\"Returns the name of this module as passed or determined in the ctor.\n\n NOTE: This is not the same as the `self.name_scope.name` which includes\n parent module names.\n \"\"\"\n return self._name\n\n @property\n def name_scope(self):\n \"\"\"Returns a `tf.name_scope` instance for this class.\"\"\"\n # TODO(tomhennigan) Memoize once name scopes are re-entrant.\n return ops.name_scope(self._scope_name)\n\n @property\n def variables(self):\n \"\"\"Sequence of variables owned by this module and it's submodules.\n\n Note: this method uses reflection to find variables on the current instance\n and submodules. For performance reasons you may wish to cache the result\n of calling this method if you don't expect the return value to change.\n\n Returns:\n A sequence of variables for the current module (sorted by attribute\n name) followed by variables from all submodules recursively (breadth\n first).\n \"\"\"\n return tuple(self._flatten(predicate=_is_variable_like))\n\n @property\n def trainable_variables(self):\n \"\"\"Sequence of variables owned by this module and it's submodules.\n\n Note: this method uses reflection to find variables on the current instance\n and submodules. For performance reasons you may wish to cache the result\n of calling this method if you don't expect the return value to change.\n\n Returns:\n A sequence of variables for the current module (sorted by attribute\n name) followed by variables from all submodules recursively (breadth\n first).\n \"\"\"\n return tuple(self._flatten(predicate=_is_trainable_variable))\n\n @property\n def submodules(self):\n \"\"\"Sequence of all sub-modules.\n\n Submodules are modules which are properties of this module, or found as\n properties of modules which are properties of this module (and so on).\n\n >>> a = tf.Module()\n >>> b = tf.Module()\n >>> c = tf.Module()\n >>> a.b = b\n >>> b.c = c\n >>> assert list(a.submodules) == [b, c]\n >>> assert list(b.submodules) == [c]\n >>> assert list(c.submodules) == []\n\n Returns:\n A sequence of all submodules.\n \"\"\"\n return tuple(self._flatten(predicate=_is_module))\n\n def _flatten(self,\n recursive=True,\n predicate=None,\n attribute_traversal_key=None,\n with_path=False):\n \"\"\"Flattened attribute values in sorted order by attribute name.\n\n Modules are flattened by first walking their attributes in name order.\n Each attribute value is then flattened to find leaf values. If flatten is\n to be applied `recursive`ly then if the leaf is a `Module` it will also be\n flattened to find leaves. Finally every leaf value is optionally tested\n against the given `predicate` and finally yielded.\n\n >>> class Foo(tf.Module):\n ... def __init__(self):\n ... super(Foo, self).__init__()\n ... self.x = [tf.constant('a'), tf.constant('b')]\n ... self.y = {'i': tf.constant('c'), 'j': tf.constant('d')}\n ... self.z = tf.constant('e')\n ...\n ... @property\n ... def tensors(self):\n ... 
return tuple(self._flatten(predicate=is_tensor, with_path=True))\n\n >>> foo = Foo()\n >>> foo.tensors\n ((('x', 0), <tf.Tensor: ...'a'>),\n (('x', 1), <tf.Tensor: ...'b'>),\n (('y', 'i'), <tf.Tensor: ...'c'>),\n (('y', 'j'), <tf.Tensor: ...'d'>),\n (('z',), <tf.Tensor: ...'e'>))\n\n `attribute_traversal_key` controls the order object properties are visited.\n If not set objects are visited in ascending order by name.\n\n Args:\n recursive: Whether to recurse into child modules or not.\n predicate: (Optional) If set then only values matching predicate are\n yielded. A value of `None` (the default) means no items will be\n filtered.\n attribute_traversal_key: (Optional) Method to rekey object attributes\n before they are sorted. Contract is the same as `key` argument to\n builtin `sorted` and only applies to object properties.\n with_path: (Optional) Whether to include the path to the object as well\n as the object itself. If `with_path` is `True` then leaves will not be\n de-duplicated (e.g. if the same leaf instance is reachable via multiple\n modules then it will be yielded multiple times with different paths).\n\n Returns:\n Flat generator for leaves of the current module and optionally all\n submodules.\n \"\"\"\n if predicate is None:\n predicate = lambda _: True\n\n return _flatten_module(\n self,\n recursive=recursive,\n predicate=predicate,\n attribute_traversal_key=attribute_traversal_key,\n with_path=with_path)\n\n @classmethod\n def with_name_scope(cls, method):\n \"\"\"Decorator to automatically enter the module name scope.\n\n >>> class MyModule(tf.Module):\n ... @tf.Module.with_name_scope\n ... def __call__(self, x):\n ... if not hasattr(self, 'w'):\n ... self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))\n ... return tf.matmul(x, self.w)\n\n Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose\n names included the module name:\n\n >>> mod = MyModule()\n >>> mod(tf.ones([8, 32]))\n <tf.Tensor: ...>\n >>> mod.w\n <tf.Variable ...'my_module/w:0'>\n\n Args:\n method: The method to wrap.\n\n Returns:\n The original method wrapped such that it enters the module's name scope.\n \"\"\"\n def method_with_name_scope(self, *args, **kwargs):\n with self.name_scope:\n return method(self, *args, **kwargs)\n\n return tf_decorator.make_decorator(method, method_with_name_scope)\n\n\ndef _is_variable_like(obj):\n return (isinstance(obj, variables.Variable) or\n resource_variable_ops.is_resource_variable(obj))\n\n\ndef _is_trainable_variable(obj):\n return _is_variable_like(obj) and getattr(obj, \"trainable\", False)\n\n\ndef _is_module(obj):\n return isinstance(obj, Module)\n\n_CAMEL_TO_SNAKE_R = re.compile(r\"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))\")\n_VALID_IDENTIFIER = re.compile(r\"^[a-zA-Z_]([a-zA-Z0-9_])*$\")\n\n\ndef valid_identifier(name):\n return bool(_VALID_IDENTIFIER.match(name))\n\n\ndef camel_to_snake(value):\n return _CAMEL_TO_SNAKE_R.sub(r\"_\\1\", value).lower()\n\n\n# AutoTrackable adds object attributes that users will not expect us to\n# include when flattening (these reference dependencies reachable via other\n# object attributes).\nAUTO_CHECKPOINTABLE_ATTRS = (\"_unconditional_checkpoint_dependencies\",\n \"_unconditional_dependency_names\")\n\n\ndef _flatten_module(module,\n recursive,\n predicate,\n attribute_traversal_key,\n with_path,\n module_path=(),\n seen=None):\n \"\"\"Implementation of `flatten`.\"\"\"\n if seen is None:\n seen = set([id(module)])\n\n module_dict = vars(module)\n submodules = []\n\n for key in sorted(module_dict, 
key=attribute_traversal_key):\n if key in AUTO_CHECKPOINTABLE_ATTRS:\n continue\n\n for leaf_path, leaf in nest.flatten_with_tuple_paths(module_dict[key]):\n leaf_path = (key,) + leaf_path\n\n # TODO(tomhennigan) Handle cycles for `with_path=True` (e.g. `a.a = a`).\n if not with_path:\n leaf_id = id(leaf)\n if leaf_id in seen:\n continue\n seen.add(leaf_id)\n\n if predicate(leaf):\n if with_path:\n yield module_path + leaf_path, leaf\n else:\n yield leaf\n\n if recursive and _is_module(leaf):\n # Walk direct properties first then recurse.\n submodules.append((module_path + leaf_path, leaf))\n\n for submodule_path, submodule in submodules:\n subvalues = _flatten_module(\n submodule,\n recursive=recursive,\n predicate=predicate,\n attribute_traversal_key=attribute_traversal_key,\n with_path=with_path,\n module_path=submodule_path,\n seen=seen)\n\n for subvalue in subvalues:\n # Predicate is already tested for these values.\n yield subvalue\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains LossScale classes.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport six\n\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.utils.generic_utils import deserialize_keras_object\nfrom tensorflow.python.keras.utils.generic_utils import serialize_keras_object\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util.tf_export import keras_export\n\n\[email protected]_metaclass(abc.ABCMeta)\n@keras_export('keras.mixed_precision.experimental.LossScale')\nclass LossScale(trackable.Trackable):\n \"\"\"Loss scale base class.\n\n Instances of this class represent a loss scale. 
Calling instances of this\n class returns the loss scale as a scalar float32 tensor, while method\n `update()` updates the loss scale depending on the values of the gradients.\n Optimizers use instances of this class to scale loss and gradients.\n \"\"\"\n\n @abc.abstractmethod\n def __call__(self):\n \"\"\"Returns the current loss scale as a scalar `float32` tensor.\"\"\"\n pass\n\n @abc.abstractmethod\n def update(self, grads):\n \"\"\"Updates the value of the loss scale.\n\n The loss scale will be potentially updated, based on the value of `grads`.\n The tensor returned by calling this class is only updated when this function\n is evaluated.\n\n In eager mode, this directly updates the loss scale, so that calling\n `__call__` will return the newly updated loss scale. In graph mode,\n this returns an op that, when evaluated, updates the loss scale.\n\n This function also returns a `should_apply_gradients` bool. If False,\n gradients should not be applied to the variables that step, as nonfinite\n gradients were found, and the loss scale has been be updated to reduce the\n chance of finding nonfinite gradients in the next step. Some loss scale\n classes will always return True, as they cannot adjust themselves in\n response to nonfinite gradients.\n\n When a DistributionStrategy is used, this function may only be called in a\n cross-replica context.\n\n Args:\n grads: A list of unscaled gradients, each which is the gradient of the\n loss with respect to a weight. The gradients should have already been\n divided by the loss scale being before passed to this function.\n\n Returns:\n update_op: In eager mode, None. In graph mode, an op to update the loss\n scale.\n should_apply_gradients: Either a bool or a scalar boolean tensor. If\n False, the caller should skip applying `grads` to the variables this\n step.\n \"\"\"\n pass\n\n def _add_weight(self,\n name,\n shape=(),\n dtype=None,\n initializer='zeros'):\n \"\"\"Adds a weight to this loss scale.\n\n Args:\n name: Variable name.\n shape: Variable shape.\n dtype: The type of the variable.\n initializer: The initializer to use.\n\n Returns:\n A variable.\n \"\"\"\n if isinstance(initializer, six.string_types) or callable(initializer):\n initializer = initializers.get(initializer)\n variable = self._add_variable_with_custom_getter(\n name=name,\n shape=shape,\n getter=base_layer_utils.make_variable,\n overwrite=True,\n initializer=initializer,\n dtype=dtype,\n trainable=False,\n use_resource=True,\n synchronization=variables.VariableSynchronization.AUTO,\n # Set aggregation to NONE, as loss scaling variables should never be\n # aggregated.\n aggregation=variables.VariableAggregation.NONE)\n backend.track_variable(variable)\n return variable\n\n @abc.abstractmethod\n def get_config(self):\n \"\"\"Returns the config of this loss scale.\"\"\"\n pass\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates the LossScale from its config.\"\"\"\n return cls(**config)\n\n\n@keras_export('keras.mixed_precision.experimental.FixedLossScale')\nclass FixedLossScale(LossScale):\n \"\"\"A loss scale with a fixed value.\n\n The loss scale is not updated for the lifetime of instances of this class.\n A given instance of this class always returns the same number when called.\n \"\"\"\n\n def __init__(self, loss_scale_value):\n \"\"\"Creates the fixed loss scale.\n\n Args:\n loss_scale_value: A Python float. Its ideal value varies depending on\n models to run. 
Choosing a too small loss_scale might affect model quality;\n a too big loss_scale might cause inf or nan. There is no single right\n loss_scale to apply. There is no harm choosing a relatively big number as\n long as no nan or inf is encountered in training.\n\n Raises:\n ValueError: If loss_scale is less than 1.\n \"\"\"\n super(FixedLossScale, self).__init__()\n if not isinstance(loss_scale_value, six.integer_types + (float,)):\n raise ValueError('loss_scale_value must be a Python int or float.')\n if loss_scale_value < 1:\n raise ValueError('loss_scale_value must be at least 1.')\n self._python_loss_scale = float(loss_scale_value)\n self._tensor_loss_scale = ops.convert_to_tensor(self._python_loss_scale,\n dtype=dtypes.float32)\n\n def __call__(self):\n return self._tensor_loss_scale\n\n def update(self, grads):\n del grads\n return control_flow_ops.no_op(), True\n\n def get_config(self):\n return {'loss_scale_value': self._python_loss_scale}\n\n\ndef _is_all_finite(grads):\n \"\"\"Returns a scalar boolean tensor indicating if all gradients are finite.\"\"\"\n is_finite_per_grad = [math_ops.reduce_all(math_ops.is_finite(g))\n for g in grads]\n return math_ops.reduce_all(is_finite_per_grad)\n\n\ndef _op_in_graph_mode(tensor):\n \"\"\"Returns the tensor's op in graph mode, or the tensor in eager mode.\n\n This is useful because sometimes an op is needed in graph mode instead of a\n tensor. In eager mode, there are no ops.\n\n Args:\n tensor: A tensor.\n\n Returns:\n The tensor's op in graph mode. The tensor in eager mode.\n \"\"\"\n if context.executing_eagerly():\n return tensor\n else:\n return tensor.op\n\n\ndef _assign_if_finite(var, value):\n \"\"\"Assigns a value to a variable if the value is finite.\"\"\"\n return control_flow_ops.cond(\n math_ops.is_finite(value),\n lambda: _op_in_graph_mode(var.assign(value)),\n control_flow_ops.no_op)\n\n\n@keras_export('keras.mixed_precision.experimental.DynamicLossScale')\nclass DynamicLossScale(LossScale):\n \"\"\"A loss scale that dynamically adjusts itself.\n\n Dynamic loss scaling works by adjusting the loss scale as training progresses.\n The goal is to keep the loss scale as high as possible without overflowing the\n gradients. As long as the gradients do not overflow, raising the loss scale\n never hurts.\n\n The algorithm starts by setting the loss scale to an initial value. Every N\n steps that the gradients are finite, the loss scale is increased by some\n factor. However, if a NaN or Inf gradient is found, the gradients for that\n step are not applied, and the loss scale is decreased by the factor. This\n process tends to keep the loss scale as high as possible without gradients\n overflowing.\n \"\"\"\n\n def __init__(self,\n initial_loss_scale=2 ** 15, # See docstring for why this is big.\n increment_period=2000,\n multiplier=2.):\n \"\"\"Creates the dynamic loss scale.\n\n Args:\n initial_loss_scale: A Python float. The loss scale to use at the\n beginning. It's better to start this at a very high number, because a\n loss scale that is too high gets lowered far more quickly than a loss\n scale that is to low gets raised. The default is 2 ** 15, which is\n approximately half the maximum float16 value.\n increment_period: Increases loss scale every `increment_period`\n consecutive steps that finite gradients are encountered. 
If a nonfinite\n gradient is encountered, the count is reset back to zero.\n multiplier: The multiplier to use when increasing or decreasing\n the loss scale.\n \"\"\"\n super(DynamicLossScale, self).__init__()\n self._initial_loss_scale = float(initial_loss_scale)\n self._increment_period = int(increment_period)\n self._multiplier = float(multiplier)\n\n self._current_loss_scale = self._add_weight(\n name='current_loss_scale',\n dtype=dtypes.float32,\n initializer=self._initial_loss_scale)\n # The number of consecutive steps with finite gradients since the last\n # nonfinite gradient or change in loss scale.\n self._num_good_steps = self._add_weight(\n name='good_steps', dtype=dtypes.int64, initializer='zeros')\n\n @property\n def initial_loss_scale(self):\n return self._initial_loss_scale\n\n @property\n def increment_period(self):\n return self._increment_period\n\n @property\n def multiplier(self):\n return self._multiplier\n\n def __call__(self):\n return self._current_loss_scale\n\n def update(self, grads):\n \"\"\"Updates loss scale based on if gradients are finite in current step.\"\"\"\n if distribution_strategy_context.has_strategy():\n distribution = distribution_strategy_context.get_cross_replica_context()\n def get_is_finite(grads):\n is_finite = _is_all_finite(grads)\n # We cast to float, because we cannot reduce booleans with\n # DistributionStrategy.\n return math_ops.cast(is_finite, dtypes.float32)\n is_finite_float = distribution.extended.call_for_each_replica(\n get_is_finite, args=(grads,))\n reduced_is_finite_float = distribution.reduce(reduce_util.ReduceOp.SUM,\n is_finite_float)\n is_finite = math_ops.equal(reduced_is_finite_float,\n distribution.num_replicas_in_sync)\n else:\n is_finite = _is_all_finite(grads)\n\n def update_if_finite_grads():\n \"\"\"Update assuming the gradients are finite.\"\"\"\n\n def incr_loss_scale():\n new_loss_scale = self._current_loss_scale * self._multiplier\n return control_flow_ops.group(\n _assign_if_finite(self._current_loss_scale, new_loss_scale),\n self._num_good_steps.assign(0))\n\n return control_flow_ops.cond(\n self._num_good_steps + 1 >= self._increment_period,\n incr_loss_scale,\n lambda: _op_in_graph_mode(self._num_good_steps.assign_add(1)))\n\n def update_if_not_finite_grads():\n \"\"\"Update assuming the gradients are nonfinite.\"\"\"\n\n new_loss_scale = math_ops.maximum(\n self._current_loss_scale / self._multiplier, 1)\n return control_flow_ops.group(\n self._num_good_steps.assign(0),\n self._current_loss_scale.assign(new_loss_scale)\n )\n\n update_op = control_flow_ops.cond(is_finite, update_if_finite_grads,\n update_if_not_finite_grads)\n should_apply_gradients = is_finite\n return update_op, should_apply_gradients\n\n def get_config(self):\n return {\n 'initial_loss_scale': self.initial_loss_scale,\n 'increment_period': self.increment_period,\n 'multiplier': self.multiplier,\n }\n\n\ndef serialize(loss_scale):\n return serialize_keras_object(loss_scale)\n\n\ndef deserialize(config, custom_objects=None):\n return deserialize_keras_object(\n config,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name='loss scale')\n\n\ndef get(identifier):\n \"\"\"Get a loss scale object.\"\"\"\n if isinstance(identifier, six.integer_types + (float,)):\n return FixedLossScale(identifier)\n if identifier == 'dynamic':\n return DynamicLossScale()\n if isinstance(identifier, LossScale):\n return identifier\n elif identifier is None:\n return None\n elif isinstance(identifier, dict):\n return 
deserialize(identifier)\n else:\n raise ValueError('Could not interpret loss scale identifier: %s'\n % identifier)\n" ]
[ [ "tensorflow.python.util.nest.flatten_with_tuple_paths", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.util.tf_decorator.make_decorator", "tensorflow.python.ops.resource_variable_ops.is_resource_variable" ], [ "tensorflow.python.ops.math_ops.cast", "tensorflow.python.keras.backend.track_variable", "tensorflow.python.ops.math_ops.reduce_all", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.distribute.distribution_strategy_context.get_cross_replica_context", "tensorflow.python.ops.math_ops.is_finite", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.distribute.distribution_strategy_context.has_strategy", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.ops.math_ops.maximum", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.keras.initializers.get", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.keras.utils.generic_utils.serialize_keras_object" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
pikerbright/DeblurGAN
[ "39e8a4b408b90d0ef98608c5c4562eae3e184251" ]
[ "motion_blur/blur_image.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nfrom scipy import signal\nfrom scipy import misc\nfrom motion_blur.generate_PSF import PSF\nfrom motion_blur.generate_trajectory import Trajectory\n\n\nclass BlurImage(object):\n\n def __init__(self, image_path, PSFs=None, part=None, path__to_save=None):\n \"\"\"\n\n :param image_path: path to square, RGB image.\n :param PSFs: array of Kernels.\n :param part: int number of kernel to use.\n :param path__to_save: folder to save results.\n \"\"\"\n if os.path.isfile(image_path):\n self.image_path = image_path\n self.original = misc.imread(self.image_path)\n self.shape = self.original.shape\n if len(self.shape) < 3:\n raise Exception('We support only RGB images yet.')\n elif self.shape[0] != self.shape[1]:\n raise Exception('We support only square images yet.')\n else:\n raise Exception('Not correct path to image.')\n self.path_to_save = path__to_save\n if PSFs is None:\n if self.path_to_save is None:\n self.PSFs = PSF(canvas=self.shape[0]).fit()\n else:\n self.PSFs = PSF(canvas=self.shape[0], path_to_save=os.path.join(self.path_to_save,\n 'PSFs.png')).fit(save=True)\n else:\n self.PSFs = PSFs\n\n self.part = part\n self.result = []\n\n def blur_image(self, save=False, show=False):\n if self.part is None:\n psf = self.PSFs\n else:\n psf = [self.PSFs[self.part]]\n yN, xN, channel = self.shape\n key, kex = self.PSFs[0].shape\n delta = yN - key\n assert delta >= 0, 'resolution of image should be higher than kernel'\n result=[]\n if len(psf) > 1:\n for p in psf:\n tmp = np.pad(p, delta // 2, 'constant')\n cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n # blured = np.zeros(self.shape)\n blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_32F)\n blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))\n blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))\n blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))\n blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)\n result.append(np.abs(blured))\n else:\n psf = psf[0]\n tmp = np.pad(psf, delta // 2, 'constant')\n cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_32F)\n blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))\n blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))\n blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))\n blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)\n result.append(np.abs(blured))\n self.result = result\n if show or save:\n self.__plot_canvas(show, save)\n\n def __plot_canvas(self, show, save):\n if len(self.result) == 0:\n raise Exception('Please run blur_image() method first.')\n else:\n plt.close()\n plt.axis('off')\n fig, axes = plt.subplots(1, len(self.result), figsize=(10, 10))\n if len(self.result) > 1:\n for i in range(len(self.result)):\n axes[i].imshow(self.result[i])\n else:\n plt.axis('off')\n\n plt.imshow(self.result[0])\n if show and save:\n if self.path_to_save is None:\n raise Exception('Please create Trajectory instance with path_to_save')\n 
cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)\n plt.show()\n elif save:\n if self.path_to_save is None:\n raise Exception('Please create Trajectory instance with path_to_save')\n cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)\n elif show:\n plt.show()\n\n\nif __name__ == '__main__':\n folder = '/Users/mykolam/PycharmProjects/University/DeblurGAN2/results_sharp'\n folder_to_save = '/Users/mykolam/PycharmProjects/University/DeblurGAN2/blured'\n params = [0.01, 0.009, 0.008, 0.007, 0.005, 0.003]\n for path in os.listdir(folder):\n print(path)\n trajectory = Trajectory(canvas=64, max_len=60, expl=np.random.choice(params)).fit()\n psf = PSF(canvas=64, trajectory=trajectory).fit()\n BlurImage(os.path.join(folder, path), PSFs=psf,\n path__to_save=folder_to_save, part=np.random.choice([1, 2, 3])).\\\n blur_image(save=True)\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.pad", "scipy.signal.fftconvolve", "numpy.abs", "numpy.random.choice", "scipy.misc.imread", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.10", "0.16", "0.19", "0.18", "0.12", "1.0", "0.17", "1.2" ], "tensorflow": [] } ]
vibhatha/PSGDSVMPY
[ "69ed88f5db8d9a250ee944f44b88e54351f8696f" ]
[ "examples/SvmSgd.py" ]
[ "import numpy as np\n\nX = np.array([\n [-2,4,-1],\n [4,1,-1],\n [1, 6, -1],\n [2, 4, -1],\n [6, 2, -1],\n\n])\n\ny = np.array([-1,-1,1,1,1])\n\ndef svm_sgd(X, Y):\n\n w = np.zeros(len(X[0]))\n eta = 1\n epochs = 100000\n\n for epoch in range(1,epochs):\n for i, x in enumerate(X):\n if (Y[i]*np.dot(X[i], w)) < 1:\n w = w + eta * ( (X[i] * Y[i]) + (-2 *(1/epoch)* w) )\n else:\n w = w + eta * (-2 *(1/epoch)* w)\n\n return w\n\nw = svm_sgd(X,y)\nprint(w)\n" ]
[ [ "numpy.dot", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SudeepSarkar/equilibrium-propagation
[ "ba6d9ee5426445e9ad91c96c816fa5287ff97258", "ba6d9ee5426445e9ad91c96c816fa5287ff97258" ]
[ "run_energy_model_mnist.py", "lib/config.py" ]
[ "# MIT License\n\n# Copyright (c) 2020 Simon Schug, João Sacramento\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport argparse\nimport json\nimport logging\nimport sys\n\nimport torch\n\nfrom lib import config, data, energy, train, utils\n\n\ndef load_default_config(energy):\n \"\"\"\n Load default parameter configuration from file.\n\n Args:\n tasks: String with the energy name\n\n Returns:\n Dictionary of default parameters for the given energy\n \"\"\"\n if energy == \"restr_hopfield\":\n default_config = \"etc/energy_restr_hopfield.json\"\n elif energy == \"cond_gaussian\":\n default_config = \"etc/energy_cond_gaussian.json\"\n else:\n raise ValueError(\"Energy based model \\\"{}\\\" not defined.\".format(energy))\n\n with open(default_config) as config_json_file:\n cfg = json.load(config_json_file)\n\n return cfg\n\n\ndef parse_shell_args(args):\n \"\"\"\n Parse shell arguments for this script.\n\n Args:\n args: List of shell arguments\n\n Returns:\n Dictionary of shell arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Train an energy-based model on MNIST using Equilibrium Propagation.\"\n )\n\n parser.add_argument(\"--batch_size\", type=int, default=argparse.SUPPRESS,\n help=\"Size of mini batches during training.\")\n parser.add_argument(\"--c_energy\", choices=[\"cross_entropy\", \"squared_error\"],\n default=argparse.SUPPRESS, help=\"Supervised learning cost function.\")\n parser.add_argument(\"--dimensions\", type=int, nargs=\"+\",\n default=argparse.SUPPRESS, help=\"Dimensions of the neural network.\")\n parser.add_argument(\"--energy\", choices=[\"cond_gaussian\", \"restr_hopfield\"],\n default=\"cond_gaussian\", help=\"Type of energy-based model.\")\n parser.add_argument(\"--epochs\", type=int, default=argparse.SUPPRESS,\n help=\"Number of epochs to train.\")\n parser.add_argument(\"--fast_ff_init\", action='store_true', default=argparse.SUPPRESS,\n help=\"Flag to enable fast feedforward initialization.\")\n parser.add_argument(\"--learning_rate\", type=float, default=argparse.SUPPRESS,\n help=\"Learning rate of the optimizer.\")\n parser.add_argument(\"--log_dir\", type=str, default=\"\",\n help=\"Subdirectory within ./log/ to store logs.\")\n parser.add_argument(\"--nonlinearity\", choices=[\"leaky_relu\", \"relu\", \"sigmoid\", \"tanh\"],\n default=argparse.SUPPRESS, help=\"Nonlinearity between network layers.\")\n parser.add_argument(\"--optimizer\", choices=[\"adam\", \"adagrad\", \"sgd\"],\n default=argparse.SUPPRESS, 
help=\"Optimizer used to train the model.\")\n parser.add_argument(\"--seed\", type=int, default=argparse.SUPPRESS,\n help=\"Random seed for pytorch\")\n\n return vars(parser.parse_args(args))\n\n\ndef run_energy_model_mnist(cfg):\n \"\"\"\n Main script.\n\n Args:\n cfg: Dictionary defining parameters of the run\n \"\"\"\n # Initialize seed if specified (might slow down the model)\n if cfg['seed'] is not None:\n torch.manual_seed(cfg['seed'])\n\n # Create the cost function to be optimized by the model\n c_energy = utils.create_cost(cfg['c_energy'], cfg['beta'])\n\n # Create activation functions for every layer as a list\n phi = utils.create_activations(cfg['nonlinearity'], len(cfg['dimensions']))\n\n # Initialize energy based model\n if cfg[\"energy\"] == \"restr_hopfield\":\n model = energy.RestrictedHopfield(\n cfg['dimensions'], c_energy, cfg['batch_size'], phi).to(config.device)\n elif cfg[\"energy\"] == \"cond_gaussian\":\n model = energy.ConditionalGaussian(\n cfg['dimensions'], c_energy, cfg['batch_size'], phi).to(config.device)\n else:\n raise ValueError(f'Energy based model \\\"{cfg[\"energy\"]}\\\" not defined.')\n\n # Define optimizer (may include l2 regularization via weight_decay)\n w_optimizer = utils.create_optimizer(model, cfg['optimizer'], lr=cfg['learning_rate'])\n\n # Create torch data loaders with the MNIST data set\n mnist_train, mnist_test = data.create_mnist_loaders(cfg['batch_size'])\n\n logging.info(\"Start training with parametrization:\\n{}\".format(\n json.dumps(cfg, indent=4, sort_keys=True)))\n\n for epoch in range(1, cfg['epochs'] + 1):\n # Training\n train.train(model, mnist_train, cfg['dynamics'], w_optimizer, cfg[\"fast_ff_init\"])\n\n # Testing\n test_acc, test_energy = train.test(model, mnist_test, cfg['dynamics'], cfg[\"fast_ff_init\"])\n\n # Logging\n logging.info(\n \"epoch: {} \\t test_acc: {:.4f} \\t mean_E: {:.4f}\".format(\n epoch, test_acc, test_energy)\n )\n\n\nif __name__ == '__main__':\n # Parse shell arguments as input configuration\n user_config = parse_shell_args(sys.argv[1:])\n\n # Load default parameter configuration from file for the specified energy-based model\n cfg = load_default_config(user_config[\"energy\"])\n\n # Overwrite default parameters with user configuration where applicable\n cfg.update(user_config)\n\n # Setup global logger and logging directory\n config.setup_logging(cfg[\"energy\"] + \"_\" + cfg[\"c_energy\"] + \"_\" + cfg[\"dataset\"],\n dir=cfg['log_dir'])\n\n # Run the script using the created paramter configuration\n run_energy_model_mnist(cfg)\n", "# MIT License\n\n# Copyright (c) 2020 Simon Schug, João Sacramento\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport logging\nimport os\nimport time\nfrom importlib import reload\n\nimport torch\n\n# Global variables\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nlog_dir = None\nlog_name = None\nwriter = None\n\n\ndef setup_logging(name, dir=\"\"):\n \"\"\"\n Setup the logging device to log into a uniquely created directory.\n\n Args:\n name: Name of the directory for the log-files.\n dir: Optional sub-directory within log\n \"\"\"\n # Setup global log name and directory\n global log_name\n log_name = name\n\n # Setup global logging directory\n global log_dir\n log_dir = os.path.join(\"log\", dir)\n\n # Create the logging folder if it does not exist already\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n\n # Need to reload logging as otherwise the logger might be captured by another library\n reload(logging)\n\n # Setup global logger\n logging.basicConfig(\n level=logging.INFO,\n format=\"[%(levelname)-5.5s %(asctime)s] %(message)s\",\n datefmt='%H:%M:%S',\n handlers=[\n logging.FileHandler(os.path.join(\n log_dir, time.strftime(\"%Y%m%d_%H%M\") + \"_\" + name + \".log\")\n ),\n logging.StreamHandler()\n ])\n" ]
[ [ "torch.manual_seed" ], [ "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rgsl888prabhu/cugraph
[ "e030a2fe22ad308fba05d6146765a3c9aa865e5b" ]
[ "python/cugraph/tests/test_triangle_count.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\nfrom itertools import product\n\nimport pytest\nfrom scipy.io import mmread\n\nimport cudf\nimport cugraph\nfrom librmm_cffi import librmm as rmm\nfrom librmm_cffi import librmm_config as rmm_cfg\n\n# Temporarily suppress warnings till networkX fixes deprecation warnings\n# (Using or importing the ABCs from 'collections' instead of from\n# 'collections.abc' is deprecated, and in 3.8 it will stop working) for\n# python 3.7. Also, this import networkx needs to be relocated in the\n# third-party group once this gets fixed.\nimport warnings\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n import networkx as nx\n\n\ndef read_mtx_file(mm_file):\n print('Reading ' + str(mm_file) + '...')\n return mmread(mm_file).asfptype()\n\n\ndef cugraph_call(M, edgevals=False):\n M = M.tocoo()\n rows = cudf.Series(M.row)\n cols = cudf.Series(M.col)\n if edgevals is False:\n values = None\n else:\n values = cudf.Series(M.data)\n G = cugraph.Graph()\n G.add_edge_list(rows, cols, values)\n return cugraph.triangles(G)\n\n\ndef networkx_call(M):\n Gnx = nx.Graph(M)\n dic = nx.triangles(Gnx)\n count = 0\n for i in range(len(dic)):\n count += dic[i]\n return count\n\n\nDATASETS = ['../datasets/dolphins.mtx',\n '../datasets/karate.mtx',\n '../datasets/netscience.mtx']\n\n\n# Test all combinations of default/managed and pooled/non-pooled allocation\[email protected]('managed, pool',\n list(product([False, True], [False, True])))\[email protected]('graph_file', DATASETS)\ndef test_triangles(managed, pool, graph_file):\n gc.collect()\n\n rmm.finalize()\n rmm_cfg.use_managed_memory = managed\n rmm_cfg.use_pool_allocator = pool\n rmm.initialize()\n\n assert(rmm.is_initialized())\n\n M = read_mtx_file(graph_file)\n cu_count = cugraph_call(M)\n nx_count = networkx_call(M)\n assert cu_count == nx_count\n\n\n# Test all combinations of default/managed and pooled/non-pooled allocation\[email protected]('managed, pool',\n list(product([False, True], [False, True])))\[email protected]('graph_file', DATASETS)\ndef test_triangles_edge_vals(managed, pool, graph_file):\n gc.collect()\n\n rmm.finalize()\n rmm_cfg.use_managed_memory = managed\n rmm_cfg.use_pool_allocator = pool\n rmm.initialize()\n\n assert(rmm.is_initialized())\n\n M = read_mtx_file(graph_file)\n cu_count = cugraph_call(M, edgevals=True)\n nx_count = networkx_call(M)\n assert cu_count == nx_count\n" ]
[ [ "scipy.io.mmread" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
tobias-liaudat/wf-psf
[ "0ff1a12d06c46bd8599061d227785393fb528d76", "0ff1a12d06c46bd8599061d227785393fb528d76" ]
[ "wf_psf/SimPSFToolkit.py", "jz-submissions/scripts/training_param_1000.py" ]
[ "import numpy as np\nimport scipy.signal as spsig\nimport scipy.interpolate as sinterp\nimport PIL\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\ntry:\n from cv2 import resize, INTER_AREA\nexcept:\n print('Problem importing opencv..')\n try:\n from skimage.transform import downscale_local_mean\n print('Falling back to skimage.')\n print('Only integer downsampling allowed with this method.')\n except:\n print('Problem importing skimage..')\n\n\nclass SimPSFToolkit(object):\n \"\"\"Simulate PSFs.\n\n In the future the zernike maps could be created with galsim or some other\n alternative.\n\n Parameters\n ----------\n zernike_maps: list of np.ndarray\n Each element of the list should contain a Zernike map of the order\n (OSA/ANSI index convention) corresponding to the position in the list.\n max_order: int\n Maximum Zernike polynomial order. Default is `45`.\n max_wfe_rms: float\n Maximum allowed WFE in RMS. Used forvnormalization. Units in [\\mu m].\n Default is ``0.1``.\n output_dim: int\n Output dimension of the square PSF stamp. Default is `64`.\n rand_seed: int\n Random seed to be used to generate random zernike values.\n Default is `None`.\n plot_opt: bool\n Option to plot some characteristics of the PSF generated.\n Default is `False`.\n oversampling_rate: float\n Oversampling rate for the wavefront PSF simulation. Default is `2.14`\n that is the minumum number required by Euclid so that there is no\n aliasing at any wavelength in the pass band [0.55um, 0.9um].\n output_Q: float\n Downsampling rate to match the specified telescope's sampling. The value\n of `output_Q` should be equal to `oversampling_rate` in order to have\n the right pixel sampling corresponding to the telescope characteristics\n `pix_sampling`, `tel_diameter`, `tel_focal_length`. The final\n oversampling obtained is `oversampling_rate/output_Q`.\n Default is `1`, so the output psf will be super-resolved by a factor of\n `oversampling_rate`.\n pix_sampling: float\n Pixel sampling in [um]. Default is `12`[um] (Euclid-like).\n tel_diameter: float\n Telescope's main mirror diameter in [m]. Default is `1.2`[m]\n (Euclid-like).\n tel_focal_length: float\n Telescope's focal length in [m]. Default is `24.5`[m] (Euclid-like).\n pupil_diameter: int\n Pupil diameter in pixels. Number of samples of the wavefront in the\n pupil plane. More specifically, the Optical Path Differences map.\n Default is `1024` [pix].\n euclid_obsc: bool\n Wheter to use Euclid-like obscurations. Defualt is `True`.\n LP_filter_length: int\n Length of one dimension of the Low-Pass (LP) filter to apply to the\n obscurations to avoid the aliasing effect. The filter is a simple\n top-hat filter. Default is `3`.\n verbose: int\n Self-explanatory variable. 
Default is `0`, use a value `>0` to activate.\n\n \"\"\"\n\n def __init__(\n self,\n zernike_maps,\n max_order=45,\n max_wfe_rms=0.1,\n output_dim=64,\n rand_seed=None,\n plot_opt=False,\n oversampling_rate=3.,\n output_Q=1,\n pix_sampling=12,\n tel_diameter=1.2,\n tel_focal_length=24.5,\n pupil_diameter=1024,\n euclid_obsc=True,\n LP_filter_length=3,\n verbose=0\n ):\n # Input attributes\n self.max_order = max_order\n self.rand_seed = rand_seed\n self.plot_opt = plot_opt\n self.zernike_maps = zernike_maps\n self.max_wfe_rms = max_wfe_rms # In [um]\n self.output_dim = output_dim # In pixels per dimension\n self.verbose = verbose\n\n # Telescope characteristics\n self.oversampling_rate = oversampling_rate # dimensionless\n self.output_Q = output_Q # dimensionless\n self.pix_sampling = pix_sampling # In [um]\n self.tel_diameter = tel_diameter # In [m]\n self.tel_focal_length = tel_focal_length # In [m]\n self.pupil_diameter = pupil_diameter # In [pix]\n\n # Class attributes\n self.z_coeffs = None\n self.psf = None\n self.opd = None\n self.phase = None\n self.lambda_obs = None\n self.poly_psf = None\n\n # Generate pupil mask\n self.pupil_mask = ~np.isnan(self.zernike_maps[0])\n\n # Generate obscurations\n if euclid_obsc:\n self.obscurations = self.generate_pupil_obscurations(\n N_pix=pupil_diameter, N_filter=LP_filter_length\n )\n else:\n self.obscurations = np.ones((pupil_diameter, pupil_diameter))\n\n @staticmethod\n def _OLD_fft_diffraction_op(wf, pupil_mask, pad_factor=2, match_shapes=True):\n \"\"\" Perform a fft-based diffraction.\n\n Parameters\n ----------\n wf: np.ndarray\n A complex 2D array that corresponds to the wavefront function.\n pupil_mask: np.ndarray of bools\n A 2D boolean mask that corresponds to the pupil function.\n\n\n Returns\n -------\n psf: np.ndarray\n A real 2D array corresponding to the PSF.\n\n \"\"\"\n start = (wf.shape[0] * pad_factor) // 2 - wf.shape[0] // 2\n stop = (wf.shape[0] * pad_factor) // 2 + wf.shape[0] // 2\n\n padded_wf = np.zeros((wf.shape[0] * pad_factor, wf.shape[1] * pad_factor),\n dtype=np.complex128)\n\n padded_wf[start:stop, start:stop][pupil_mask] = wf[pupil_mask]\n\n fft_wf = np.fft.fftshift(np.fft.fft2(padded_wf))\n # fft_wf = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(padded_wf)))\n\n psf = np.abs(fft_wf)**2\n\n if match_shapes:\n # Return the psf with its original shape without the padding factor\n x_dif = int((psf.shape[0] / pad_factor) // 2)\n y_dif = int((psf.shape[1] / pad_factor) // 2)\n\n return psf[x_dif:psf.shape[0] - x_dif, y_dif:psf.shape[1] - y_dif]\n else:\n return psf\n\n @staticmethod\n def fft_diffract(wf, output_Q, output_dim=64):\n # Perform the FFT-based diffraction operation\n fft_wf = np.fft.fftshift(np.fft.fft2(wf))\n psf = np.abs(fft_wf)**2\n\n # Calculate crop dimensions\n if output_dim * output_Q < psf.shape[0]:\n start = int(psf.shape[0] // 2 - (output_dim * output_Q) // 2)\n stop = int(psf.shape[0] // 2 + (output_dim * output_Q) // 2)\n else:\n start = int(0)\n stop = psf.shape[0]\n\n # Crop psf\n psf = psf[start:stop, start:stop]\n\n # Downsample the image depending on `self.output_Q`\n try:\n psf = resize(\n src=psf, dsize=(int(output_dim), int(output_dim)), interpolation=INTER_AREA\n )\n except:\n f_x = int(psf.shape[0] / output_dim)\n f_y = int(psf.shape[1] / output_dim)\n psf = downscale_local_mean(\n image=psf,\n factors=(f_x, f_y),\n )\n\n return psf\n\n @staticmethod\n def generate_pupil_obscurations(N_pix=1024, N_filter=3):\n \"\"\"Generate Euclid like pupil obscurations.\n\n Simple procedure 
considering only the 2D plane.\n No 3D projections wrt the angle of the FoV is done.\n\n Parameters\n ----------\n N_pix: int\n Total number of pixels\n N_filter: int\n Length of the low-pass filter [pixels]\n\n \"\"\"\n # Telescope parameters\n AS_diam = 1200 # Aperture stop diameter [mm]\n M1_diam = 395 # Mirror 1 cap stopper diameter [mm]\n\n sp_lenght = 700 # Spider length [mm]\n sp_width = 12 # Spider width [mm]\n\n AS_centre = [0, 0]\n M1_centre = [0, 51]\n\n sp1_angle = 106.78 - 90 # [degrees]\n sp2_angle = 50.11 - 90 # [degrees]\n sp3_angle = -10.76 - 90 # [degrees]\n\n sp1_x_pos = 260 # [mm]\n sp1_y_pos = 240 # [mm]\n sp2_x_pos = -330 # [mm]\n sp2_y_pos = 130 # [mm]\n sp3_x_pos = 70 # [mm]\n sp3_y_pos = -330 # [mm]\n\n # Build pupil plane\n pupil_plane = np.ones((N_pix, N_pix))\n\n # coordinates of map in [mm]\n W, H = np.meshgrid(\n np.linspace(-AS_diam // 2, AS_diam // 2, N_pix),\n np.linspace(-AS_diam // 2, AS_diam // 2, N_pix)\n )\n\n ### Calculate the Aperture stop and draw it ###\n aperture_stop_mask = np.sqrt((W - AS_centre[0])**2 + (H - AS_centre[1])**2) <= (AS_diam / 2)\n pupil_plane[~aperture_stop_mask] = 0\n\n ### Calculate the M1/M2 obscurations and draw them ###\n M1_mask = np.sqrt((W - M1_centre[0])**2 + (H - M1_centre[1])**2) <= (M1_diam / 2)\n pupil_plane[M1_mask] = 0\n\n ### Calculate the spiders and draw them ###\n\n # Spider 1\n sp1_a = np.tan(sp1_angle * (np.pi / 180))\n sp1_b = sp1_y_pos - sp1_a * sp1_x_pos\n\n sp1_mask_1 = sp1_a * W + sp1_b - sp_width / 2 * np.sqrt(1 + sp1_a**2) < H\n sp1_mask_2 = sp1_a * W + sp1_b + sp_width / 2 * np.sqrt(1 + sp1_a**2) > H\n sp1_mask = np.logical_and(sp1_mask_1, sp1_mask_2)\n\n sp1_length_mask = np.sqrt((W - sp1_x_pos)**2 + (H - sp1_y_pos)**2) <= (sp_lenght / 2)\n sp1_mask = np.logical_and(sp1_mask, sp1_length_mask)\n\n # Spider 2\n sp2_a = np.tan(sp2_angle * (np.pi / 180))\n sp2_b = sp2_y_pos - sp2_a * sp2_x_pos\n\n sp2_mask_1 = sp2_a * W + sp2_b - sp_width / 2 * np.sqrt(1 + sp2_a**2) < H\n sp2_mask_2 = sp2_a * W + sp2_b + sp_width / 2 * np.sqrt(1 + sp2_a**2) > H\n sp2_mask = np.logical_and(sp2_mask_1, sp2_mask_2)\n\n sp2_length_mask = np.sqrt((W - sp2_x_pos)**2 + (H - sp2_y_pos)**2) <= (sp_lenght / 2)\n sp2_mask = np.logical_and(sp2_mask, sp2_length_mask)\n\n # Spider 3\n sp3_a = np.tan(sp3_angle * (np.pi / 180))\n sp3_b = sp3_y_pos - sp3_a * sp3_x_pos\n\n sp3_mask_1 = sp3_a * W + sp3_b - sp_width / 2 * np.sqrt(1 + sp3_a**2) < H\n sp3_mask_2 = sp3_a * W + sp3_b + sp_width / 2 * np.sqrt(1 + sp3_a**2) > H\n sp3_mask = np.logical_and(sp3_mask_1, sp3_mask_2)\n\n sp3_length_mask = np.sqrt((W - sp3_x_pos)**2 + (H - sp3_y_pos)**2) <= (sp_lenght / 2)\n sp3_mask = np.logical_and(sp3_mask, sp3_length_mask)\n\n # Draw the three spider arms\n pupil_plane[sp1_mask] = 0\n pupil_plane[sp2_mask] = 0\n pupil_plane[sp3_mask] = 0\n\n ### Low-pass filter the image ###\n top_hat_filter = np.ones((N_filter, N_filter))\n\n pupil_plane = spsig.convolve2d(\n pupil_plane, top_hat_filter, boundary='fill', mode='same', fillvalue=0\n )\n\n pupil_plane /= np.sum(top_hat_filter)\n\n return pupil_plane\n\n @staticmethod\n def crop_img(to_crop_img, ref_im):\n cent_x = int(to_crop_img.shape[0] // 2)\n cent_y = int(to_crop_img.shape[1] // 2)\n\n delta_x = int(ref_im.shape[0] // 2)\n delta_y = int(ref_im.shape[1] // 2)\n\n return to_crop_img[cent_x - delta_x:cent_x + delta_x, cent_y - delta_y:cent_y + delta_y]\n\n @staticmethod\n def decimate_im(input_im, decim_f):\n \"\"\"Decimate image.\n\n Decimated by a factor of decim_f.\n Based on the PIL library using 
the default interpolator.\n\n \"\"\"\n\n pil_im = PIL.Image.fromarray(input_im)\n (width, height) = (pil_im.width // decim_f, pil_im.height // decim_f)\n im_resized = pil_im.resize((width, height))\n\n return np.array(im_resized)\n\n @staticmethod\n def get_radial_idx(max_order=45):\n it = 1\n radial_idxs = []\n\n while (len(radial_idxs) <= max_order):\n for _it in range(it):\n radial_idxs.append(it - 1)\n\n it += 1\n\n return np.array(radial_idxs)\n\n @staticmethod\n def psf_plotter(psf, lambda_obs=0.000, cmap='gist_stern', save_img=False):\n fig = plt.figure(figsize=(18, 10))\n\n ax1 = fig.add_subplot(131)\n im1 = ax1.imshow(psf, cmap=cmap, interpolation='None')\n divider = make_axes_locatable(ax1)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im1, cax=cax, orientation='vertical')\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.set_title('PSF (lambda=%.3f [um])' % (lambda_obs))\n\n ax2 = fig.add_subplot(132)\n im2 = ax2.imshow(np.sqrt(abs(psf)), cmap=cmap, interpolation='None')\n divider2 = make_axes_locatable(ax2)\n cax2 = divider2.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im2, cax=cax2, orientation='vertical')\n ax2.set_title('sqrt PSF (lambda=%.3f [um])' % (lambda_obs))\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n ax3 = fig.add_subplot(133)\n im3 = ax3.imshow(np.log(abs(psf)), cmap=cmap, interpolation='None')\n divider3 = make_axes_locatable(ax3)\n cax3 = divider3.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im3, cax=cax3, orientation='vertical')\n ax3.set_title('log PSF (lambda=%.3f [um])' % (lambda_obs))\n ax3.set_xticks([])\n ax3.set_yticks([])\n\n if save_img:\n plt.savefig('./PSF_lambda_%.3f.pdf' % lambda_obs, bbox_inches='tight')\n\n plt.show()\n\n @staticmethod\n def opd_phase_plotter(pupil_mask, opd, phase, lambda_obs, cmap='viridis', save_img=False):\n fig = plt.figure(figsize=(18, 10))\n\n ax1 = fig.add_subplot(131)\n im1 = ax1.imshow(pupil_mask, interpolation='None')\n divider = make_axes_locatable(ax1)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im1, cax=cax, orientation='vertical')\n ax1.set_title('Pupil mask')\n ax1.set_xticks([])\n ax1.set_yticks([])\n\n vmax = np.max(abs(opd))\n ax2 = fig.add_subplot(132)\n im2 = ax2.imshow(opd, cmap=cmap, interpolation='None', vmin=-vmax, vmax=vmax)\n divider2 = make_axes_locatable(ax2)\n cax2 = divider2.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im2, cax=cax2, orientation='vertical')\n ax2.set_title('OPD [um]')\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n vmax = np.max(abs(np.angle(phase)))\n ax3 = fig.add_subplot(133)\n im3 = ax3.imshow(np.angle(phase), cmap=cmap, interpolation='None', vmin=-vmax, vmax=vmax)\n divider3 = make_axes_locatable(ax3)\n cax3 = divider3.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im3, cax=cax3, orientation='vertical')\n ax3.set_title('W phase [rad](wv=%.2f[um])' % (lambda_obs))\n ax3.set_xticks([])\n ax3.set_yticks([])\n\n if save_img:\n plt.savefig('./OPD_lambda_%.3f.pdf' % lambda_obs, bbox_inches='tight')\n\n plt.show()\n\n def get_psf(self):\n if self.psf is not None:\n return self.psf\n else:\n print('No PSF has been computed yet.')\n\n def plot_psf(self, cmap='gist_stern', save_img=False):\n if self.psf is not None:\n self.psf_plotter(self.psf, self.lambda_obs, cmap, save_img)\n else:\n print('No PSF has been computed yet.')\n\n def plot_opd_phase(self, cmap='viridis', save_img=False):\n if self.opd is not None:\n self.opd_phase_plotter(\n self.pupil_mask * self.obscurations, self.opd * 
self.obscurations, self.phase,\n self.lambda_obs, cmap, save_img\n )\n else:\n print('No WF has been computed yet.')\n\n def gen_random_Z_coeffs(self, max_order=45, rand_seed=None):\n \"\"\" Generate a random set of Zernike coefficients.\n\n The coefficients are generated following a uniform law U~[-1,1]\n divided by their radial zernike index.\n Ex: u_i / r(i) (u_i is a realization of U)\n\n Parameters\n ----------\n max_order: int\n Maximum order of Zernike polynomials.\n rand_seed: int\n Seed for the random initialization.\n\n Returns\n -------\n rand_coeffs: list of floats\n List containing the random coefficients.\n\n \"\"\"\n if rand_seed is not None:\n np.random.seed(rand_seed)\n\n rad_idx = self.get_radial_idx(max_order)\n rad_idx[0] = 1\n\n z_coeffs = []\n\n for it in range(max_order):\n z_coeffs.append((np.random.rand() - 0.5) * 2. / rad_idx[it])\n\n self.z_coeffs = z_coeffs\n\n def plot_z_coeffs(self, save_img=False):\n \"\"\"Plot random Zernike coefficients.\"\"\"\n if self.z_coeffs is not None:\n fig = plt.figure(figsize=(12, 6))\n ax1 = fig.add_subplot(111)\n im1 = ax1.bar(np.arange(len(self.z_coeffs)), np.array(self.z_coeffs))\n ax1.set_xlabel('Zernike coefficients')\n ax1.set_ylabel('Magnitude')\n\n if save_img:\n plt.savefig('./Z_coeffs.pdf', bbox_inches='tight')\n\n plt.show()\n else:\n print('Random coeffs not generated.')\n\n def get_z_coeffs(self):\n \"\"\"Get random coefficients\"\"\"\n if self.z_coeffs is not None:\n return self.z_coeffs\n else:\n print('Random coeffs not generated.')\n\n def set_z_coeffs(self, z_coeffs):\n \"\"\"Set zernike coefficients.\"\"\"\n if len(z_coeffs) == self.max_order:\n self.z_coeffs = z_coeffs\n else:\n print('Zernike coefficients should be of length %d' % (self.max_order))\n\n def normalize_zernikes(self, z_coeffs=None, max_wfe_rms=None):\n \"\"\"Normalize zernike coefficients.\"\"\"\n if max_wfe_rms is None:\n max_wfe_rms = self.max_wfe_rms\n\n # Calculate normalization factor\n wfe_rms = self.calculate_wfe_rms(z_coeffs=z_coeffs)\n mult_factor = max_wfe_rms / wfe_rms\n\n # Normalize Zernike coefficients and return them\n z_coeffs = [_z * mult_factor for _z in z_coeffs]\n\n return z_coeffs\n\n def calculate_wfe_rms(self, z_coeffs=None):\n \"\"\"Calculate WFE rms from a set of zernike coefficients.\"\"\"\n if z_coeffs is None:\n if self.z_coeffs is None:\n self.gen_random_Z_coeffs(self.max_order, self.rand_seed)\n z_coeffs = self.get_z_coeffs()\n else:\n z_coeffs = self.get_z_coeffs()\n\n # Create the phase with the Zernike basis\n opd = 0\n for it in range(self.max_order):\n opd += self.zernike_maps[it] * z_coeffs[it]\n\n # Proyect obscurations on to the OPD\n opd *= self.obscurations\n\n # Calculate normalization factor\n wfe_rms = np.sqrt(np.mean((opd[self.pupil_mask] - np.mean(opd[self.pupil_mask]))**2))\n\n return wfe_rms\n\n def check_wfe_rms(self, z_coeffs=None, max_wfe_rms=None):\n \"\"\"Check if Zernike coefficients are within the maximum admitted error.\"\"\"\n\n if max_wfe_rms is None:\n max_wfe_rms = self.max_wfe_rms\n\n # Calculate normalization factor\n wfe_rms = self.calculate_wfe_rms(z_coeffs=z_coeffs)\n\n return max_wfe_rms - wfe_rms\n\n def generate_mono_PSF(self, lambda_obs=0.725, regen_sample=False, get_psf=False):\n \"\"\"Generate monochromatic PSF.\"\"\"\n if lambda_obs < 0.55 * 0.9 or lambda_obs > 0.9 * 1.1:\n print(\n 'WARNING: requested wavelength %.4f um is not in VIS passband [0.55,0.9]um' %\n (lambda_obs)\n )\n self.lambda_obs = lambda_obs\n\n # Calculate the OPD from the Zernike coefficients\n 
self.calculate_opd(regen_sample)\n\n # Apply the diffraction operator using the opd (optical path differences)\n self.diffract_phase()\n\n if get_psf is True:\n return np.copy(self.psf)\n\n def calculate_opd(self, regen_sample=False):\n \"\"\"Calculate the OPD from the Zernike coefficients.\"\"\"\n if self.z_coeffs is None or regen_sample is True:\n # Generate a random sample of coefficients\n self.gen_random_Z_coeffs(self.max_order, self.rand_seed)\n # Normalize coefficients\n z_coeffs = self.normalize_zernikes(self.get_z_coeffs(), self.max_wfe_rms)\n # Save coefficients\n self.set_z_coeffs(z_coeffs)\n # Plot Zernike coefficients\n if self.plot_opt:\n self.plot_z_coeffs()\n\n else:\n # Get the stored Zernike coefficients\n z_coeffs = self.get_z_coeffs()\n\n # Create the phase with the Zernike basis\n opd = 0\n for it in range(self.max_order):\n opd += self.zernike_maps[it] * z_coeffs[it]\n\n # Save the wavefront\n self.opd = opd\n\n def diffract_phase(self, lambda_obs=None):\n \"\"\"Diffract the phase map.\"\"\"\n if lambda_obs is None:\n if self.lambda_obs is None:\n print('WARNING: No wavelength is defined. Using default value 0.8um.')\n lambda_obs = 0.8\n else:\n lambda_obs = self.lambda_obs\n elif lambda_obs < 0.55 * 0.99 or lambda_obs > 0.9 * 1.01:\n print('WARNING: wavelength %.4f is not in VIS passband [0.55,0.9]um' % (lambda_obs))\n\n # Calculate the feasible lambda closest to lambda_obs\n possible_lambda = self.feasible_wavelength(lambda_obs)\n\n # Save wavelength\n self.lambda_obs = possible_lambda\n\n # Calculate the required N for the input lambda_obs\n possible_N = self.feasible_N(self.lambda_obs)\n\n # Generate the full phase and\n # Add zeros to the phase to have the correct fourier sampling\n start = possible_N // 2 - self.opd.shape[0] // 2\n stop = possible_N // 2 + self.opd.shape[0] // 2\n\n self.phase = np.zeros((possible_N, possible_N), dtype=np.complex128)\n self.phase[start:stop,\n start:stop][self.pupil_mask\n ] = np.exp(2j * np.pi * self.opd[self.pupil_mask] / self.lambda_obs)\n\n # Project obscurations to the phase\n self.phase[start:stop, start:stop] *= self.obscurations\n\n # FFT-diffract the phase (wavefront) and then crop to desired dimension\n self.psf = self.fft_diffract(\n wf=self.phase, output_Q=self.output_Q, output_dim=self.output_dim\n )\n\n # Normalize psf\n self.psf /= np.sum(self.psf)\n\n def feasible_N(self, lambda_obs):\n \"\"\"Calculate the feasible N for a lambda_obs diffraction.\n\n Input wavelength must be in [um].\n \"\"\"\n # Calculate the required N for the input lambda_obs\n req_N = (self.oversampling_rate * self.pupil_diameter * lambda_obs *\n self.tel_focal_length) / (\n self.tel_diameter * self.pix_sampling\n )\n # Recalculate the req_N into a possible value (a pair integer)\n possible_N = int((req_N // 2) * 2)\n\n return possible_N\n\n def feasible_wavelength(self, lambda_obs):\n \"\"\"Calculate closest fesible wavelength to target wavelength.\n\n Input wavelength must be in [um].\n \"\"\"\n # Calculate a feasible N for the input lambda_obs\n possible_N = self.feasible_N(lambda_obs)\n\n # Recalculate the corresponding the wavelength\n possible_lambda = (possible_N * self.tel_diameter * self.pix_sampling) / (\n self.pupil_diameter * self.oversampling_rate * self.tel_focal_length\n )\n\n if self.verbose > 0:\n # print(\"Requested wavelength: %.5f \\nRequired N: %.2f\"%(lambda_obs, req_N))\n print(\"Possible wavelength: %.5f \\nPossible N: %.2f\" % (possible_lambda, possible_N))\n\n return possible_lambda\n\n @staticmethod\n def 
gen_SED_interp(SED, n_bins=35, interp_kind='cubic'):\n \"\"\"Generate SED interpolator.\n\n Returns the interpolator and the wavelengths in [nm].\n \"\"\"\n wv_max = 900\n wv_min = 550\n # wvlength = np.arange(wv_min, wv_max, int((wv_max-wv_min)/n_bins))\n wvlength = np.linspace(wv_min, wv_max, num=n_bins, endpoint=True)\n\n SED_interp = sinterp.interp1d(\n SED[:, 0], SED[:, 1], kind=interp_kind, bounds_error=False, fill_value=\"extrapolate\"\n )\n\n return wvlength, SED_interp\n\n def calc_SED_wave_values(self, SED, n_bins=35):\n \"\"\"Calculate feasible wavelength and SED values.\n\n Feasable so that the padding number N is integer.\n \"\"\"\n # Generate SED interpolator and wavelength array\n wvlength, SED_interp = self.gen_SED_interp(SED, n_bins)\n\n # Convert wavelength from [nm] to [um]\n wvlength_um = wvlength / 1e3\n\n # Calculate feasible wavelengths (in [um])\n verbose = self.verbose\n self.verbose = 0\n feasible_wv = np.array([self.feasible_wavelength(_wv) for _wv in wvlength_um])\n self.verbose = verbose\n\n # Interpolate and normalize SED\n SED_norm = SED_interp(feasible_wv * 1e3) # Interpolation is done in [nm]\n SED_norm /= np.sum(SED_norm)\n\n return feasible_wv, SED_norm\n\n def generate_poly_PSF(self, SED, n_bins=35):\n \"\"\"Generate polychromatic PSF with a specific SED.\n\n The wavelength space will be the Euclid VIS instrument band:\n [550,900]nm and will be sample in ``n_bins``.\n\n \"\"\"\n # Calculate the feasible values of wavelength and the corresponding\n # SED interpolated values\n feasible_wv, SED_norm = self.calc_SED_wave_values(SED, n_bins)\n\n if self.plot_opt:\n # Plot input SEDs and interpolated SEDs\n wvlength, SED_interp = self.gen_SED_interp(SED, n_bins)\n\n fig = plt.figure(figsize=(14, 8))\n ax1 = fig.add_subplot(111)\n ax1.plot(SED[:, 0], SED[:, 1], label='Input SED')\n ax1.scatter(\n feasible_wv * 1e3, SED_interp(feasible_wv * 1e3), label='Interpolated', c='red'\n )\n ax1.set_xlabel('wavelength [nm]')\n ax1.set_ylabel('SED(wavelength)')\n ax1.set_title('SED')\n ax1.legend()\n # plt.savefig(output_path+'SED_interp_nbin_%d.pdf'%n_bins, bbox_inches='tight')\n plt.show()\n\n stacked_psf = 0\n\n # Generate the required monochromatic PSFs\n for it in range(feasible_wv.shape[0]):\n self.generate_mono_PSF(lambda_obs=feasible_wv[it])\n stacked_psf += self.get_psf() * SED_norm[it]\n\n self.poly_psf = stacked_psf\n\n return stacked_psf\n\n\n# This pythonic version of the polychromatic calculation is not working\n# The parallelisation with the class with shared variables might not be working\n# It may work if we define a @staticmethod for the diffracvtion\n# psf_cube = np.array([_sed*self.generate_mono_PSF(_wv, get_psf=True)\n# for _wv, _sed in zip(feasible_wv, SED_norm)])\n# # Sum to obtain the polychromatic PSFs\n# self.poly_psf = np.sum(np_psf_cube, axis=0)\n# return np.copy(self.poly_psf)\n", "#!/usr/bin/env python\n# coding: utf-8\n\n# # PSF modelling\n\n\n#@title Import packages\nimport sys\nimport numpy as np\nimport time\n\n# Import wavefront code\nimport wf_psf as wf\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\n# Start measuring elapsed time\nstarting_time = time.time()\n\n# # Define saving paths\n# model = 'mccd'\n# model = 'poly'\nmodel = 'param'\n\nid_name = '-coherent_euclid_1000stars'\nrun_id_name = model + id_name\n\n# Saving paths\nbase_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'\nlog_save_file = base_path + 'log-files/'\nmodel_save_file= base_path + 'chkp/'\noptim_hist_file = base_path + 'optim-hist/'\nsaving_optim_hist = 
dict()\n\nchkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'\n\n# Input paths\ndataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'\ntrain_path = 'train_Euclid_res_1000_TrainStars_id_001.npy'\ntest_path = 'test_Euclid_res_id_001.npy'\n\n\n# Save output prints to logfile\nold_stdout = sys.stdout\nlog_file = open(log_save_file + run_id_name + '_output.log','w')\nsys.stdout = log_file\nprint('Starting the log file.')\n\n# Check GPU\ndevice_name = tf.test.gpu_device_name()\nif device_name != '/device:GPU:0':\n raise SystemError('GPU device not found')\nprint('Found GPU at: {}'.format(device_name))\nprint('tf_version: ' + str(tf.__version__))\n\n# # Define new model\n\n# Decimation factor for Zernike polynomials\nn_zernikes = 45\n\n# Some parameters\npupil_diameter = 256\nn_bins_lda = 20\n\noutput_Q = 3.\noversampling_rate = 3.\n\nbatch_size = 16\noutput_dim = 32\nd_max = 2\nd_max_nonparam = 3 # polynomial-constraint features\nx_lims = [0, 1e3]\ny_lims = [0, 1e3]\ngraph_features = 10 # Graph-constraint features\nl1_rate = 1e-8 # L1 regularisation\n\n# Learning rates and number of epochs\nl_rate_param = [1e-2, 1e-2]\nn_epochs_param = [30, 40]\n\n\n## Prepare the inputs\n\n# Generate Zernike maps\nzernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)\n\n# Now as cubes\nnp_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))\n\nfor it in range(len(zernikes)):\n np_zernike_cube[it,:,:] = zernikes[it]\n\nnp_zernike_cube[np.isnan(np_zernike_cube)] = 0\ntf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)\n\nprint('Zernike cube:')\nprint(tf_zernike_cube.shape)\n\n\n## Load the dictionaries\ntrain_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]\n# train_stars = train_dataset['stars']\n# noisy_train_stars = train_dataset['noisy_stars']\n# train_pos = train_dataset['positions']\ntrain_SEDs = train_dataset['SEDs']\n# train_zernike_coef = train_dataset['zernike_coef']\ntrain_C_poly = train_dataset['C_poly']\ntrain_parameters = train_dataset['parameters']\n\n\ntest_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]\n# test_stars = test_dataset['stars']\n# test_pos = test_dataset['positions']\ntest_SEDs = test_dataset['SEDs']\n# test_zernike_coef = test_dataset['zernike_coef']\n\n# Convert to tensor\ntf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)\ntf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)\ntf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)\n\ntf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)\ntf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)\n\nprint('Dataset parameters:')\nprint(train_parameters)\n\n\n## Generate initializations\n\n# Prepare np input\nsimPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,\n pupil_diameter=pupil_diameter, output_dim=output_dim,\n oversampling_rate=oversampling_rate, output_Q=output_Q)\nsimPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)\nz_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)\nsimPSF_np.set_z_coeffs(z_coeffs)\nsimPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)\n\n# Obscurations\nobscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)\ntf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)\n\n# Initialize the SED data 
list\npacked_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)\n for _sed in train_SEDs]\n\n\n# Prepare the inputs for the training\ntf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)\ntf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])\n\ninputs = [tf_train_pos, tf_packed_SED_data]\n\n# Select the observed stars (noisy or noiseless)\noutputs = tf_noisy_train_stars\n# outputs = tf_train_stars\n\n\n## Prepare validation data inputs\n\n# Let's take a subset of the testing data for the validation\n# in order to test things faster\nval_SEDs = test_SEDs # [0:50, :, :]\ntf_val_pos = tf_test_pos # [0:50, :]\ntf_val_stars = tf_test_stars # [0:50, :, :]\n\n# Initialize the SED data list\nval_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)\n for _sed in val_SEDs]\n\n# Prepare the inputs for the validation\ntf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)\ntf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])\n \n# Prepare input validation tuple\nval_x_inputs = [tf_val_pos, tf_val_packed_SED_data]\nval_y_inputs = tf_val_stars\nval_data = (val_x_inputs, val_y_inputs)\n\n\n## Select the model\nif model == 'mccd':\n raise NotImplementedError\n\nelif model == 'poly':\n # # Initialize the model\n raise NotImplementedError\n\nelif model == 'param':\n # Initialize the model\n tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,\n obscurations=tf_obscurations,\n batch_size=batch_size,\n output_Q=output_Q,\n output_dim=output_dim,\n n_zernikes=n_zernikes,\n d_max=d_max,\n x_lims=x_lims,\n y_lims=y_lims)\n\n\n\n\n# # Model Training\n\n# Prepare the saving callback\n# Prepare to save the model as a callback\nfilepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'\nmodel_chkp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath_chkp_callback,\n monitor='mean_squared_error', verbose=1, save_best_only=True,\n save_weights_only=False, mode='min', save_freq='epoch',\n options=None)\n\n# Prepare the optimisers\nparam_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])\n\nprint('Starting cycle 1..')\nstart_cycle1 = time.time()\n\ntf_semiparam_field, hist_param = wf.train_utils.param_train_cycle(\n tf_semiparam_field,\n inputs=inputs,\n outputs=outputs,\n val_data=val_data,\n batch_size=batch_size,\n l_rate=l_rate_param[0],\n n_epochs=n_epochs_param[0], \n param_optim=param_optim,\n param_loss=None, \n param_metrics=None, \n param_callback=None, \n general_callback=[model_chkp_callback],\n verbose=2)\n\n\n\n# Save weights\ntf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')\n\nend_cycle1 = time.time()\nprint('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))\n\n# Save optimisation history in the saving dict\nsaving_optim_hist['param_cycle1'] = hist_param.history\n\n\n\n# Prepare to save the model as a callback\nfilepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'\nmodel_chkp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath_chkp_callback,\n monitor='mean_squared_error', verbose=1, save_best_only=True,\n save_weights_only=False, mode='min', save_freq='epoch',\n options=None)\n\n# Prepare the optimisers\nparam_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])\n\n\nprint('Starting cycle 2..')\nstart_cycle2 = time.time()\n\n# Compute the next cycle\ntf_semiparam_field, hist_param_2 = 
wf.train_utils.param_train_cycle(\n tf_semiparam_field,\n inputs=inputs,\n outputs=outputs,\n val_data=val_data,\n batch_size=batch_size,\n l_rate=l_rate_param[0],\n n_epochs=n_epochs_param[0], \n param_optim=param_optim,\n param_loss=None, \n param_metrics=None, \n param_callback=None, \n general_callback=[model_chkp_callback],\n verbose=2)\n\n\n# Save the weights at the end of the second cycle\ntf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')\n\nend_cycle2 = time.time()\nprint('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))\n\n# Save optimisation history in the saving dict\nsaving_optim_hist['param_cycle2'] = hist_param_2.history\n\n# Save optimisation history dictionary\nnp.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)\n\n\n## Print final time\nfinal_time = time.time()\nprint('\\nTotal elapsed time: %f'%(final_time - starting_time))\n\n## Close log file\nprint('\\n Good bye..')\nsys.stdout = old_stdout\nlog_file.close()\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.mean", "numpy.exp", "numpy.copy", "scipy.interpolate.interp1d", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.fft.fft2", "numpy.isnan", "scipy.signal.convolve2d", "matplotlib.pyplot.savefig", "numpy.tan", "numpy.random.rand", "numpy.array", "numpy.logical_and", "numpy.sum", "matplotlib.pyplot.show", "numpy.abs", "numpy.random.seed", "numpy.ones", "numpy.angle" ], [ "tensorflow.convert_to_tensor", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.transpose", "tensorflow.test.gpu_device_name", "numpy.isnan", "numpy.save", "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
binfen/FBDD
[ "26c859a2dbe3d308711898ef1d149a5f8c49055f" ]
[ "utils/postprocess.py" ]
[ "import time\nimport numpy as np\nimport pandas as pd\n\nfrom molecules import mol_from_smiles\nfrom molecules import add_property\nfrom molecules import (\n add_atom_counts, add_bond_counts, add_ring_counts)\n\nfrom .config import get_dataset_info\nfrom .filesystem import load_dataset\n\nSCORES = [\"validity\", \"novelty\", \"uniqueness\"]\n\n\ndef dump_scores(config, scores, epoch):\n filename = config.path('performance') / \"scores.csv\"\n df = pd.DataFrame([scores], columns=SCORES)\n\n if not filename.exists():\n df.to_csv(filename)\n is_max = True\n else:\n ref = pd.read_csv(filename, index_col=0)\n is_max = scores[2] >= ref.uniqueness.max()\n ref = pd.concat([ref, df], axis=0, sort=False, ignore_index=True)\n ref.to_csv(filename)\n\n return is_max\n\n\ndef retrieve_samples(config):\n dfs = []\n filenames = config.path('samples').glob('*_*.csv')\n\n for filename in filenames:\n dfs.append(pd.read_csv(filename, index_col=0))\n\n samples = pd.concat(dfs, axis=0, ignore_index=True, sort=False)\n samples = samples.reset_index(drop=True)\n return samples.copy()\n\n\ndef mask_valid_molecules(smiles):\n valid_mask = []\n\n for smi in smiles:\n try:\n mol = mol_from_smiles(smi)\n valid_mask.append(mol is not None)\n except Exception:\n valid_mask.append(False)\n\n return np.array(valid_mask)\n\n\ndef mask_novel_molecules(smiles, data_smiles):\n novel_mask = []\n\n for smi in smiles:\n novel_mask.append(smi not in data_smiles)\n\n return np.array(novel_mask)\n\n\ndef mask_unique_molecules(smiles):\n uniques, unique_mask = set(), []\n\n for smi in smiles:\n unique_mask.append(smi not in uniques)\n uniques.add(smi)\n\n return np.array(unique_mask)\n\n\ndef score_samples(samples, dataset, calc=True):\n def ratio(mask):\n total = mask.shape[0]\n if total == 0:\n return 0.0\n return mask.sum() / total\n\n if isinstance(samples, pd.DataFrame):\n smiles = samples.smiles.tolist()\n elif isinstance(samples, list):\n smiles = [s[0] for s in samples]\n data_smiles = dataset.smiles.tolist()\n\n valid_mask = mask_valid_molecules(smiles)\n novel_mask = mask_novel_molecules(smiles, data_smiles)\n unique_mask = mask_unique_molecules(smiles)\n\n scores = []\n if calc:\n start = time.time()\n print(\"Start scoring...\")\n validity_score = ratio(valid_mask)\n novelty_score = ratio(novel_mask[valid_mask])\n uniqueness_score = ratio(unique_mask[valid_mask])\n\n print(f\"valid: {validity_score} - \"\n f\"novel: {novelty_score} - \"\n f\"unique: {uniqueness_score}\")\n\n scores = [validity_score, novelty_score, uniqueness_score]\n end = time.time() - start\n elapsed = time.strftime(\"%H:%M:%S\", time.gmtime(end))\n print(f'Done. 
Time elapsed: {elapsed}.')\n\n return valid_mask * novel_mask * unique_mask, scores\n\n\ndef postprocess_samples(config, use_train=False, n_jobs=-1):\n start = time.time()\n print(\"Start postprocessing...\", end=\" \")\n kind = 'train' if use_train else 'test'\n dataset = load_dataset(config, kind=kind)\n samples = retrieve_samples(config)\n\n mask, _ = score_samples(samples, dataset, calc=False)\n samples = samples.iloc[mask, :].reset_index(drop=True)\n\n info = get_dataset_info(config.get('dataset'))\n samples = add_atom_counts(samples, info, n_jobs)\n samples = add_bond_counts(samples, info, n_jobs)\n samples = add_ring_counts(samples, info, n_jobs)\n\n for prop in info['properties']:\n samples = add_property(samples, prop, n_jobs)\n\n samples = samples[info['column_order']]\n samples['who'] = 'OURS'\n dataset['who'] = info['name']\n\n data = [samples, dataset]\n aggregated = pd.concat(data, axis=0, ignore_index=True, sort=False)\n aggregated.to_csv(config.path('samples') / 'aggregated.csv')\n\n end = time.time() - start\n elapsed = time.strftime(\"%H:%M:%S\", time.gmtime(end))\n print(f'Done. Time elapsed: {elapsed}.')\n" ]
[ [ "pandas.concat", "numpy.array", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Jappenn/CCL
[ "a37cad61f060f3928fa5d47b1e2670db3e9bce6f", "a37cad61f060f3928fa5d47b1e2670db3e9bce6f", "a37cad61f060f3928fa5d47b1e2670db3e9bce6f" ]
[ "pyccl/tests/test_power.py", "benchmarks/data/codes/growth_lowz_bm.py", "benchmarks/data/codes/sigmaM_bm.py" ]
[ "import numpy as np\nimport pytest\n\nimport pyccl as ccl\nfrom pyccl import CCLError, CCLWarning\n\n\nCOSMO = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,\n transfer_function='bbks', matter_power_spectrum='halofit')\nCOSMO_HM = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,\n transfer_function='bbks', matter_power_spectrum='halo_model',\n mass_function='shethtormen')\n\n\ndef test_halomod_f2d_copy():\n from pyccl.pyutils import assert_warns\n mdef = ccl.halos.MassDef('vir', 'matter')\n hmf = ccl.halos.MassFuncSheth99(COSMO_HM, mdef,\n mass_def_strict=False,\n use_delta_c_fit=True)\n hbf = ccl.halos.HaloBiasSheth99(COSMO_HM, mass_def=mdef,\n mass_def_strict=False)\n cc = ccl.halos.ConcentrationDuffy08(mdef)\n prf = ccl.halos.HaloProfileNFW(cc)\n hmc = ccl.halos.HMCalculator(COSMO_HM, hmf, hbf, mdef)\n pk2d = ccl.halos.halomod_Pk2D(COSMO_HM, hmc, prf, normprof1=True)\n psp_new = pk2d.psp\n # This just triggers the internal calculation\n pk_old = assert_warns(\n ccl.CCLWarning,\n ccl.nonlin_matter_power, COSMO_HM, 1., 0.8)\n pk_new = pk2d.eval(1., 0.8, COSMO_HM)\n psp_old = COSMO_HM.get_nonlin_power().psp\n assert psp_new.lkmin == psp_old.lkmin\n assert psp_new.lkmax == psp_old.lkmax\n assert psp_new.amin == psp_old.amin\n assert psp_new.amax == psp_old.amax\n assert psp_new.is_factorizable == psp_old.is_factorizable\n assert psp_new.is_k_constant == psp_old.is_k_constant\n assert psp_new.is_a_constant == psp_old.is_a_constant\n assert psp_new.is_log == psp_old.is_log\n assert psp_new.growth_factor_0 == psp_old.growth_factor_0\n assert psp_new.growth_exponent == psp_old.growth_exponent\n assert psp_new.extrap_order_lok == psp_old.extrap_order_lok\n assert psp_new.extrap_order_hik == psp_old.extrap_order_hik\n assert pk_old == pk_new\n\n\[email protected]('k', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])\n])\ndef test_nonlin_matter_power_halomod(k):\n a = 0.8\n pk = ccl.nonlin_matter_power(COSMO_HM, k, a)\n\n # New implementation\n mdef = ccl.halos.MassDef('vir', 'matter')\n hmf = ccl.halos.MassFuncSheth99(COSMO_HM, mdef,\n mass_def_strict=False,\n use_delta_c_fit=True)\n hbf = ccl.halos.HaloBiasSheth99(COSMO_HM, mass_def=mdef,\n mass_def_strict=False)\n cc = ccl.halos.ConcentrationDuffy08(mdef)\n prf = ccl.halos.HaloProfileNFW(cc)\n hmc = ccl.halos.HMCalculator(COSMO_HM, hmf, hbf, mdef)\n pkb = ccl.halos.halomod_power_spectrum(COSMO_HM, hmc, k, a,\n prf, normprof1=True)\n\n assert np.allclose(pk, pkb)\n assert np.all(np.isfinite(pk))\n assert np.shape(pk) == np.shape(k)\n\n\[email protected]('k', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])])\ndef test_linear_matter_power_smoke(k):\n a = 0.8\n pk = ccl.linear_matter_power(COSMO, k, a)\n assert np.all(np.isfinite(pk))\n assert np.shape(pk) == np.shape(k)\n\n\ndef test_linear_matter_power_raises():\n cosmo = ccl.CosmologyVanillaLCDM(transfer_function=None)\n with pytest.raises(ccl.CCLError):\n ccl.linear_matter_power(cosmo, 1., 1.)\n\n\ndef test_nonlin_matter_power_raises():\n cosmo = ccl.CosmologyVanillaLCDM(matter_power_spectrum=None)\n with pytest.raises(ccl.CCLError):\n ccl.nonlin_matter_power(cosmo, 1., 1.)\n\n\ndef test_linear_power_raises():\n cosmo = ccl.CosmologyVanillaLCDM(transfer_function='bbks')\n with pytest.raises(KeyError):\n ccl.linear_power(cosmo, 1., 1., p_of_k_a='a:b')\n\n\ndef test_nonlin_power_raises():\n cosmo = ccl.CosmologyVanillaLCDM(transfer_function='bbks')\n with pytest.raises(KeyError):\n ccl.nonlin_power(cosmo, 1., 1., 
p_of_k_a='a:b')\n\n\[email protected]('k', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])])\ndef test_nonlin_matter_power_smoke(k):\n a = 0.8\n pk = ccl.nonlin_matter_power(COSMO, k, a)\n assert np.all(np.isfinite(pk))\n assert np.shape(pk) == np.shape(k)\n\n\[email protected]('r', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])])\ndef test_sigmaR_smoke(r):\n a = 0.8\n sig = ccl.sigmaR(COSMO, r, a)\n assert np.all(np.isfinite(sig))\n assert np.shape(sig) == np.shape(r)\n\n\[email protected]('r', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])])\ndef test_sigmaV_smoke(r):\n a = 0.8\n sig = ccl.sigmaV(COSMO, r, a)\n assert np.all(np.isfinite(sig))\n assert np.shape(sig) == np.shape(r)\n\n\ndef test_sigma8_consistent():\n assert np.allclose(ccl.sigma8(COSMO), COSMO['sigma8'])\n assert np.allclose(ccl.sigmaR(COSMO, 8 / COSMO['h'], 1), COSMO['sigma8'])\n\n\[email protected]('A', [\n 1,\n 1.0,\n [0.3, 0.5, 1],\n np.array([0.3, 0.5, 1])])\ndef test_kNL(A):\n knl = ccl.kNL(COSMO, A)\n assert np.all(np.isfinite(knl))\n assert np.shape(knl) == np.shape(A)\n\n\[email protected]('tf,pk,m_nu', [\n # ('boltzmann_class', 'emu', 0.06), - this case is slow and not needed\n (None, 'emu', 0.06),\n ('bbks', 'emu', 0.06),\n ('eisenstein_hu', 'emu', 0.06),\n])\ndef test_transfer_matter_power_nu_raises(tf, pk, m_nu):\n cosmo = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,\n transfer_function=tf, matter_power_spectrum=pk, m_nu=m_nu)\n\n if tf is not None:\n with pytest.warns(CCLWarning):\n ccl.linear_matter_power(cosmo, 1, 1)\n\n with pytest.raises(CCLError):\n ccl.nonlin_matter_power(cosmo, 1, 1)\n\n\[email protected]('tf', [\n 'boltzmann_class', 'boltzmann_camb', 'boltzmann_isitgr'])\ndef test_power_sigma8norm_norms_consistent(tf):\n # make a cosmo with A_s\n cosmo = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, A_s=2e-9, n_s=0.96,\n transfer_function=tf)\n sigma8 = ccl.sigma8(cosmo)\n\n # remake same but now give sigma8\n cosmo_s8 = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=sigma8, n_s=0.96,\n transfer_function=tf)\n\n # make sure they come out the same-ish\n assert np.allclose(ccl.sigma8(cosmo), ccl.sigma8(cosmo_s8))\n\n # and that the power spectra look right\n a = 0.8\n gfac = (\n ccl.growth_factor(cosmo, a) / ccl.growth_factor(cosmo_s8, a))**2\n pk_rat = (\n ccl.linear_matter_power(cosmo, 1e-4, a) /\n ccl.linear_matter_power(cosmo_s8, 1e-4, a))\n assert np.allclose(pk_rat, gfac)\n\n\ndef test_input_lin_power_spectrum():\n # Setup\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7, n_s=0.965,\n A_s=2e-9)\n a_arr = np.linspace(0.1, 1.0, 50)\n chi_from_ccl = ccl.background.comoving_radial_distance(cosmo, a_arr)\n hoh0_from_ccl = ccl.background.h_over_h0(cosmo, a_arr)\n growth_from_ccl = ccl.background.growth_factor_unnorm(cosmo, a_arr)\n fgrowth_from_ccl = ccl.background.growth_rate(cosmo, a_arr)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pk_arr = np.empty(shape=(len(a_arr), len(k_arr)))\n for i, a in enumerate(a_arr):\n pk_arr[i] = ccl.power.linear_matter_power(cosmo, k_arr, a)\n\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr})\n\n pk_CCL_input = ccl.power.linear_matter_power(cosmo_input, k_arr, 
0.5)\n pk_CCL = ccl.power.linear_matter_power(cosmo, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Test again with negative power spectrum (so it's not logscaled)\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': -pk_arr})\n\n pk_CCL_input = -ccl.power.linear_matter_power(cosmo_input, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Via `linear_power`\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a:b': pk_arr})\n pk_CCL_input = ccl.power.linear_power(cosmo_input, k_arr, 0.5,\n p_of_k_a='a:b')\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n\ndef test_input_linpower_raises():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n transfer_function='bbks')\n a_arr = np.linspace(0.1, 1.0, 50)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pk_arr = np.array([ccl.power.linear_matter_power(cosmo, k_arr, a)\n for a in a_arr])\n\n # Not a dictionary\n with pytest.raises(TypeError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear=np.pi)\n\n # a not increasing\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr[::-1], 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr})\n\n # Dm x Dm not present\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter;delta_matter': pk_arr})\n\n # Non-parsable power spectrum\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a;b': pk_arr})\n\n # Wrong shape\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a:b': pk_arr[0]})\n\n # Check new power spectrum is stored\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a:b': pk_arr})\n assert 'a:b' in cosmo_input._pk_lin\n assert cosmo_input.has_linear_power\n\n\ndef test_input_nonlinear_model():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7, n_s=0.965,\n A_s=2e-9, transfer_function='boltzmann_class')\n a_arr = np.linspace(0.1, 1.0, 50)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pk_arr = np.empty(shape=(len(a_arr), len(k_arr)))\n for i, a in enumerate(a_arr):\n pk_arr[i] = ccl.power.nonlin_matter_power(cosmo, k_arr, a)\n\n pk_CCL = ccl.power.nonlin_matter_power(cosmo, k_arr, 0.5)\n\n # Test again passing only linear Pk, but letting HALOFIT do its thing\n kl_arr = np.logspace(-4, 1, 
1000)\n pkl_arr = np.array([ccl.power.linear_matter_power(cosmo, kl_arr, a)\n for a in a_arr])\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n pk_linear={'a': a_arr, 'k': kl_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model='halofit')\n\n pk_CCL_input = ccl.power.nonlin_matter_power(cosmo_input, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Test extra power spectrum\n kl_arr = np.logspace(-4, 1, 1000)\n pkl_arr = np.array([ccl.power.linear_matter_power(cosmo, kl_arr, a)\n for a in a_arr])\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n pk_linear={'a': a_arr, 'k': kl_arr,\n 'delta_matter:delta_matter': pkl_arr,\n 'a:b': pkl_arr},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': -pk_arr},\n nonlinear_model='halofit')\n\n pk_CCL_input = cosmo_input.get_nonlin_power('a:b').eval(k_arr,\n 0.5,\n cosmo_input)\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Via `nonlin_power`\n pk_CCL_input = ccl.power.nonlin_power(cosmo_input, k_arr, 0.5,\n p_of_k_a='a:b')\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Use dictionary\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n pk_linear={'a': a_arr, 'k': kl_arr,\n 'delta_matter:delta_matter': pkl_arr,\n 'a:b': pkl_arr, 'c:d': pkl_arr},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': -pk_arr},\n nonlinear_model={'a:b': 'halofit',\n 'c:d': None})\n pk_CCL_input = ccl.power.nonlin_power(cosmo_input, k_arr, 0.5,\n p_of_k_a='a:b')\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n assert 'c:d' not in cosmo_input._pk_nl\n\n\ndef test_input_nonlin_power_spectrum():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7, n_s=0.965,\n A_s=2e-9, transfer_function='boltzmann_class')\n a_arr = np.linspace(0.1, 1.0, 50)\n chi_from_ccl = ccl.background.comoving_radial_distance(cosmo, a_arr)\n hoh0_from_ccl = ccl.background.h_over_h0(cosmo, a_arr)\n growth_from_ccl = ccl.background.growth_factor_unnorm(cosmo, a_arr)\n fgrowth_from_ccl = ccl.background.growth_rate(cosmo, a_arr)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pk_arr = np.empty(shape=(len(a_arr), len(k_arr)))\n for i, a in enumerate(a_arr):\n pk_arr[i] = ccl.power.nonlin_matter_power(cosmo, k_arr, a)\n\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr})\n\n pk_CCL_input = ccl.power.nonlin_matter_power(cosmo_input, k_arr, 0.5)\n pk_CCL = ccl.power.nonlin_matter_power(cosmo, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Test again with negative power spectrum (so it's not logscaled)\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': -pk_arr})\n\n pk_CCL_input = -ccl.power.nonlin_matter_power(cosmo_input, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., 
rtol=1e-5)\n\n\ndef test_input_nonlinear_model_raises():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n transfer_function='bbks')\n a_arr = np.linspace(0.1, 1.0, 50)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pkl_arr = np.array([ccl.power.linear_matter_power(cosmo, k_arr, a)\n for a in a_arr])\n\n # If no non-linear model provided, delta_matter:delta_matter\n # should be there.\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'a:b': pkl_arr})\n\n with pytest.raises(TypeError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model=np.pi)\n\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n nonlinear_model='halofit')\n\n with pytest.raises(KeyError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model={'y:z': 'halofit'})\n\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model={'delta_matter:delta_matter': None})\n\n with pytest.raises(KeyError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model={'delta_matter:delta_matter': 'hmcode'})\n\n\ndef test_input_nonlin_raises():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n transfer_function='bbks')\n a_arr = np.linspace(0.1, 1.0, 50)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pkl_arr = np.array([ccl.power.linear_matter_power(cosmo, k_arr, a)\n for a in a_arr])\n pk_arr = np.array([ccl.power.nonlin_matter_power(cosmo, k_arr, a)\n for a in a_arr])\n\n # Not a dictionary\n with pytest.raises(TypeError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin=np.pi)\n\n # k not present\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'kk': k_arr,\n 'delta_matter;delta_matter': pk_arr})\n\n # a not increasing\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr[::-1], 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr})\n\n # delta_matter:delta_matter not present\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter;delta_matter': pk_arr})\n\n # Non-parsable power spectrum\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a;b': pk_arr})\n\n # Wrong shape\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a:b': pk_arr[0]})\n\n # Linear Pk not set for halofit\n 
with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n nonlinear_model='halofit')\n\n # Check new power spectrum is stored\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr,\n 'a:b': pkl_arr},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr},\n nonlinear_model='halofit')\n assert 'a:b' in cosmo_input._pk_nl\n assert cosmo_input.has_nonlin_power\n\n\ndef test_camb_de_model():\n \"\"\"Check that the dark energy model for CAMB has been properly defined.\"\"\"\n with pytest.raises(ValueError):\n cosmo = ccl.CosmologyVanillaLCDM(\n transfer_function='boltzmann_camb',\n extra_parameters={\"camb\": {\"dark_energy_model\": \"pf\"}})\n ccl.linear_matter_power(cosmo, 1, 1)\n\n \"\"\"Check that w is not less than -1, if the chosen dark energy model for\n CAMB is fluid.\"\"\"\n with pytest.raises(ValueError):\n cosmo = ccl.CosmologyVanillaLCDM(\n transfer_function='boltzmann_camb', w0=-1, wa=-1)\n ccl.linear_matter_power(cosmo, 1, 1)\n\n \"\"\"Check that ppf is running smoothly.\"\"\"\n cosmo = ccl.CosmologyVanillaLCDM(\n transfer_function='boltzmann_camb', w0=-1, wa=-1,\n extra_parameters={\"camb\": {\"dark_energy_model\": \"ppf\"}})\n assert np.isfinite(ccl.linear_matter_power(cosmo, 1, 1))\n", "import numpy as np\nimport matplotlib.pyplot as plt\nimport py_cosmo_mad as csm\n\nTCMB=2.725\nPLOT_STUFF=0\nWRITE_STUFF=1\nFS=16\n\ndef do_all(z_arr,cpar,prefix) :\n pcs=csm.PcsPar()\n pcs.background_set(cpar['om'],cpar['ol'],cpar['ob'],cpar['w0'],cpar['wa'],cpar['hh'],TCMB)\n\n a_arr=1./(z_arr+1)\n gf_arr=np.array([pcs.growth_factor(a) for a in a_arr])\n\n if PLOT_STUFF==1 :\n plt.plot(a_arr,gf_arr); plt.xlabel('$a$',fontsize=FS); plt.ylabel('$D(z)$',fontsize=FS); plt.show()\n\n if WRITE_STUFF==1 :\n np.savetxt(prefix+\"_gf.txt\",np.transpose([z_arr,gf_arr]),header=\"[1] z, [2] D(z)\")\n\nz_arr=np.array([0.,1.,2.,3.,4.,5.])\n\ncpar_model1={'om': 0.3,'ol': 0.7 ,'ob':0.05,'hh': 0.7,'s8': 0.8,'ns': 0.96,'w0': -1.0, 'wa': 0.0}\ncpar_model2={'om': 0.3,'ol': 0.7 ,'ob':0.05,'hh': 0.7,'s8': 0.8,'ns': 0.96,'w0': -0.9, 'wa': 0.0}\ncpar_model3={'om': 0.3,'ol': 0.7 ,'ob':0.05,'hh': 0.7,'s8': 0.8,'ns': 0.96,'w0': -0.9, 'wa': 0.1}\ncpar_model4={'om': 0.3,'ol': 0.75,'ob':0.05,'hh': 0.7,'s8': 0.8,'ns': 0.96,'w0': -0.9, 'wa': 0.1}\ncpar_model5={'om': 0.3,'ol': 0.65,'ob':0.05,'hh': 0.7,'s8': 0.8,'ns': 0.96,'w0': -0.9, 'wa': 0.1}\n\ndo_all(z_arr,cpar_model1,\"model1\")\ndo_all(z_arr,cpar_model2,\"model2\")\ndo_all(z_arr,cpar_model3,\"model3\")\ndo_all(z_arr,cpar_model3,\"model4\")\ndo_all(z_arr,cpar_model3,\"model5\")\n", "import numpy as np\nimport matplotlib.pyplot as plt\nimport py_cosmo_mad as csm\n\n#Produces all sigma(M) benchmarks\n#Contact [email protected] if you have issues running this script\n\nTCMB=2.725\nPLOT_STUFF=0\nWRITE_STUFF=1\nFS=16\nLKMAX=7\n\ndef do_all(m_arr,cpar,prefix) :\n pcs=csm.PcsPar()\n pcs.background_set(cpar['om'],cpar['ol'],cpar['ob'],cpar['w0'],cpar['wa'],cpar['hh'],TCMB)\n pcs.set_linear_pk('BBKS',-3,LKMAX,0.01,cpar['ns'],cpar['s8'])\n\n r_arr=np.array([pcs.M2R(m) for m in m_arr])\n sm_arr=np.sqrt(np.array([pcs.sig0_L(r,r,'TopHat','TopHat') for r in r_arr]))\n\n if PLOT_STUFF==1 :\n plt.plot(m_arr,sm_arr)\n plt.xlabel('$M\\\\,[M_{\\\\odot}\\\\,h^{-1}]$',fontsize=FS)\n plt.ylabel('$\\\\sigma(M)$',fontsize=FS)\n plt.gca().set_xscale('log');\n plt.gca().set_yscale('log');\n 
plt.show()\n\n if WRITE_STUFF==1 :\n np.savetxt(prefix+\"_sm.txt\",np.transpose([m_arr,sm_arr]),header=\"[1] M (M_sun/h), [2] sigma(M)\")\n\nz_arr=np.array([0.,1.,2.,3.,4.,5.])\nlm_arr=6.+2*np.arange(6)\nm_arr=10**lm_arr\n\ncpar_model1={'om': 0.3,'ol': 0.7,'ob':0.05,'hh': 0.7,'s8': 0.8,'ns': 0.96,'w0': -1.0, 'wa': 0.0}\ncpar_model2={'om': 0.3,'ol': 0.7,'ob':0.05,'hh': 0.7,'s8': 0.8,'ns': 0.96,'w0': -0.9, 'wa': 0.0}\ncpar_model3={'om': 0.3,'ol': 0.7,'ob':0.05,'hh': 0.7,'s8': 0.8,'ns': 0.96,'w0': -0.9, 'wa': 0.1}\n\ndo_all(m_arr,cpar_model1,\"model1\")\ndo_all(m_arr,cpar_model2,\"model2\")\ndo_all(m_arr,cpar_model3,\"model3\")\n" ]
[ [ "numpy.allclose", "numpy.isfinite", "numpy.linspace", "numpy.logspace", "numpy.log10", "numpy.shape", "numpy.array" ], [ "matplotlib.pyplot.plot", "numpy.transpose", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "matplotlib.pyplot.gca", "numpy.arange", "matplotlib.pyplot.plot", "numpy.transpose", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vinzdef/rorrim
[ "7a3fd4a212420b6fbe6590005300ea095938bf66" ]
[ "python/frame_convert2.py" ]
[ "import numpy as np\nimport matplotlib\n\ndef to_hsv(numb):\n hue = np.interp(numb, [0, 1024], [0, 1])\n rgb = matplotlib.colors.hsv_to_rgb(np.array([hue, 0.5, 1]))\n bgr = rgb[:, :, ::-1] # RGB -> BGR\n return np.array(bgr)\n\ndef hsv_depth(depth):\n depth = to_hsv(depth)\n return depth\n\ndef pretty_depth(depth):\n \"\"\"Converts depth into a 'nicer' format for display\n\n This is abstracted to allow for experimentation with normalization\n\n Args:\n depth: A numpy array with 2 bytes per pixel\n\n Returns:\n A numpy array that has been processed with unspecified datatype\n \"\"\"\n np.clip(depth, 0, 1024, depth)\n depth >>= 2\n depth = depth.astype(np.uint8)\n return depth\n\n\ndef pretty_depth_cv(depth):\n \"\"\"Converts depth into a 'nicer' format for display\n\n This is abstracted to allow for experimentation with normalization\n\n Args:\n depth: A numpy array with 2 bytes per pixel\n\n Returns:\n A numpy array with unspecified datatype\n \"\"\"\n return pretty_depth(depth)\n\n\ndef video_cv(video):\n \"\"\"Converts video into a BGR format for display\n\n This is abstracted out to allow for experimentation\n\n Args:\n video: A numpy array with 1 byte per pixel, 3 channels RGB\n\n Returns:\n A numpy array with with 1 byte per pixel, 3 channels BGR\n \"\"\"\n return video[:, :, ::-1] # RGB -> BGR" ]
[ [ "numpy.array", "numpy.interp", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
guillefix/mt-lightning
[ "56e93a569d85a768c178b15461e5362c25fdc3e3", "56e93a569d85a768c178b15461e5362c25fdc3e3" ]
[ "analysis/pymo/preprocessing.py", "feature_extraction/madmom/evaluation/beats.py" ]
[ "'''\nPreprocessing Tranformers Based on sci-kit's API\n\nBy Omid Alemi\nCreated on June 12, 2017\n'''\nimport copy\nimport pandas as pd\nimport numpy as np\nimport transforms3d as t3d\nimport scipy.ndimage.filters as filters\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nfrom analysis.pymo.rotation_tools import Rotation, euler2expmap, euler2expmap2, expmap2euler, euler_reorder, unroll\nfrom analysis.pymo.Quaternions import Quaternions\nfrom analysis.pymo.Pivots import Pivots\n\nclass MocapParameterizer(BaseEstimator, TransformerMixin):\n def __init__(self, param_type = 'euler'):\n '''\n\n param_type = {'euler', 'quat', 'expmap', 'position', 'expmap2pos'}\n '''\n self.param_type = param_type\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n print(\"MocapParameterizer: \" + self.param_type)\n if self.param_type == 'euler':\n return X\n elif self.param_type == 'expmap':\n return self._to_expmap(X)\n elif self.param_type == 'quat':\n return X\n elif self.param_type == 'position':\n return self._to_pos(X)\n elif self.param_type == 'expmap2pos':\n return self._expmap_to_pos(X)\n else:\n raise 'param types: euler, quat, expmap, position, expmap2pos'\n\n# return X\n\n def inverse_transform(self, X, copy=None):\n if self.param_type == 'euler':\n return X\n elif self.param_type == 'expmap':\n return self._expmap_to_euler(X)\n elif self.param_type == 'quat':\n raise 'quat2euler is not supported'\n elif self.param_type == 'position':\n # raise 'positions 2 eulers is not supported'\n print('positions 2 eulers is not supported')\n return X\n else:\n raise 'param types: euler, quat, expmap, position'\n\n def _to_pos(self, X):\n '''Converts joints rotations in Euler angles to joint positions'''\n\n Q = []\n for track in X:\n channels = []\n titles = []\n euler_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n pos_df = pd.DataFrame(index=euler_df.index)\n\n # Copy the root rotations into the new DataFrame\n # rxp = '%s_Xrotation'%track.root_name\n # ryp = '%s_Yrotation'%track.root_name\n # rzp = '%s_Zrotation'%track.root_name\n # pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)\n # pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)\n # pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)\n\n # List the columns that contain rotation channels\n rot_cols = [c for c in euler_df.columns if ('rotation' in c)]\n\n # List the columns that contain position channels\n pos_cols = [c for c in euler_df.columns if ('position' in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton)\n\n tree_data = {}\n\n for joint in track.traverse():\n parent = track.skeleton[joint]['parent']\n rot_order = track.skeleton[joint]['order']\n #print(\"rot_order:\" + joint + \" :\" + rot_order)\n\n # Get the rotation columns that belong to this joint\n rc = euler_df[[c for c in rot_cols if joint in c]]\n\n # Get the position columns that belong to this joint\n pc = euler_df[[c for c in pos_cols if joint in c]]\n\n # Make sure the columns are organized in xyz order\n if rc.shape[1] < 3:\n euler_values = np.zeros((euler_df.shape[0], 3))\n rot_order = \"XYZ\"\n else:\n euler_values = np.pi/180.0*np.transpose(np.array([track.values['%s_%srotation'%(joint, rot_order[0])], track.values['%s_%srotation'%(joint, rot_order[1])], track.values['%s_%srotation'%(joint, rot_order[2])]]))\n\n if pc.shape[1] < 3:\n pos_values = np.asarray([[0,0,0] for f in pc.iterrows()])\n 
else:\n pos_values =np.asarray([[f[1]['%s_Xposition'%joint],\n f[1]['%s_Yposition'%joint],\n f[1]['%s_Zposition'%joint]] for f in pc.iterrows()])\n\n quats = Quaternions.from_euler(np.asarray(euler_values), order=rot_order.lower(), world=False)\n\n tree_data[joint]=[\n [], # to store the rotation matrix\n [] # to store the calculated position\n ]\n if track.root_name == joint:\n tree_data[joint][0] = quats#rotmats\n # tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])\n tree_data[joint][1] = pos_values\n else:\n # for every frame i, multiply this joint's rotmat to the rotmat of its parent\n tree_data[joint][0] = tree_data[parent][0]*quats# np.matmul(rotmats, tree_data[parent][0])\n\n # add the position channel to the offset and store it in k, for every frame i\n k = pos_values + np.asarray(track.skeleton[joint]['offsets'])\n\n # multiply k to the rotmat of the parent for every frame i\n q = tree_data[parent][0]*k #np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])\n\n # add q to the position of the parent, for every frame i\n tree_data[joint][1] = tree_data[parent][1] + q #q.reshape(k.shape[0],3) + tree_data[parent][1]\n\n # Create the corresponding columns in the new DataFrame\n pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)\n pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)\n pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)\n\n\n new_track = track.clone()\n new_track.values = pos_df\n Q.append(new_track)\n return Q\n\n def _expmap2rot(self, expmap):\n\n theta = np.linalg.norm(expmap, axis=1, keepdims=True)\n nz = np.nonzero(theta)[0]\n\n expmap[nz,:] = expmap[nz,:]/theta[nz]\n\n nrows=expmap.shape[0]\n x = expmap[:,0]\n y = expmap[:,1]\n z = expmap[:,2]\n\n s = np.sin(theta*0.5).reshape(nrows)\n c = np.cos(theta*0.5).reshape(nrows)\n\n rotmats = np.zeros((nrows, 3, 3))\n\n rotmats[:,0,0] = 2*(x*x-1)*s*s+1\n rotmats[:,0,1] = 2*x*y*s*s-2*z*c*s\n rotmats[:,0,2] = 2*x*z*s*s+2*y*c*s\n rotmats[:,1,0] = 2*x*y*s*s+2*z*c*s\n rotmats[:,1,1] = 2*(y*y-1)*s*s+1\n rotmats[:,1,2] = 2*y*z*s*s-2*x*c*s\n rotmats[:,2,0] = 2*x*z*s*s-2*y*c*s\n rotmats[:,2,1] = 2*y*z*s*s+2*x*c*s\n rotmats[:,2,2] = 2*(z*z-1)*s*s+1\n\n return rotmats\n\n def _expmap_to_pos(self, X):\n '''Converts joints rotations in expmap notation to joint positions'''\n\n Q = []\n for track in X:\n channels = []\n titles = []\n exp_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n pos_df = pd.DataFrame(index=exp_df.index)\n\n # Copy the root rotations into the new DataFrame\n # rxp = '%s_Xrotation'%track.root_name\n # ryp = '%s_Yrotation'%track.root_name\n # rzp = '%s_Zrotation'%track.root_name\n # pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)\n # pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)\n # pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)\n\n # List the columns that contain rotation channels\n exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton)\n\n tree_data = {}\n\n for joint in track.traverse():\n parent = track.skeleton[joint]['parent']\n\n if 'Nub' not in joint:\n r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint\n expmap = r.values\n #expmap = 
[[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()]\n else:\n expmap = np.zeros((exp_df.shape[0], 3))\n\n # Convert the eulers to rotation matrices\n #rotmats = np.asarray([Rotation(f, 'expmap').rotmat for f in expmap])\n #angs = np.linalg.norm(expmap,axis=1, keepdims=True)\n rotmats = self._expmap2rot(expmap)\n\n tree_data[joint]=[\n [], # to store the rotation matrix\n [] # to store the calculated position\n ]\n pos_values = np.zeros((exp_df.shape[0], 3))\n\n if track.root_name == joint:\n tree_data[joint][0] = rotmats\n # tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])\n tree_data[joint][1] = pos_values\n else:\n # for every frame i, multiply this joint's rotmat to the rotmat of its parent\n tree_data[joint][0] = np.matmul(rotmats, tree_data[parent][0])\n\n # add the position channel to the offset and store it in k, for every frame i\n k = pos_values + track.skeleton[joint]['offsets']\n\n # multiply k to the rotmat of the parent for every frame i\n q = np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])\n\n # add q to the position of the parent, for every frame i\n tree_data[joint][1] = q.reshape(k.shape[0],3) + tree_data[parent][1]\n\n\n # Create the corresponding columns in the new DataFrame\n pos_df['%s_Xposition'%joint] = pd.Series(data=tree_data[joint][1][:,0], index=pos_df.index)\n pos_df['%s_Yposition'%joint] = pd.Series(data=tree_data[joint][1][:,1], index=pos_df.index)\n pos_df['%s_Zposition'%joint] = pd.Series(data=tree_data[joint][1][:,2], index=pos_df.index)\n\n new_track = track.clone()\n new_track.values = pos_df\n Q.append(new_track)\n return Q\n\n def _to_expmap(self, X):\n '''Converts Euler angles to Exponential Maps'''\n\n Q = []\n for track in X:\n channels = []\n titles = []\n euler_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n exp_df = euler_df.copy()# pd.DataFrame(index=euler_df.index)\n\n # Copy the root positions into the new DataFrame\n #rxp = '%s_Xposition'%track.root_name\n #ryp = '%s_Yposition'%track.root_name\n #rzp = '%s_Zposition'%track.root_name\n #exp_df[rxp] = pd.Series(data=euler_df[rxp], index=exp_df.index)\n #exp_df[ryp] = pd.Series(data=euler_df[ryp], index=exp_df.index)\n #exp_df[rzp] = pd.Series(data=euler_df[rzp], index=exp_df.index)\n\n # List the columns that contain rotation channels\n rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton if 'Nub' not in joint)\n\n for joint in joints:\n #print(joint)\n r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint\n rot_order = track.skeleton[joint]['order']\n r1_col = '%s_%srotation'%(joint, rot_order[0])\n r2_col = '%s_%srotation'%(joint, rot_order[1])\n r3_col = '%s_%srotation'%(joint, rot_order[2])\n\n exp_df.drop([r1_col, r2_col, r3_col], axis=1, inplace=True)\n euler = [[f[1][r1_col], f[1][r2_col], f[1][r3_col]] for f in r.iterrows()]\n #exps = [Rotation(f, 'euler', from_deg=True, order=rot_order).to_expmap() for f in euler] # Convert the eulers to exp maps\n exps = unroll(np.array([euler2expmap(f, rot_order, True) for f in euler])) # Convert the exp maps to eulers\n # exps = np.array([euler2expmap(f, rot_order, True) for f in euler]) # Convert the exp maps to eulers\n #exps = euler2expmap2(euler, rot_order, True) # Convert the eulers to exp maps\n\n # Create the corresponding columns in the new DataFrame\n\n 
exp_df.insert(loc=0, column='%s_gamma'%joint, value=pd.Series(data=[e[2] for e in exps], index=exp_df.index))\n exp_df.insert(loc=0, column='%s_beta'%joint, value=pd.Series(data=[e[1] for e in exps], index=exp_df.index))\n exp_df.insert(loc=0, column='%s_alpha'%joint, value=pd.Series(data=[e[0] for e in exps], index=exp_df.index))\n\n #print(exp_df.columns)\n new_track = track.clone()\n new_track.values = exp_df\n Q.append(new_track)\n\n return Q\n\n def _expmap_to_euler(self, X):\n Q = []\n for track in X:\n channels = []\n titles = []\n exp_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n #euler_df = pd.DataFrame(index=exp_df.index)\n euler_df = exp_df.copy()\n\n # Copy the root positions into the new DataFrame\n #rxp = '%s_Xposition'%track.root_name\n #ryp = '%s_Yposition'%track.root_name\n #rzp = '%s_Zposition'%track.root_name\n #euler_df[rxp] = pd.Series(data=exp_df[rxp], index=euler_df.index)\n #euler_df[ryp] = pd.Series(data=exp_df[ryp], index=euler_df.index)\n #euler_df[rzp] = pd.Series(data=exp_df[rzp], index=euler_df.index)\n\n # List the columns that contain rotation channels\n exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton if 'Nub' not in joint)\n\n for joint in joints:\n r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint\n\n euler_df.drop(['%s_alpha'%joint, '%s_beta'%joint, '%s_gamma'%joint], axis=1, inplace=True)\n expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()] # Make sure the columsn are organized in xyz order\n rot_order = track.skeleton[joint]['order']\n #euler_rots = [Rotation(f, 'expmap').to_euler(True, rot_order) for f in expmap] # Convert the exp maps to eulers\n euler_rots = [expmap2euler(f, rot_order, True) for f in expmap] # Convert the exp maps to eulers\n\n # Create the corresponding columns in the new DataFrame\n\n euler_df['%s_%srotation'%(joint, rot_order[0])] = pd.Series(data=[e[0] for e in euler_rots], index=euler_df.index)\n euler_df['%s_%srotation'%(joint, rot_order[1])] = pd.Series(data=[e[1] for e in euler_rots], index=euler_df.index)\n euler_df['%s_%srotation'%(joint, rot_order[2])] = pd.Series(data=[e[2] for e in euler_rots], index=euler_df.index)\n\n new_track = track.clone()\n new_track.values = euler_df\n Q.append(new_track)\n\n return Q\n\nclass Mirror(BaseEstimator, TransformerMixin):\n def __init__(self, axis=\"X\", append=True):\n \"\"\"\n Mirrors the data\n \"\"\"\n self.axis = axis\n self.append = append\n\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n print(\"Mirror: \" + self.axis)\n Q = []\n\n if self.append:\n for track in X:\n Q.append(track)\n\n for track in X:\n channels = []\n titles = []\n\n if self.axis == \"X\":\n signs = np.array([1,-1,-1])\n if self.axis == \"Y\":\n signs = np.array([-1,1,-1])\n if self.axis == \"Z\":\n signs = np.array([-1,-1,1])\n\n euler_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n new_df = pd.DataFrame(index=euler_df.index)\n\n # Copy the root positions into the new DataFrame\n rxp = '%s_Xposition'%track.root_name\n ryp = '%s_Yposition'%track.root_name\n rzp = '%s_Zposition'%track.root_name\n new_df[rxp] = pd.Series(data=-signs[0]*euler_df[rxp], index=new_df.index)\n new_df[ryp] = pd.Series(data=-signs[1]*euler_df[ryp], 
index=new_df.index)\n new_df[rzp] = pd.Series(data=-signs[2]*euler_df[rzp], index=new_df.index)\n\n # List the columns that contain rotation channels\n rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]\n #lft_rots = [c for c in euler_df.columns if ('Left' in c and 'rotation' in c and 'Nub' not in c)]\n #rgt_rots = [c for c in euler_df.columns if ('Right' in c and 'rotation' in c and 'Nub' not in c)]\n lft_joints = (joint for joint in track.skeleton if 'Left' in joint and 'Nub' not in joint)\n rgt_joints = (joint for joint in track.skeleton if 'Right' in joint and 'Nub' not in joint)\n\n new_track = track.clone()\n\n for lft_joint in lft_joints:\n #lr = euler_df[[c for c in rots if lft_joint + \"_\" in c]]\n #rot_order = track.skeleton[lft_joint]['order']\n #lft_eulers = [[f[1]['%s_Xrotation'%lft_joint], f[1]['%s_Yrotation'%lft_joint], f[1]['%s_Zrotation'%lft_joint]] for f in lr.iterrows()]\n\n rgt_joint = lft_joint.replace('Left', 'Right')\n #rr = euler_df[[c for c in rots if rgt_joint + \"_\" in c]]\n #rot_order = track.skeleton[rgt_joint]['order']\n# rgt_eulers = [[f[1]['%s_Xrotation'%rgt_joint], f[1]['%s_Yrotation'%rgt_joint], f[1]['%s_Zrotation'%rgt_joint]] for f in rr.iterrows()]\n\n # Create the corresponding columns in the new DataFrame\n\n new_df['%s_Xrotation'%lft_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%rgt_joint], index=new_df.index)\n new_df['%s_Yrotation'%lft_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%rgt_joint], index=new_df.index)\n new_df['%s_Zrotation'%lft_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%rgt_joint], index=new_df.index)\n\n new_df['%s_Xrotation'%rgt_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%lft_joint], index=new_df.index)\n new_df['%s_Yrotation'%rgt_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%lft_joint], index=new_df.index)\n new_df['%s_Zrotation'%rgt_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%lft_joint], index=new_df.index)\n\n # List the joints that are not left or right, i.e. 
are on the trunk\n joints = (joint for joint in track.skeleton if 'Nub' not in joint and 'Left' not in joint and 'Right' not in joint)\n\n for joint in joints:\n #r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint\n #rot_order = track.skeleton[joint]['order']\n\n #eulers = [[f[1]['%s_Xrotation'%joint], f[1]['%s_Yrotation'%joint], f[1]['%s_Zrotation'%joint]] for f in r.iterrows()]\n\n # Create the corresponding columns in the new DataFrame\n new_df['%s_Xrotation'%joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%joint], index=new_df.index)\n new_df['%s_Yrotation'%joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%joint], index=new_df.index)\n new_df['%s_Zrotation'%joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%joint], index=new_df.index)\n\n new_track.values = new_df\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None, start_pos=None):\n return X\n\nclass EulerReorder(BaseEstimator, TransformerMixin):\n def __init__(self, new_order):\n \"\"\"\n Add a\n \"\"\"\n self.new_order = new_order\n\n\n def fit(self, X, y=None):\n self.orig_skeleton = copy.deepcopy(X[0].skeleton)\n print(self.orig_skeleton)\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n for track in X:\n channels = []\n titles = []\n euler_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n new_df = pd.DataFrame(index=euler_df.index)\n\n # Copy the root positions into the new DataFrame\n rxp = '%s_Xposition'%track.root_name\n ryp = '%s_Yposition'%track.root_name\n rzp = '%s_Zposition'%track.root_name\n new_df[rxp] = pd.Series(data=euler_df[rxp], index=new_df.index)\n new_df[ryp] = pd.Series(data=euler_df[ryp], index=new_df.index)\n new_df[rzp] = pd.Series(data=euler_df[rzp], index=new_df.index)\n\n # List the columns that contain rotation channels\n rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton if 'Nub' not in joint)\n\n new_track = track.clone()\n for joint in joints:\n r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint\n rot_order = track.skeleton[joint]['order']\n\n euler = [[f[1]['%s_Xrotation'%(joint)], f[1]['%s_Yrotation'%(joint)], f[1]['%s_Zrotation'%(joint)]] for f in r.iterrows()]\n new_euler = [euler_reorder(f, rot_order, self.new_order, True) for f in euler]\n #new_euler = euler_reorder2(np.array(euler), rot_order, self.new_order, True)\n\n # Create the corresponding columns in the new DataFrame\n new_df['%s_%srotation'%(joint, self.new_order[0])] = pd.Series(data=[e[0] for e in new_euler], index=new_df.index)\n new_df['%s_%srotation'%(joint, self.new_order[1])] = pd.Series(data=[e[1] for e in new_euler], index=new_df.index)\n new_df['%s_%srotation'%(joint, self.new_order[2])] = pd.Series(data=[e[2] for e in new_euler], index=new_df.index)\n\n new_track.skeleton[joint]['order'] = self.new_order\n\n new_track.values = new_df\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None, start_pos=None):\n return X\n# Q = []\n#\n# for track in X:\n# channels = []\n# titles = []\n# euler_df = track.values\n#\n# # Create a new DataFrame to store the exponential map rep\n# new_df = pd.DataFrame(index=euler_df.index)\n#\n# # Copy the root positions into the new DataFrame\n# rxp = '%s_Xposition'%track.root_name\n# ryp = '%s_Yposition'%track.root_name\n# rzp = 
'%s_Zposition'%track.root_name\n# new_df[rxp] = pd.Series(data=euler_df[rxp], index=new_df.index)\n# new_df[ryp] = pd.Series(data=euler_df[ryp], index=new_df.index)\n# new_df[rzp] = pd.Series(data=euler_df[rzp], index=new_df.index)\n#\n# # List the columns that contain rotation channels\n# rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]\n#\n# # List the joints that are not end sites, i.e., have channels\n# joints = (joint for joint in track.skeleton if 'Nub' not in joint)\n#\n# new_track = track.clone()\n# for joint in joints:\n# r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint\n# rot_order = track.skeleton[joint]['order']\n# new_order = self.orig_skeleton[joint]['order']\n# print(\"rot_order:\" + str(rot_order))\n# print(\"new_order:\" + str(new_order))\n#\n# euler = [[f[1]['%s_%srotation'%(joint, rot_order[0])], f[1]['%s_%srotation'%(joint, rot_order[1])], f[1]['%s_%srotation'%(joint, rot_order[2])]] for f in r.iterrows()]\n# #new_euler = [euler_reorder(f, rot_order, new_order, True) for f in euler]\n# new_euler = euler_reorder2(np.array(euler), rot_order, self.new_order, True)\n#\n# # Create the corresponding columns in the new DataFrame\n# new_df['%s_%srotation'%(joint, new_order[0])] = pd.Series(data=[e[0] for e in new_euler], index=new_df.index)\n# new_df['%s_%srotation'%(joint, new_order[1])] = pd.Series(data=[e[1] for e in new_euler], index=new_df.index)\n# new_df['%s_%srotation'%(joint, new_order[2])] = pd.Series(data=[e[2] for e in new_euler], index=new_df.index)\n#\n# new_track.skeleton[joint]['order'] = new_order\n#\n# new_track.values = new_df\n# Q.append(new_track)\n# return Q\n\nclass JointSelector(BaseEstimator, TransformerMixin):\n '''\n Allows for filtering the mocap data to include only the selected joints\n '''\n def __init__(self, joints, include_root=False):\n self.joints = joints\n self.include_root = include_root\n\n def fit(self, X, y=None):\n selected_joints = []\n selected_channels = []\n\n if self.include_root:\n selected_joints.append(X[0].root_name)\n\n selected_joints.extend(self.joints)\n\n for joint_name in selected_joints:\n selected_channels.extend([o for o in X[0].values.columns if (joint_name + \"_\") in o and 'Nub' not in o])\n\n self.selected_joints = selected_joints\n self.selected_channels = selected_channels\n self.not_selected = X[0].values.columns.difference(selected_channels)\n self.not_selected_values = {c:X[0].values[c].values[0] for c in self.not_selected}\n\n self.orig_skeleton = X[0].skeleton\n return self\n\n def transform(self, X, y=None):\n print(\"JointSelector\")\n Q = []\n for track in X:\n t2 = track.clone()\n for key in track.skeleton.keys():\n if key not in self.selected_joints:\n parent = t2.skeleton[key]['parent']\n if parent in t2.skeleton:\n t2.skeleton[parent]['children'].remove(key)\n t2.skeleton.pop(key)\n t2.values = track.values[self.selected_channels]\n\n Q.append(t2)\n\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n t2 = track.clone()\n t2.skeleton = self.orig_skeleton\n for d in self.not_selected:\n t2.values[d] = self.not_selected_values[d]\n Q.append(t2)\n\n return Q\n\n\nclass Numpyfier(BaseEstimator, TransformerMixin):\n '''\n Just converts the values in a MocapData object into a numpy array\n Useful for the final stage of a pipeline before training\n '''\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n self.org_mocap_ = X[0].clone()\n self.org_mocap_.values.drop(self.org_mocap_.values.index, 
inplace=True)\n\n return self\n\n def transform(self, X, y=None):\n print(\"Numpyfier\")\n Q = []\n\n for track in X:\n Q.append(track.values.values)\n #print(\"Numpyfier:\" + str(track.values.columns))\n\n return np.array(Q)\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n\n new_mocap = self.org_mocap_.clone()\n time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')\n\n # print(self.org_mocap_.values.columns)\n # import pdb;pdb.set_trace()\n new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)\n\n new_mocap.values = new_df\n\n\n Q.append(new_mocap)\n\n return Q\n\nclass Slicer(BaseEstimator, TransformerMixin):\n '''\n Slice the data into intervals of equal size\n '''\n def __init__(self, window_size, overlap=0.5):\n self.window_size = window_size\n self.overlap = overlap\n pass\n\n def fit(self, X, y=None):\n self.org_mocap_ = X[0].clone()\n self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)\n\n return self\n\n def transform(self, X, y=None):\n print(\"Slicer\")\n Q = []\n\n for track in X:\n vals = track.values.values\n nframes = vals.shape[0]\n overlap_frames = (int)(self.overlap*self.window_size)\n\n n_sequences = (nframes-overlap_frames)//(self.window_size-overlap_frames)\n\n if n_sequences>0:\n y = np.zeros((n_sequences, self.window_size, vals.shape[1]))\n\n # extract sequences from the input data\n for i in range(0,n_sequences):\n frameIdx = (self.window_size-overlap_frames) * i\n Q.append(vals[frameIdx:frameIdx+self.window_size,:])\n\n return np.array(Q)\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n\n new_mocap = self.org_mocap_.clone()\n time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')\n\n new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)\n\n new_mocap.values = new_df\n\n\n Q.append(new_mocap)\n\n return Q\n\nclass RootTransformer(BaseEstimator, TransformerMixin):\n def __init__(self, method, position_smoothing=0, rotation_smoothing=0):\n \"\"\"\n Accepted methods:\n abdolute_translation_deltas\n pos_rot_deltas\n \"\"\"\n self.method = method\n self.position_smoothing=position_smoothing\n self.rotation_smoothing=rotation_smoothing\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n print(\"RootTransformer\")\n Q = []\n\n for track in X:\n if self.method == 'abdolute_translation_deltas':\n new_df = track.values.copy()\n xpcol = '%s_Xposition'%track.root_name\n ypcol = '%s_Yposition'%track.root_name\n zpcol = '%s_Zposition'%track.root_name\n\n\n dxpcol = '%s_dXposition'%track.root_name\n dzpcol = '%s_dZposition'%track.root_name\n\n x=track.values[xpcol].copy()\n z=track.values[zpcol].copy()\n\n if self.position_smoothing>0:\n x_sm = filters.gaussian_filter1d(x, self.position_smoothing, axis=0, mode='nearest')\n z_sm = filters.gaussian_filter1d(z, self.position_smoothing, axis=0, mode='nearest')\n dx = pd.Series(data=x_sm, index=new_df.index).diff()\n dz = pd.Series(data=z_sm, index=new_df.index).diff()\n new_df[xpcol] = x-x_sm\n new_df[zpcol] = z-z_sm\n else:\n dx = x.diff()\n dz = z.diff()\n new_df.drop([xpcol, zpcol], axis=1, inplace=True)\n\n dx[0] = dx[1]\n dz[0] = dz[1]\n\n new_df[dxpcol] = dx\n new_df[dzpcol] = dz\n\n new_track = track.clone()\n new_track.values = new_df\n # end of abdolute_translation_deltas\n\n elif self.method == 'pos_rot_deltas':\n new_track = track.clone()\n\n # Absolute columns\n xp_col = 
'%s_Xposition'%track.root_name\n yp_col = '%s_Yposition'%track.root_name\n zp_col = '%s_Zposition'%track.root_name\n\n #rot_order = track.skeleton[track.root_name]['order']\n #%(joint, rot_order[0])\n\n rot_order = track.skeleton[track.root_name]['order']\n r1_col = '%s_%srotation'%(track.root_name, rot_order[0])\n r2_col = '%s_%srotation'%(track.root_name, rot_order[1])\n r3_col = '%s_%srotation'%(track.root_name, rot_order[2])\n\n # Delta columns\n dxp_col = '%s_dXposition'%track.root_name\n dzp_col = '%s_dZposition'%track.root_name\n\n dxr_col = '%s_dXrotation'%track.root_name\n dyr_col = '%s_dYrotation'%track.root_name\n dzr_col = '%s_dZrotation'%track.root_name\n\n positions = np.transpose(np.array([track.values[xp_col], track.values[yp_col], track.values[zp_col]]))\n rotations = np.pi/180.0*np.transpose(np.array([track.values[r1_col], track.values[r2_col], track.values[r3_col]]))\n\n \"\"\" Get Trajectory and smooth it\"\"\"\n trajectory_filterwidth = self.position_smoothing\n reference = positions.copy()*np.array([1,0,1])\n if trajectory_filterwidth>0:\n reference = filters.gaussian_filter1d(reference, trajectory_filterwidth, axis=0, mode='nearest')\n\n \"\"\" Get Root Velocity \"\"\"\n velocity = np.diff(reference, axis=0)\n velocity = np.vstack((velocity[0,:], velocity))\n\n \"\"\" Remove Root Translation \"\"\"\n positions = positions-reference\n\n \"\"\" Get Forward Direction along the x-z plane, assuming character is facig z-forward \"\"\"\n #forward = [Rotation(f, 'euler', from_deg=True, order=rot_order).rotmat[:,2] for f in rotations] # get the z-axis of the rotation matrix, assuming character is facig z-forward\n #print(\"order:\" + rot_order.lower())\n quats = Quaternions.from_euler(rotations, order=rot_order.lower(), world=False)\n forward = quats*np.array([[0,0,1]])\n forward[:,1] = 0\n\n \"\"\" Smooth Forward Direction \"\"\"\n direction_filterwidth = self.rotation_smoothing\n if direction_filterwidth>0:\n forward = filters.gaussian_filter1d(forward, direction_filterwidth, axis=0, mode='nearest')\n\n forward = forward / np.sqrt((forward**2).sum(axis=-1))[...,np.newaxis]\n\n \"\"\" Remove Y Rotation \"\"\"\n target = np.array([[0,0,1]]).repeat(len(forward), axis=0)\n rotation = Quaternions.between(target, forward)[:,np.newaxis]\n positions = (-rotation[:,0]) * positions\n new_rotations = (-rotation[:,0]) * quats\n velocity = (-rotation[:,0]) * velocity\n\n \"\"\" Get Root Rotation \"\"\"\n #print(rotation[:,0])\n rvelocity = Pivots.from_quaternions(rotation[1:] * -rotation[:-1]).ps\n rvelocity = np.vstack((rvelocity[0], rvelocity))\n\n eulers = np.array([t3d.euler.quat2euler(q, axes=('s'+rot_order.lower()[::-1]))[::-1] for q in new_rotations])*180.0/np.pi\n\n new_df = track.values.copy()\n\n root_pos_x = pd.Series(data=positions[:,0], index=new_df.index)\n root_pos_y = pd.Series(data=positions[:,1], index=new_df.index)\n root_pos_z = pd.Series(data=positions[:,2], index=new_df.index)\n root_pos_x_diff = pd.Series(data=velocity[:,0], index=new_df.index)\n root_pos_z_diff = pd.Series(data=velocity[:,2], index=new_df.index)\n\n root_rot_1 = pd.Series(data=eulers[:,0], index=new_df.index)\n root_rot_2 = pd.Series(data=eulers[:,1], index=new_df.index)\n root_rot_3 = pd.Series(data=eulers[:,2], index=new_df.index)\n root_rot_y_diff = pd.Series(data=rvelocity[:,0], index=new_df.index)\n\n #new_df.drop([xr_col, yr_col, zr_col, xp_col, zp_col], axis=1, inplace=True)\n\n new_df[xp_col] = root_pos_x\n new_df[yp_col] = root_pos_y\n new_df[zp_col] = root_pos_z\n new_df[dxp_col] = 
root_pos_x_diff\n new_df[dzp_col] = root_pos_z_diff\n\n new_df[r1_col] = root_rot_1\n new_df[r2_col] = root_rot_2\n new_df[r3_col] = root_rot_3\n #new_df[dxr_col] = root_rot_x_diff\n new_df[dyr_col] = root_rot_y_diff\n #new_df[dzr_col] = root_rot_z_diff\n\n new_track.values = new_df\n\n\n elif self.method == 'hip_centric':\n new_track = track.clone()\n\n # Absolute columns\n xp_col = '%s_Xposition'%track.root_name\n yp_col = '%s_Yposition'%track.root_name\n zp_col = '%s_Zposition'%track.root_name\n\n xr_col = '%s_Xrotation'%track.root_name\n yr_col = '%s_Yrotation'%track.root_name\n zr_col = '%s_Zrotation'%track.root_name\n\n new_df = track.values.copy()\n\n all_zeros = np.zeros(track.values[xp_col].values.shape)\n\n new_df[xp_col] = pd.Series(data=all_zeros, index=new_df.index)\n new_df[yp_col] = pd.Series(data=all_zeros, index=new_df.index)\n new_df[zp_col] = pd.Series(data=all_zeros, index=new_df.index)\n\n new_df[xr_col] = pd.Series(data=all_zeros, index=new_df.index)\n new_df[yr_col] = pd.Series(data=all_zeros, index=new_df.index)\n new_df[zr_col] = pd.Series(data=all_zeros, index=new_df.index)\n\n new_track.values = new_df\n\n #print(new_track.values.columns)\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None, start_pos=None):\n Q = []\n\n #TODO: simplify this implementation\n\n startx = 0\n startz = 0\n\n if start_pos is not None:\n startx, startz = start_pos\n\n for track in X:\n new_track = track.clone()\n if self.method == 'abdolute_translation_deltas':\n new_df = new_track.values\n xpcol = '%s_Xposition'%track.root_name\n ypcol = '%s_Yposition'%track.root_name\n zpcol = '%s_Zposition'%track.root_name\n\n\n dxpcol = '%s_dXposition'%track.root_name\n dzpcol = '%s_dZposition'%track.root_name\n\n dx = track.values[dxpcol].values\n dz = track.values[dzpcol].values\n\n recx = [startx]\n recz = [startz]\n\n for i in range(dx.shape[0]-1):\n recx.append(recx[i]+dx[i+1])\n recz.append(recz[i]+dz[i+1])\n\n # recx = [recx[i]+dx[i+1] for i in range(dx.shape[0]-1)]\n # recz = [recz[i]+dz[i+1] for i in range(dz.shape[0]-1)]\n # recx = dx[:-1] + dx[1:]\n # recz = dz[:-1] + dz[1:]\n if self.position_smoothing > 0:\n new_df[xpcol] = pd.Series(data=new_df[xpcol]+recx, index=new_df.index)\n new_df[zpcol] = pd.Series(data=new_df[zpcol]+recz, index=new_df.index)\n else:\n new_df[xpcol] = pd.Series(data=recx, index=new_df.index)\n new_df[zpcol] = pd.Series(data=recz, index=new_df.index)\n\n new_df.drop([dxpcol, dzpcol], axis=1, inplace=True)\n\n new_track.values = new_df\n # end of abdolute_translation_deltas\n\n elif self.method == 'pos_rot_deltas':\n # Absolute columns\n rot_order = track.skeleton[track.root_name]['order']\n xp_col = '%s_Xposition'%track.root_name\n yp_col = '%s_Yposition'%track.root_name\n zp_col = '%s_Zposition'%track.root_name\n\n xr_col = '%s_Xrotation'%track.root_name\n yr_col = '%s_Yrotation'%track.root_name\n zr_col = '%s_Zrotation'%track.root_name\n r1_col = '%s_%srotation'%(track.root_name, rot_order[0])\n r2_col = '%s_%srotation'%(track.root_name, rot_order[1])\n r3_col = '%s_%srotation'%(track.root_name, rot_order[2])\n\n # Delta columns\n dxp_col = '%s_dXposition'%track.root_name\n dzp_col = '%s_dZposition'%track.root_name\n\n dyr_col = '%s_dYrotation'%track.root_name\n\n positions = np.transpose(np.array([track.values[xp_col], track.values[yp_col], track.values[zp_col]]))\n rotations = np.pi/180.0*np.transpose(np.array([track.values[r1_col], track.values[r2_col], track.values[r3_col]]))\n quats = Quaternions.from_euler(rotations, 
order=rot_order.lower(), world=False)\n\n new_df = track.values.copy()\n\n dx = track.values[dxp_col].values\n dz = track.values[dzp_col].values\n\n dry = track.values[dyr_col].values\n\n #rec_p = np.array([startx, 0, startz])+positions[0,:]\n rec_ry = Quaternions.id(quats.shape[0])\n rec_xp = [0]\n rec_zp = [0]\n\n #rec_r = Quaternions.id(quats.shape[0])\n\n for i in range(dx.shape[0]-1):\n #print(dry[i])\n q_y = Quaternions.from_angle_axis(np.array(dry[i+1]), np.array([0,1,0]))\n rec_ry[i+1] = q_y*rec_ry[i]\n #print(\"dx: + \" + str(dx[i+1]))\n dp = rec_ry[i+1]*np.array([dx[i+1], 0, dz[i+1]])\n rec_xp.append(rec_xp[i]+dp[0,0])\n rec_zp.append(rec_zp[i]+dp[0,2])\n\n rec_r=rec_ry*quats\n pp=rec_ry*positions\n rec_xp = rec_xp + pp[:,0]\n rec_zp = rec_zp + pp[:,2]\n\n eulers = np.array([t3d.euler.quat2euler(q, axes=('s'+rot_order.lower()[::-1]))[::-1] for q in rec_r])*180.0/np.pi\n\n new_df = track.values.copy()\n\n root_rot_1 = pd.Series(data=eulers[:,0], index=new_df.index)\n root_rot_2 = pd.Series(data=eulers[:,1], index=new_df.index)\n root_rot_3 = pd.Series(data=eulers[:,2], index=new_df.index)\n\n new_df[xp_col] = pd.Series(data=rec_xp, index=new_df.index)\n new_df[zp_col] = pd.Series(data=rec_zp, index=new_df.index)\n\n new_df[r1_col] = pd.Series(data=root_rot_1, index=new_df.index)\n new_df[r2_col] = pd.Series(data=root_rot_2, index=new_df.index)\n new_df[r3_col] = pd.Series(data=root_rot_3, index=new_df.index)\n\n new_df.drop([dyr_col, dxp_col, dzp_col], axis=1, inplace=True)\n\n\n new_track.values = new_df\n\n #print(new_track.values.columns)\n Q.append(new_track)\n\n return Q\n\n\nclass RootCentricPositionNormalizer(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n for track in X:\n new_track = track.clone()\n\n rxp = '%s_Xposition'%track.root_name\n ryp = '%s_Yposition'%track.root_name\n rzp = '%s_Zposition'%track.root_name\n\n projected_root_pos = track.values[[rxp, ryp, rzp]]\n\n projected_root_pos.loc[:,ryp] = 0 # we want the root's projection on the floor plane as the ref\n\n new_df = pd.DataFrame(index=track.values.index)\n\n all_but_root = [joint for joint in track.skeleton if track.root_name not in joint]\n # all_but_root = [joint for joint in track.skeleton]\n for joint in all_but_root:\n new_df['%s_Xposition'%joint] = pd.Series(data=track.values['%s_Xposition'%joint]-projected_root_pos[rxp], index=new_df.index)\n new_df['%s_Yposition'%joint] = pd.Series(data=track.values['%s_Yposition'%joint]-projected_root_pos[ryp], index=new_df.index)\n new_df['%s_Zposition'%joint] = pd.Series(data=track.values['%s_Zposition'%joint]-projected_root_pos[rzp], index=new_df.index)\n\n\n # keep the root as it is now\n new_df[rxp] = track.values[rxp]\n new_df[ryp] = track.values[ryp]\n new_df[rzp] = track.values[rzp]\n\n new_track.values = new_df\n\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n new_track = track.clone()\n\n rxp = '%s_Xposition'%track.root_name\n ryp = '%s_Yposition'%track.root_name\n rzp = '%s_Zposition'%track.root_name\n\n projected_root_pos = track.values[[rxp, ryp, rzp]]\n\n projected_root_pos.loc[:,ryp] = 0 # we want the root's projection on the floor plane as the ref\n\n new_df = pd.DataFrame(index=track.values.index)\n\n for joint in track.skeleton:\n new_df['%s_Xposition'%joint] = pd.Series(data=track.values['%s_Xposition'%joint]+projected_root_pos[rxp], index=new_df.index)\n 
new_df['%s_Yposition'%joint] = pd.Series(data=track.values['%s_Yposition'%joint]+projected_root_pos[ryp], index=new_df.index)\n new_df['%s_Zposition'%joint] = pd.Series(data=track.values['%s_Zposition'%joint]+projected_root_pos[rzp], index=new_df.index)\n\n\n new_track.values = new_df\n\n Q.append(new_track)\n\n return Q\n\nclass Flattener(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n return np.concatenate(X, axis=0)\n\nclass ConstantsRemover(BaseEstimator, TransformerMixin):\n '''\n For now it just looks at the first track\n '''\n\n def __init__(self, eps = 1e-6, only_cols=None):\n self.eps = eps\n self.only_cols = only_cols\n\n\n def fit(self, X, y=None):\n stds = X[0].values.std()\n cols = X[0].values.columns.values\n if self.only_cols is not None:\n self.const_dims_ = [c for c in cols if ((stds[c] < self.eps).any()) and c in self.only_cols]\n else:\n self.const_dims_ = [c for c in cols if (stds[c] < self.eps).any()]\n # self.const_values_ = {c:X[0].values[c].values[0] for c in cols if (stds[c] < self.eps).any()}\n self.const_values_ = {c:X[0].values[c].values[0] for c in cols if self.const_dims_}\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n\n for track in X:\n t2 = track.clone()\n #for key in t2.skeleton.keys():\n # if key in self.ConstDims_:\n # t2.skeleton.pop(key)\n #print(track.values.columns.difference(self.const_dims_))\n t2.values.drop(self.const_dims_, axis=1, inplace=True)\n #t2.values = track.values[track.values.columns.difference(self.const_dims_)]\n Q.append(t2)\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n t2 = track.clone()\n for d in self.const_dims_:\n t2.values[d] = self.const_values_[d]\n# t2.values.assign(d=pd.Series(data=self.const_values_[d], index = t2.values.index))\n Q.append(t2)\n\n return Q\n\nclass ListStandardScaler(BaseEstimator, TransformerMixin):\n def __init__(self, is_DataFrame=False):\n self.is_DataFrame = is_DataFrame\n\n def fit(self, X, y=None):\n if self.is_DataFrame:\n X_train_flat = np.concatenate([m.values for m in X], axis=0)\n else:\n X_train_flat = np.concatenate([m for m in X], axis=0)\n\n self.data_mean_ = np.mean(X_train_flat, axis=0)\n self.data_std_ = np.std(X_train_flat, axis=0)\n\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n for track in X:\n if self.is_DataFrame:\n normalized_track = track.copy()\n normalized_track.values = (track.values - self.data_mean_) / self.data_std_\n else:\n normalized_track = (track - self.data_mean_) / self.data_std_\n\n Q.append(normalized_track)\n\n if self.is_DataFrame:\n return Q\n else:\n return np.array(Q)\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n\n if self.is_DataFrame:\n unnormalized_track = track.copy()\n unnormalized_track.values = (track.values * self.data_std_) + self.data_mean_\n else:\n unnormalized_track = (track * self.data_std_) + self.data_mean_\n\n Q.append(unnormalized_track)\n\n if self.is_DataFrame:\n return Q\n else:\n return np.array(Q)\n\nclass ListMinMaxScaler(BaseEstimator, TransformerMixin):\n def __init__(self, is_DataFrame=False):\n self.is_DataFrame = is_DataFrame\n\n def fit(self, X, y=None):\n if self.is_DataFrame:\n X_train_flat = np.concatenate([m.values for m in X], axis=0)\n else:\n X_train_flat = np.concatenate([m for m in X], axis=0)\n\n self.data_max_ = np.max(X_train_flat, axis=0)\n self.data_min_ = np.min(X_train_flat, axis=0)\n\n return self\n\n def 
transform(self, X, y=None):\n Q = []\n\n for track in X:\n if self.is_DataFrame:\n normalized_track = track.copy()\n normalized_track.values = (track.values - self.data_min_) / (self.data_max_ - self.data_min_)\n else:\n normalized_track = (track - self.data_min_) / (self.data_max_ - self.data_min_)\n\n Q.append(normalized_track)\n\n if self.is_DataFrame:\n return Q\n else:\n return np.array(Q)\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n\n if self.is_DataFrame:\n unnormalized_track = track.copy()\n unnormalized_track.values = (track.values * (self.data_max_ - self.data_min_)) + self.data_min_\n else:\n unnormalized_track = (track * (self.data_max_ - self.data_min_)) + self.data_min_\n\n Q.append(unnormalized_track)\n\n if self.is_DataFrame:\n return Q\n else:\n return np.array(Q)\n\nclass DownSampler(BaseEstimator, TransformerMixin):\n def __init__(self, tgt_fps, keep_all=False):\n self.tgt_fps = tgt_fps\n self.keep_all = keep_all\n\n\n def fit(self, X, y=None):\n\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n for track in X:\n orig_fps=round(1.0/track.framerate)\n rate = orig_fps//self.tgt_fps\n if orig_fps%self.tgt_fps!=0:\n print(\"error orig_fps (\" + str(orig_fps) + \") is not dividable with tgt_fps (\" + str(self.tgt_fps) + \")\")\n else:\n print(\"downsampling with rate: \" + str(rate))\n\n #print(track.values.size)\n for ii in range(0,rate):\n new_track = track.clone()\n new_track.values = track.values[ii:-1:rate].copy()\n #print(new_track.values.size)\n #new_track = track[0:-1:self.rate]\n new_track.framerate = 1.0/self.tgt_fps\n Q.append(new_track)\n if not self.keep_all:\n break\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n return X\n\nclass ReverseTime(BaseEstimator, TransformerMixin):\n def __init__(self, append=True):\n self.append = append\n\n\n def fit(self, X, y=None):\n\n return self\n\n def transform(self, X, y=None):\n Q = []\n if self.append:\n for track in X:\n Q.append(track)\n for track in X:\n new_track = track.clone()\n new_track.values = track.values[-1::-1]\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n return X\n\n#TODO: JointsSelector (x)\n#TODO: SegmentMaker\n#TODO: DynamicFeaturesAdder\n#TODO: ShapeFeaturesAdder\n#TODO: DataFrameNumpier (x)\n\nclass TemplateTransform(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n return X\n", "# encoding: utf-8\n# pylint: disable=no-member\n# pylint: disable=invalid-name\n# pylint: disable=too-many-arguments\n\"\"\"\nThis module contains beat evaluation functionality.\n\nThe measures are described in [1]_, a Matlab implementation exists here:\nhttp://code.soundsoftware.ac.uk/projects/beat-evaluation/repository\n\nNotes\n-----\nPlease note that this is a complete re-implementation, which took some other\ndesign decisions. For example, the beat detections and annotations are not\nquantised before being evaluated with F-measure, P-score and other metrics.\nHence these evaluation functions DO NOT report the exact same results/scores.\nThis approach was chosen, because it is simpler and produces more accurate\nresults.\n\nReferences\n----------\n.. [1] Matthew E. P. Davies, Norberto Degara, and Mark D. 
Plumbley,\n \"Evaluation Methods for Musical Audio Beat Tracking Algorithms\",\n Technical Report C4DM-TR-09-06,\n Centre for Digital Music, Queen Mary University of London, 2009.\n\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom functools import wraps\nimport warnings\n\nimport numpy as np\n\nfrom . import (MeanEvaluation, calc_absolute_errors, calc_errors,\n evaluation_io, find_closest_matches)\nfrom .onsets import OnsetEvaluation\nfrom ..io import load_beats\n\n\n# exceptions\nclass BeatIntervalError(Exception):\n \"\"\"\n Exception to be raised whenever an interval cannot be computed.\n\n \"\"\"\n # pylint: disable=super-init-not-called\n\n def __init__(self, value=None):\n if value is None:\n value = \"At least two beats must be present to be able to \" \\\n \"calculate an interval.\"\n self.value = value\n\n def __str__(self):\n return repr(self.value)\n\n\n# decorators\ndef array(metric):\n \"\"\"\n Decorate metric to convert annotations and detections to numpy arrays.\n\n \"\"\"\n\n @wraps(metric)\n def float_array(detections, annotations, *args, **kwargs):\n \"\"\"Warp detections and annotations as numpy arrays.\"\"\"\n # make sure the annotations and detections have a float dtype\n detections = np.asarray(detections, dtype=np.float)\n annotations = np.asarray(annotations, dtype=np.float)\n return metric(detections, annotations, *args, **kwargs)\n\n return float_array\n\n\ndef _score_decorator(perfect_score, zero_score):\n \"\"\"\n Decorate metric with evaluation results for perfect and zero score.\n\n Parameters\n ----------\n perfect_score : float or tuple\n zero_score : float or tuple\n\n Returns\n -------\n metric\n Decorated metric.\n\n \"\"\"\n\n def wrap(metric):\n \"\"\"Metric to decorate\"\"\"\n\n @wraps(metric)\n def score(detections, annotations, *args, **kwargs):\n \"\"\"\n Return perfect/zero score if neither/either detections and\n annotations are given, respectively.\n\n \"\"\"\n # neither detections nor annotations are given, perfect score\n if len(detections) == 0 and len(annotations) == 0:\n return perfect_score\n # either beat detections or annotations are empty, score 0\n elif (len(detections) == 0) != (len(annotations) == 0):\n return zero_score\n # normal scoring\n return metric(detections, annotations, *args, **kwargs)\n\n return score\n\n return wrap\n\n\nscore_10 = _score_decorator(1., 0.)\nscore_1100 = _score_decorator((1., 1.), (0., 0.))\n\n\n# function for sequence variations generation\ndef variations(sequence, offbeat=False, double=False, half=False,\n triple=False, third=False):\n \"\"\"\n Create variations of the given beat sequence.\n\n Parameters\n ----------\n sequence : numpy array\n Beat sequence.\n offbeat : bool, optional\n Create an offbeat sequence.\n double : bool, optional\n Create a double tempo sequence.\n half : bool, optional\n Create half tempo sequences (includes offbeat version).\n triple : bool, optional\n Create triple tempo sequence.\n third : bool, optional\n Create third tempo sequences (includes offbeat versions).\n\n Returns\n -------\n list\n Beat sequence variations.\n\n \"\"\"\n # create different variants of the annotations\n sequences = []\n # double/half and offbeat variation\n if double or offbeat:\n if len(sequence) == 0:\n # if we have an empty sequence, there's nothing to interpolate\n double_sequence = []\n else:\n # create a sequence with double tempo\n same = np.arange(0, len(sequence))\n # request one item less, otherwise we would extrapolate\n shifted = np.arange(0, 
len(sequence), 0.5)[:-1]\n double_sequence = np.interp(shifted, same, sequence)\n # same tempo, half tempo off\n if offbeat:\n sequences.append(double_sequence[1::2])\n # double/half tempo variations\n if double:\n # double tempo\n sequences.append(double_sequence)\n if half:\n # half tempo odd beats (i.e. 1,3,1,3,..)\n sequences.append(sequence[0::2])\n # half tempo even beats (i.e. 2,4,2,4,..)\n sequences.append(sequence[1::2])\n # triple/third tempo variations\n if triple:\n if len(sequence) == 0:\n # if we have an empty sequence, there's nothing to interpolate\n triple_sequence = []\n else:\n # create a annotation sequence with triple tempo\n same = np.arange(0, len(sequence))\n # request two items less, otherwise we would extrapolate\n shifted = np.arange(0, len(sequence), 1. / 3)[:-2]\n triple_sequence = np.interp(shifted, same, sequence)\n # triple tempo\n sequences.append(triple_sequence)\n if third:\n # third tempo 1st beat (1,4,3,2,..)\n sequences.append(sequence[0::3])\n # third tempo 2nd beat (2,1,4,3,..)\n sequences.append(sequence[1::3])\n # third tempo 3rd beat (3,2,1,4,..)\n sequences.append(sequence[2::3])\n # return\n return sequences\n\n\n# helper functions for beat evaluation\ndef calc_intervals(events, fwd=False):\n \"\"\"\n Calculate the intervals of all events to the previous/next event.\n\n Parameters\n ----------\n events : numpy array\n Beat sequence.\n fwd : bool, optional\n Calculate the intervals towards the next event (instead of previous).\n\n Returns\n -------\n numpy array\n Beat intervals.\n\n Notes\n -----\n The sequence must be ordered. The first (last) interval will be set to\n the same value as the second (second to last) interval (when used in\n `fwd` mode).\n\n \"\"\"\n # at least 2 events must be given to calculate an interval\n if len(events) < 2:\n raise BeatIntervalError\n interval = np.zeros_like(events)\n if fwd:\n interval[:-1] = np.diff(events)\n # set the last interval to the same value as the second last\n interval[-1] = interval[-2]\n else:\n interval[1:] = np.diff(events)\n # set the first interval to the same value as the second\n interval[0] = interval[1]\n # return\n return interval\n\n\ndef find_closest_intervals(detections, annotations, matches=None):\n \"\"\"\n Find the closest annotated interval to each beat detection.\n\n Parameters\n ----------\n detections : list or numpy array\n Detected beats.\n annotations : list or numpy array\n Annotated beats.\n matches : list or numpy array\n Indices of the closest beats.\n\n Returns\n -------\n numpy array\n Closest annotated beat intervals.\n\n Notes\n -----\n The sequences must be ordered. 
To speed up the calculation, a list of\n pre-computed indices of the closest matches can be used.\n\n The function does NOT test if each detection has a surrounding interval,\n it always returns the closest interval.\n\n \"\"\"\n # if no detection are given, return an empty interval array\n if len(detections) == 0:\n return np.zeros(0, dtype=np.float)\n # at least annotations must be given\n if len(annotations) < 2:\n raise BeatIntervalError\n # init array\n closest_interval = np.ones_like(detections)\n # intervals\n # Note: it is faster if we combine the forward and backward intervals,\n # but we need to take care of the sizes; intervals to the next\n # annotation are always the same as those at the next index\n intervals = np.zeros(len(annotations) + 1)\n # intervals to previous annotation\n intervals[1:-1] = np.diff(annotations)\n # interval of the first annotation to the left is the same as to the right\n intervals[0] = intervals[1]\n # interval of the last annotation to the right is the same as to the left\n intervals[-1] = intervals[-2]\n # determine the closest annotations\n if matches is None:\n matches = find_closest_matches(detections, annotations)\n # calculate the absolute errors\n errors = calc_errors(detections, annotations, matches)\n # if the errors are positive, the detection is after the annotation\n # thus use the interval towards the next annotation\n closest_interval[errors > 0] = intervals[matches[errors > 0] + 1]\n # if the errors are 0 or negative, the detection is before the annotation\n # or at the same position; thus use the interval to previous annotation\n closest_interval[errors <= 0] = intervals[matches[errors <= 0]]\n # return the closest interval\n return closest_interval\n\n\ndef find_longest_continuous_segment(sequence_indices):\n \"\"\"\n ind the longest consecutive segment in the given sequence.\n\n Parameters\n ----------\n sequence_indices : numpy array\n Indices of the beats\n\n Returns\n -------\n length : int\n Length of the longest consecutive segment.\n start : int\n Start position of the longest continuous segment.\n\n \"\"\"\n # continuous segments have consecutive indices, i.e. diffs =! 1 are\n # boundaries between continuous segments; add 1 to get the correct index\n boundaries = np.nonzero(np.diff(sequence_indices) != 1)[0] + 1\n # add a start (index 0) and stop (length of correct detections) to the\n # segment boundary indices\n boundaries = np.concatenate(([0], boundaries, [len(sequence_indices)]))\n # lengths of the individual segments\n segment_lengths = np.diff(boundaries)\n # return the length and start position of the longest continuous segment\n length = int(np.max(segment_lengths))\n start_pos = int(boundaries[np.argmax(segment_lengths)])\n return length, start_pos\n\n\n@array\ndef calc_relative_errors(detections, annotations, matches=None):\n \"\"\"\n Errors of the detections relative to the closest annotated interval.\n\n Parameters\n ----------\n detections : list or numpy array\n Detected beats.\n annotations : list or numpy array\n Annotated beats.\n matches : list or numpy array\n Indices of the closest beats.\n\n Returns\n -------\n numpy array\n Errors relative to the closest annotated beat interval.\n\n Notes\n -----\n The sequences must be ordered! 
To speed up the calculation, a list of\n pre-computed indices of the closest matches can be used.\n\n \"\"\"\n # if no detection are given, return an empty interval array\n if len(detections) == 0:\n return np.zeros(0, dtype=np.float)\n # at least annotations must be given\n if len(annotations) < 2:\n raise BeatIntervalError\n # determine the closest annotations\n if matches is None:\n matches = find_closest_matches(detections, annotations)\n # calculate the absolute errors\n errors = calc_errors(detections, annotations, matches)\n # get the closest intervals\n intervals = find_closest_intervals(detections, annotations, matches)\n # return the relative errors\n return errors / intervals\n\n\n# default beat evaluation parameter values\nFMEASURE_WINDOW = 0.07\nPSCORE_TOLERANCE = 0.2\nCEMGIL_SIGMA = 0.04\nGOTO_THRESHOLD = 0.175\nGOTO_SIGMA = 0.1\nGOTO_MU = 0.1\nCONTINUITY_TEMPO_TOLERANCE = 0.175\nCONTINUITY_PHASE_TOLERANCE = 0.175\nINFORMATION_GAIN_BINS = 40\n\n\n# evaluation functions for beat detection\n@array\n@score_10\ndef pscore(detections, annotations, tolerance=PSCORE_TOLERANCE):\n \"\"\"\n Calculate the P-score accuracy for the given detections and annotations.\n\n The P-score is determined by taking the sum of the cross-correlation\n between two impulse trains, representing the detections and annotations\n allowing for a tolerance of 20% of the median annotated interval [1]_.\n\n Parameters\n ----------\n detections : list or numpy array\n Detected beats.\n annotations : list or numpy array\n Annotated beats.\n tolerance : float, optional\n Evaluation tolerance (fraction of the median beat interval).\n\n Returns\n -------\n pscore : float\n P-Score.\n\n Notes\n -----\n Contrary to the original implementation which samples the two impulse\n trains with 100Hz, we do not quantise the annotations and detections but\n rather count all detections falling withing the defined tolerance window.\n\n References\n ----------\n .. [1] M. McKinney, D. Moelants, M. Davies and A. Klapuri,\n \"Evaluation of audio beat tracking and music tempo extraction\n algorithms\",\n Journal of New Music Research, vol. 36, no. 1, 2007.\n\n \"\"\"\n # at least 2 annotations must be given to calculate an interval\n if len(annotations) < 2:\n raise BeatIntervalError(\"At least 2 annotations are needed for\"\n \"P-Score.\")\n # tolerance must be greater than 0\n if float(tolerance) <= 0:\n raise ValueError(\"`tolerance` must be greater than 0.\")\n # the error window is the given fraction of the median beat interval\n window = tolerance * np.median(np.diff(annotations))\n # errors\n errors = calc_absolute_errors(detections, annotations)\n # count the instances where the error is smaller or equal than the window\n p = len(detections[errors <= window])\n # normalize by the max number of detections/annotations\n p /= float(max(len(detections), len(annotations)))\n # return p-score\n return p\n\n\n@array\n@score_10\ndef cemgil(detections, annotations, sigma=CEMGIL_SIGMA):\n \"\"\"\n Calculate the Cemgil accuracy for the given detections and annotations.\n\n Parameters\n ----------\n detections : list or numpy array\n Detected beats.\n annotations : list or numpy array\n Annotated beats.\n sigma : float, optional\n Sigma for Gaussian error function.\n\n Returns\n -------\n cemgil : float\n Cemgil beat tracking accuracy.\n\n References\n ----------\n .. [1] A.T. Cemgil, B. Kappen, P. Desain, and H. Honing,\n \"On tempo tracking: Tempogram representation and Kalman filtering\",\n Journal Of New Music Research, vol. 28, no. 
4, 2001.\n\n \"\"\"\n # sigma must be greater than 0\n if float(sigma) <= 0:\n raise ValueError(\"`sigma` must be greater than 0.\")\n # determine the abs. errors of the detections to the closest annotations\n # Note: the original implementation searches for the closest matches of\n # detections given the annotations. Since absolute errors > a usual\n # beat interval produce high errors (and thus in turn add negligible\n # values to the accuracy), it is safe to swap those two.\n errors = calc_absolute_errors(detections, annotations)\n # apply a Gaussian error function with the given std. dev. on the errors\n acc = np.exp(-(errors ** 2.) / (2. * (sigma ** 2.)))\n # and sum up the accuracy\n acc = np.sum(acc)\n # normalized by the mean of the number of detections and annotations\n acc /= 0.5 * (len(annotations) + len(detections))\n # return accuracy\n return acc\n\n\n@array\n@score_10\ndef goto(detections, annotations, threshold=GOTO_THRESHOLD, sigma=GOTO_SIGMA,\n mu=GOTO_MU):\n \"\"\"\n Calculate the Goto and Muraoka accuracy for the given detections and\n annotations.\n\n Parameters\n ----------\n detections : list or numpy array\n Detected beats.\n annotations : list or numpy array\n Annotated beats.\n threshold : float, optional\n Threshold.\n sigma : float, optional\n Allowed std. dev. of the errors in the longest segment.\n mu : float, optional\n Allowed mean. of the errors in the longest segment.\n\n Returns\n -------\n goto : float\n Goto beat tracking accuracy.\n\n Notes\n -----\n [1]_ requires that the first correct beat detection must occur within the\n first 3/4 of the excerpt. In order to be able to deal with audio with\n varying tempo, this was altered that the length of the longest continuously\n tracked segment must be at least 1/4 of the total length [2]_.\n\n References\n ----------\n .. [1] M. Goto and Y. Muraoka,\n \"Issues in evaluating beat tracking systems\",\n Working Notes of the IJCAI-97 Workshop on Issues in AI and Music -\n Evaluation and Assessment, 1997.\n .. [2] Matthew E. P. Davies, Norberto Degara, and Mark D. 
Plumbley,\n \"Evaluation Methods for Musical Audio Beat Tracking Algorithms\",\n Technical Report C4DM-TR-09-06,\n Centre for Digital Music, Queen Mary University of London, 2009.\n\n \"\"\"\n # at least 2 annotations must be given to calculate an interval\n if len(annotations) < 2:\n raise BeatIntervalError(\"At least 2 annotations are needed for Goto's \"\n \"score.\")\n # threshold, sigma and mu must be greater than 0\n if float(threshold) <= 0 or float(sigma) <= 0 or float(mu) <= 0:\n raise ValueError(\"Threshold, sigma and mu must be positive.\")\n # get the indices of the closest detections to the annotations to determine\n # the longest continuous segment\n closest = find_closest_matches(annotations, detections)\n # keep only those which have abs(errors) <= threshold\n # Note: both the original paper and the Matlab implementation normalize by\n # half a beat interval, thus our threshold is halved (same applies to\n # sigma and mu)\n # errors of the detections relative to the surrounding annotation interval\n errors = calc_relative_errors(detections, annotations)\n # the absolute error must be smaller than the given threshold\n closest = closest[np.abs(errors[closest]) <= threshold]\n # get the length and start position of the longest continuous segment\n length, start = find_longest_continuous_segment(closest)\n # three conditions must be met to identify the segment as correct\n # 1) the length of the segment must be at least 1/4 of the total length\n # Note: the original paper requires that the first element must occur\n # within the first 3/4 of the excerpt, but this was altered in the\n # Matlab implementation to the above condition to be able to deal\n # with audio with varying tempo\n if length < 0.25 * len(annotations):\n return 0.\n # errors of the longest segment\n segment_errors = errors[closest[start: start + length]]\n # 2) mean of the errors must not exceed mu\n if np.mean(np.abs(segment_errors)) > mu:\n return 0.\n # 3) std deviation of the errors must not exceed sigma\n # Note: contrary to the original paper and in line with the Matlab code,\n # we calculate the std. deviation based on the raw errors and not on\n # their absolute values.\n if np.std(segment_errors) > sigma:\n return 0.\n # otherwise return 1\n return 1.\n\n\n@array\n@score_1100\ndef cml(detections, annotations, phase_tolerance=CONTINUITY_PHASE_TOLERANCE,\n tempo_tolerance=CONTINUITY_TEMPO_TOLERANCE):\n \"\"\"\n Calculate the cmlc and cmlt scores for the given detections and\n annotations.\n\n Parameters\n ----------\n detections : list or numpy array\n Detected beats.\n annotations : list or numpy array\n Annotated beats.\n phase_tolerance : float, optional\n Allowed phase tolerance.\n tempo_tolerance : float, optional\n Allowed tempo tolerance.\n\n Returns\n -------\n cmlc : float\n Longest continuous segment of correct detections normalized by the\n maximum length of both sequences (detection and annotations).\n cmlt : float\n Same as cmlc, but no continuity required.\n\n References\n ----------\n .. [1] S. Hainsworth,\n \"Techniques for the automated analysis of musical audio\",\n PhD. dissertation, Department of Engineering, Cambridge University,\n 2004.\n .. [2] A.P. Klapuri, A. Eronen, and J. Astola,\n \"Analysis of the meter of acoustic musical signals\",\n IEEE Transactions on Audio, Speech and Language Processing, vol. 14,\n no. 
1, 2006.\n\n \"\"\"\n # at least 2 annotations must be given to calculate an interval\n if len(annotations) < 2:\n raise BeatIntervalError(\"At least 2 annotations are needed for \"\n \"continuity scores, %s given.\" % annotations)\n # TODO: remove this, see TODO below\n if len(detections) < 2:\n raise BeatIntervalError(\"At least 2 detections are needed for \"\n \"continuity scores, %s given.\" % detections)\n # tolerances must be greater than 0\n if float(tempo_tolerance) <= 0 or float(phase_tolerance) <= 0:\n raise ValueError(\"Tempo and phase tolerances must be greater than 0\")\n # determine closest annotations to detections\n closest = find_closest_matches(detections, annotations)\n # errors of the detections wrt. to the annotations\n errors = calc_absolute_errors(detections, annotations, closest)\n # detection intervals\n det_interval = calc_intervals(detections)\n # annotation intervals (get those intervals at the correct positions)\n ann_interval = calc_intervals(annotations)[closest]\n # a detection is correct, if it fulfills 2 conditions:\n # 1) must match an annotation within a certain tolerance window, i.e. the\n # phase must be correct\n correct_phase = detections[errors <= ann_interval * phase_tolerance]\n # Note: the initially cited technical report has an additional condition\n # ii) on page 5 which requires the same condition to be true for the\n # previous detection / annotation combination. We do not enforce\n # this, since a) this condition is kind of pointless: why shouldn't\n # we count a correct beat just because its predecessor is not? and\n # b) the original Matlab implementation does not enforce it either\n # 2) the tempo, i.e. the intervals, must be within the tempo tolerance\n # TODO: as agreed with Matthew, this should only be enforced from the 2nd\n # beat onwards.\n correct_tempo = detections[abs(1 - (det_interval / ann_interval)) <=\n tempo_tolerance]\n # combine the conditions\n correct = np.intersect1d(correct_phase, correct_tempo)\n # convert to indices\n correct_idx = np.searchsorted(detections, correct)\n # cmlc: longest continuous segment of detections normalized by the max.\n # length of both sequences (detection and annotations)\n length = float(max(len(detections), len(annotations)))\n longest, _ = find_longest_continuous_segment(correct_idx)\n cmlc = longest / length\n # cmlt: same but for all detections (no need for continuity)\n cmlt = len(correct) / length\n # return a tuple\n return cmlc, cmlt\n\n\n@array\ndef continuity(detections, annotations,\n phase_tolerance=CONTINUITY_PHASE_TOLERANCE,\n tempo_tolerance=CONTINUITY_TEMPO_TOLERANCE,\n offbeat=True, double=True, triple=True):\n \"\"\"\n Calculate the cmlc, cmlt, amlc and amlt scores for the given detections and\n annotations.\n\n Parameters\n ----------\n detections : list or numpy array\n Detected beats.\n annotations : list or numpy array\n Annotated beats.\n phase_tolerance : float, optional\n Allowed phase tolerance.\n tempo_tolerance : float, optional\n Allowed tempo tolerance.\n offbeat : bool, optional\n Include offbeat variation.\n double : bool, optional\n Include double and half tempo variations (and offbeat thereof).\n triple : bool, optional\n Include triple and third tempo variations (and offbeats thereof).\n\n Returns\n -------\n cmlc : float\n Tracking accuracy, continuity at the correct metrical level required.\n cmlt : float\n Same as cmlc, continuity at the correct metrical level not required.\n amlc : float\n Same as cmlc, alternate metrical levels allowed.\n amlt : 
float\n Same as cmlt, alternate metrical levels allowed.\n\n See Also\n --------\n :func:`cml`\n\n \"\"\"\n # neither detections nor annotations are given\n if len(detections) == 0 and len(annotations) == 0:\n return 1., 1., 1., 1.\n # either a single beat detections or annotations given, score 0\n if len(detections) <= 1 or len(annotations) <= 1:\n return 0., 0., 0., 0.\n # evaluate the correct tempo\n cmlc, cmlt = cml(detections, annotations, tempo_tolerance, phase_tolerance)\n amlc = cmlc\n amlt = cmlt\n # speed up calculation by skipping other metrical levels if the score is\n # higher than 0.5 already. We must have tested the correct metrical level\n # already, otherwise the cmlc score would be lower.\n if cmlc > 0.5:\n return cmlc, cmlt, amlc, amlt\n # create different variants of the annotations:\n # Note: double also includes half as does triple third, respectively\n sequences = variations(annotations, offbeat=offbeat, double=double,\n half=double, triple=triple, third=triple)\n # evaluate these metrical variants\n for sequence in sequences:\n # if other metrical levels achieve higher accuracies, take these values\n try:\n # Note: catch the IntervalError here, because the beat variants\n # could be too short for valid interval calculation;\n # ok, since we already have valid values for amlc & amlt\n c, t = cml(detections, sequence, tempo_tolerance, phase_tolerance)\n except BeatIntervalError:\n c, t = np.nan, np.nan\n amlc = max(amlc, c)\n amlt = max(amlt, t)\n # return a tuple\n return cmlc, cmlt, amlc, amlt\n\n\ndef _histogram_bins(num_bins):\n \"\"\"\n Helper function to generate the histogram bins used to calculate the error\n histogram of the information gain.\n\n Parameters\n ----------\n num_bins : int\n Number of histogram bins.\n Returns\n -------\n numpy array\n Histogram bin edges.\n\n Notes\n -----\n This functions returns the bin edges for a histogram with one more bin than\n the requested number of bins, because the fist and last bins are added\n together (to make the histogram circular) later on. Because of the same\n reason, the first and the last bin are only half as wide as the others.\n\n \"\"\"\n # allow only even numbers and require at least 2 bins\n if num_bins % 2 != 0 or num_bins < 2:\n # Note: because of the implementation details of the histogram, the\n # easiest way to make sure the an error of 0 is always mapped\n # to the centre bin is to enforce an even number of bins\n raise ValueError(\"Number of error histogram bins must be even and \"\n \"greater than 0\")\n # since np.histogram accepts a sequence of bin edges we just increase the\n # number of bins by 1, but we need to apply offset\n offset = 0.5 / num_bins\n # because the histogram is made circular by adding the last bin to the\n # first one before being removed, increase the number of bins by 2\n return np.linspace(-0.5 - offset, 0.5 + offset, num_bins + 2)\n\n\ndef _error_histogram(detections, annotations, histogram_bins):\n \"\"\"\n Helper function to calculate the relative errors of the given detections\n and annotations and map them to an histogram with the given bins edges.\n\n Parameters\n ----------\n detections : list or numpy array\n Detected beats.\n annotations : list or numpy array\n Annotated beats.\n histogram_bins : numpy array\n Beat error histogram bin edges.\n\n Returns\n -------\n error_histogram : numpy array\n Beat error histogram.\n\n Notes\n -----\n The returned error histogram is circular, i.e. 
it contains 1 bin less than\n a histogram built normally with the given histogram bin edges. The values\n of the last and first bin are summed and mapped to the first bin.\n\n \"\"\"\n # get the relative errors of the detections to the annotations\n errors = calc_relative_errors(detections, annotations)\n # map the relative beat errors to the range of -0.5..0.5\n errors = np.mod(errors + 0.5, -1) + 0.5\n # get bin counts for the given errors over the distribution\n histogram = np.histogram(errors, histogram_bins)[0].astype(np.float)\n # make the histogram circular by adding the last bin to the first one\n histogram[0] += histogram[-1]\n # return the histogram without the last bin\n return histogram[:-1]\n\n\ndef _entropy(error_histogram):\n \"\"\"\n Helper function to calculate the entropy of the given error histogram.\n\n Parameters\n ----------\n error_histogram : numpy array\n Error histogram.\n\n Returns\n -------\n entropy : float\n Entropy of the error histogram.\n\n \"\"\"\n # copy the error_histogram, because it must not be altered\n histogram = np.copy(error_histogram).astype(np.float)\n # normalize the histogram\n histogram /= np.sum(histogram)\n # set all 0 values to 1 to make entropy calculation well-behaved\n histogram[histogram == 0] = 1.\n # calculate entropy\n return - np.sum(histogram * np.log2(histogram))\n\n\ndef _information_gain(error_histogram):\n \"\"\"\n Helper function to calculate the information gain of the given error\n histogram.\n\n Parameters\n ----------\n error_histogram : numpy array\n Error histogram.\n\n Returns\n -------\n information_gain : float\n Information gain.\n\n \"\"\"\n # calculate the entropy of th error histogram\n if np.asarray(error_histogram).any():\n entropy = _entropy(error_histogram)\n else:\n # an empty error histogram has an entropy of 0\n entropy = 0.\n # return information gain\n return np.log2(len(error_histogram)) - entropy\n\n\n@array\ndef information_gain(detections, annotations, num_bins=INFORMATION_GAIN_BINS):\n \"\"\"\n Calculate information gain for the given detections and annotations.\n\n Parameters\n ----------\n detections : list or numpy array\n Detected beats.\n annotations : list or numpy array\n Annotated beats.\n num_bins : int, optional\n Number of bins for the beat error histogram.\n\n Returns\n -------\n information_gain : float\n Information gain.\n error_histogram : numpy array\n Error histogram.\n\n References\n ----------\n .. [1] M. E.P. Davies, N. Degara and M. D. Plumbley,\n \"Measuring the performance of beat tracking algorithms algorithms\n using a beat error histogram\",\n IEEE Signal Processing Letters, vol. 18, vo. 3, 2011.\n\n \"\"\"\n # neither detections nor annotations are given, perfect score\n if len(detections) == 0 and len(annotations) == 0:\n # return a max. 
information gain and an empty error histogram\n return np.log2(num_bins), np.zeros(num_bins)\n # either beat detections or annotations are empty, score 0\n # Note: use \"or\" here since we test both the detections against the\n # annotations and vice versa during the evaluation process\n if len(detections) <= 1 or len(annotations) <= 1:\n # return an information gain of 0 and a uniform beat error histogram\n # Note: because swapped detections and annotations should return the\n # same uniform histogram, the maximum length of the detections\n # and annotations is chosen (instead of just the length of the\n # annotations as in the Matlab implementation).\n max_length = max(len(detections), len(annotations))\n return 0., np.ones(num_bins) * max_length / float(num_bins)\n # at least 2 annotations must be given to calculate an interval\n if len(detections) < 2 or len(annotations) < 2:\n raise BeatIntervalError(\"At least 2 annotations and 2 detections are\"\n \"needed for Information gain.\")\n # check if there are enough beat annotations for the number of bins\n if num_bins > len(annotations):\n warnings.warn(\"Not enough beat annotations (%d) for %d histogram bins.\"\n % (len(annotations), num_bins))\n # create bins edges for the error histogram\n histogram_bins = _histogram_bins(num_bins)\n # evaluate detections against annotations\n fwd_histogram = _error_histogram(detections, annotations, histogram_bins)\n fwd_ig = _information_gain(fwd_histogram)\n # if only a few (but correct) beats are detected, the errors could be small\n # thus evaluate also the annotations against the detections, i.e. simulate\n # a lot of false positive detections\n bwd_histogram = _error_histogram(annotations, detections, histogram_bins)\n bwd_ig = _information_gain(bwd_histogram)\n # only use the lower information gain\n if fwd_ig < bwd_ig:\n return fwd_ig, fwd_histogram\n return bwd_ig, bwd_histogram\n\n\n# human readable output\ndef tostring(obj):\n \"\"\"\n Format the evaluation metrics as a human readable string.\n\n Returns\n -------\n str\n Evaluation metrics formatted as a human readable string.\n\n \"\"\"\n ret = ''\n if obj.name is not None:\n ret += '%s\\n ' % obj.name\n ret += 'F-measure: %.3f P-score: %.3f Cemgil: %.3f Goto: %.3f CMLc: ' \\\n '%.3f CMLt: %.3f AMLc: %.3f AMLt: %.3f D: %.3f Dg: %.3f' % \\\n (obj.fmeasure, obj.pscore, obj.cemgil, obj.goto, obj.cmlc,\n obj.cmlt, obj.amlc, obj.amlt, obj.information_gain,\n obj.global_information_gain)\n return ret\n\n\n# beat evaluation class\nclass BeatEvaluation(OnsetEvaluation):\n # this class inherits from OnsetEvaluation the Precision, Recall, and\n # F-measure evaluation stuff but uses a different evaluation window\n \"\"\"\n Beat evaluation class.\n\n Parameters\n ----------\n detections : str, list or numpy array\n Detected beats.\n annotations : str, list or numpy array\n Annotated ground truth beats.\n fmeasure_window : float, optional\n F-measure evaluation window [seconds]\n pscore_tolerance : float, optional\n P-Score tolerance [fraction of the median beat interval].\n cemgil_sigma : float, optional\n Sigma of Gaussian window for Cemgil accuracy.\n goto_threshold : float, optional\n Threshold for Goto error.\n goto_sigma : float, optional\n Sigma for Goto error.\n goto_mu : float, optional\n Mu for Goto error.\n continuity_phase_tolerance : float, optional\n Continuity phase tolerance.\n continuity_tempo_tolerance : float, optional\n Continuity tempo tolerance.\n information_gain_bins : int, optional\n Number of bins for for the information 
gain beat error histogram.\n offbeat : bool, optional\n Include offbeat variation.\n double : bool, optional\n Include double and half tempo variations (and offbeat thereof).\n triple : bool, optional\n Include triple and third tempo variations (and offbeats thereof).\n skip : float, optional\n Skip the first `skip` seconds for evaluation.\n downbeats : bool, optional\n Evaluate downbeats instead of beats.\n\n Notes\n -----\n The `offbeat`, `double`, and `triple` variations of the beat sequences are\n used only for AMLc/AMLt.\n\n \"\"\"\n METRIC_NAMES = [\n ('fmeasure', 'F-measure'),\n ('pscore', 'P-score'),\n ('cemgil', 'Cemgil'),\n ('goto', 'Goto'),\n ('cmlc', 'CMLc'),\n ('cmlt', 'CMLt'),\n ('amlc', 'AMLc'),\n ('amlt', 'AMLt'),\n ('information_gain', 'D'),\n ('global_information_gain', 'Dg')\n ]\n\n def __init__(self, detections, annotations,\n fmeasure_window=FMEASURE_WINDOW,\n pscore_tolerance=PSCORE_TOLERANCE,\n cemgil_sigma=CEMGIL_SIGMA, goto_threshold=GOTO_THRESHOLD,\n goto_sigma=GOTO_SIGMA, goto_mu=GOTO_MU,\n continuity_phase_tolerance=CONTINUITY_PHASE_TOLERANCE,\n continuity_tempo_tolerance=CONTINUITY_TEMPO_TOLERANCE,\n information_gain_bins=INFORMATION_GAIN_BINS,\n offbeat=True, double=True, triple=True, skip=0,\n downbeats=False, **kwargs):\n # convert to numpy array\n detections = np.array(detections, dtype=np.float, ndmin=1)\n annotations = np.array(annotations, dtype=np.float, ndmin=1)\n # use only the first column (i.e. the time stamp) or extract the\n # downbeats if these are 2D\n if detections.ndim > 1:\n if downbeats:\n detections = detections[detections[:, 1] == 1][:, 0]\n else:\n detections = detections[:, 0]\n if annotations.ndim > 1:\n if downbeats:\n annotations = annotations[annotations[:, 1] == 1][:, 0]\n else:\n annotations = annotations[:, 0]\n # sort them\n detections = np.sort(detections)\n annotations = np.sort(annotations)\n # remove detections and annotations that are within the first N seconds\n # Note: skipping the first few seconds alters the results!\n if skip > 0:\n start_idx = np.searchsorted(detections, skip, 'right')\n detections = detections[start_idx:]\n start_idx = np.searchsorted(annotations, skip, 'right')\n annotations = annotations[start_idx:]\n\n # perform onset evaluation with the appropriate fmeasure_window\n super(BeatEvaluation, self).__init__(detections, annotations,\n window=fmeasure_window, **kwargs)\n # other scores\n self.pscore = pscore(detections, annotations, pscore_tolerance)\n self.cemgil = cemgil(detections, annotations, cemgil_sigma)\n self.goto = goto(detections, annotations, goto_threshold,\n goto_sigma, goto_mu)\n # continuity scores\n scores = continuity(detections, annotations,\n continuity_tempo_tolerance,\n continuity_phase_tolerance,\n offbeat, double, triple)\n self.cmlc, self.cmlt, self.amlc, self.amlt = scores\n # information gain stuff\n scores = information_gain(detections, annotations,\n information_gain_bins)\n self.information_gain, self.error_histogram = scores\n\n @property\n def global_information_gain(self):\n \"\"\"Global information gain.\"\"\"\n # Note: if only 1 file is evaluated, it is the same as information gain\n return self.information_gain\n\n def tostring(self, **kwargs):\n return tostring(self)\n\n\nclass BeatMeanEvaluation(MeanEvaluation):\n \"\"\"\n Class for averaging beat evaluation scores.\n\n \"\"\"\n METRIC_NAMES = BeatEvaluation.METRIC_NAMES\n\n @property\n def fmeasure(self):\n \"\"\"F-measure.\"\"\"\n return np.nanmean([e.fmeasure for e in self.eval_objects])\n\n @property\n def 
pscore(self):\n \"\"\"P-score.\"\"\"\n return np.nanmean([e.pscore for e in self.eval_objects])\n\n @property\n def cemgil(self):\n \"\"\"Cemgil accuracy.\"\"\"\n return np.nanmean([e.cemgil for e in self.eval_objects])\n\n @property\n def goto(self):\n \"\"\"Goto accuracy.\"\"\"\n return np.nanmean([e.goto for e in self.eval_objects])\n\n @property\n def cmlc(self):\n \"\"\"CMLc.\"\"\"\n return np.nanmean([e.cmlc for e in self.eval_objects])\n\n @property\n def cmlt(self):\n \"\"\"CMLt.\"\"\"\n return np.nanmean([e.cmlt for e in self.eval_objects])\n\n @property\n def amlc(self):\n \"\"\"AMLc.\"\"\"\n return np.nanmean([e.amlc for e in self.eval_objects])\n\n @property\n def amlt(self):\n \"\"\"AMLt.\"\"\"\n return np.nanmean([e.amlt for e in self.eval_objects])\n\n @property\n def information_gain(self):\n \"\"\"Information gain.\"\"\"\n return np.nanmean([e.information_gain for e in self.eval_objects])\n\n @property\n def error_histogram(self):\n \"\"\"Error histogram.\"\"\"\n if not self.eval_objects:\n # return an empty error histogram of length 0\n return np.zeros(0)\n # sum all error histograms to gather a global one\n return np.sum([e.error_histogram for e in self.eval_objects], axis=0)\n\n @property\n def global_information_gain(self):\n \"\"\"Global information gain.\"\"\"\n if len(self.error_histogram) == 0:\n # if the error histogram has length 0, the information gain is 0\n return 0.\n # calculate the information gain from the (global) error histogram\n return _information_gain(self.error_histogram)\n\n def tostring(self, **kwargs):\n return tostring(self)\n\n\ndef add_parser(parser):\n \"\"\"\n Add a beat evaluation sub-parser to an existing parser.\n\n Parameters\n ----------\n parser : argparse parser instance\n Existing argparse parser object.\n\n Returns\n -------\n sub_parser : argparse sub-parser instance\n Beat evaluation sub-parser.\n parser_group : argparse argument group\n Beat evaluation argument group.\n\n \"\"\"\n import argparse\n # add beat evaluation sub-parser to the existing parser\n p = parser.add_parser(\n 'beats', help='beat evaluation',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\n This program evaluates pairs of files containing the beat annotations and\n detections. Suffixes can be given to filter them from the list of files.\n\n Each line represents a beat and must have the following format with values\n being separated by whitespace [brackets indicate optional values]:\n `beat_time [beat_inside_bar]`\n\n Lines starting with # are treated as comments and are ignored.\n\n To maintain compatibility with the original Matlab implementation, use the\n arguments '--skip 5 --no_triple'. 
Please note, that the results can still\n differ, because of the different implementation approach.\n\n ''')\n # set defaults\n p.set_defaults(eval=BeatEvaluation, sum_eval=None,\n mean_eval=BeatMeanEvaluation, load_fn=load_beats)\n # file I/O\n evaluation_io(p, ann_suffix='.beats', det_suffix='.beats.txt')\n # parameters for sequence variants\n s = p.add_argument_group('sequence manipulation arguments')\n s.add_argument('--no_offbeat', dest='offbeat', action='store_false',\n help='do not include offbeat evaluation')\n s.add_argument('--no_double', dest='double', action='store_false',\n help='do not include double/half tempo evaluation')\n s.add_argument('--no_triple', dest='triple', action='store_false',\n help='do not include triple/third tempo evaluation')\n s.add_argument('--skip', action='store', type=float, default=0,\n help='skip first N seconds for evaluation '\n '[default=%(default).3f]')\n s.add_argument('--downbeats', action='store_true',\n help='evaluate only downbeats')\n # evaluation parameters\n g = p.add_argument_group('beat evaluation arguments')\n g.add_argument('--window', dest='fmeasure_window', action='store',\n type=float, default=FMEASURE_WINDOW,\n help='evaluation window for F-measure '\n '[seconds, default=%(default).3f]')\n g.add_argument('--tolerance', dest='pscore_tolerance', action='store',\n type=float, default=PSCORE_TOLERANCE,\n help='evaluation tolerance for P-score '\n '[default=%(default).3f]')\n g.add_argument('--sigma', dest='cemgil_sigma', action='store', type=float,\n default=CEMGIL_SIGMA,\n help='sigma for Cemgil accuracy [default=%(default).3f]')\n g.add_argument('--goto_threshold', action='store', type=float,\n default=GOTO_THRESHOLD,\n help='threshold for Goto error [default=%(default).3f]')\n g.add_argument('--goto_sigma', action='store', type=float,\n default=GOTO_SIGMA,\n help='sigma for Goto error [default=%(default).3f]')\n g.add_argument('--goto_mu', action='store', type=float,\n default=GOTO_MU,\n help='µ for Goto error [default=%(default).3f]')\n g.add_argument('--phase_tolerance', dest='continuity_phase_tolerance',\n action='store', type=float,\n default=CONTINUITY_PHASE_TOLERANCE,\n help='phase tolerance window for continuity accuracies '\n '[default=%(default).3f]')\n g.add_argument('--tempo_tolerance', dest='continuity_tempo_tolerance',\n action='store', type=float,\n default=CONTINUITY_TEMPO_TOLERANCE,\n help='tempo tolerance window for continuity accuracies '\n '[default=%(default).3f]')\n g.add_argument('--bins', dest='information_gain_bins', action='store',\n type=int, default=INFORMATION_GAIN_BINS,\n help='number of histogram bins for information gain '\n '[default=%(default)i]')\n # return the sub-parser and evaluation argument group\n return p, g\n" ]
[ [ "pandas.Series", "numpy.nonzero", "numpy.min", "numpy.asarray", "numpy.matmul", "numpy.linalg.norm", "pandas.DataFrame", "numpy.sin", "numpy.concatenate", "numpy.max", "numpy.std", "numpy.cos", "numpy.mean", "numpy.diff", "scipy.ndimage.filters.gaussian_filter1d", "numpy.array", "numpy.zeros", "numpy.vstack" ], [ "numpy.linspace", "numpy.asarray", "numpy.max", "numpy.zeros_like", "numpy.nanmean", "numpy.searchsorted", "numpy.exp", "numpy.histogram", "numpy.ones_like", "numpy.intersect1d", "numpy.copy", "numpy.std", "numpy.diff", "numpy.interp", "numpy.argmax", "numpy.zeros", "numpy.array", "numpy.sum", "numpy.log2", "numpy.abs", "numpy.sort", "numpy.ones", "numpy.mod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
techthiyanes/openspeech
[ "10307587f08615224df5a868fb5249c68c70b12d" ]
[ "openspeech/search/beam_search_rnn_transducer.py" ]
[ "# MIT License\n#\n# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport torch\n\nfrom openspeech.search.beam_search_base import OpenspeechBeamSearchBase\nfrom openspeech.decoders import RNNTransducerDecoder\n\n\nclass BeamSearchRNNTransducer(OpenspeechBeamSearchBase):\n r\"\"\"\n RNN Transducer Beam Search\n Reference: RNN-T FOR LATENCY CONTROLLED ASR WITH IMPROVED BEAM SEARCH (https://arxiv.org/pdf/1911.01629.pdf)\n\n Args: joint, decoder, beam_size, expand_beam, state_beam, blank_id\n joint: joint `encoder_outputs` and `decoder_outputs`\n decoder (TransformerTransducerDecoder): base decoder of transformer transducer model.\n beam_size (int): size of beam.\n expand_beam (int): The threshold coefficient to limit the number of expanded hypotheses.\n state_beam (int): The threshold coefficient to decide if hyps in A (process_hyps)\n is likely to compete with hyps in B (ongoing_beams)\n blank_id (int): blank id\n\n Inputs: encoder_output, max_length\n encoder_output (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size\n ``(seq_length, dimension)``\n max_length (int): max decoding time step\n\n Returns:\n * predictions (torch.LongTensor): model predictions.\n \"\"\"\n def __init__(\n self,\n joint,\n decoder: RNNTransducerDecoder,\n beam_size: int = 3,\n expand_beam: float = 2.3,\n state_beam: float = 4.6,\n blank_id: int = 3,\n ) -> None:\n super(BeamSearchRNNTransducer, self).__init__(decoder, beam_size)\n self.joint = joint\n self.expand_beam = expand_beam\n self.state_beam = state_beam\n self.blank_id = blank_id\n\n def forward(self, encoder_outputs: torch.Tensor, max_length: int):\n r\"\"\"\n Beam search decoding.\n\n Inputs: encoder_output, max_length\n encoder_outputs (torch.FloatTensor): A output sequence of encoders. 
`FloatTensor` of size\n ``(batch, seq_length, dimension)``\n max_length (int): max decoding time step\n\n Returns:\n * predictions (torch.LongTensor): model predictions.\n \"\"\"\n hypothesis = list()\n hypothesis_score = list()\n\n for batch_idx in range(encoder_outputs.size(0)):\n blank = (\n torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.blank_id\n )\n step_input = (\n torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.sos_id\n )\n hyp = {\n \"prediction\": [self.sos_id],\n \"logp_score\": 0.0,\n \"hidden_states\": None,\n }\n ongoing_beams = [hyp]\n\n for t_step in range(max_length):\n process_hyps = ongoing_beams\n ongoing_beams = list()\n\n while True:\n if len(ongoing_beams) >= self.beam_size:\n break\n\n a_best_hyp = max(process_hyps, key=lambda x: x[\"logp_score\"] / len(x[\"prediction\"]))\n\n if len(ongoing_beams) > 0:\n b_best_hyp = max(\n ongoing_beams,\n key=lambda x: x[\"logp_score\"] / len(x[\"prediction\"]),\n )\n\n a_best_prob = a_best_hyp[\"logp_score\"]\n b_best_prob = b_best_hyp[\"logp_score\"]\n\n if b_best_prob >= self.state_beam + a_best_prob:\n break\n\n process_hyps.remove(a_best_hyp)\n\n step_input[0, 0] = a_best_hyp[\"prediction\"][-1]\n\n step_outputs, hidden_states = self.decoder(step_input, a_best_hyp[\"hidden_states\"])\n log_probs = self.joint(encoder_outputs[batch_idx, t_step, :], step_outputs.view(-1))\n\n topk_targets, topk_idx = log_probs.topk(k=self.beam_size)\n\n if topk_idx[0] != blank:\n best_logp = topk_targets[0]\n else:\n best_logp = topk_targets[1]\n\n for j in range(topk_targets.size(0)):\n topk_hyp = {\n \"prediction\": a_best_hyp[\"prediction\"][:],\n \"logp_score\": a_best_hyp[\"logp_score\"] + topk_targets[j],\n \"hidden_states\": a_best_hyp[\"hidden_states\"],\n }\n\n if topk_idx[j] == self.blank_id:\n ongoing_beams.append(topk_hyp)\n continue\n\n if topk_targets[j] >= best_logp - self.expand_beam:\n topk_hyp[\"prediction\"].append(topk_idx[j].item())\n topk_hyp[\"hidden_states\"] = hidden_states\n process_hyps.append(topk_hyp)\n\n ongoing_beams = sorted(\n ongoing_beams,\n key=lambda x: x[\"logp_score\"] / len(x[\"prediction\"]),\n reverse=True,\n )[0]\n\n hypothesis.append(torch.LongTensor(ongoing_beams[\"prediction\"][1:]))\n hypothesis_score.append(ongoing_beams[\"logp_score\"] / len(ongoing_beams[\"prediction\"]))\n\n return self._fill_sequence(hypothesis)" ]
[ [ "torch.LongTensor", "torch.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]