repo_name (string, length 8-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
icyyy/information_value | [
"8d71ab742e285f452e1dc0dc7952a924a27167ec"
] | [
"src/information_value.py"
] | [
"import numpy as np\nimport math\nfrom scipy import stats\nfrom sklearn.utils.multiclass import type_of_target\n\nclass WOE:\n def __init__(self):\n self._WOE_MIN = -20\n self._WOE_MAX = 20\n\n def woe(self, X, y, event=1):\n '''\n Calculate woe of each feature category and information value\n :param X: 2-D numpy array explanatory features which should be discreted already\n :param y: 1-D numpy array target variable which should be binary\n :param event: value of binary stands for the event to predict\n :return: numpy array of woe dictionaries, each dictionary contains woe values for categories of each feature\n numpy array of information value of each feature\n '''\n self.check_target_binary(y)\n X1 = self.feature_discretion(X)\n\n res_woe = []\n res_iv = []\n for i in range(0, X1.shape[-1]):\n x = X1[:, i]\n woe_dict, iv1 = self.woe_single_x(x, y, event)\n res_woe.append(woe_dict)\n res_iv.append(iv1)\n return np.array(res_woe), np.array(res_iv)\n\n def woe_single_x(self, x, y, event=1):\n '''\n calculate woe and information for a single feature\n :param x: 1-D numpy starnds for single feature\n :param y: 1-D numpy array target variable\n :param event: value of binary stands for the event to predict\n :return: dictionary contains woe values for categories of this feature\n information value of this feature\n '''\n self.check_target_binary(y)\n\n event_total, non_event_total = self.count_binary(y, event=event)\n x_labels = np.unique(x)\n woe_dict = {}\n iv = 0\n for x1 in x_labels:\n y1 = y[np.where(x == x1)[0]]\n event_count, non_event_count = self.count_binary(y1, event=event)\n rate_event = 1.0 * event_count / event_total\n rate_non_event = 1.0 * non_event_count / non_event_total\n if rate_event == 0:\n woe1 = self._WOE_MIN\n elif rate_non_event == 0:\n woe1 = self._WOE_MAX\n else:\n woe1 = math.log(rate_event / rate_non_event)\n woe_dict[x1] = woe1\n iv += (rate_event - rate_non_event) * woe1\n return woe_dict, iv\n\n def woe_replace(self, X, woe_arr):\n '''\n replace the explanatory feature categories with its woe value\n :param X: 2-D numpy array explanatory features which should be discreted already\n :param woe_arr: numpy array of woe dictionaries, each dictionary contains woe values for categories of each feature\n :return: the new numpy array in which woe values filled\n '''\n if X.shape[-1] != woe_arr.shape[-1]:\n raise ValueError('WOE dict array length must be equal with features length')\n\n res = np.copy(X).astype(float)\n idx = 0\n for woe_dict in woe_arr:\n for k in woe_dict.keys():\n woe = woe_dict[k]\n res[:, idx][np.where(res[:, idx] == k)[0]] = woe * 1.0\n idx += 1\n\n return res\n\n def combined_iv(self, X, y, masks, event=1):\n '''\n calcute the information vlaue of combination features\n :param X: 2-D numpy array explanatory features which should be discreted already\n :param y: 1-D numpy array target variable\n :param masks: 1-D numpy array of masks stands for which features are included in combination,\n e.g. 
np.array([0,0,1,1,1,0,0,0,0,0,1]), the length should be same as features length\n :param event: value of binary stands for the event to predict\n :return: woe dictionary and information value of combined features\n '''\n if masks.shape[-1] != X.shape[-1]:\n raise ValueError('Masks array length must be equal with features length')\n\n x = X[:, np.where(masks == 1)[0]]\n tmp = []\n for i in range(x.shape[0]):\n tmp.append(self.combine(x[i, :]))\n\n dumy = np.array(tmp)\n # dumy_labels = np.unique(dumy)\n woe, iv = self.woe_single_x(dumy, y, event)\n return woe, iv\n\n def combine(self, list):\n res = ''\n for item in list:\n res += str(item)\n return res\n\n def count_binary(self, a, event=1):\n event_count = (a == event).sum()\n non_event_count = a.shape[-1] - event_count\n return event_count, non_event_count\n\n def check_target_binary(self, y):\n '''\n check if the target variable is binary, raise error if not.\n :param y:\n :return:\n '''\n y_type = type_of_target(y)\n if y_type not in ['binary']:\n raise ValueError('Label type must be binary')\n\n def feature_discretion(self, X):\n '''\n Discrete the continuous features of input data X, and keep other features unchanged.\n :param X : numpy array\n :return: the numpy array in which all continuous features are discreted\n '''\n temp = []\n for i in range(0, X.shape[-1]):\n x = X[:, i]\n x_type = type_of_target(x)\n if x_type == 'continuous':\n x1 = self.discrete(x)\n temp.append(x1)\n else:\n temp.append(x)\n return np.array(temp).T\n\n def discrete(self, x):\n '''\n Discrete the input 1-D numpy array using 5 equal percentiles\n :param x: 1-D numpy array\n :return: discreted 1-D numpy array\n '''\n res = np.array([0] * x.shape[-1], dtype=int)\n for i in range(5):\n point1 = stats.scoreatpercentile(x, i * 20)\n point2 = stats.scoreatpercentile(x, (i + 1) * 20)\n x1 = x[np.where((x >= point1) & (x <= point2))]\n mask = np.in1d(x, x1)\n res[mask] = (i + 1)\n return res\n\n @property\n def WOE_MIN(self):\n return self._WOE_MIN\n @WOE_MIN.setter\n def WOE_MIN(self, woe_min):\n self._WOE_MIN = woe_min\n @property\n def WOE_MAX(self):\n return self._WOE_MAX\n @WOE_MAX.setter\n def WOE_MAX(self, woe_max):\n self._WOE_MAX = woe_max\n"
] | [
[
"numpy.in1d",
"numpy.copy",
"scipy.stats.scoreatpercentile",
"numpy.array",
"numpy.where",
"numpy.unique",
"sklearn.utils.multiclass.type_of_target"
]
] |
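
The first record above is a complete weight-of-evidence (WOE) and information-value (IV) implementation. For orientation, here is a minimal usage sketch; it assumes the listed file is importable as `information_value` and uses small illustrative data, neither of which comes from the record itself.

```python
# Minimal usage sketch for the WOE class in the first record.
# Assumption: src/information_value.py is importable as `information_value`.
import numpy as np
from information_value import WOE

# Toy inputs (illustrative only): two already-discretized features, binary target.
X = np.array([
    [0, 1],
    [0, 0],
    [1, 1],
    [1, 0],
    [2, 1],
    [2, 0],
])
y = np.array([1, 0, 1, 0, 1, 1])

woe = WOE()
woe_dicts, ivs = woe.woe(X, y, event=1)
print(woe_dicts)  # one {category: WOE value} dict per feature
print(ivs)        # information value of each feature
```
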
kylejn27/dask | [
"3327b2e158dbadf2057685fdb51b74ce3129416e"
] | [
"dask/array/core.py"
] | [
"import math\nimport operator\nimport os\nimport pickle\nimport re\nimport sys\nimport traceback\nimport uuid\nimport warnings\nfrom bisect import bisect\nfrom collections.abc import Iterable, Iterator, Mapping\nfrom functools import partial, wraps, reduce\nfrom itertools import product, zip_longest\nfrom numbers import Number, Integral\nfrom operator import add, getitem, mul\nfrom threading import Lock\n\nfrom tlz import partition, concat, first, groupby, accumulate, frequencies\nfrom tlz.curried import pluck\nimport numpy as np\n\nfrom . import chunk\nfrom .. import config, compute\nfrom ..base import (\n DaskMethodsMixin,\n tokenize,\n dont_optimize,\n compute_as_if_collection,\n persist,\n is_dask_collection,\n)\nfrom ..blockwise import broadcast_dimensions\nfrom ..context import globalmethod\nfrom ..utils import (\n ndeepmap,\n ignoring,\n concrete,\n derived_from,\n is_integer,\n IndexCallable,\n funcname,\n SerializableLock,\n Dispatch,\n factors,\n parse_bytes,\n has_keyword,\n M,\n ndimlist,\n format_bytes,\n typename,\n)\nfrom ..core import quote\nfrom ..delayed import delayed, Delayed\nfrom .. import threaded, core\nfrom ..sizeof import sizeof\nfrom ..highlevelgraph import HighLevelGraph\nfrom .numpy_compat import _Recurser, _make_sliced_dtype\nfrom .slicing import slice_array, replace_ellipsis, cached_cumsum\nfrom .blockwise import blockwise\nfrom .chunk_types import is_valid_array_chunk, is_valid_chunk_type\n\n\nconfig.update_defaults({\"array\": {\"chunk-size\": \"128MiB\", \"rechunk-threshold\": 4}})\n\n\nconcatenate_lookup = Dispatch(\"concatenate\")\ntensordot_lookup = Dispatch(\"tensordot\")\neinsum_lookup = Dispatch(\"einsum\")\nconcatenate_lookup.register((object, np.ndarray), np.concatenate)\ntensordot_lookup.register((object, np.ndarray), np.tensordot)\neinsum_lookup.register((object, np.ndarray), np.einsum)\n\nunknown_chunk_message = (\n \"\\n\\n\"\n \"A possible solution: \"\n \"https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\\n\"\n \"Summary: to compute chunks sizes, use\\n\\n\"\n \" x.compute_chunk_sizes() # for Dask Array `x`\\n\"\n \" ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`\"\n)\n\n\nclass PerformanceWarning(Warning):\n \"\"\" A warning given when bad chunking may cause poor performance \"\"\"\n\n\ndef getter(a, b, asarray=True, lock=None):\n if isinstance(b, tuple) and any(x is None for x in b):\n b2 = tuple(x for x in b if x is not None)\n b3 = tuple(\n None if x is None else slice(None, None)\n for x in b\n if not isinstance(x, Integral)\n )\n return getter(a, b2, asarray=asarray, lock=lock)[b3]\n\n if lock:\n lock.acquire()\n try:\n c = a[b]\n if asarray:\n c = np.asarray(c)\n finally:\n if lock:\n lock.release()\n return c\n\n\ndef getter_nofancy(a, b, asarray=True, lock=None):\n \"\"\"A simple wrapper around ``getter``.\n\n Used to indicate to the optimization passes that the backend doesn't\n support fancy indexing.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)\n\n\ndef getter_inline(a, b, asarray=True, lock=None):\n \"\"\"A getter function that optimizations feel comfortable inlining\n\n Slicing operations with this function may be inlined into a graph, such as\n in the following rewrite\n\n **Before**\n\n >>> a = x[:10] # doctest: +SKIP\n >>> b = a + 1 # doctest: +SKIP\n >>> c = a * 2 # doctest: +SKIP\n\n **After**\n\n >>> b = x[:10] + 1 # doctest: +SKIP\n >>> c = x[:10] * 2 # doctest: +SKIP\n\n This inlining can be relevant to operations when running off of disk.\n \"\"\"\n return getter(a, b, 
asarray=asarray, lock=lock)\n\n\nfrom .optimization import optimize, fuse_slice\n\n\n# __array_function__ dict for mapping aliases and mismatching names\n_HANDLED_FUNCTIONS = {}\n\n\ndef implements(*numpy_functions):\n \"\"\"Register an __array_function__ implementation for dask.array.Array\n\n Register that a function implements the API of a NumPy function (or several\n NumPy functions in case of aliases) which is handled with\n ``__array_function__``.\n\n Parameters\n ----------\n \\\\*numpy_functions : callables\n One or more NumPy functions that are handled by ``__array_function__``\n and will be mapped by `implements` to a `dask.array` function.\n \"\"\"\n\n def decorator(dask_func):\n for numpy_function in numpy_functions:\n _HANDLED_FUNCTIONS[numpy_function] = dask_func\n\n return dask_func\n\n return decorator\n\n\ndef check_if_handled_given_other(f):\n \"\"\"Check if method is handled by Dask given type of other\n\n Ensures proper deferral to upcast types in dunder operations without\n assuming unknown types are automatically downcast types.\n \"\"\"\n\n @wraps(f)\n def wrapper(self, other):\n if (\n is_valid_array_chunk(other)\n or isinstance(other, (self.__class__, list, tuple, np.generic))\n or \"dask.dataframe.core.Scalar\" in str(other.__class__)\n ):\n return f(self, other)\n else:\n return NotImplemented\n\n return wrapper\n\n\ndef slices_from_chunks(chunks):\n \"\"\"Translate chunks tuple to a set of slices in product order\n\n >>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE\n [(slice(0, 2, None), slice(0, 3, None)),\n (slice(0, 2, None), slice(3, 6, None)),\n (slice(0, 2, None), slice(6, 9, None)),\n (slice(2, 4, None), slice(0, 3, None)),\n (slice(2, 4, None), slice(3, 6, None)),\n (slice(2, 4, None), slice(6, 9, None))]\n \"\"\"\n cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]\n slices = [\n [slice(s, s + dim) for s, dim in zip(starts, shapes)]\n for starts, shapes in zip(cumdims, chunks)\n ]\n return list(product(*slices))\n\n\ndef getem(\n arr,\n chunks,\n getitem=getter,\n shape=None,\n out_name=None,\n lock=False,\n asarray=True,\n dtype=None,\n):\n \"\"\"Dask getting various chunks from an array-like\n\n >>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n\n >>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n \"\"\"\n out_name = out_name or arr\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n keys = product([out_name], *(range(len(bds)) for bds in chunks))\n slices = slices_from_chunks(chunks)\n\n if (\n has_keyword(getitem, \"asarray\")\n and has_keyword(getitem, \"lock\")\n and (not asarray or lock)\n ):\n values = [(getitem, arr, x, asarray, lock) for x in slices]\n else:\n # Common case, drop extra parameters\n values = [(getitem, arr, x) for x in slices]\n\n return dict(zip(keys, values))\n\n\ndef dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):\n \"\"\"Dot product of many aligned chunks\n\n >>> x = np.array([[1, 2], [1, 2]])\n >>> y = np.array([[10, 20], [10, 20]])\n >>> dotmany([x, x, x], [y, y, y])\n array([[ 90, 
180],\n [ 90, 180]])\n\n Optionally pass in functions to apply to the left and right chunks\n\n >>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)\n array([[150, 150],\n [150, 150]])\n \"\"\"\n if leftfunc:\n A = map(leftfunc, A)\n if rightfunc:\n B = map(rightfunc, B)\n return sum(map(partial(np.dot, **kwargs), A, B))\n\n\ndef _concatenate2(arrays, axes=[]):\n \"\"\"Recursively Concatenate nested lists of arrays along axes\n\n Each entry in axes corresponds to each level of the nested list. The\n length of axes should correspond to the level of nesting of arrays.\n If axes is an empty list or tuple, return arrays, or arrays[0] if\n arrays is a list.\n\n >>> x = np.array([[1, 2], [3, 4]])\n >>> _concatenate2([x, x], axes=[0])\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> _concatenate2([x, x], axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n >>> _concatenate2([[x, x], [x, x]], axes=[0, 1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4],\n [1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Supports Iterators\n >>> _concatenate2(iter([x, x]), axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Special Case\n >>> _concatenate2([x, x], axes=())\n array([[1, 2],\n [3, 4]])\n \"\"\"\n if axes == ():\n if isinstance(arrays, list):\n return arrays[0]\n else:\n return arrays\n\n if isinstance(arrays, Iterator):\n arrays = list(arrays)\n if not isinstance(arrays, (list, tuple)):\n return arrays\n if len(axes) > 1:\n arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]\n concatenate = concatenate_lookup.dispatch(\n type(max(arrays, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n return concatenate(arrays, axis=axes[0])\n\n\ndef apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=\"dtype\", nout=None):\n \"\"\"\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. Only attributes\n ``ndim`` and ``dtype`` are used.\n\n kwargs: dict\n Additional ``kwargs`` to the ``func``\n\n funcname: String\n Name of calling function to improve potential error messages\n\n suggest_dtype: None/False or String\n If not ``None`` adds suggestion to potential error message to specify a dtype\n via the specified kwarg. 
Defaults to ``'dtype'``.\n\n nout: None or Int\n ``None`` if function returns single output, integer if many.\n Deafults to ``None``.\n\n Returns\n -------\n : dtype or List of dtype\n One or many dtypes (depending on ``nout``)\n \"\"\"\n args = [\n np.ones((1,) * x.ndim, dtype=x.dtype) if isinstance(x, Array) else x\n for x in args\n ]\n try:\n with np.errstate(all=\"ignore\"):\n o = func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n suggest = (\n (\n \"Please specify the dtype explicitly using the \"\n \"`{dtype}` kwarg.\\n\\n\".format(dtype=suggest_dtype)\n )\n if suggest_dtype\n else \"\"\n )\n msg = (\n \"`dtype` inference failed in `{0}`.\\n\\n\"\n \"{1}\"\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n \"{2}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n \"{3}\"\n ).format(funcname, suggest, repr(e), tb)\n else:\n msg = None\n if msg is not None:\n raise ValueError(msg)\n return o.dtype if nout is None else tuple(e.dtype for e in o)\n\n\ndef normalize_arg(x):\n \"\"\"Normalize user provided arguments to blockwise or map_blocks\n\n We do a few things:\n\n 1. If they are string literals that might collide with blockwise_token then we\n quote them\n 2. IF they are large (as defined by sizeof) then we put them into the\n graph on their own by using dask.delayed\n \"\"\"\n if is_dask_collection(x):\n return x\n elif isinstance(x, str) and re.match(r\"_\\d+\", x):\n return delayed(x)\n elif isinstance(x, list) and len(x) >= 10:\n return delayed(x)\n elif sizeof(x) > 1e6:\n return delayed(x)\n else:\n return x\n\n\ndef _pass_extra_kwargs(func, keys, *args, **kwargs):\n \"\"\"Helper for :func:`map_blocks` to pass `block_info` or `block_id`.\n\n For each element of `keys`, a corresponding element of args is changed\n to a keyword argument with that key, before all arguments re passed on\n to `func`.\n \"\"\"\n kwargs.update(zip(keys, args))\n return func(*args[len(keys) :], **kwargs)\n\n\ndef map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n \"\"\"Map a function across all blocks of a dask array.\n\n Parameters\n ----------\n func : callable\n Function to apply to every block in the array.\n args : dask arrays or other objects\n dtype : np.dtype, optional\n The ``dtype`` of the output array. It is recommended to provide this.\n If not provided, will be inferred by applying the function to a small\n set of fake data.\n chunks : tuple, optional\n Chunk shape of resulting blocks if the function does not preserve\n shape. If not provided, the resulting array is assumed to have the same\n block structure as the first input array.\n drop_axis : number or iterable, optional\n Dimensions lost by the function.\n new_axis : number or iterable, optional\n New dimensions created by the function. Note that these are applied\n after ``drop_axis`` (if present).\n token : string, optional\n The key prefix to use for the output array. If not provided, will be\n determined from the function name.\n name : string, optional\n The key name to use for the output array. Note that this fully\n specifies the output key name, and must be unique. If not provided,\n will be determined by a hash of the arguments.\n **kwargs :\n Other keyword arguments to pass to function. 
Values must be constants\n (not dask.arrays)\n\n See Also\n --------\n dask.array.blockwise : Generalized operation with control over block alignment.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n\n >>> x.map_blocks(lambda x: x * 2).compute()\n array([ 0, 2, 4, 6, 8, 10])\n\n The ``da.map_blocks`` function can also accept multiple arrays.\n\n >>> d = da.arange(5, chunks=2)\n >>> e = da.arange(5, chunks=2)\n\n >>> f = map_blocks(lambda a, b: a + b**2, d, e)\n >>> f.compute()\n array([ 0, 2, 6, 12, 20])\n\n If the function changes shape of the blocks then you must provide chunks\n explicitly.\n\n >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))\n\n You have a bit of freedom in specifying chunks. If all of the output chunk\n sizes are the same, you can provide just that chunk size as a single tuple.\n\n >>> a = da.arange(18, chunks=(6,))\n >>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))\n\n If the function changes the dimension of the blocks you must specify the\n created or destroyed dimensions.\n\n >>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),\n ... new_axis=[0, 2])\n\n If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to\n add the necessary number of axes on the left.\n\n Map_blocks aligns blocks by block positions without regard to shape. In the\n following example we have two arrays with the same number of blocks but\n with different shape and chunk sizes.\n\n >>> x = da.arange(1000, chunks=(100,))\n >>> y = da.arange(100, chunks=(10,))\n\n The relevant attribute to match is numblocks.\n\n >>> x.numblocks\n (10,)\n >>> y.numblocks\n (10,)\n\n If these match (up to broadcasting rules) then we can map arbitrary\n functions across blocks\n\n >>> def func(a, b):\n ... return np.array([a.max(), b.max()])\n\n >>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')\n dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,\n 69, 799, 79, 899, 89, 999, 99])\n\n Your block function get information about where it is in the array by\n accepting a special ``block_info`` keyword argument.\n\n >>> def func(block, block_info=None):\n ... pass\n\n This will receive the following information:\n\n >>> block_info # doctest: +SKIP\n {0: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)]},\n None: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)],\n 'chunk-shape': (100,),\n 'dtype': dtype('float64')}}\n\n For each argument and keyword arguments that are dask arrays (the positions\n of which are the first index), you will receive the shape of the full\n array, the number of chunks of the full array in each dimension, the chunk\n location (for example the fourth chunk over in the first dimension), and\n the array location (for example the slice corresponding to ``40:50``). The\n same information is provided for the output, with the key ``None``, plus\n the shape and dtype that should be returned.\n\n These features can be combined to synthesize an array from scratch, for\n example:\n\n >>> def func(block_info=None):\n ... loc = block_info[None]['array-location'][0]\n ... 
return np.arange(loc[0], loc[1])\n\n >>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)\n dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([0, 1, 2, 3, 4, 5, 6, 7])\n\n You may specify the key name prefix of the resulting task in the graph with\n the optional ``token`` keyword argument.\n\n >>> x.map_blocks(lambda x: x + 1, name='increment') # doctest: +SKIP\n dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,), chunktype=numpy.ndarray>\n \"\"\"\n if not callable(func):\n msg = (\n \"First argument must be callable function, not %s\\n\"\n \"Usage: da.map_blocks(function, x)\\n\"\n \" or: da.map_blocks(function, x, y, z)\"\n )\n raise TypeError(msg % type(func).__name__)\n if token:\n warnings.warn(\"The token= keyword to map_blocks has been moved to name=\")\n name = token\n\n name = \"%s-%s\" % (name or funcname(func), tokenize(func, *args, **kwargs))\n new_axes = {}\n\n if isinstance(drop_axis, Number):\n drop_axis = [drop_axis]\n if isinstance(new_axis, Number):\n new_axis = [new_axis] # TODO: handle new_axis\n\n arrs = [a for a in args if isinstance(a, Array)]\n\n argpairs = [\n (a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)\n for a in args\n ]\n if arrs:\n out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]\n else:\n out_ind = ()\n\n original_kwargs = kwargs\n\n if dtype is None and meta is None:\n dtype = apply_infer_dtype(func, args, original_kwargs, \"map_blocks\")\n\n if drop_axis:\n out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)\n if new_axis is None and chunks is not None and len(out_ind) < len(chunks):\n new_axis = range(len(chunks) - len(out_ind))\n if new_axis:\n # new_axis = [x + len(drop_axis) for x in new_axis]\n out_ind = list(out_ind)\n for ax in sorted(new_axis):\n n = len(out_ind) + len(drop_axis)\n out_ind.insert(ax, n)\n if chunks is not None:\n new_axes[n] = chunks[ax]\n else:\n new_axes[n] = 1\n out_ind = tuple(out_ind)\n if max(new_axis) > max(out_ind):\n raise ValueError(\"New_axis values do not fill in all dimensions\")\n\n if chunks is not None:\n if len(chunks) != len(out_ind):\n raise ValueError(\n \"Provided chunks have {0} dims, expected {1} \"\n \"dims.\".format(len(chunks), len(out_ind))\n )\n adjust_chunks = dict(zip(out_ind, chunks))\n else:\n adjust_chunks = None\n\n out = blockwise(\n func,\n out_ind,\n *concat(argpairs),\n name=name,\n new_axes=new_axes,\n dtype=dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=adjust_chunks,\n meta=meta,\n **kwargs,\n )\n\n extra_argpairs = []\n extra_names = []\n # If func has block_id as an argument, construct an array of block IDs and\n # prepare to inject it.\n if has_keyword(func, \"block_id\"):\n block_id_name = \"block-id-\" + out.name\n block_id_dsk = {\n (block_id_name,) + block_id: block_id\n for block_id in product(*(range(len(c)) for c in out.chunks))\n }\n block_id_array = Array(\n block_id_dsk,\n block_id_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_id_array, out_ind))\n extra_names.append(\"block_id\")\n\n # If func has block_info as an argument, construct an array of block info\n # objects and prepare to inject it.\n if has_keyword(func, \"block_info\"):\n starts = {}\n num_chunks = {}\n shapes = {}\n\n for i, (arg, in_ind) in enumerate(argpairs):\n if in_ind is not None:\n shapes[i] = arg.shape\n if drop_axis:\n # We concatenate along dropped axes, so we need to treat them\n # as if 
there is only a single chunk.\n starts[i] = [\n (\n cached_cumsum(arg.chunks[j], initial_zero=True)\n if ind in out_ind\n else [0, arg.shape[j]]\n )\n for j, ind in enumerate(in_ind)\n ]\n num_chunks[i] = tuple(len(s) - 1 for s in starts[i])\n else:\n starts[i] = [\n cached_cumsum(c, initial_zero=True) for c in arg.chunks\n ]\n num_chunks[i] = arg.numblocks\n out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]\n\n block_info_name = \"block-info-\" + out.name\n block_info_dsk = {}\n for block_id in product(*(range(len(c)) for c in out.chunks)):\n # Get position of chunk, indexed by axis labels\n location = {out_ind[i]: loc for i, loc in enumerate(block_id)}\n info = {}\n for i, shape in shapes.items():\n # Compute chunk key in the array, taking broadcasting into\n # account. We don't directly know which dimensions are\n # broadcast, but any dimension with only one chunk can be\n # treated as broadcast.\n arr_k = tuple(\n location.get(ind, 0) if num_chunks[i][j] > 1 else 0\n for j, ind in enumerate(argpairs[i][1])\n )\n info[i] = {\n \"shape\": shape,\n \"num-chunks\": num_chunks[i],\n \"array-location\": [\n (starts[i][ij][j], starts[i][ij][j + 1])\n for ij, j in enumerate(arr_k)\n ],\n \"chunk-location\": arr_k,\n }\n\n info[None] = {\n \"shape\": out.shape,\n \"num-chunks\": out.numblocks,\n \"array-location\": [\n (out_starts[ij][j], out_starts[ij][j + 1])\n for ij, j in enumerate(block_id)\n ],\n \"chunk-location\": block_id,\n \"chunk-shape\": tuple(\n out.chunks[ij][j] for ij, j in enumerate(block_id)\n ),\n \"dtype\": dtype,\n }\n block_info_dsk[(block_info_name,) + block_id] = info\n\n block_info = Array(\n block_info_dsk,\n block_info_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_info, out_ind))\n extra_names.append(\"block_info\")\n\n if extra_argpairs:\n # Rewrite the Blockwise layer. It would be nice to find a way to\n # avoid doing it twice, but it's currently needed to determine\n # out.chunks from the first pass. 
Since it constructs a Blockwise\n # rather than an expanded graph, it shouldn't be too expensive.\n out = blockwise(\n _pass_extra_kwargs,\n out_ind,\n func,\n None,\n tuple(extra_names),\n None,\n *concat(extra_argpairs),\n *concat(argpairs),\n name=out.name,\n dtype=out.dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=dict(zip(out_ind, out.chunks)),\n meta=meta,\n **kwargs,\n )\n\n return out\n\n\ndef broadcast_chunks(*chunkss):\n \"\"\"Construct a chunks tuple that broadcasts many chunks tuples\n\n >>> a = ((5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((5, 5),)\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((1,), (5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((3, 3,), (5, 5),)\n >>> broadcast_chunks(a, b)\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]\n \"\"\"\n if not chunkss:\n return ()\n elif len(chunkss) == 1:\n return chunkss[0]\n n = max(map(len, chunkss))\n chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]\n result = []\n for i in range(n):\n step1 = [c[i] for c in chunkss2]\n if all(c == (1,) for c in step1):\n step2 = step1\n else:\n step2 = [c for c in step1 if c != (1,)]\n if len(set(step2)) != 1:\n raise ValueError(\"Chunks do not align: %s\" % str(step2))\n result.append(step2[0])\n return tuple(result)\n\n\ndef store(\n sources,\n targets,\n lock=True,\n regions=None,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\"Store dask arrays in array-like objects, overwrite data in target\n\n This stores dask arrays into object that supports numpy-style setitem\n indexing. It stores values chunk by chunk so that it does not have to\n fill up memory. For best performance you can align the block size of\n the storage target with the block size of your array.\n\n If your data fits in memory then you may prefer calling\n ``np.array(myarray)`` instead.\n\n Parameters\n ----------\n\n sources: Array or iterable of Arrays\n targets: array-like or Delayed or iterable of array-likes and/or Delayeds\n These should support setitem syntax ``target[10:20] = ...``\n lock: boolean or threading.Lock, optional\n Whether or not to lock the data stores while storing.\n Pass True (lock each file individually), False (don't lock) or a\n particular ``threading.Lock`` object to be shared among all writes.\n regions: tuple of slices or list of tuples of slices\n Each ``region`` tuple in ``regions`` should be such that\n ``target[region].shape = source.shape``\n for the corresponding source and target in sources and targets,\n respectively. If this is a tuple, the contents will be assumed to be\n slices, so do not provide a tuple of tuples.\n compute: boolean, optional\n If true compute immediately, return ``dask.delayed.Delayed`` otherwise\n return_stored: boolean, optional\n Optionally return the stored result (default False).\n\n Examples\n --------\n >>> x = ... # doctest: +SKIP\n\n >>> import h5py # doctest: +SKIP\n >>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP\n >>> dset = f.create_dataset('/data', shape=x.shape,\n ... chunks=x.chunks,\n ... 
dtype='f8') # doctest: +SKIP\n\n >>> store(x, dset) # doctest: +SKIP\n\n Alternatively store many arrays at the same time\n\n >>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP\n \"\"\"\n\n if isinstance(sources, Array):\n sources = [sources]\n targets = [targets]\n\n if any(not isinstance(s, Array) for s in sources):\n raise ValueError(\"All sources must be dask array objects\")\n\n if len(sources) != len(targets):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d]\"\n % (len(sources), len(targets))\n )\n\n if isinstance(regions, tuple) or regions is None:\n regions = [regions]\n\n if len(sources) > 1 and len(regions) == 1:\n regions *= len(sources)\n\n if len(sources) != len(regions):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d] than regions [%d]\"\n % (len(sources), len(targets), len(regions))\n )\n\n # Optimize all sources together\n sources_dsk = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])\n sources_dsk = Array.__dask_optimize__(\n sources_dsk, list(core.flatten([e.__dask_keys__() for e in sources]))\n )\n sources2 = [Array(sources_dsk, e.name, e.chunks, meta=e) for e in sources]\n\n # Optimize all targets together\n targets2 = []\n targets_keys = []\n targets_dsk = []\n for e in targets:\n if isinstance(e, Delayed):\n targets2.append(e.key)\n targets_keys.extend(e.__dask_keys__())\n targets_dsk.append(e.__dask_graph__())\n elif is_dask_collection(e):\n raise TypeError(\"Targets must be either Delayed objects or array-likes\")\n else:\n targets2.append(e)\n\n targets_dsk = HighLevelGraph.merge(*targets_dsk)\n targets_dsk = Delayed.__dask_optimize__(targets_dsk, targets_keys)\n\n load_stored = return_stored and not compute\n toks = [str(uuid.uuid1()) for _ in range(len(sources))]\n store_dsk = HighLevelGraph.merge(\n *[\n insert_to_ooc(s, t, lock, r, return_stored, load_stored, tok)\n for s, t, r, tok in zip(sources2, targets2, regions, toks)\n ]\n )\n store_keys = list(store_dsk.keys())\n\n store_dsk = HighLevelGraph.merge(store_dsk, targets_dsk, sources_dsk)\n\n if return_stored:\n load_store_dsk = store_dsk\n if compute:\n store_dlyds = [Delayed(k, store_dsk) for k in store_keys]\n store_dlyds = persist(*store_dlyds, **kwargs)\n store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])\n\n load_store_dsk = retrieve_from_ooc(store_keys, store_dsk, store_dsk_2)\n\n result = tuple(\n Array(load_store_dsk, \"load-store-%s\" % t, s.chunks, meta=s)\n for s, t in zip(sources, toks)\n )\n\n return result\n else:\n name = \"store-\" + str(uuid.uuid1())\n dsk = HighLevelGraph.merge({name: store_keys}, store_dsk)\n result = Delayed(name, dsk)\n\n if compute:\n result.compute(**kwargs)\n return None\n else:\n return result\n\n\ndef blockdims_from_blockshape(shape, chunks):\n \"\"\"\n\n >>> blockdims_from_blockshape((10, 10), (4, 3))\n ((4, 4, 2), (3, 3, 3, 1))\n >>> blockdims_from_blockshape((10, 0), (4, 0))\n ((4, 4, 2), (0,))\n \"\"\"\n if chunks is None:\n raise TypeError(\"Must supply chunks= keyword argument\")\n if shape is None:\n raise TypeError(\"Must supply shape= keyword argument\")\n if np.isnan(sum(shape)) or np.isnan(sum(chunks)):\n raise ValueError(\n \"Array chunk sizes are unknown. 
shape: %s, chunks: %s%s\"\n % (shape, chunks, unknown_chunk_message)\n )\n if not all(map(is_integer, chunks)):\n raise ValueError(\"chunks can only contain integers.\")\n if not all(map(is_integer, shape)):\n raise ValueError(\"shape can only contain integers.\")\n shape = tuple(map(int, shape))\n chunks = tuple(map(int, chunks))\n return tuple(\n ((bd,) * (d // bd) + ((d % bd,) if d % bd else ()) if d else (0,))\n for d, bd in zip(shape, chunks)\n )\n\n\ndef finalize(results):\n if not results:\n return concatenate3(results)\n results2 = results\n while isinstance(results2, (tuple, list)):\n if len(results2) > 1:\n return concatenate3(results)\n else:\n results2 = results2[0]\n return unpack_singleton(results)\n\n\nCHUNKS_NONE_ERROR_MESSAGE = \"\"\"\nYou must specify a chunks= keyword argument.\nThis specifies the chunksize of your array blocks.\n\nSee the following documentation page for details:\n https://docs.dask.org/en/latest/array-creation.html#chunks\n\"\"\".strip()\n\n\nclass Array(DaskMethodsMixin):\n \"\"\"Parallel Dask Array\n\n A parallel nd-array comprised of many numpy arrays arranged in a grid.\n\n This constructor is for advanced uses only. For normal use see the\n ``da.from_array`` function.\n\n Parameters\n ----------\n dask : dict\n Task dependency graph\n name : string\n Name of array in dask\n shape : tuple of ints\n Shape of the entire array\n chunks: iterable of tuples\n block sizes along each dimension\n dtype : str or dtype\n Typecode or data-type for the new Dask Array\n meta : empty ndarray\n empty ndarray created with same NumPy backend, ndim and dtype as the\n Dask Array being created (overrides dtype)\n\n See Also\n --------\n dask.array.from_array\n \"\"\"\n\n __slots__ = \"dask\", \"_name\", \"_cached_keys\", \"_chunks\", \"_meta\"\n\n def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):\n self = super(Array, cls).__new__(cls)\n assert isinstance(dask, Mapping)\n if not isinstance(dask, HighLevelGraph):\n dask = HighLevelGraph.from_collections(name, dask, dependencies=())\n self.dask = dask\n self.name = str(name)\n meta = meta_from_array(meta, dtype=dtype)\n\n if (\n isinstance(chunks, str)\n or isinstance(chunks, tuple)\n and chunks\n and any(isinstance(c, str) for c in chunks)\n ):\n dt = meta.dtype\n else:\n dt = None\n self._chunks = normalize_chunks(chunks, shape, dtype=dt)\n if self._chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n\n self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)\n\n for plugin in config.get(\"array_plugins\", ()):\n result = plugin(self)\n if result is not None:\n self = result\n\n return self\n\n def __reduce__(self):\n return (Array, (self.dask, self.name, self.chunks, self.dtype))\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_layers__(self):\n return (self.name,)\n\n def __dask_keys__(self):\n if self._cached_keys is not None:\n return self._cached_keys\n\n name, chunks, numblocks = self.name, self.chunks, self.numblocks\n\n def keys(*args):\n if not chunks:\n return [(name,)]\n ind = len(args)\n if ind + 1 == len(numblocks):\n result = [(name,) + args + (i,) for i in range(numblocks[ind])]\n else:\n result = [keys(*(args + (i,))) for i in range(numblocks[ind])]\n return result\n\n self._cached_keys = result = keys()\n return result\n\n def __dask_tokenize__(self):\n return self.name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"array_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def 
__dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return Array, (self.name, self.chunks, self.dtype, self._meta)\n\n @property\n def numblocks(self):\n return tuple(map(len, self.chunks))\n\n @property\n def npartitions(self):\n return reduce(mul, self.numblocks, 1)\n\n def compute_chunk_sizes(self):\n \"\"\"\n Compute the chunk sizes for a Dask array. This is especially useful\n when the chunk sizes are unknown (e.g., when indexing one Dask array\n with another).\n\n Notes\n -----\n This function modifies the Dask array in-place.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)\n >>> x.chunks\n ((2, 2, 1),)\n >>> y = x[x <= 0]\n >>> y.chunks\n ((nan, nan, nan),)\n >>> y.compute_chunk_sizes() # in-place computation\n dask.array<getitem, shape=(3,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n >>> y.chunks\n ((2, 1, 0),)\n\n \"\"\"\n x = self\n chunk_shapes = x.map_blocks(\n _get_chunk_shape,\n dtype=int,\n chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),\n new_axis=x.ndim,\n )\n\n c = []\n for i in range(x.ndim):\n s = x.ndim * [0] + [i]\n s[i] = slice(None)\n s = tuple(s)\n\n c.append(tuple(chunk_shapes[s]))\n\n # `map_blocks` assigns numpy dtypes\n # cast chunk dimensions back to python int before returning\n x._chunks = tuple(\n [tuple([int(chunk) for chunk in chunks]) for chunks in compute(tuple(c))[0]]\n )\n return x\n\n @property\n def shape(self):\n return tuple(cached_cumsum(c, initial_zero=True)[-1] for c in self.chunks)\n\n @property\n def chunksize(self):\n return tuple(max(c) for c in self.chunks)\n\n @property\n def dtype(self):\n return self._meta.dtype\n\n def _get_chunks(self):\n return self._chunks\n\n def _set_chunks(self, chunks):\n msg = (\n \"Can not set chunks directly\\n\\n\"\n \"Please use the rechunk method instead:\\n\"\n \" x.rechunk({})\\n\\n\"\n \"If trying to avoid unknown chunks, use\\n\"\n \" x.compute_chunk_sizes()\"\n )\n raise TypeError(msg.format(chunks))\n\n chunks = property(_get_chunks, _set_chunks, \"chunks property\")\n\n def __len__(self):\n if not self.chunks:\n raise TypeError(\"len() of unsized object\")\n return sum(self.chunks[0])\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n # Verify all arrays are properly handled by Dask\n if not isinstance(x, Array) and not is_valid_array_chunk(x):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc is np.matmul:\n from .routines import matmul\n\n # special case until apply_gufunc handles optional dimensions\n return matmul(*inputs, **kwargs)\n if numpy_ufunc.signature is not None:\n from .gufunc import apply_gufunc\n\n return apply_gufunc(\n numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs\n )\n if numpy_ufunc.nout > 1:\n from . import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc(*inputs, **kwargs)\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n elif method == \"outer\":\n from . 
import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc.outer(*inputs, **kwargs)\n else:\n return NotImplemented\n\n def __repr__(self):\n \"\"\"\n\n >>> import dask.array as da\n >>> da.ones((10, 10), chunks=(5, 5), dtype='i4')\n dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>\n \"\"\"\n chunksize = str(self.chunksize)\n name = self.name.rsplit(\"-\", 1)[0]\n return \"dask.array<%s, shape=%s, dtype=%s, chunksize=%s, chunktype=%s.%s>\" % (\n name,\n self.shape,\n self.dtype,\n chunksize,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n )\n\n def _repr_html_(self):\n table = self._repr_html_table()\n try:\n grid = self.to_svg(size=config.get(\"array.svg.size\", 120))\n except NotImplementedError:\n grid = \"\"\n\n both = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n table,\n \"</td>\",\n \"<td>\",\n grid,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n return \"\\n\".join(both)\n\n def _repr_html_table(self):\n if \"sparse\" in typename(type(self._meta)):\n nbytes = None\n cbytes = None\n elif not math.isnan(self.nbytes):\n nbytes = format_bytes(self.nbytes)\n cbytes = format_bytes(np.prod(self.chunksize) * self.dtype.itemsize)\n else:\n nbytes = \"unknown\"\n cbytes = \"unknown\"\n\n table = [\n \"<table>\",\n \" <thead>\",\n \" <tr><td> </td><th> Array </th><th> Chunk </th></tr>\",\n \" </thead>\",\n \" <tbody>\",\n \" <tr><th> Bytes </th><td> %s </td> <td> %s </td></tr>\"\n % (nbytes, cbytes)\n if nbytes is not None\n else \"\",\n \" <tr><th> Shape </th><td> %s </td> <td> %s </td></tr>\"\n % (str(self.shape), str(self.chunksize)),\n \" <tr><th> Count </th><td> %d Tasks </td><td> %d Chunks </td></tr>\"\n % (len(self.__dask_graph__()), self.npartitions),\n \" <tr><th> Type </th><td> %s </td><td> %s.%s </td></tr>\"\n % (\n self.dtype,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n ),\n \" </tbody>\",\n \"</table>\",\n ]\n return \"\\n\".join(table)\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def size(self):\n \"\"\" Number of elements in array \"\"\"\n return reduce(mul, self.shape, 1)\n\n @property\n def nbytes(self):\n \"\"\" Number of bytes in array \"\"\"\n return self.size * self.dtype.itemsize\n\n @property\n def itemsize(self):\n \"\"\" Length of one array element in bytes \"\"\"\n return self.dtype.itemsize\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, val):\n self._name = val\n # Clear the key cache when the name is reset\n self._cached_keys = None\n\n __array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix\n\n def __array__(self, dtype=None, **kwargs):\n x = self.compute()\n if dtype and x.dtype != dtype:\n x = x.astype(dtype)\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return x\n\n def __array_function__(self, func, types, args, kwargs):\n import dask.array as module\n\n def handle_nonmatching_names(func, args, kwargs):\n if func not in _HANDLED_FUNCTIONS:\n warnings.warn(\n \"The `{}` function is not implemented by Dask array. \"\n \"You may want to use the da.map_blocks function \"\n \"or something similar to silence this warning. \"\n \"Your code may stop working in a future release.\".format(\n func.__module__ + \".\" + func.__name__\n ),\n FutureWarning,\n )\n # Need to convert to array object (e.g. 
numpy.ndarray or\n # cupy.ndarray) as needed, so we can call the NumPy function\n # again and it gets the chance to dispatch to the right\n # implementation.\n args, kwargs = compute(args, kwargs)\n return func(*args, **kwargs)\n\n return _HANDLED_FUNCTIONS[func](*args, **kwargs)\n\n # First, verify that all types are handled by Dask. Otherwise, return NotImplemented.\n if not all(type is Array or is_valid_chunk_type(type) for type in types):\n return NotImplemented\n\n # Now try to find a matching function name. If that doesn't work, we may\n # be dealing with an alias or a function that's simply not in the Dask API.\n # Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.\n for submodule in func.__module__.split(\".\")[1:]:\n try:\n module = getattr(module, submodule)\n except AttributeError:\n return handle_nonmatching_names(func, args, kwargs)\n\n if not hasattr(module, func.__name__):\n return handle_nonmatching_names(func, args, kwargs)\n\n da_func = getattr(module, func.__name__)\n if da_func is func:\n return handle_nonmatching_names(func, args, kwargs)\n return da_func(*args, **kwargs)\n\n @property\n def _elemwise(self):\n return elemwise\n\n @wraps(store)\n def store(self, target, **kwargs):\n r = store([self], [target], **kwargs)\n\n if kwargs.get(\"return_stored\", False):\n r = r[0]\n\n return r\n\n def to_svg(self, size=500):\n \"\"\"Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Examples\n --------\n >>> x.to_svg(size=500) # doctest: +SKIP\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n \"\"\"\n from .svg import svg\n\n return svg(self.chunks, size=size)\n\n def to_hdf5(self, filename, datapath, **kwargs):\n \"\"\"Store array in HDF5 file\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n return to_hdf5(filename, datapath, self, **kwargs)\n\n def to_dask_dataframe(self, columns=None, index=None, meta=None):\n \"\"\"Convert dask Array to dask Dataframe\n\n Parameters\n ----------\n columns: list or string\n list of column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether the array has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). If all the chunks are known,\n a default index with known divsions is created.\n\n Specifying ``index`` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to use for partitions of\n the Dask dataframe. By default, pandas DataFrame is used.\n\n See Also\n --------\n dask.dataframe.from_dask_array\n \"\"\"\n from ..dataframe import from_dask_array\n\n return from_dask_array(self, columns=columns, index=index, meta=meta)\n\n def __bool__(self):\n if self.size > 1:\n raise ValueError(\n \"The truth value of a {0} is ambiguous. 
\"\n \"Use a.any() or a.all().\".format(self.__class__.__name__)\n )\n else:\n return bool(self.compute())\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n if self.size > 1:\n raise TypeError(\"Only length-1 arrays can be converted to Python scalars\")\n else:\n return cast_type(self.compute())\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __complex__(self):\n return self._scalarfunc(complex)\n\n def __setitem__(self, key, value):\n from .routines import where\n\n if isinstance(key, Array):\n if isinstance(value, Array) and value.ndim > 1:\n raise ValueError(\"boolean index array should have 1 dimension\")\n y = where(key, value, self)\n self._meta = y._meta\n self.dask = y.dask\n self.name = y.name\n self._chunks = y.chunks\n return self\n else:\n raise NotImplementedError(\n \"Item assignment with %s not supported\" % type(key)\n )\n\n def __getitem__(self, index):\n # Field access, e.g. x['a'] or x[['a', 'b']]\n if isinstance(index, str) or (\n isinstance(index, list) and index and all(isinstance(i, str) for i in index)\n ):\n if isinstance(index, str):\n dt = self.dtype[index]\n else:\n dt = _make_sliced_dtype(self.dtype, index)\n\n if dt.shape:\n new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))\n chunks = self.chunks + tuple((i,) for i in dt.shape)\n return self.map_blocks(\n getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis\n )\n else:\n return self.map_blocks(getitem, index, dtype=dt)\n\n if not isinstance(index, tuple):\n index = (index,)\n\n from .slicing import (\n normalize_index,\n slice_with_int_dask_array,\n slice_with_bool_dask_array,\n )\n\n index2 = normalize_index(index, self.shape)\n dependencies = {self.name}\n for i in index2:\n if isinstance(i, Array):\n dependencies.add(i.name)\n\n if any(isinstance(i, Array) and i.dtype.kind in \"iu\" for i in index2):\n self, index2 = slice_with_int_dask_array(self, index2)\n if any(isinstance(i, Array) and i.dtype == bool for i in index2):\n self, index2 = slice_with_bool_dask_array(self, index2)\n\n if all(isinstance(i, slice) and i == slice(None) for i in index2):\n return self\n\n out = \"getitem-\" + tokenize(self, index2)\n dsk, chunks = slice_array(out, self.name, self.chunks, index2, self.itemsize)\n\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])\n\n meta = meta_from_array(self._meta, ndim=len(chunks))\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return Array(graph, out, chunks, meta=meta)\n\n def _vindex(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n if any(k is None for k in key):\n raise IndexError(\n \"vindex does not support indexing with None (np.newaxis), \"\n \"got {}\".format(key)\n )\n if all(isinstance(k, slice) for k in key):\n if all(\n k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)\n ):\n return self\n raise IndexError(\n \"vindex requires at least one non-slice to vectorize over \"\n \"when the slices are not over the entire array (i.e, x[:]). \"\n \"Use normal slicing instead when only using slices. Got: {}\".format(key)\n )\n return _vindex(self, *key)\n\n @property\n def vindex(self):\n \"\"\"Vectorized indexing with broadcasting.\n\n This is equivalent to numpy's advanced indexing, using arrays that are\n broadcast against each other. 
This allows for pointwise indexing:\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> x = from_array(x, chunks=2)\n >>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()\n array([1, 5, 9])\n\n Mixed basic/advanced indexing with slices/arrays is also supported. The\n order of dimensions in the result follows those proposed for\n `ndarray.vindex <https://github.com/numpy/numpy/pull/6256>`_:\n the subspace spanned by arrays is followed by all slices.\n\n Note: ``vindex`` provides more general functionality than standard\n indexing, but it also has fewer optimizations and can be significantly\n slower.\n \"\"\"\n return IndexCallable(self._vindex)\n\n def _blocks(self, index):\n from .slicing import normalize_index\n\n if not isinstance(index, tuple):\n index = (index,)\n if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:\n raise ValueError(\"Can only slice with a single list\")\n if any(ind is None for ind in index):\n raise ValueError(\"Slicing with np.newaxis or None is not supported\")\n index = normalize_index(index, self.numblocks)\n index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n\n name = \"blocks-\" + tokenize(self, index)\n\n new_keys = np.array(self.__dask_keys__(), dtype=object)[index]\n\n chunks = tuple(\n tuple(np.array(c)[i].tolist()) for c, i in zip(self.chunks, index)\n )\n\n keys = product(*(range(len(c)) for c in chunks))\n\n layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}\n\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n return Array(graph, name, chunks, meta=self)\n\n @property\n def blocks(self):\n \"\"\"Slice an array by blocks\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.blocks[0].compute()\n array([0, 1])\n >>> x.blocks[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.blocks[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.blocks[[-1, 0]].compute()\n array([8, 9, 0, 1])\n\n Returns\n -------\n A Dask array\n \"\"\"\n return IndexCallable(self._blocks)\n\n @property\n def partitions(self):\n \"\"\"Slice an array by partitions. Alias of dask array .blocks attribute.\n\n This alias allows you to write agnostic code that works with both\n dask arrays and dask dataframes.\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). 
The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.partitions[0].compute()\n array([0, 1])\n >>> x.partitions[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.partitions[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.partitions[[-1, 0]].compute()\n array([8, 9, 0, 1])\n >>> all(x.partitions[:].compute() == x.blocks[:].compute())\n True\n\n Returns\n -------\n A Dask array\n \"\"\"\n return self.blocks\n\n @derived_from(np.ndarray)\n def dot(self, other):\n from .routines import tensordot\n\n return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))\n\n @property\n def A(self):\n return self\n\n @property\n def T(self):\n return self.transpose()\n\n @derived_from(np.ndarray)\n def transpose(self, *axes):\n from .routines import transpose\n\n if not axes:\n axes = None\n elif len(axes) == 1 and isinstance(axes[0], Iterable):\n axes = axes[0]\n if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):\n # no transpose necessary\n return self\n else:\n return transpose(self, axes=axes)\n\n @derived_from(np.ndarray)\n def ravel(self):\n from .routines import ravel\n\n return ravel(self)\n\n flatten = ravel\n\n @derived_from(np.ndarray)\n def choose(self, choices):\n from .routines import choose\n\n return choose(self, choices)\n\n @derived_from(np.ndarray)\n def reshape(self, *shape):\n from .reshape import reshape\n\n if len(shape) == 1 and not isinstance(shape[0], Number):\n shape = shape[0]\n return reshape(self, shape)\n\n def topk(self, k, axis=-1, split_every=None):\n \"\"\"The top k elements of an array.\n\n See ``da.topk`` for docstring\"\"\"\n from .reductions import topk\n\n return topk(self, k, axis=axis, split_every=split_every)\n\n def argtopk(self, k, axis=-1, split_every=None):\n \"\"\"The indices of the top k elements of an array.\n\n See ``da.argtopk`` for docstring\"\"\"\n from .reductions import argtopk\n\n return argtopk(self, k, axis=axis, split_every=split_every)\n\n def astype(self, dtype, **kwargs):\n \"\"\"Copy of the array, cast to a specified type.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'unsafe'\n for backwards compatibility.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n copy : bool, optional\n By default, astype always returns a newly allocated array. 
If this\n is set to False and the `dtype` requirement is satisfied, the input\n array is returned instead of a copy.\n \"\"\"\n # Scalars don't take `casting` or `copy` kwargs - as such we only pass\n # them to `map_blocks` if specified by user (different than defaults).\n extra = set(kwargs) - {\"casting\", \"copy\"}\n if extra:\n raise TypeError(\n \"astype does not take the following keyword \"\n \"arguments: {0!s}\".format(list(extra))\n )\n casting = kwargs.get(\"casting\", \"unsafe\")\n dtype = np.dtype(dtype)\n if self.dtype == dtype:\n return self\n elif not np.can_cast(self.dtype, dtype, casting=casting):\n raise TypeError(\n \"Cannot cast array from {0!r} to {1!r}\"\n \" according to the rule \"\n \"{2!r}\".format(self.dtype, dtype, casting)\n )\n return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)\n\n def __abs__(self):\n return elemwise(operator.abs, self)\n\n @check_if_handled_given_other\n def __add__(self, other):\n return elemwise(operator.add, self, other)\n\n @check_if_handled_given_other\n def __radd__(self, other):\n return elemwise(operator.add, other, self)\n\n @check_if_handled_given_other\n def __and__(self, other):\n return elemwise(operator.and_, self, other)\n\n @check_if_handled_given_other\n def __rand__(self, other):\n return elemwise(operator.and_, other, self)\n\n @check_if_handled_given_other\n def __div__(self, other):\n return elemwise(operator.div, self, other)\n\n @check_if_handled_given_other\n def __rdiv__(self, other):\n return elemwise(operator.div, other, self)\n\n @check_if_handled_given_other\n def __eq__(self, other):\n return elemwise(operator.eq, self, other)\n\n @check_if_handled_given_other\n def __gt__(self, other):\n return elemwise(operator.gt, self, other)\n\n @check_if_handled_given_other\n def __ge__(self, other):\n return elemwise(operator.ge, self, other)\n\n def __invert__(self):\n return elemwise(operator.invert, self)\n\n @check_if_handled_given_other\n def __lshift__(self, other):\n return elemwise(operator.lshift, self, other)\n\n @check_if_handled_given_other\n def __rlshift__(self, other):\n return elemwise(operator.lshift, other, self)\n\n @check_if_handled_given_other\n def __lt__(self, other):\n return elemwise(operator.lt, self, other)\n\n @check_if_handled_given_other\n def __le__(self, other):\n return elemwise(operator.le, self, other)\n\n @check_if_handled_given_other\n def __mod__(self, other):\n return elemwise(operator.mod, self, other)\n\n @check_if_handled_given_other\n def __rmod__(self, other):\n return elemwise(operator.mod, other, self)\n\n @check_if_handled_given_other\n def __mul__(self, other):\n return elemwise(operator.mul, self, other)\n\n @check_if_handled_given_other\n def __rmul__(self, other):\n return elemwise(operator.mul, other, self)\n\n @check_if_handled_given_other\n def __ne__(self, other):\n return elemwise(operator.ne, self, other)\n\n def __neg__(self):\n return elemwise(operator.neg, self)\n\n @check_if_handled_given_other\n def __or__(self, other):\n return elemwise(operator.or_, self, other)\n\n def __pos__(self):\n return self\n\n @check_if_handled_given_other\n def __ror__(self, other):\n return elemwise(operator.or_, other, self)\n\n @check_if_handled_given_other\n def __pow__(self, other):\n return elemwise(operator.pow, self, other)\n\n @check_if_handled_given_other\n def __rpow__(self, other):\n return elemwise(operator.pow, other, self)\n\n @check_if_handled_given_other\n def __rshift__(self, other):\n return elemwise(operator.rshift, self, other)\n\n 
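A small sketch of the casting check performed by ``astype`` above (assuming dask.array is installed); the dtypes chosen are arbitrary.

import dask.array as da

x = da.ones(4, chunks=2, dtype='float64')
y = x.astype('float32')               # allowed under the default casting='unsafe'
print(y.dtype)                        # float32
# x.astype('int32', casting='safe')   # would raise TypeError: float64 -> int32 is not a safe cast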
@check_if_handled_given_other\n def __rrshift__(self, other):\n return elemwise(operator.rshift, other, self)\n\n @check_if_handled_given_other\n def __sub__(self, other):\n return elemwise(operator.sub, self, other)\n\n @check_if_handled_given_other\n def __rsub__(self, other):\n return elemwise(operator.sub, other, self)\n\n @check_if_handled_given_other\n def __truediv__(self, other):\n return elemwise(operator.truediv, self, other)\n\n @check_if_handled_given_other\n def __rtruediv__(self, other):\n return elemwise(operator.truediv, other, self)\n\n @check_if_handled_given_other\n def __floordiv__(self, other):\n return elemwise(operator.floordiv, self, other)\n\n @check_if_handled_given_other\n def __rfloordiv__(self, other):\n return elemwise(operator.floordiv, other, self)\n\n @check_if_handled_given_other\n def __xor__(self, other):\n return elemwise(operator.xor, self, other)\n\n @check_if_handled_given_other\n def __rxor__(self, other):\n return elemwise(operator.xor, other, self)\n\n @check_if_handled_given_other\n def __matmul__(self, other):\n from .routines import matmul\n\n return matmul(self, other)\n\n @check_if_handled_given_other\n def __rmatmul__(self, other):\n from .routines import matmul\n\n return matmul(other, self)\n\n @check_if_handled_given_other\n def __divmod__(self, other):\n from .ufunc import divmod\n\n return divmod(self, other)\n\n @check_if_handled_given_other\n def __rdivmod__(self, other):\n from .ufunc import divmod\n\n return divmod(other, self)\n\n @derived_from(np.ndarray)\n def any(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import any\n\n return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def all(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import all\n\n return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def min(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import min\n\n return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def max(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import max\n\n return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmin(self, axis=None, split_every=None, out=None):\n from .reductions import argmin\n\n return argmin(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmax(self, axis=None, split_every=None, out=None):\n from .reductions import argmax\n\n return argmax(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import sum\n\n return sum(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None):\n from .reductions import trace\n\n return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)\n\n @derived_from(np.ndarray)\n def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import prod\n\n return prod(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def mean(self, axis=None, dtype=None, 
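An illustrative sketch of how the operator overloads and reductions above compose lazily; the values are arbitrary.

import dask.array as da

x = da.arange(10, chunks=5)
y = (x + 1) * 2              # each operator dispatches through elemwise()
print(y.sum().compute())     # 110
print(y.max().compute())     # 20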
keepdims=False, split_every=None, out=None):\n from .reductions import mean\n\n return mean(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def std(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import std\n\n return std(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def var(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import var\n\n return var(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n def moment(\n self,\n order,\n axis=None,\n dtype=None,\n keepdims=False,\n ddof=0,\n split_every=None,\n out=None,\n ):\n \"\"\"Calculate the nth centralized moment.\n\n Parameters\n ----------\n order : int\n Order of the moment that is returned, must be >= 2.\n axis : int, optional\n Axis along which the central moment is computed. The default is to\n compute the moment of the flattened array.\n dtype : data-type, optional\n Type to use in computing the moment. For arrays of integer type the\n default is float64; for arrays of float types it is the same as the\n array type.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. With this option, the result\n will broadcast correctly against the original array.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n N - ddof, where N represents the number of elements. By default\n ddof is zero.\n\n Returns\n -------\n moment : ndarray\n\n References\n ----------\n .. [1] Pebay, Philippe (2008), \"Formulas for Robust, One-Pass Parallel\n Computation of Covariances and Arbitrary-Order Statistical Moments\",\n Technical Report SAND2008-6212, Sandia National Laboratories.\n\n \"\"\"\n\n from .reductions import moment\n\n return moment(\n self,\n order,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @wraps(map_blocks)\n def map_blocks(self, func, *args, **kwargs):\n return map_blocks(func, self, *args, **kwargs)\n\n def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):\n \"\"\"Map a function over blocks of the array with some overlap\n\n We share neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block\n depth: int, tuple, or dict\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis\n boundary: str, tuple, dict\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n **kwargs:\n Other keyword arguments valid in ``map_blocks``\n\n Examples\n --------\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = from_array(x, chunks=5)\n >>> def derivative(x):\n ... 
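A hedged sketch relating ``var`` (with ``ddof``) to the ``moment`` method documented above; the sample data is made up.

import numpy as np
import dask.array as da

x = da.from_array(np.array([1.0, 2.0, 3.0, 4.0]), chunks=2)
print(x.moment(2).compute())     # 1.25 -- second central moment (ddof=0)
print(x.var().compute())         # 1.25 -- same divisor, N - 0
print(x.var(ddof=1).compute())   # ~1.6667 -- divisor N - 1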
return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> import dask.array as da\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> d.map_overlap(lambda x: x + x.size, depth=1).compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n \"\"\"\n from .overlap import map_overlap\n\n return map_overlap(\n func, self, depth=depth, boundary=boundary, trim=trim, **kwargs\n )\n\n @derived_from(np.ndarray)\n def cumsum(self, axis, dtype=None, out=None):\n from .reductions import cumsum\n\n return cumsum(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def cumprod(self, axis, dtype=None, out=None):\n from .reductions import cumprod\n\n return cumprod(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def squeeze(self, axis=None):\n from .routines import squeeze\n\n return squeeze(self, axis)\n\n def rechunk(self, chunks=\"auto\", threshold=None, block_size_limit=None):\n \"\"\" See da.rechunk for docstring \"\"\"\n from . import rechunk # avoid circular import\n\n return rechunk(self, chunks, threshold, block_size_limit)\n\n @property\n def real(self):\n from .ufunc import real\n\n return real(self)\n\n @property\n def imag(self):\n from .ufunc import imag\n\n return imag(self)\n\n def conj(self):\n from .ufunc import conj\n\n return conj(self)\n\n @derived_from(np.ndarray)\n def clip(self, min=None, max=None):\n from .ufunc import clip\n\n return clip(self, min, max)\n\n def view(self, dtype=None, order=\"C\"):\n \"\"\"Get a view of the array as a new data type\n\n Parameters\n ----------\n dtype:\n The dtype by which to view the array.\n The default, None, results in the view having the same data-type\n as the original array.\n order: string\n 'C' or 'F' (Fortran) ordering\n\n This reinterprets the bytes of the array under a new dtype. If that\n dtype does not have the same size as the original array then the shape\n will change.\n\n Beware that both numpy and dask.array can behave oddly when taking\n shape-changing views of arrays under Fortran ordering. Under some\n versions of NumPy this function will fail when taking shape-changing\n views of Fortran ordered arrays if the first dimension has chunks of\n size one.\n \"\"\"\n if dtype is None:\n dtype = self.dtype\n else:\n dtype = np.dtype(dtype)\n mult = self.dtype.itemsize / dtype.itemsize\n\n if order == \"C\":\n chunks = self.chunks[:-1] + (\n tuple(ensure_int(c * mult) for c in self.chunks[-1]),\n )\n elif order == \"F\":\n chunks = (\n tuple(ensure_int(c * mult) for c in self.chunks[0]),\n ) + self.chunks[1:]\n else:\n raise ValueError(\"Order must be one of 'C' or 'F'\")\n\n return self.map_blocks(\n chunk.view, dtype, order=order, dtype=dtype, chunks=chunks\n )\n\n @derived_from(np.ndarray)\n def swapaxes(self, axis1, axis2):\n from .routines import swapaxes\n\n return swapaxes(self, axis1, axis2)\n\n @derived_from(np.ndarray)\n def round(self, decimals=0):\n from .routines import round\n\n return round(self, decimals=decimals)\n\n def copy(self):\n \"\"\"\n Copy array. 
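A minimal sketch of ``rechunk`` as referenced above (see ``da.rechunk`` for the full docstring); the sizes are illustrative.

import dask.array as da

x = da.ones((8, 8), chunks=(4, 4))
print(x.chunks)              # ((4, 4), (4, 4))
y = x.rechunk((2, 8))        # explicit target chunk shape
print(y.chunks)              # ((2, 2, 2, 2), (8,))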
This is a no-op for dask.arrays, which are immutable\n \"\"\"\n if self.npartitions == 1:\n return self.map_blocks(M.copy)\n else:\n return Array(self.dask, self.name, self.chunks, meta=self)\n\n def __deepcopy__(self, memo):\n c = self.copy()\n memo[id(self)] = c\n return c\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into an array of ``dask.delayed`` objects, one per chunk.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n See Also\n --------\n dask.array.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n if optimize_graph:\n graph = self.__dask_optimize__(graph, keys) # TODO, don't collape graph\n name = \"delayed-\" + self.name\n graph = HighLevelGraph.from_collections(name, graph, dependencies=())\n L = ndeepmap(self.ndim, lambda k: Delayed(k, graph), keys)\n return np.array(L, dtype=object)\n\n @derived_from(np.ndarray)\n def repeat(self, repeats, axis=None):\n from .creation import repeat\n\n return repeat(self, repeats, axis=axis)\n\n @derived_from(np.ndarray)\n def nonzero(self):\n from .routines import nonzero\n\n return nonzero(self)\n\n def to_zarr(self, *args, **kwargs):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n See function ``to_zarr()`` for parameters.\n \"\"\"\n return to_zarr(self, *args, **kwargs)\n\n def to_tiledb(self, uri, *args, **kwargs):\n \"\"\"Save array to the TileDB storage manager\n\n See function ``to_tiledb()`` for argument documentation.\n\n See https://docs.tiledb.io for details about the format and engine.\n \"\"\"\n from .tiledb_io import to_tiledb\n\n return to_tiledb(self, uri, *args, **kwargs)\n\n\ndef ensure_int(f):\n i = int(f)\n if i != f:\n raise ValueError(\"Could not coerce %f to integer\" % f)\n return i\n\n\ndef normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):\n \"\"\"Normalize chunks to tuple of tuples\n\n This takes in a variety of input types and information and produces a full\n tuple-of-tuples result for chunks, suitable to be passed to Array or\n rechunk or any other operation that creates a Dask array.\n\n Parameters\n ----------\n chunks: tuple, int, dict, or string\n The chunks to be normalized. See examples below for more details\n shape: Tuple[int]\n The shape of the array\n limit: int (optional)\n The maximum block size to target in bytes,\n if freedom is given to choose\n dtype: np.dtype\n previous_chunks: Tuple[Tuple[int]] optional\n Chunks from a previous array that we should use for inspiration when\n rechunking auto dimensions. 
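A short sketch of ``to_delayed`` from above, which yields one ``dask.delayed`` object per chunk; the array is illustrative.

import dask.array as da

x = da.arange(6, chunks=3)
parts = x.to_delayed()       # numpy object array shaped like x.numblocks
print(parts.shape)           # (2,)
print(parts[0].compute())    # [0 1 2] -- just the first chunk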
If not provided but auto-chunking exists\n then auto-dimensions will prefer square-like chunk shapes.\n\n Examples\n --------\n Specify uniform chunk sizes\n\n >>> normalize_chunks((2, 2), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Also passes through fully explicit tuple-of-tuples\n\n >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Cleans up lists to tuples\n\n >>> normalize_chunks([[2, 2], [3, 3]])\n ((2, 2), (3, 3))\n\n Expands integer inputs 10 -> (10, 10)\n\n >>> normalize_chunks(10, shape=(30, 5))\n ((10, 10, 10), (5,))\n\n Expands dict inputs\n\n >>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))\n ((2, 2, 2), (3, 3))\n\n The values -1 and None get mapped to full size\n\n >>> normalize_chunks((5, -1), shape=(10, 10))\n ((5, 5), (10,))\n\n Use the value \"auto\" to automatically determine chunk sizes along certain\n dimensions. This uses the ``limit=`` and ``dtype=`` keywords to\n determine how large to make the chunks. The term \"auto\" can be used\n anywhere an integer can be used. See array chunking documentation for more\n information.\n\n >>> normalize_chunks((\"auto\",), shape=(20,), limit=5, dtype='uint8')\n ((5, 5, 5, 5),)\n\n You can also use byte sizes (see ``dask.utils.parse_bytes``) in place of\n \"auto\" to ask for a particular size\n\n >>> normalize_chunks(\"1kiB\", shape=(2000,), dtype='float32')\n ((250, 250, 250, 250, 250, 250, 250, 250),)\n\n Respects null dimensions\n\n >>> normalize_chunks((), shape=(0, 0))\n ((0,), (0,))\n \"\"\"\n if dtype and not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n if chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n if isinstance(chunks, list):\n chunks = tuple(chunks)\n if isinstance(chunks, (Number, str)):\n chunks = (chunks,) * len(shape)\n if isinstance(chunks, dict):\n chunks = tuple(chunks.get(i, None) for i in range(len(shape)))\n if isinstance(chunks, np.ndarray):\n chunks = chunks.tolist()\n if not chunks and shape and all(s == 0 for s in shape):\n chunks = ((0,),) * len(shape)\n\n if (\n shape\n and len(shape) == 1\n and len(chunks) > 1\n and all(isinstance(c, (Number, str)) for c in chunks)\n ):\n chunks = (chunks,)\n\n if shape and len(chunks) != len(shape):\n raise ValueError(\n \"Chunks and shape must be of the same length/dimension. 
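A small sketch exercising the dict and -1 conventions documented for ``normalize_chunks`` above; the shapes are arbitrary.

from dask.array.core import normalize_chunks

print(normalize_chunks({0: 2, 1: -1}, shape=(6, 4)))   # ((2, 2, 2), (4,))
print(normalize_chunks((2, 2), shape=(5, 6)))          # ((2, 2, 1), (2, 2, 2))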
\"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))\n\n # If specifying chunk size in bytes, use that value to set the limit.\n # Verify there is only one consistent value of limit or chunk-bytes used.\n for c in chunks:\n if isinstance(c, str) and c != \"auto\":\n parsed = parse_bytes(c)\n if limit is None:\n limit = parsed\n elif parsed != limit:\n raise ValueError(\n \"Only one consistent value of limit or chunk is allowed.\"\n \"Used %s != %s\" % (parsed, limit)\n )\n # Substitute byte limits with 'auto' now that limit is set.\n chunks = tuple(\"auto\" if isinstance(c, str) and c != \"auto\" else c for c in chunks)\n\n if any(c == \"auto\" for c in chunks):\n chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)\n\n if shape is not None:\n chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))\n\n if chunks and shape is not None:\n chunks = sum(\n (\n blockdims_from_blockshape((s,), (c,))\n if not isinstance(c, (tuple, list))\n else (c,)\n for s, c in zip(shape, chunks)\n ),\n (),\n )\n for c in chunks:\n if not c:\n raise ValueError(\n \"Empty tuples are not allowed in chunks. Express \"\n \"zero length dimensions with 0(s) in chunks\"\n )\n\n if shape is not None:\n if len(chunks) != len(shape):\n raise ValueError(\n \"Input array has %d dimensions but the supplied \"\n \"chunks has only %d dimensions\" % (len(shape), len(chunks))\n )\n if not all(\n c == s or (math.isnan(c) or math.isnan(s))\n for c, s in zip(map(sum, chunks), shape)\n ):\n raise ValueError(\n \"Chunks do not add up to shape. \"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n\n return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)\n\n\ndef _compute_multiplier(limit: int, dtype, largest_block: int, result):\n \"\"\"\n Utility function for auto_chunk, to fin how much larger or smaller the ideal\n chunk size is relative to what we have now.\n \"\"\"\n return (\n limit\n / dtype.itemsize\n / largest_block\n / np.prod(list(r if r != 0 else 1 for r in result.values()))\n )\n\n\ndef auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):\n \"\"\"Determine automatic chunks\n\n This takes in a chunks value that contains ``\"auto\"`` values in certain\n dimensions and replaces those values with concrete dimension sizes that try\n to get chunks to be of a certain size in bytes, provided by the ``limit=``\n keyword. 
If multiple dimensions are marked as ``\"auto\"`` then they will\n all respond to meet the desired byte limit, trying to respect the aspect\n ratio of their dimensions in ``previous_chunks=``, if given.\n\n Parameters\n ----------\n chunks: Tuple\n A tuple of either dimensions or tuples of explicit chunk dimensions\n Some entries should be \"auto\"\n shape: Tuple[int]\n limit: int, str\n The maximum allowable size of a chunk in bytes\n previous_chunks: Tuple[Tuple[int]]\n\n See also\n --------\n normalize_chunks: for full docstring and parameters\n \"\"\"\n if previous_chunks is not None:\n previous_chunks = tuple(\n c if isinstance(c, tuple) else (c,) for c in previous_chunks\n )\n chunks = list(chunks)\n\n autos = {i for i, c in enumerate(chunks) if c == \"auto\"}\n if not autos:\n return tuple(chunks)\n\n if limit is None:\n limit = config.get(\"array.chunk-size\")\n if isinstance(limit, str):\n limit = parse_bytes(limit)\n\n if dtype is None:\n raise TypeError(\"DType must be known for auto-chunking\")\n\n if dtype.hasobject:\n raise NotImplementedError(\n \"Can not use auto rechunking with object dtype. \"\n \"We are unable to estimate the size in bytes of object data\"\n )\n\n for x in tuple(chunks) + tuple(shape):\n if (\n isinstance(x, Number)\n and np.isnan(x)\n or isinstance(x, tuple)\n and np.isnan(x).any()\n ):\n raise ValueError(\n \"Can not perform automatic rechunking with unknown \"\n \"(nan) chunk sizes.%s\" % unknown_chunk_message\n )\n\n limit = max(1, limit)\n\n largest_block = np.prod(\n [cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != \"auto\"]\n )\n\n if previous_chunks:\n # Base ideal ratio on the median chunk size of the previous chunks\n result = {a: np.median(previous_chunks[a]) for a in autos}\n\n ideal_shape = []\n for i, s in enumerate(shape):\n chunk_frequencies = frequencies(previous_chunks[i])\n mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])\n if mode > 1 and count >= len(previous_chunks[i]) / 2:\n ideal_shape.append(mode)\n else:\n ideal_shape.append(s)\n\n # How much larger or smaller the ideal chunk size is relative to what we have now\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n last_multiplier = 0\n last_autos = set()\n while (\n multiplier != last_multiplier or autos != last_autos\n ): # while things change\n last_multiplier = multiplier # record previous values\n last_autos = set(autos) # record previous values\n\n # Expand or contract each of the dimensions appropriately\n for a in sorted(autos):\n if ideal_shape[a] == 0:\n result[a] = 0\n continue\n proposed = result[a] * multiplier ** (1 / len(autos))\n if proposed > shape[a]: # we've hit the shape boundary\n autos.remove(a)\n largest_block *= shape[a]\n chunks[a] = shape[a]\n del result[a]\n else:\n result[a] = round_to(proposed, ideal_shape[a])\n\n # recompute how much multiplier we have left, repeat\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n for k, v in result.items():\n chunks[k] = v\n return tuple(chunks)\n\n else:\n size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))\n small = [i for i in autos if shape[i] < size]\n if small:\n for i in small:\n chunks[i] = (shape[i],)\n return auto_chunks(chunks, shape, limit, dtype)\n\n for i in autos:\n chunks[i] = round_to(size, shape[i])\n\n return tuple(chunks)\n\n\ndef round_to(c, s):\n \"\"\"Return a chunk dimension that is close to an even multiple or factor\n\n We want values for c that are nicely aligned with s.\n\n If c is 
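A hedged sketch of the "auto" path implemented by ``auto_chunks`` above, driven through ``normalize_chunks``; the byte limit is artificially small for illustration and the exact output is my own working through of the code above.

from dask.array.core import normalize_chunks

# 1-byte dtype and a 10-byte limit -> blocks of 10 elements
print(normalize_chunks(('auto',), shape=(100,), limit=10, dtype='uint8'))
# ((10, 10, 10, 10, 10, 10, 10, 10, 10, 10),)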
smaller than s then we want the largest factor of s that is less than the\n desired chunk size, but not less than half, which is too much. If no such\n factor exists then we just go with the original chunk size and accept an\n uneven chunk at the end.\n\n If c is larger than s then we want the largest multiple of s that is still\n smaller than c.\n \"\"\"\n if c <= s:\n try:\n return max(f for f in factors(s) if c / 2 <= f <= c)\n except ValueError: # no matching factors within factor of two\n return max(1, int(c))\n else:\n return c // s * s\n\n\ndef _get_chunk_shape(a):\n s = np.asarray(a.shape, dtype=int)\n return s[len(s) * (None,) + (slice(None),)]\n\n\ndef from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n):\n \"\"\"Create dask array from something that looks like an array\n\n Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.\n\n Parameters\n ----------\n x : array_like\n chunks : int, tuple\n How to chunk the array. Must be one of the following forms:\n\n - A blocksize like 1000.\n - A blockshape like (1000, 1000).\n - Explicit sizes of all blocks along all dimensions like\n ((1000, 1000, 500), (400, 400)).\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n\n -1 or None as a blocksize indicate the size of the corresponding\n dimension.\n name : str, optional\n The key name to use for the array. Defaults to a hash of ``x``.\n By default, hash uses python's standard sha1. This behaviour can be\n changed by installing cityhash, xxhash or murmurhash. If installed,\n a large-factor speedup can be obtained in the tokenisation step.\n Use ``name=False`` to generate a random name instead of hashing (fast)\n\n .. note::\n\n Because this ``name`` is used as the key in task graphs, you should\n ensure that it uniquely identifies the data contained within. If\n you'd like to provide a descriptive name that is still unique, combine\n the descriptive name with :func:`dask.base.tokenize` of the\n ``array_like``. See :ref:`graphs` for more.\n\n lock : bool or Lock, optional\n If ``x`` doesn't support concurrent reads then provide a lock here, or\n pass in True to have dask.array create one for you.\n asarray : bool, optional\n If True then call np.asarray on chunks to convert them to numpy arrays.\n If False then chunks are passed through unchanged.\n If None (default) then we use True if the ``__array_function__`` method\n is undefined.\n fancy : bool, optional\n If ``x`` doesn't support fancy indexing (e.g. indexing with lists or\n arrays) then set to False. Default is True.\n meta : Array-like, optional\n The metadata for the resulting dask array. 
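A hedged sketch of the ``round_to`` helper defined above; the numbers are chosen only to show the factor branch and the multiple branch.

from dask.array.core import round_to

print(round_to(7, 10))    # 5  -- largest factor of 10 within [7/2, 7]
print(round_to(25, 10))   # 20 -- largest multiple of 10 below 25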
This is the kind of array\n that will result from slicing the input array.\n Defaults to the input array.\n\n Examples\n --------\n\n >>> x = h5py.File('...')['/data/path'] # doctest: +SKIP\n >>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP\n\n If your underlying datastore does not support concurrent reads then include\n the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple\n arrays to coordinate around the same lock.\n\n >>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP\n\n If your underlying datastore has a ``.chunks`` attribute (as h5py and zarr\n datasets do) then a multiple of that chunk shape will be used if you\n do not provide a chunk shape.\n\n >>> a = da.from_array(x, chunks='auto') # doctest: +SKIP\n >>> a = da.from_array(x, chunks='100 MiB') # doctest: +SKIP\n >>> a = da.from_array(x) # doctest: +SKIP\n\n If providing a name, ensure that it is unique\n\n >>> import dask.base\n >>> token = dask.base.tokenize(x) # doctest: +SKIP\n >>> a = da.from_array('myarray-' + token) # doctest: +SKIP\n\n Numpy ndarrays are eagerly sliced and then embedded in the graph.\n\n >>> import dask.array\n >>> a = dask.array.from_array(np.array([[1, 2], [3, 4]]), chunks=(1,1))\n >>> a.dask[a.name, 0, 0][0]\n array([1])\n\n \"\"\"\n if isinstance(x, Array):\n raise ValueError(\n \"Array is already a dask array. Use 'asarray' or \" \"'rechunk' instead.\"\n )\n elif is_dask_collection(x):\n warnings.warn(\n \"Passing an object to dask.array.from_array which is already a \"\n \"Dask collection. This can lead to unexpected behavior.\"\n )\n\n if isinstance(x, (list, tuple, memoryview) + np.ScalarType):\n x = np.array(x)\n\n if asarray is None:\n asarray = not hasattr(x, \"__array_function__\")\n\n previous_chunks = getattr(x, \"chunks\", None)\n\n chunks = normalize_chunks(\n chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks\n )\n\n if name in (None, True):\n token = tokenize(x, chunks)\n original_name = \"array-original-\" + token\n name = name or \"array-\" + token\n elif name is False:\n original_name = name = \"array-\" + str(uuid.uuid1())\n else:\n original_name = name\n\n if lock is True:\n lock = SerializableLock()\n\n is_ndarray = type(x) is np.ndarray\n is_single_block = all(len(c) == 1 for c in chunks)\n # Always use the getter for h5py etc. 
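A minimal sketch of ``from_array`` with explicit chunks, plus the descriptive-but-unique ``name`` pattern suggested in the note above; the ``'mydata-'`` prefix is hypothetical.

import numpy as np
import dask.array as da
import dask.base

x = np.arange(12).reshape(3, 4)
a = da.from_array(x, chunks=(2, 2))
print(a.chunks)              # ((2, 1), (2, 2))
b = da.from_array(x, chunks=(2, 2),
                  name='mydata-' + dask.base.tokenize(x))   # hypothetical descriptive name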
Not using isinstance(x, np.ndarray)\n # because np.matrix is a subclass of np.ndarray.\n if is_ndarray and not is_single_block and not lock:\n # eagerly slice numpy arrays to prevent memory blowup\n # GH5367, GH5601\n slices = slices_from_chunks(chunks)\n keys = product([name], *(range(len(bds)) for bds in chunks))\n values = [x[slc] for slc in slices]\n dsk = dict(zip(keys, values))\n\n elif is_ndarray and is_single_block:\n # No slicing needed\n dsk = {(name,) + (0,) * x.ndim: x}\n else:\n if getitem is None:\n if fancy:\n getitem = getter\n else:\n getitem = getter_nofancy\n\n dsk = getem(\n original_name,\n chunks,\n getitem=getitem,\n shape=x.shape,\n out_name=name,\n lock=lock,\n asarray=asarray,\n dtype=x.dtype,\n )\n dsk[original_name] = x\n\n # Workaround for TileDB, its indexing is 1-based,\n # and doesn't seems to support 0-length slicing\n if x.__class__.__module__.split(\".\")[0] == \"tiledb\" and hasattr(x, \"_ctx_\"):\n return Array(dsk, name, chunks, dtype=x.dtype)\n\n if meta is None:\n meta = x\n\n return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, \"dtype\", None))\n\n\ndef from_zarr(\n url, component=None, storage_options=None, chunks=None, name=None, **kwargs\n):\n \"\"\"Load array from the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be loaded, something like ``'foo/bar'``.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n chunks: tuple of ints or tuples of ints\n Passed to ``da.from_array``, allows setting the chunks on\n initialisation, if the chunking scheme in the on-disc dataset is not\n optimal for the calculations to follow.\n name : str, optional\n An optional keyname for the array. Defaults to hashing the input\n kwargs: passed to ``zarr.Array``.\n \"\"\"\n import zarr\n\n storage_options = storage_options or {}\n if isinstance(url, zarr.Array):\n z = url\n elif isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n else:\n mapper = url\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n chunks = chunks if chunks is not None else z.chunks\n if name is None:\n name = \"from-zarr-\" + tokenize(z, component, storage_options, chunks, **kwargs)\n return from_array(z, chunks, name=name)\n\n\ndef to_zarr(\n arr,\n url,\n component=None,\n storage_options=None,\n overwrite=False,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n arr: dask.array\n Data to store\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. 
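A hedged sketch of ``from_zarr`` called with an in-memory ``zarr.Array`` (assuming zarr is installed); note how the dataset's own chunking becomes the dask chunking by default.

import zarr
import dask.array as da

z = zarr.zeros((100, 100), chunks=(10, 10), dtype='f8')   # in-memory zarr array
x = da.from_zarr(z)
print(x.chunks[0])           # (10, 10, 10, 10, 10, 10, 10, 10, 10, 10)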
Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be created/over-written.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n overwrite: bool\n If given array already exists, overwrite=False will cause an error,\n where overwrite=True will replace the existing data. Note that this\n check is done at computation time, not during graph creation.\n compute, return_stored: see ``store()``\n kwargs: passed to the ``zarr.create()`` function, e.g., compression options\n\n Raises\n ------\n ValueError\n If ``arr`` has unknown chunk sizes, which is not supported by Zarr.\n\n See Also\n --------\n dask.array.Array.compute_chunk_sizes\n\n \"\"\"\n import zarr\n\n if np.isnan(arr.shape).any():\n raise ValueError(\n \"Saving a dask array with unknown chunk sizes is not \"\n \"currently supported by Zarr.%s\" % unknown_chunk_message\n )\n\n if isinstance(url, zarr.Array):\n z = url\n if isinstance(z.store, (dict, zarr.DictStore)) and \"distributed\" in config.get(\n \"scheduler\", \"\"\n ):\n raise RuntimeError(\n \"Cannot store into in memory Zarr Array using \"\n \"the Distributed Scheduler.\"\n )\n arr = arr.rechunk(z.chunks)\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n if not _check_regular_chunks(arr.chunks):\n raise ValueError(\n \"Attempt to save array to zarr with irregular \"\n \"chunking, please call `arr.rechunk(...)` first.\"\n )\n\n storage_options = storage_options or {}\n\n if isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n else:\n # assume the object passed is already a mapper\n mapper = url\n\n chunks = [c[0] for c in arr.chunks]\n\n # The zarr.create function has the side-effect of immediately\n # creating metadata on disk. This may not be desired,\n # particularly if compute=False. The caller may be creating many\n # arrays on a slow filesystem, with the desire that any I/O be\n # sharded across workers (not done serially on the originating\n # machine). 
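A round-trip sketch for ``to_zarr``/``from_zarr`` (assuming zarr is installed); ``'example_output.zarr'`` is a hypothetical local path, and the chunks are regular as required above.

import dask.array as da

x = da.ones((100, 100), chunks=(10, 10))
da.to_zarr(x, 'example_output.zarr', overwrite=True)   # hypothetical path
y = da.from_zarr('example_output.zarr')
print(y.shape, y.chunks == x.chunks)                   # (100, 100) True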
Or the caller may decide later to not to do this\n # computation, and so nothing should be written to disk.\n z = delayed(zarr.create)(\n shape=arr.shape,\n chunks=chunks,\n dtype=arr.dtype,\n store=mapper,\n path=component,\n overwrite=overwrite,\n **kwargs,\n )\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n\ndef _check_regular_chunks(chunkset):\n \"\"\"Check if the chunks are regular\n\n \"Regular\" in this context means that along every axis, the chunks all\n have the same size, except the last one, which may be smaller\n\n Parameters\n ----------\n chunkset: tuple of tuples of ints\n From the ``.chunks`` attribute of an ``Array``\n\n Returns\n -------\n True if chunkset passes, else False\n\n Examples\n --------\n >>> import dask.array as da\n >>> arr = da.zeros(10, chunks=(5, ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))\n >>> _check_regular_chunks(arr.chunks)\n False\n \"\"\"\n for chunks in chunkset:\n if len(chunks) == 1:\n continue\n if len(set(chunks[:-1])) > 1:\n return False\n if chunks[-1] > chunks[0]:\n return False\n return True\n\n\ndef from_delayed(value, shape, dtype=None, meta=None, name=None):\n \"\"\"Create a dask array from a dask delayed value\n\n This routine is useful for constructing dask arrays in an ad-hoc fashion\n using dask delayed, particularly when combined with stack and concatenate.\n\n The dask array will consist of a single chunk.\n\n Examples\n --------\n >>> import dask\n >>> import dask.array as da\n >>> value = dask.delayed(np.ones)(5)\n >>> array = da.from_delayed(value, (5,), dtype=float)\n >>> array\n dask.array<from-value, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>\n >>> array.compute()\n array([1., 1., 1., 1., 1.])\n \"\"\"\n from ..delayed import delayed, Delayed\n\n if not isinstance(value, Delayed) and hasattr(value, \"key\"):\n value = delayed(value)\n\n name = name or \"from-value-\" + tokenize(value, shape, dtype, meta)\n dsk = {(name,) + (0,) * len(shape): value.key}\n chunks = tuple((d,) for d in shape)\n # TODO: value._key may not be the name of the layer in value.dask\n # This should be fixed after we build full expression graphs\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[value])\n return Array(graph, name, chunks, dtype=dtype, meta=meta)\n\n\ndef from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):\n \"\"\"Create dask array in a single block by calling a function\n\n Calling the provided function with func(*args, **kwargs) should return a\n NumPy array of the indicated shape and dtype.\n\n Examples\n --------\n\n >>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))\n >>> a.compute()\n array([0, 1, 2])\n\n This works particularly well when coupled with dask.array functions like\n concatenate and stack:\n\n >>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]\n >>> stack(arrays).compute()\n array([0, 1, 2, 3, 4])\n \"\"\"\n name = name or \"from_func-\" + tokenize(func, shape, dtype, args, kwargs)\n if args or kwargs:\n func = partial(func, *args, **kwargs)\n dsk = {(name,) + (0,) * len(shape): (func,)}\n chunks = tuple((i,) for i in shape)\n return Array(dsk, name, chunks, dtype)\n\n\ndef common_blockdim(blockdims):\n \"\"\"Find the common block dimensions from the list of block dimensions\n\n Currently only implements the simplest possible heuristic: 
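A small sketch of ``from_delayed`` combined with ``stack``, as the docstring above suggests; each delayed call is illustrative.

import dask
import numpy as np
import dask.array as da

chunks = [dask.delayed(np.ones)(5) for _ in range(3)]
arrays = [da.from_delayed(c, (5,), dtype=float) for c in chunks]
stacked = da.stack(arrays)   # one block per delayed value
print(stacked.shape)         # (3, 5)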
the common\n block-dimension is the only one that does not span fully span a dimension.\n This is a conservative choice that allows us to avoid potentially very\n expensive rechunking.\n\n Assumes that each element of the input block dimensions has all the same\n sum (i.e., that they correspond to dimensions of the same size).\n\n Examples\n --------\n >>> common_blockdim([(3,), (2, 1)])\n (2, 1)\n >>> common_blockdim([(1, 2), (2, 1)])\n (1, 1, 1)\n >>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align\n \"\"\"\n if not any(blockdims):\n return ()\n non_trivial_dims = set([d for d in blockdims if len(d) > 1])\n if len(non_trivial_dims) == 1:\n return first(non_trivial_dims)\n if len(non_trivial_dims) == 0:\n return max(blockdims, key=first)\n\n if np.isnan(sum(map(sum, blockdims))):\n raise ValueError(\n \"Arrays chunk sizes (%s) are unknown.\\n\\n\"\n \"A possible solution:\\n\"\n \" x.compute_chunk_sizes()\" % blockdims\n )\n\n if len(set(map(sum, non_trivial_dims))) > 1:\n raise ValueError(\"Chunks do not add up to same value\", blockdims)\n\n # We have multiple non-trivial chunks on this axis\n # e.g. (5, 2) and (4, 3)\n\n # We create a single chunk tuple with the same total length\n # that evenly divides both, e.g. (4, 1, 2)\n\n # To accomplish this we walk down all chunk tuples together, finding the\n # smallest element, adding it to the output, and subtracting it from all\n # other elements and remove the element itself. We stop once we have\n # burned through all of the chunk tuples.\n # For efficiency's sake we reverse the lists so that we can pop off the end\n rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]\n total = sum(first(non_trivial_dims))\n i = 0\n\n out = []\n while i < total:\n m = min(c[-1] for c in rchunks)\n out.append(m)\n for c in rchunks:\n c[-1] -= m\n if c[-1] == 0:\n c.pop()\n i += m\n\n return tuple(out)\n\n\ndef unify_chunks(*args, **kwargs):\n \"\"\"\n Unify chunks across a sequence of arrays\n\n This utility function is used within other common operations like\n ``map_blocks`` and ``blockwise``. 
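A sketch of ``common_blockdim`` on the (5, 2)/(4, 3) case mentioned in the comments above; the result is the set of segments between the union of both sets of chunk boundaries.

from dask.array.core import common_blockdim

print(common_blockdim([(5, 2), (4, 3)]))   # (4, 1, 2)
print(common_blockdim([(1, 2), (2, 1)]))   # (1, 1, 1)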
It is not commonly used by end-users\n directly.\n\n Parameters\n ----------\n *args: sequence of Array, index pairs\n Sequence like (x, 'ij', y, 'jk', z, 'i')\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.ones(10, chunks=((5, 2, 3),))\n >>> y = da.ones(10, chunks=((2, 3, 5),))\n >>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')\n >>> chunkss\n {'i': (2, 3, 2, 3)}\n\n >>> x = da.ones((100, 10), chunks=(20, 5))\n >>> y = da.ones((10, 100), chunks=(4, 50))\n >>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)\n >>> chunkss # doctest: +SKIP\n {'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}\n\n >>> unify_chunks(0, None)\n ({}, [0])\n\n Returns\n -------\n chunkss : dict\n Map like {index: chunks}.\n arrays : list\n List of rechunked arrays.\n\n See Also\n --------\n common_blockdim\n \"\"\"\n if not args:\n return {}, []\n\n arginds = [\n (asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)\n ] # [x, ij, y, jk]\n args = list(concat(arginds)) # [(x, ij), (y, jk)]\n warn = kwargs.get(\"warn\", True)\n\n arrays, inds = zip(*arginds)\n if all(ind is None for ind in inds):\n return {}, list(arrays)\n if all(ind == inds[0] for ind in inds) and all(\n a.chunks == arrays[0].chunks for a in arrays\n ):\n return dict(zip(inds[0], arrays[0].chunks)), arrays\n\n nameinds = []\n blockdim_dict = dict()\n max_parts = 0\n for a, ind in arginds:\n if ind is not None:\n nameinds.append((a.name, ind))\n blockdim_dict[a.name] = a.chunks\n max_parts = max(max_parts, a.npartitions)\n else:\n nameinds.append((a, ind))\n\n chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)\n nparts = np.prod(list(map(len, chunkss.values())))\n\n if warn and nparts and nparts >= max_parts * 10:\n warnings.warn(\n \"Increasing number of chunks by factor of %d\" % (nparts / max_parts),\n PerformanceWarning,\n stacklevel=3,\n )\n\n arrays = []\n for a, i in arginds:\n if i is None:\n arrays.append(a)\n else:\n chunks = tuple(\n chunkss[j]\n if a.shape[n] > 1\n else a.shape[n]\n if not np.isnan(sum(chunkss[j]))\n else None\n for n, j in enumerate(i)\n )\n if chunks != a.chunks and all(a.chunks):\n arrays.append(a.rechunk(chunks))\n else:\n arrays.append(a)\n return chunkss, arrays\n\n\ndef unpack_singleton(x):\n \"\"\"\n\n >>> unpack_singleton([[[[1]]]])\n 1\n >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))\n array('2000-01-01', dtype='datetime64[D]')\n \"\"\"\n while isinstance(x, (list, tuple)):\n try:\n x = x[0]\n except (IndexError, TypeError, KeyError):\n break\n return x\n\n\ndef block(arrays, allow_unknown_chunksizes=False):\n \"\"\"\n Assemble an nd-array from nested lists of blocks.\n\n Blocks in the innermost lists are concatenated along the last\n dimension (-1), then these are concatenated along the second-last\n dimension (-2), and so on until the outermost list is reached\n\n Blocks can be of any dimension, but will not be broadcasted using the normal\n rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``\n the same for all blocks. 
This is primarily useful for working with scalars,\n and means that code like ``block([v, 1])`` is valid, where\n ``v.ndim == 1``.\n\n When the nested list is two levels deep, this allows block matrices to be\n constructed from their components.\n\n Parameters\n ----------\n arrays : nested list of array_like or scalars (but not tuples)\n If passed a single ndarray or scalar (a nested list of depth 0), this\n is returned unmodified (and not copied).\n\n Elements shapes must match along the appropriate axes (without\n broadcasting), but leading 1s will be prepended to the shape as\n necessary to make the dimensions match.\n\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Returns\n -------\n block_array : ndarray\n The array assembled from the given blocks.\n\n The dimensionality of the output is equal to the greatest of:\n * the dimensionality of all the inputs\n * the depth to which the input list is nested\n\n Raises\n ------\n ValueError\n * If list depths are mismatched - for instance, ``[[a, b], c]`` is\n illegal, and should be spelt ``[[a, b], [c]]``\n * If lists are empty - for instance, ``[[a, b], []]``\n\n See Also\n --------\n concatenate : Join a sequence of arrays together.\n stack : Stack arrays in sequence along a new dimension.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n vsplit : Split array into a list of multiple sub-arrays vertically.\n\n Notes\n -----\n\n When called with only scalars, ``block`` is equivalent to an ndarray\n call. So ``block([[1, 2], [3, 4]])`` is equivalent to\n ``array([[1, 2], [3, 4]])``.\n\n This function does not enforce that the blocks lie on a fixed grid.\n ``block([[a, b], [c, d]])`` is not restricted to arrays of the form::\n\n AAAbb\n AAAbb\n cccDD\n\n But is also allowed to produce, for some ``a, b, c, d``::\n\n AAAbb\n AAAbb\n cDDDD\n\n Since concatenation happens along the last axis first, `block` is _not_\n capable of producing the following directly::\n\n AAAbb\n cccbb\n cccDD\n\n Matlab's \"square bracket stacking\", ``[A, B, ...; p, q, ...]``, is\n equivalent to ``block([[A, B, ...], [p, q, ...]])``.\n \"\"\"\n\n # This was copied almost verbatim from numpy.core.shape_base.block\n # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n\n def atleast_nd(x, ndim):\n x = asanyarray(x)\n diff = max(ndim - x.ndim, 0)\n if diff == 0:\n return x\n else:\n return x[(None,) * diff + (Ellipsis,)]\n\n def format_index(index):\n return \"arrays\" + \"\".join(\"[{}]\".format(i) for i in index)\n\n rec = _Recurser(recurse_if=lambda x: type(x) is list)\n\n # ensure that the lists are all matched in depth\n list_ndim = None\n any_empty = False\n for index, value, entering in rec.walk(arrays):\n if type(value) is tuple:\n # not strictly necessary, but saves us from:\n # - more than one way to do things - no point treating tuples like\n # lists\n # - horribly confusing behaviour that results when tuples are\n # treated like ndarray\n raise TypeError(\n \"{} is a tuple. 
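A hedged block-matrix sketch for ``block`` as described above; the shapes are arbitrary but aligned along the appropriate axes.

import dask.array as da

A = da.ones((2, 2), chunks=2)
B = da.zeros((2, 3), chunks=3)
C = da.zeros((3, 2), chunks=3)
D = da.ones((3, 3), chunks=3)
print(da.block([[A, B], [C, D]]).shape)   # (5, 5)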
\"\n \"Only lists can be used to arrange blocks, and np.block does \"\n \"not allow implicit conversion from tuple to ndarray.\".format(\n format_index(index)\n )\n )\n if not entering:\n curr_depth = len(index)\n elif len(value) == 0:\n curr_depth = len(index) + 1\n any_empty = True\n else:\n continue\n\n if list_ndim is not None and list_ndim != curr_depth:\n raise ValueError(\n \"List depths are mismatched. First element was at depth {}, \"\n \"but there is an element at depth {} ({})\".format(\n list_ndim, curr_depth, format_index(index)\n )\n )\n list_ndim = curr_depth\n\n # do this here so we catch depth mismatches first\n if any_empty:\n raise ValueError(\"Lists cannot be empty\")\n\n # convert all the arrays to ndarrays\n arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)\n\n # determine the maximum dimension of the elements\n elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)\n ndim = max(list_ndim, elem_ndim)\n\n # first axis to concatenate along\n first_axis = ndim - list_ndim\n\n # Make all the elements the same dimension\n arrays = rec.map_reduce(\n arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list\n )\n\n # concatenate innermost lists on the right, outermost on the left\n return rec.map_reduce(\n arrays,\n f_reduce=lambda xs, axis: concatenate(\n list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes\n ),\n f_kwargs=lambda axis: dict(axis=(axis + 1)),\n axis=first_axis,\n )\n\n\ndef concatenate(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Concatenate arrays along an existing axis\n\n Given a sequence of dask Arrays form a new dask Array by stacking them\n along an existing dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.concatenate(data, axis=0)\n >>> x.shape\n (12, 4)\n\n >>> da.concatenate(data, axis=1).shape\n (4, 12)\n\n Result is a new dask Array\n\n See Also\n --------\n stack\n \"\"\"\n from . 
import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to concatenate\")\n\n seq_metas = [meta_from_array(s) for s in seq]\n _concatenate = concatenate_lookup.dispatch(\n type(max(seq_metas, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n meta = _concatenate(seq_metas, axis=axis)\n\n # Promote types to match meta\n seq = [a.astype(meta.dtype) for a in seq]\n\n # Find output array shape\n ndim = len(seq[0].shape)\n shape = tuple(\n sum((a.shape[i] for a in seq)) if i == axis else seq[0].shape[i]\n for i in range(ndim)\n )\n\n # Drop empty arrays\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n if axis < 0:\n axis = ndim + axis\n if axis >= ndim:\n msg = (\n \"Axis must be less than than number of dimensions\"\n \"\\nData has %d dimensions, but got axis=%d\"\n )\n raise ValueError(msg % (ndim, axis))\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n elif n == 1:\n return seq2[0]\n\n if not allow_unknown_chunksizes and not all(\n i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)\n for i in range(ndim)\n ):\n if any(map(np.isnan, seq2[0].shape)):\n raise ValueError(\n \"Tried to concatenate arrays with unknown\"\n \" shape %s.\\n\\nTwo solutions:\\n\"\n \" 1. Force concatenation pass\"\n \" allow_unknown_chunksizes=True.\\n\"\n \" 2. Compute shapes with \"\n \"[x.compute_chunk_sizes() for x in seq]\" % str(seq2[0].shape)\n )\n raise ValueError(\"Shapes do not align: %s\", [x.shape for x in seq2])\n\n inds = [list(range(ndim)) for i in range(n)]\n for i, ind in enumerate(inds):\n ind[axis] = -(i + 1)\n\n uc_args = list(concat(zip(seq2, inds)))\n _, seq2 = unify_chunks(*uc_args, warn=False)\n\n bds = [a.chunks for a in seq2]\n\n chunks = (\n seq2[0].chunks[:axis]\n + (sum([bd[axis] for bd in bds], ()),)\n + seq2[0].chunks[axis + 1 :]\n )\n\n cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))\n\n names = [a.name for a in seq2]\n\n name = \"concatenate-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n values = [\n (names[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[1 : axis + 1]\n + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[axis + 2 :]\n for key in keys\n ]\n\n dsk = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef load_store_chunk(x, out, index, lock, return_stored, load_stored):\n \"\"\"\n A function inserted in a Dask graph for storing a chunk.\n\n Parameters\n ----------\n x: array-like\n An array (potentially a NumPy one)\n out: array-like\n Where to store results too.\n index: slice-like\n Where to store result from ``x`` in ``out``.\n lock: Lock-like or False\n Lock to use before writing to ``out``.\n return_stored: bool\n Whether to return ``out``.\n load_stored: bool\n Whether to return the array stored in ``out``.\n Ignored if ``return_stored`` is not ``True``.\n\n Examples\n --------\n\n >>> a = np.ones((5, 6))\n >>> b = np.empty(a.shape)\n >>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)\n \"\"\"\n\n result = None\n if return_stored and not load_stored:\n result = out\n\n if lock:\n lock.acquire()\n try:\n if x is not None:\n out[index] = np.asanyarray(x)\n if return_stored and load_stored:\n result = out[index]\n 
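A small sketch of the dtype promotion performed by ``concatenate`` above ("Promote types to match meta"); the inputs are illustrative.

import dask.array as da

x = da.ones(4, chunks=2, dtype='int32')
y = da.ones(4, chunks=2, dtype='float64')
z = da.concatenate([x, y])
print(z.dtype)     # float64 -- both inputs are cast to the common meta dtype
print(z.chunks)    # ((2, 2, 2, 2),)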
finally:\n if lock:\n lock.release()\n\n return result\n\n\ndef store_chunk(x, out, index, lock, return_stored):\n return load_store_chunk(x, out, index, lock, return_stored, False)\n\n\ndef load_chunk(out, index, lock):\n return load_store_chunk(None, out, index, lock, True, True)\n\n\ndef insert_to_ooc(\n arr, out, lock=True, region=None, return_stored=False, load_stored=False, tok=None\n):\n \"\"\"\n Creates a Dask graph for storing chunks from ``arr`` in ``out``.\n\n Parameters\n ----------\n arr: da.Array\n A dask array\n out: array-like\n Where to store results too.\n lock: Lock-like or bool, optional\n Whether to lock or with what (default is ``True``,\n which means a ``threading.Lock`` instance).\n region: slice-like, optional\n Where in ``out`` to store ``arr``'s results\n (default is ``None``, meaning all of ``out``).\n return_stored: bool, optional\n Whether to return ``out``\n (default is ``False``, meaning ``None`` is returned).\n load_stored: bool, optional\n Whether to handling loading from ``out`` at the same time.\n Ignored if ``return_stored`` is not ``True``.\n (default is ``False``, meaning defer to ``return_stored``).\n tok: str, optional\n Token to use when naming keys\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> insert_to_ooc(d, a) # doctest: +SKIP\n \"\"\"\n\n if lock is True:\n lock = Lock()\n\n slices = slices_from_chunks(arr.chunks)\n if region:\n slices = [fuse_slice(region, slc) for slc in slices]\n\n name = \"store-%s\" % (tok or str(uuid.uuid1()))\n func = store_chunk\n args = ()\n if return_stored and load_stored:\n name = \"load-%s\" % name\n func = load_store_chunk\n args = args + (load_stored,)\n\n dsk = {\n (name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args\n for t, slc in zip(core.flatten(arr.__dask_keys__()), slices)\n }\n\n return dsk\n\n\ndef retrieve_from_ooc(keys, dsk_pre, dsk_post=None):\n \"\"\"\n Creates a Dask graph for loading stored ``keys`` from ``dsk``.\n\n Parameters\n ----------\n keys: Sequence\n A sequence containing Dask graph keys to load\n dsk_pre: Mapping\n A Dask graph corresponding to a Dask Array before computation\n dsk_post: Mapping, optional\n A Dask graph corresponding to a Dask Array after computation\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> g = insert_to_ooc(d, a)\n >>> retrieve_from_ooc(g.keys(), g) # doctest: +SKIP\n \"\"\"\n\n if not dsk_post:\n dsk_post = {k: k for k in keys}\n\n load_dsk = {\n (\"load-\" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]\n for k in keys\n }\n\n return load_dsk\n\n\ndef asarray(a, **kwargs):\n \"\"\"Convert the input to a dask array.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asarray(a.data)\n elif isinstance(a, 
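A hedged sketch of storing into an in-memory target via ``da.store``, the public entry point that builds on the ``insert_to_ooc`` helper documented above; the shapes are illustrative.

import numpy as np
import dask.array as da

d = da.ones((5, 6), chunks=(2, 3))
out = np.empty(d.shape)
da.store(d, out)     # writes each chunk of d into the numpy target in place
print(out[0, 0])     # 1.0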
(list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asarray(a)\n return from_array(a, getitem=getter_inline, **kwargs)\n\n\ndef asanyarray(a):\n \"\"\"Convert the input to a dask array.\n\n Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asanyarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asanyarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asanyarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asanyarray(a)\n return from_array(a, chunks=a.shape, getitem=getter_inline, asarray=False)\n\n\ndef is_scalar_for_elemwise(arg):\n \"\"\"\n\n >>> is_scalar_for_elemwise(42)\n True\n >>> is_scalar_for_elemwise('foo')\n True\n >>> is_scalar_for_elemwise(True)\n True\n >>> is_scalar_for_elemwise(np.array(42))\n True\n >>> is_scalar_for_elemwise([1, 2, 3])\n True\n >>> is_scalar_for_elemwise(np.array([1, 2, 3]))\n False\n >>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))\n False\n >>> is_scalar_for_elemwise(np.dtype('i4'))\n True\n \"\"\"\n # the second half of shape_condition is essentially just to ensure that\n # dask series / frame are treated as scalars in elemwise.\n maybe_shape = getattr(arg, \"shape\", None)\n shape_condition = not isinstance(maybe_shape, Iterable) or any(\n is_dask_collection(x) for x in maybe_shape\n )\n\n return (\n np.isscalar(arg)\n or shape_condition\n or isinstance(arg, np.dtype)\n or (isinstance(arg, np.ndarray) and arg.ndim == 0)\n )\n\n\ndef broadcast_shapes(*shapes):\n \"\"\"\n Determines output shape from broadcasting arrays.\n\n Parameters\n ----------\n shapes : tuples\n The shapes of the arguments.\n\n Returns\n -------\n output_shape : tuple\n\n Raises\n ------\n ValueError\n If the input shapes cannot be successfully broadcast together.\n \"\"\"\n if len(shapes) == 1:\n return shapes[0]\n out = []\n for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):\n if np.isnan(sizes).any():\n dim = np.nan\n else:\n dim = 0 if 0 in sizes else np.max(sizes)\n if any(i not in [-1, 0, 1, dim] and not np.isnan(i) for i in sizes):\n raise ValueError(\n \"operands could not be broadcast together with \"\n \"shapes {0}\".format(\" \".join(map(str, shapes)))\n )\n out.append(dim)\n return tuple(reversed(out))\n\n\ndef elemwise(op, *args, **kwargs):\n \"\"\"Apply elementwise function across arguments\n\n Respects broadcasting rules\n\n Examples\n --------\n >>> elemwise(add, x, y) # doctest: +SKIP\n >>> elemwise(sin, x) # doctest: +SKIP\n\n See Also\n --------\n blockwise\n \"\"\"\n out = kwargs.pop(\"out\", None)\n if not set([\"name\", \"dtype\"]).issuperset(kwargs):\n msg = \"%s does not take the following keyword arguments %s\"\n raise TypeError(\n msg % (op.__name__, 
str(sorted(set(kwargs) - set([\"name\", \"dtype\"]))))\n )\n\n args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]\n\n shapes = []\n for arg in args:\n shape = getattr(arg, \"shape\", ())\n if any(is_dask_collection(x) for x in shape):\n # Want to exclude Delayed shapes and dd.Scalar\n shape = ()\n shapes.append(shape)\n\n shapes = [s if isinstance(s, Iterable) else () for s in shapes]\n out_ndim = len(\n broadcast_shapes(*shapes)\n ) # Raises ValueError if dimensions mismatch\n expr_inds = tuple(range(out_ndim))[::-1]\n\n need_enforce_dtype = False\n if \"dtype\" in kwargs:\n dt = kwargs[\"dtype\"]\n else:\n # We follow NumPy's rules for dtype promotion, which special cases\n # scalars and 0d ndarrays (which it considers equivalent) by using\n # their values to compute the result dtype:\n # https://github.com/numpy/numpy/issues/6240\n # We don't inspect the values of 0d dask arrays, because these could\n # hold potentially very expensive calculations. Instead, we treat\n # them just like other arrays, and if necessary cast the result of op\n # to match.\n vals = [\n np.empty((1,) * max(1, a.ndim), dtype=a.dtype)\n if not is_scalar_for_elemwise(a)\n else a\n for a in args\n ]\n try:\n dt = apply_infer_dtype(op, vals, {}, \"elemwise\", suggest_dtype=False)\n except Exception:\n return NotImplemented\n need_enforce_dtype = any(\n not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args\n )\n\n name = kwargs.get(\"name\", None) or \"%s-%s\" % (funcname(op), tokenize(op, dt, *args))\n\n blockwise_kwargs = dict(dtype=dt, name=name, token=funcname(op).strip(\"_\"))\n if need_enforce_dtype:\n blockwise_kwargs[\"enforce_dtype\"] = dt\n blockwise_kwargs[\"enforce_dtype_function\"] = op\n op = _enforce_dtype\n result = blockwise(\n op,\n expr_inds,\n *concat(\n (a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)\n for a in args\n ),\n **blockwise_kwargs,\n )\n\n return handle_out(out, result)\n\n\ndef handle_out(out, result):\n \"\"\"Handle out parameters\n\n If out is a dask.array then this overwrites the contents of that array with\n the result\n \"\"\"\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n if isinstance(out, Array):\n if out.shape != result.shape:\n raise ValueError(\n \"Mismatched shapes between result and out parameter. 
\"\n \"out=%s, result=%s\" % (str(out.shape), str(result.shape))\n )\n out._chunks = result.chunks\n out.dask = result.dask\n out._meta = result._meta\n out.name = result.name\n elif out is not None:\n msg = (\n \"The out parameter is not fully supported.\"\n \" Received type %s, expected Dask Array\" % type(out).__name__\n )\n raise NotImplementedError(msg)\n else:\n return result\n\n\ndef _enforce_dtype(*args, **kwargs):\n \"\"\"Calls a function and converts its result to the given dtype.\n\n The parameters have deliberately been given unwieldy names to avoid\n clashes with keyword arguments consumed by blockwise\n\n A dtype of `object` is treated as a special case and not enforced,\n because it is used as a dummy value in some places when the result will\n not be a block in an Array.\n\n Parameters\n ----------\n enforce_dtype : dtype\n Result dtype\n enforce_dtype_function : callable\n The wrapped function, which will be passed the remaining arguments\n \"\"\"\n dtype = kwargs.pop(\"enforce_dtype\")\n function = kwargs.pop(\"enforce_dtype_function\")\n\n result = function(*args, **kwargs)\n if hasattr(result, \"dtype\") and dtype != result.dtype and dtype != object:\n if not np.can_cast(result, dtype, casting=\"same_kind\"):\n raise ValueError(\n \"Inferred dtype from function %r was %r \"\n \"but got %r, which can't be cast using \"\n \"casting='same_kind'\"\n % (funcname(function), str(dtype), str(result.dtype))\n )\n if np.isscalar(result):\n # scalar astype method doesn't take the keyword arguments, so\n # have to convert via 0-dimensional array and back.\n result = result.astype(dtype)\n else:\n try:\n result = result.astype(dtype, copy=False)\n except TypeError:\n # Missing copy kwarg\n result = result.astype(dtype)\n return result\n\n\ndef broadcast_to(x, shape, chunks=None):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n x : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n chunks : tuple, optional\n If provided, then the result will use these chunks instead of the same\n chunks as the source array. Setting chunks explicitly as part of\n broadcast_to is more efficient than rechunking afterwards. 
Chunks are\n only allowed to differ from the original shape along dimensions that\n are new on the result or have size 1 the input array.\n\n Returns\n -------\n broadcast : dask array\n\n See Also\n --------\n :func:`numpy.broadcast_to`\n \"\"\"\n x = asarray(x)\n shape = tuple(shape)\n\n if x.shape == shape and (chunks is None or chunks == x.chunks):\n return x\n\n ndim_new = len(shape) - x.ndim\n if ndim_new < 0 or any(\n new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1\n ):\n raise ValueError(\"cannot broadcast shape %s to shape %s\" % (x.shape, shape))\n\n if chunks is None:\n chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(\n bd if old > 1 else (new,)\n for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])\n )\n else:\n chunks = normalize_chunks(\n chunks, shape, dtype=x.dtype, previous_chunks=x.chunks\n )\n for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):\n if old_bd != new_bd and old_bd != (1,):\n raise ValueError(\n \"cannot broadcast chunks %s to chunks %s: \"\n \"new chunks must either be along a new \"\n \"dimension or a dimension of size 1\" % (x.chunks, chunks)\n )\n\n name = \"broadcast_to-\" + tokenize(x, shape, chunks)\n dsk = {}\n\n enumerated_chunks = product(*(enumerate(bds) for bds in chunks))\n for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):\n old_index = tuple(\n 0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])\n )\n old_key = (x.name,) + old_index\n new_key = (name,) + new_index\n dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, dtype=x.dtype)\n\n\n@derived_from(np)\ndef broadcast_arrays(*args, **kwargs):\n subok = bool(kwargs.pop(\"subok\", False))\n\n to_array = asanyarray if subok else asarray\n args = tuple(to_array(e) for e in args)\n\n if kwargs:\n raise TypeError(\"unsupported keyword argument(s) provided\")\n\n # Unify uneven chunking\n inds = [list(reversed(range(x.ndim))) for x in args]\n uc_args = concat(zip(args, inds))\n _, args = unify_chunks(*uc_args, warn=False)\n\n shape = broadcast_shapes(*(e.shape for e in args))\n chunks = broadcast_chunks(*(e.chunks for e in args))\n\n result = [broadcast_to(e, shape=shape, chunks=chunks) for e in args]\n\n return result\n\n\ndef offset_func(func, offset, *args):\n \"\"\"Offsets inputs by offset\n\n >>> double = lambda x: x * 2\n >>> f = offset_func(double, (10,))\n >>> f(1)\n 22\n >>> f(300)\n 620\n \"\"\"\n\n def _offset(*args):\n args2 = list(map(add, args, offset))\n return func(*args2)\n\n with ignoring(Exception):\n _offset.__name__ = \"offset_\" + func.__name__\n\n return _offset\n\n\ndef chunks_from_arrays(arrays):\n \"\"\"Chunks tuple from nested list of arrays\n\n >>> x = np.array([1, 2])\n >>> chunks_from_arrays([x, x])\n ((2, 2),)\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x], [x]])\n ((1, 1), (2,))\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x, x]])\n ((1,), (2, 2))\n\n >>> chunks_from_arrays([1, 1])\n ((1, 1),)\n \"\"\"\n if not arrays:\n return ()\n result = []\n dim = 0\n\n def shape(x):\n try:\n return x.shape\n except AttributeError:\n return (1,)\n\n while isinstance(arrays, (list, tuple)):\n result.append(tuple([shape(deepfirst(a))[dim] for a in arrays]))\n arrays = arrays[0]\n dim += 1\n return tuple(result)\n\n\ndef deepfirst(seq):\n \"\"\"First element in a nested list\n\n >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])\n 1\n \"\"\"\n if not 
isinstance(seq, (list, tuple)):\n return seq\n else:\n return deepfirst(seq[0])\n\n\ndef shapelist(a):\n \"\"\" Get the shape of nested list \"\"\"\n if type(a) is list:\n return tuple([len(a)] + list(shapelist(a[0])))\n else:\n return ()\n\n\ndef reshapelist(shape, seq):\n \"\"\"Reshape iterator to nested shape\n\n >>> reshapelist((2, 3), range(6))\n [[0, 1, 2], [3, 4, 5]]\n \"\"\"\n if len(shape) == 1:\n return list(seq)\n else:\n n = int(len(seq) / shape[0])\n return [reshapelist(shape[1:], part) for part in partition(n, seq)]\n\n\ndef transposelist(arrays, axes, extradims=0):\n \"\"\"Permute axes of nested list\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1])\n [[[1, 1], [1, 1], [1, 1]]]\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)\n [[[[1], [1]], [[1], [1]], [[1], [1]]]]\n \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth of nested arrays\")\n if extradims < 0:\n raise ValueError(\"`newdims` should be positive\")\n if len(axes) > len(set(axes)):\n raise ValueError(\"`axes` should be unique\")\n\n ndim = max(axes) + 1\n shape = shapelist(arrays)\n newshape = [\n shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)\n ]\n\n result = list(core.flatten(arrays))\n return reshapelist(newshape, result)\n\n\ndef stack(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Stack arrays along a new axis\n\n Given a sequence of dask arrays, form a new dask array by stacking them\n along a new dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.stack(data, axis=0)\n >>> x.shape\n (3, 4, 4)\n\n >>> da.stack(data, axis=1).shape\n (4, 3, 4)\n\n >>> da.stack(data, axis=-1).shape\n (4, 4, 3)\n\n Result is a new dask Array\n\n See Also\n --------\n concatenate\n \"\"\"\n from . import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to stack\")\n if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):\n idx = first(i for i in enumerate(seq) if i[1].shape != seq[0].shape)\n raise ValueError(\n \"Stacked arrays must have the same shape. 
\"\n \"The first array had shape {0}, while array \"\n \"{1} has shape {2}.\".format(seq[0].shape, idx[0] + 1, idx[1].shape)\n )\n\n meta = np.stack([meta_from_array(a) for a in seq], axis=axis)\n seq = [x.astype(meta.dtype) for x in seq]\n\n ndim = meta.ndim - 1\n if axis < 0:\n axis = ndim + axis + 1\n shape = tuple(\n len(seq)\n if i == axis\n else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])\n for i in range(meta.ndim)\n )\n\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n\n ind = list(range(ndim))\n uc_args = list(concat((x, ind) for x in seq2))\n _, seq2 = unify_chunks(*uc_args)\n\n assert len(set(a.chunks for a in seq2)) == 1 # same chunks\n chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]\n\n names = [a.name for a in seq2]\n name = \"stack-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n inputs = [\n (names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys\n ]\n values = [\n (\n getitem,\n inp,\n (slice(None, None, None),) * axis\n + (None,)\n + (slice(None, None, None),) * (ndim - axis),\n )\n for inp in inputs\n ]\n\n layer = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef concatenate3(arrays):\n \"\"\"Recursive np.concatenate\n\n Input should be a nested list of numpy arrays arranged in the order they\n should appear in the array itself. Each array should have the same number\n of dimensions as the desired output and the nesting of the lists.\n\n >>> x = np.array([[1, 2]])\n >>> concatenate3([[x, x, x], [x, x, x]])\n array([[1, 2, 1, 2, 1, 2],\n [1, 2, 1, 2, 1, 2]])\n\n >>> concatenate3([[x, x], [x, x], [x, x]])\n array([[1, 2, 1, 2],\n [1, 2, 1, 2],\n [1, 2, 1, 2]])\n \"\"\"\n from .utils import IS_NEP18_ACTIVE\n\n # We need this as __array_function__ may not exist on older NumPy versions.\n # And to reduce verbosity.\n NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, \"__array_function__\", None)\n\n arrays = concrete(arrays)\n if not arrays:\n return np.empty(0)\n\n advanced = max(\n core.flatten(arrays, container=(list, tuple)),\n key=lambda x: getattr(x, \"__array_priority__\", 0),\n )\n\n if IS_NEP18_ACTIVE and not all(\n NDARRAY_ARRAY_FUNCTION\n is getattr(arr, \"__array_function__\", NDARRAY_ARRAY_FUNCTION)\n for arr in arrays\n ):\n try:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=tuple(range(x.ndim)))\n except TypeError:\n pass\n\n if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=list(range(x.ndim)))\n\n ndim = ndimlist(arrays)\n if not ndim:\n return arrays\n chunks = chunks_from_arrays(arrays)\n shape = tuple(map(sum, chunks))\n\n def dtype(x):\n try:\n return x.dtype\n except AttributeError:\n return type(x)\n\n result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))\n\n for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):\n if hasattr(arr, \"ndim\"):\n while arr.ndim < ndim:\n arr = arr[None, ...]\n result[idx] = arr\n\n return result\n\n\ndef concatenate_axes(arrays, axes):\n \"\"\" Recursively call np.concatenate along axes \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth 
of nested arrays\")\n\n extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))\n return concatenate3(transposelist(arrays, axes, extradims=extradims))\n\n\ndef to_hdf5(filename, *args, **kwargs):\n \"\"\"Store arrays in HDF5 file\n\n This saves several dask arrays into several datapaths in an HDF5 file.\n It creates the necessary datasets and handles clean file opening/closing.\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP\n\n or\n\n >>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP\n\n This can also be used as a method on a single Array\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n if len(args) == 1 and isinstance(args[0], dict):\n data = args[0]\n elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):\n data = {args[0]: args[1]}\n else:\n raise ValueError(\"Please provide {'/data/path': array} dictionary\")\n\n chunks = kwargs.pop(\"chunks\", True)\n\n import h5py\n\n with h5py.File(filename, mode=\"a\") as f:\n dsets = [\n f.require_dataset(\n dp,\n shape=x.shape,\n dtype=x.dtype,\n chunks=tuple([c[0] for c in x.chunks]) if chunks is True else chunks,\n **kwargs,\n )\n for dp, x in data.items()\n ]\n store(list(data.values()), dsets)\n\n\ndef interleave_none(a, b):\n \"\"\"\n\n >>> interleave_none([0, None, 2, None], [1, 3])\n (0, 1, 2, 3)\n \"\"\"\n result = []\n i = j = 0\n n = len(a) + len(b)\n while i + j < n:\n if a[i] is not None:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n i += 1\n j += 1\n return tuple(result)\n\n\ndef keyname(name, i, okey):\n \"\"\"\n\n >>> keyname('x', 3, [None, None, 0, 2])\n ('x', 3, 0, 2)\n \"\"\"\n return (name, i) + tuple(k for k in okey if k is not None)\n\n\ndef _vindex(x, *indexes):\n \"\"\"Point wise indexing with broadcasting.\n\n >>> x = np.arange(56).reshape((7, 8))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n\n >>> d = from_array(x, chunks=(3, 4))\n >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])\n >>> result.compute()\n array([ 0, 9, 48, 7])\n \"\"\"\n indexes = replace_ellipsis(x.ndim, indexes)\n\n nonfancy_indexes = []\n reduced_indexes = []\n for i, ind in enumerate(indexes):\n if isinstance(ind, Number):\n nonfancy_indexes.append(ind)\n elif isinstance(ind, slice):\n nonfancy_indexes.append(ind)\n reduced_indexes.append(slice(None))\n else:\n nonfancy_indexes.append(slice(None))\n reduced_indexes.append(ind)\n\n nonfancy_indexes = tuple(nonfancy_indexes)\n reduced_indexes = tuple(reduced_indexes)\n\n x = x[nonfancy_indexes]\n\n array_indexes = {}\n for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):\n if not isinstance(ind, slice):\n ind = np.array(ind, copy=True)\n if ind.dtype.kind == \"b\":\n raise IndexError(\"vindex does not support indexing with boolean arrays\")\n if ((ind >= size) | (ind < -size)).any():\n raise IndexError(\n \"vindex key has entries out of bounds for \"\n \"indexing along axis %s of size %s: %r\" % (i, size, ind)\n )\n ind %= size\n array_indexes[i] = ind\n\n if array_indexes:\n x = _vindex_array(x, array_indexes)\n\n return x\n\n\ndef _vindex_array(x, 
dict_indexes):\n \"\"\"Point wise indexing with only NumPy Arrays.\"\"\"\n\n try:\n broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())\n except ValueError as e:\n # note: error message exactly matches numpy\n shapes_str = \" \".join(str(a.shape) for a in dict_indexes.values())\n raise IndexError(\n \"shape mismatch: indexing arrays could not be \"\n \"broadcast together with shapes \" + shapes_str\n ) from e\n broadcast_shape = broadcast_indexes[0].shape\n\n lookup = dict(zip(dict_indexes, broadcast_indexes))\n flat_indexes = [\n lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)\n ]\n flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))\n\n flat_indexes = [\n list(index) if index is not None else index for index in flat_indexes\n ]\n bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]\n bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]\n axis = _get_axis(flat_indexes)\n token = tokenize(x, flat_indexes)\n out_name = \"vindex-merge-\" + token\n\n points = list()\n for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):\n block_idx = [\n np.searchsorted(b, ind, \"right\") - 1 for b, ind in zip(bounds2, idx)\n ]\n inblock_idx = [\n ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))\n ]\n points.append((i, tuple(block_idx), tuple(inblock_idx)))\n\n chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]\n chunks.insert(0, (len(points),) if points else (0,))\n chunks = tuple(chunks)\n\n if points:\n per_block = groupby(1, points)\n per_block = dict((k, v) for k, v in per_block.items() if v)\n\n other_blocks = list(\n product(\n *[\n list(range(len(c))) if i is None else [None]\n for i, c in zip(flat_indexes, x.chunks)\n ]\n )\n )\n\n full_slices = [slice(None, None) if i is None else None for i in flat_indexes]\n\n name = \"vindex-slice-\" + token\n vindex_merge_name = \"vindex-merge-\" + token\n dsk = {}\n for okey in other_blocks:\n for i, key in enumerate(per_block):\n dsk[keyname(name, i, okey)] = (\n _vindex_transpose,\n (\n _vindex_slice,\n (x.name,) + interleave_none(okey, key),\n interleave_none(\n full_slices, list(zip(*pluck(2, per_block[key])))\n ),\n ),\n axis,\n )\n dsk[keyname(vindex_merge_name, 0, okey)] = (\n _vindex_merge,\n [list(pluck(0, per_block[key])) for key in per_block],\n [keyname(name, i, okey) for i in range(len(per_block))],\n )\n\n result_1d = Array(\n HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),\n out_name,\n chunks,\n x.dtype,\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n # output has a zero dimension, just create a new zero-shape array with the\n # same dtype\n from .wrap import empty\n\n result_1d = empty(\n tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n\ndef _get_axis(indexes):\n \"\"\"Get axis along which point-wise slicing results lie\n\n This is mostly a hack because I can't figure out NumPy's rule on this and\n can't be bothered to go reading.\n\n >>> _get_axis([[1, 2], None, [1, 2], None])\n 0\n >>> _get_axis([None, [1, 2], [1, 2], None])\n 1\n >>> _get_axis([None, None, [1, 2], [1, 2]])\n 2\n \"\"\"\n ndim = len(indexes)\n indexes = [slice(None, None) if i is None else [0] for i in indexes]\n x = np.empty((2,) * ndim)\n x2 = x[tuple(indexes)]\n return x2.shape.index(1)\n\n\ndef _vindex_slice(block, points):\n \"\"\" Pull out point-wise slices from block \"\"\"\n points = [p if isinstance(p, 
slice) else list(p) for p in points]\n return block[tuple(points)]\n\n\ndef _vindex_transpose(block, axis):\n \"\"\" Rotate block so that points are on the first dimension \"\"\"\n axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))\n return block.transpose(axes)\n\n\ndef _vindex_merge(locations, values):\n \"\"\"\n\n >>> locations = [0], [2, 1]\n >>> values = [np.array([[1, 2, 3]]),\n ... np.array([[10, 20, 30], [40, 50, 60]])]\n\n >>> _vindex_merge(locations, values)\n array([[ 1, 2, 3],\n [40, 50, 60],\n [10, 20, 30]])\n \"\"\"\n locations = list(map(list, locations))\n values = list(values)\n\n n = sum(map(len, locations))\n\n shape = list(values[0].shape)\n shape[0] = n\n shape = tuple(shape)\n\n dtype = values[0].dtype\n\n x = np.empty(shape, dtype=dtype)\n\n ind = [slice(None, None) for i in range(x.ndim)]\n for loc, val in zip(locations, values):\n ind[0] = loc\n x[tuple(ind)] = val\n\n return x\n\n\ndef to_npy_stack(dirname, x, axis=0):\n \"\"\"Write dask array to a stack of .npy files\n\n This partitions the dask.array along one axis and stores each block along\n that axis as a single .npy file in the specified directory\n\n Examples\n --------\n >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP\n >>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP\n\n The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``\n respectively, as is specified by the chunk size along the zeroth axis::\n\n $ tree data/\n data/\n |-- 0.npy\n |-- 1.npy\n |-- 2.npy\n |-- info\n\n The ``info`` file stores the dtype, chunks, and axis information of the array.\n You can load these stacks with the ``da.from_npy_stack`` function.\n\n >>> y = da.from_npy_stack('data/') # doctest: +SKIP\n\n See Also\n --------\n from_npy_stack\n \"\"\"\n\n chunks = tuple((c if i == axis else (sum(c),)) for i, c in enumerate(x.chunks))\n xx = x.rechunk(chunks)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n meta = {\"chunks\": chunks, \"dtype\": x.dtype, \"axis\": axis}\n\n with open(os.path.join(dirname, \"info\"), \"wb\") as f:\n pickle.dump(meta, f)\n\n name = \"to-npy-stack-\" + str(uuid.uuid1())\n dsk = {\n (name, i): (np.save, os.path.join(dirname, \"%d.npy\" % i), key)\n for i, key in enumerate(core.flatten(xx.__dask_keys__()))\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])\n compute_as_if_collection(Array, graph, list(dsk))\n\n\ndef from_npy_stack(dirname, mmap_mode=\"r\"):\n \"\"\"Load dask array from stack of npy files\n\n See ``da.to_npy_stack`` for docstring\n\n Parameters\n ----------\n dirname: string\n Directory of .npy files\n mmap_mode: (None or 'r')\n Read data in memory map mode\n \"\"\"\n with open(os.path.join(dirname, \"info\"), \"rb\") as f:\n info = pickle.load(f)\n\n dtype = info[\"dtype\"]\n chunks = info[\"chunks\"]\n axis = info[\"axis\"]\n\n name = \"from-npy-stack-%s\" % dirname\n keys = list(product([name], *[range(len(c)) for c in chunks]))\n values = [\n (np.load, os.path.join(dirname, \"%d.npy\" % i), mmap_mode)\n for i in range(len(chunks[axis]))\n ]\n dsk = dict(zip(keys, values))\n\n return Array(dsk, name, chunks, dtype)\n\n\nfrom .utils import meta_from_array\n"
] | [
[
"numpy.ones",
"numpy.empty",
"numpy.searchsorted",
"numpy.dtype",
"numpy.asarray",
"numpy.median",
"numpy.errstate",
"numpy.asanyarray",
"numpy.can_cast",
"numpy.prod",
"numpy.max",
"numpy.isnan",
"numpy.array",
"numpy.isscalar"
]
] |
ShamimSuf/AI_FlappyBird_GeneticAlgo | [
"438da76b1327ef3167dea42706dce4aaeccc8136"
] | [
"AI_Flappy.py"
] | [
"#This flappy will have 6 inputs (i1 to i6) : up, down, bird top-right to up-block-low-right, bird top-left to up-block-low-left, so on. \n\nimport numpy as np\nimport pygame\nimport time\nimport random\nfrom random import randint\n\npygame.init()\n\n#6 input nodes\ni_ROW = 1\ni_COL = 6\n\n#3 hidden layer nodes\n#input to hidden layer nodes\t\t\nw1_ROW = 6\nw1_COL = 3\n\n#hiddenb layer to op layer nodes\nw2_ROW = 3\nw2_COL = 1\n\nclass Colors:\n\tdef __init__(self): \n\t\t#https://www.webucator.com/blog/2015/03/python-color-constants-module/\n\t\tself.black = (0,0,0)\n\t\tself.white = (255,255,255)\n\t\tself.cornflowerblue = (100,149,237)\n\t\tself.azure4 = (131,139,139)\n\t\tself.cadetblue1\t= (152,245,255)\n\nclass GameWindow:\n\tdef __init__(self):\n\t\tcolors = Colors()\n\t\t\n\t\tself.surfaceWidth = 800\n\t\tself.surfaceHeight = 400\t\n\t\tself.surface = pygame.display.set_mode((self.surfaceWidth, self.surfaceHeight))\n\t\tself.clock = pygame.time.Clock()\n\t\tself.surface.fill(colors.azure4)\n\t\tpygame.display.set_caption('Flappy Bork')\n\nclass Point:\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\t\nclass Block:\n\tdef __init__(self, gameWindow, x, y_gap, gapHeight):\n\t\t\n\t\tself.gameWindow\t= gameWindow\n\t\tself.x \t\t\t= x\n\t\tself.y_gap\t\t= y_gap\n\t\tself.gapHeight\t= gapHeight\n\t\t\t\t\n\t\tself.blockWidth = 50\n\t\tself.blockSpeed = 5\n\t\t\n\tdef draw(self):\n\t\tcolors = Colors()\n\t\tpygame.draw.rect( self.gameWindow.surface, colors.white, (self.x, 0, self.blockWidth, self.y_gap))\n\t\tpygame.draw.rect( self.gameWindow.surface, colors.white, (self.x, self.y_gap + self.gapHeight, self.blockWidth, self.gameWindow.surfaceHeight - self.y_gap - self.gapHeight))\n\n\tdef reset(self):\n\t\t#Reset block\n\t\tself.x = gameWindow.surfaceWidth - self.blockWidth\n\t\tself.y_gap = random_obj.randint(0, gameWindow.surfaceHeight - 150)\n\t\n#Global Variables (Shitty way of implementing! 
)\t\nrandom_obj = random.SystemRandom()\t\nmutation_rate = 0.3 #between 0,1\ngeneration = 0\ngameWindow = GameWindow()\nblock = Block (gameWindow, gameWindow.surfaceWidth - 50, randint(0, gameWindow.surfaceHeight - 150), 150)\nblock.draw()\t\t\t\t\n\n#Game Over\ndef gg_wp():\n\tuser_input = False\n\twhile not user_input:\n\t\tfor event in pygame.event.get():\n\n\t\t\tif event.type==pygame.KEYDOWN:\n\t\t\t\tuser_input = True\n\n\t\t\t\t#restart game via SPACE key\n\t\t\t\tif event.key == pygame.K_SPACE:\n\t\t\t\t\tmain()\n\n\t\t\telse:\n\t\t\t\tpygame.quit()\n\t\t\t\tquit()\n\t\t\t\t\n\t\t\t\t\ndef getRandMatrix(row, col):\n\tmatrix = np.random.rand(row, col)\n\tfor i in range(matrix.shape[0]):\n\t\tfor j in range(matrix.shape[1]):\n\t\t\tmatrix[i][j] = np.random.uniform(-1, 1)\n\treturn matrix\n\n# ReLU activation Function\ndef ReLU(z):\n\tz[z < 0] = 0\n\treturn z\n\n# Softmax or averaging the value\ndef softmax(z):\n\tsummation = np.sum(z)\n\tif summation == 0.0:\n\t\tsummation = 1.0\n\tfor i in range(len(z)):\n\t\tz[i] = z[i]/summation\n\treturn z\n\n# Sigmoid Activation Function\ndef sigmoid(z):\n\treturn 1.0/(1.0 + np.exp(-z))\n\nclass Brain:\t\n\tdef __init__(self):\t\t\n\t\tself.w1_matrix = getRandMatrix(w1_ROW, w1_COL)\n\t\tself.w2_matrix = getRandMatrix(w2_ROW, w2_COL)\n\t\t\n\tdef feedforward(self, i_matrix):\n\t\top1_matrix = np.dot(i_matrix, \tself.w1_matrix)\n\t\top2_matrix = np.dot(op1_matrix, self.w2_matrix)\n\t\top_final = sigmoid(op2_matrix)\t\n\t\tif( op_final > 0.5):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\nclass Bird:\n\tdef __init__(self, point):\n\t\tself.brain = Brain()\n\t\tself.life = True\n\t\tself.score = 0\n\t\tself.fitness = 0\n\t\t\t\t\n\t\tself.point = point\n\t\tself.size = 50\n\t\tself.fall_speed = 8\n\t\t\t\n\t\t#Increment score everytime bird is alive (increment per frame the bird is alive)\n\t\tself.incr_score = 1;\n\t\t\n\t\t#Input coords\n\t\tself.p1 = Point(0, 0)\n\t\tself.p2 = Point(0, 0)\n\t\tself.p3 = Point(0, 0)\n\t\tself.p4 = Point(0, 0)\n\t\tself.p5 = Point(0, 0)\n\t\tself.p6 = Point(0, 0)\n\t\tself.p7 = Point(0, 0)\n\t\t\n\tdef draw(self):\n\t\tcolors = Colors()\n\t\tpygame.draw.rect( gameWindow.surface, colors.cadetblue1, (self.point.x, self.point.y, self.size, self.size))\n\t\t\n\t\t#get block\n\t\tcurrent_block = block\n\t\t\n\t\t#draw the lines\n\t\tcolors = Colors()\n\t\tcurrent_block = block\n\t\tmidpoint = Point(self.point.x + self.size/2, self.point.y + self.size/2)\n\t\ti1 = midpoint.y - self.size/2\n\t\ti2 = midpoint.y + self.size/2\n\t\t\n\t\tblock_up_low_right\t = Point(current_block.x + current_block.blockWidth\t, current_block.y_gap) \n\t\tblock_up_low_left\t = Point(current_block.x\t\t\t\t\t\t\t, current_block.y_gap)\n\t\tblock_lower_up_left\t = Point(current_block.x\t\t\t\t\t\t\t, current_block.y_gap + current_block.gapHeight)\n\t\tblock_lower_up_right = Point(current_block.x + current_block.blockWidth\t,current_block.y_gap + current_block.gapHeight)\n\t\t\n\t\tp1 = Point( midpoint.x, i1 )\n\t\tp2 = Point( midpoint.x, i2)\n\t\tp3 = Point( midpoint.x + self.size/2, midpoint.y - self.size/2)\n\t\tp4 = Point( midpoint.x - self.size/2, midpoint.y - self.size/2)\n\t\tp5 = Point( midpoint.x - self.size/2, midpoint.y + self.size/2)\n\t\tp6 = Point( midpoint.x + self.size/2, midpoint.y + self.size/2)\n\t\t\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p1.x, p1.y), (p1.x, 0))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p2.x, p2.y), (p2.x, 
gameWindow.surfaceHeight))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p3.x, p3.y), (current_block.x + current_block.blockWidth\t, current_block.y_gap))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p4.x, p4.y), (current_block.x\t\t\t\t\t\t\t\t, current_block.y_gap))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p5.x, p5.y), (current_block.x\t\t\t\t\t\t\t\t, current_block.y_gap + current_block.gapHeight))\n\t\tpygame.draw.line(gameWindow.surface, colors.cadetblue1, (p6.x, p6.y), (current_block.x + current_block.blockWidth\t,current_block.y_gap + current_block.gapHeight))\n\t\t\t\t\t\t\n\tdef move(self):\t\t\n\t\t#self.point.y += self.fall_speed\t\t\n\t\tself.score = self.score + 1\n\t\tself.think()\n\n\t\t#self boundary restrict wrt Game Window \n\t\tif ( self.point.x <= 0):\n\t\t\tself.point.x = 0\n\t\tif ( self.point.x >= gameWindow.surfaceWidth - self.size):\n\t\t\tself.point.x = gameWindow.surfaceWidth - self.size\t\t\t\n\t\tif ( self.point.y <= 0):\n\t\t\tself.point.y = 0\n\t\tif ( self.point.y >= gameWindow.surfaceHeight - self.size):\n\t\t\tself.point.y = gameWindow.surfaceHeight - self.size\n\t\t\n\t\t#get_block\n\t\tcurrent_block = block\n\t\t\t\t\n\t\t#check collision\n\t\t#Check if self has collided with block\n\t\tif( (current_block.x < (self.point.x + self.size) < (current_block.x + current_block.blockWidth)) or\n\t\t\t(current_block.x < (self.point.x) \t\t\t < (current_block.x + current_block.blockWidth))):\n\t\t\tif(not( (current_block.y_gap < self.point.y < current_block.y_gap + current_block.gapHeight) and \n\t\t\t\t\t(current_block.y_gap < (self.point.y + self.size) < current_block.y_gap + current_block.gapHeight))):\n\t\t\t\tself.fitness = self.score\n\t\t\t\tself.life = False\n\t\t\t\tself.score = 0.0 \n\t\t\n\t\t#check collision with up and low\n\t\tif (self.point.y == 0) or (self.point.y + 50 == gameWindow.surfaceHeight):\n\t\t\tself.fitness = self.score\n\t\t\tself.life = False\n\t\t\tself.score = 0.0 \n\t\t\n\tdef think(self):\n\t\ti_matrix = self.get_input_matrix()\n\t\tdoFlap = self.brain.feedforward(i_matrix)\n\t\t\n\t\tif doFlap == 1:\n\t\t\tself.fall_speed = 8\n\t\telse:\n\t\t\tself.fall_speed = -3\n\t\t\t\n\t\t#Update player movement value\n\t\tself.point.y += self.fall_speed\n\t\t\t\t\n\tdef reset(self):\n\t\tself.point = Point( 50, gameWindow.surfaceHeight/2)\n\t\tself.life = True\n\n\tdef get_input_matrix(self):\n\t\tcurrent_block = block\n\t\t\n\t\t#TO_DO calculate i_matrix\n\t\tmidpoint = Point(self.point.x + self.size/2, self.point.y + self.size/2)\n\t\t\t\t\n\t\t#i1 = (current_block.x + current_block.blockWidth) - midpoint.x\n\t\t#i2 = current_block.x - midpoint.x\n\t\t\n\t\t#distance of player from up and down\n\t\ti1 = midpoint.y - self.size/2\n\t\ti2 = gameWindow.surfaceHeight - (midpoint.y + self.size/2)\n\t\t\n\t\tblock_up_low_right\t = Point(current_block.x + current_block.blockWidth\t, current_block.y_gap) \n\t\tblock_up_low_left\t = Point(current_block.x\t\t\t\t\t\t\t, current_block.y_gap)\n\t\tblock_lower_up_left\t = Point(current_block.x\t\t\t\t\t\t\t, current_block.y_gap + current_block.gapHeight)\n\t\tblock_lower_up_right = Point(current_block.x + current_block.blockWidth\t,current_block.y_gap + current_block.gapHeight)\n\t\t\n\t\tp1 = Point( midpoint.x, i1 )\n\t\tp2 = Point( midpoint.x, i2)\n\t\tp3 = Point( midpoint.x + self.size/2, midpoint.y - self.size/2)\n\t\tp4 = Point( midpoint.x - self.size/2, midpoint.y - self.size/2)\n\t\tp5 = Point( midpoint.x - self.size/2, midpoint.y + 
self.size/2)\n\t\tp6 = Point( midpoint.x + self.size/2, midpoint.y + self.size/2)\n\n\t\ti3 = np.sqrt(np.square(p3.x - block_up_low_right.x) + np.square(p3.y - block_up_low_right.y))\n\t\ti4 = np.sqrt(np.square(p4.x - block_up_low_left.x) + np.square(p4.y - block_up_low_left.y))\n\t\ti5 = np.sqrt(np.square(p5.x - block_lower_up_left.x) + np.square(p5.y - block_lower_up_left.y))\n\t\ti6 = np.sqrt(np.square(p6.x - block_lower_up_right.x) + np.square(p6.y - block_lower_up_right.y))\n\t\t\n\t\tself.p1 = p1\n\t\tself.p2 = p2\n\t\tself.p3 = p3\n\t\tself.p4 = p4\n\t\tself.p5 = p5\n\t\tself.p6 = p6\t\t\n\t\t\n\t\t'''\n\t\tupblock_lower_left = Point(current_block.x\t\t\t\t\t\t\t\t,current_block.y_gap)\n\t\tupblock_lower_right = Point(current_block.x + current_block.blockWidth\t,current_block.y_gap)\n\t\tlowblock_up_right\t= Point(current_block.x + current_block.blockWidth\t,current_block.y_gap + current_block.gapHeight)\n\t\tlowblock_up_left\t= Point(current_block.x\t\t\t\t\t\t\t\t,current_block.y_gap + current_block.gapHeight)\n\t\t\n\t\ti3 = np.sqrt(np.square(midpoint.x - upblock_lower_left.x) \t+ np.square(midpoint.y - upblock_lower_left.y))\n\t\ti4 = np.sqrt(np.square(midpoint.x - upblock_lower_right.x) \t+ np.square(midpoint.y - upblock_lower_right.y))\n\t\ti5 = np.sqrt(np.square(midpoint.x - lowblock_up_right.x) \t+ np.square(midpoint.y - lowblock_up_right.y))\n\t\ti6 = np.sqrt(np.square(midpoint.x - lowblock_up_left.x) \t+ np.square(midpoint.y - lowblock_up_left.y))\n\t\t'''\n\t\t\n\t\treturn np.array([i1, i2, i3, i4, i5, i6])\n\nclass Population:\n\tdef __init__(self):\n\t\tself.population = []\n\t\tself.eliminated = []\t\n\t\n\tdef createPopulation(self):\n\t\tfor i in range(12):\n\t\t\tbird = Bird(Point( 50, gameWindow.surfaceHeight/2))\n\t\t\tself.population.append(bird)\n\t\t\t\n\tdef move(self):\n\t\tfor bird in self.population:\n\t\t\tbird.move()\n\n\t\tpopCopy = self.population[:]\n\t\tfor bird in popCopy:\n\t\t\tif not bird.life:\n\t\t\t\tself.eliminated.append(bird)\n\t\t\t\tself.population.remove(bird)\n\n\t\tif self.population == []:\n\t\t\tself.evolve()\n\t\n\t#aka def reproduce(self)\n\tdef evolve(self):\n\t\tglobal generation\t\t\n\t\tgeneration = generation + 1\n\t\t\n\t\t#reset block since all birds are dead\n\t\t#evolve is called when entire population is dead\n\t\tblock.reset()\n\t\t\t\n\t\t#Draw block\n\t\t#block.draw()\n\t\t\n\t\tself.crossbreed() #takes top and creates babies\n\t\tself.mutate()\n\n\tdef crossbreed(self):\n\t\tself.eliminated.sort(key=lambda x: x.fitness, reverse=True)\n\t\t\n\t\t#assuming 12 birds in population\n\t\tbaby1, baby2 = self.getBabies(self.eliminated[0], self.eliminated[1]) \n\t\tbaby3, baby4 = self.getBabies(self.eliminated[2], self.eliminated[3])\n\t\tbaby5, baby6 = self.getBabies(self.eliminated[4], self.eliminated[5])\n\t\t\n\t\tfor i in range(6):\n\t\t\tself.population.append(self.eliminated[i])\n\t\t\n\t\tself.population.append(baby1)\n\t\tself.population.append(baby2)\n\t\tself.population.append(baby3)\n\t\tself.population.append(baby4)\n\t\tself.population.append(baby5)\n\t\tself.population.append(baby6)\n\t\t\n\t\t#clear self.eliminated list\n\t\tself.eliminated = []\n\n\tdef getBabies(self, parent_bird1, parent_bird2):\n\t\tbaby1 = Bird (Point( 50, gameWindow.surfaceHeight/2))\n\t\tbaby2 = Bird (Point( 50, gameWindow.surfaceHeight/2))\n\t\t\n\t\t#w1 matrix\n\t\tfor i in range(baby1.brain.w1_matrix.shape[0]):\n\t\t\tfor j in range(baby1.brain.w1_matrix.shape[1]):\n\t\t\t\tbaby1.brain.w1_matrix[i][j] = 
random.choice([parent_bird1.brain.w1_matrix[i][j], parent_bird2.brain.w1_matrix[i][j]])\t\t\n\t\t\t\tbaby2.brain.w1_matrix[i][j] = random.choice([parent_bird1.brain.w1_matrix[i][j], parent_bird2.brain.w1_matrix[i][j]])\n\n\t\t#w2 matrix\n\t\tfor i in range(baby1.brain.w2_matrix.shape[0]):\n\t\t\tfor j in range(baby1.brain.w2_matrix.shape[1]):\n\t\t\t\tbaby1.brain.w2_matrix[i][j] = random.choice([parent_bird1.brain.w2_matrix[i][j], parent_bird2.brain.w2_matrix[i][j]])\n\t\t\t\tbaby2.brain.w2_matrix[i][j] = random.choice([parent_bird1.brain.w2_matrix[i][j], parent_bird2.brain.w2_matrix[i][j]])\t\t\t\t\n\t\t\n\t\treturn baby1, baby2\n\t\t\n\tdef mutate(self):\t\t\n\t\t#Mutate single bird, 6/18 from W1 and 1/3 from W2\t\t\n\t\trandom_bird_index = random_obj.randint(0, len(self.population)-1)\n\t\t\t\t\n\t\tfor x in range(6):\n\t\t\trandom_row = random_obj.randint(0,5)\n\t\t\trandom_col = random_obj.randint(0,2)\n\t\t\tself.population[random_bird_index].brain.w1_matrix[random_row][random_col] = np.random.uniform(-1, 1)\n\n\t\tfor x in range(1):\n\t\t\trandom_row_w2 = random_obj.randint(0,2)\n\t\t\tself.population[random_bird_index].brain.w2_matrix[random_row_w2][0] = np.random.uniform(-1, 1)\n\t\t\n\tdef draw(self):\n\t\tfor bird in self.population:\n\t\t\tbird.draw()\n\t\t\ndef gameLoop():\n\tloop = True\n\tpopulation = Population()\n\tpopulation.createPopulation()\n\tspeed = 60\n\tcolors = Colors()\n\t\n\tglobal generation\n\t\n\twhile loop:\t\n\n\t\t#display info\n\t\ttext = \"Generation: \" + str(generation)\n\t\t#population.population.sort(key=lambda x: x.score, reverse=True)\n\t\t#top_bird = population.population[0]\n\t\t#text = text + \" Top Score: \" + str(top_bird.score) + \" Top Fitness: \" + str(top_bird.fitness)\n\t\tprint(text)\n\t\t\n\t\tfor event in pygame.event.get():\t\t\t\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\tgg_wp()\n\t\t\t\tif event.key == pygame.K_1:\n\t\t\t\t\tspeed = 60*1\n\t\t\t\tif event.key == pygame.K_2:\n\t\t\t\t\tspeed = 60*2\n\t\t\t\tif event.key == pygame.K_3:\n\t\t\t\t\tspeed = 60*3\n\t\t\t\tif event.key == pygame.K_4:\n\t\t\t\t\tspeed = 60*4\n\t\t\t\tif event.key == pygame.K_5:\n\t\t\t\t\tspeed = 60*5\n\t\t\t\tif event.key == pygame.K_6:\n\t\t\t\t\tspeed = 60*6\n\t\t\t\tif event.key == pygame.K_7:\n\t\t\t\t\tspeed = 60*7\n\t\t\t\tif event.key == pygame.K_8:\n\t\t\t\t\tspeed = 60*8\n\t\t\t\tif event.key == pygame.K_9:\n\t\t\t\t\tspeed = 60*9\t\t\n\n\t\t#Reset block\n\t\tif( block.x <= -block.blockWidth ):\n\t\t\tblock.reset()\n\t\t\t\t\n\t\t#Fill Game Window\n\t\tgameWindow.surface.fill(colors.azure4)\n\t\t\n\t\t#Block move\t\t\n\t\tblock.x \t= block.x - block.blockSpeed \t\t\n\t\t\n\t\t#draw block\n\t\tblock.draw()\t\n\t\t\n\t\tpopulation.move() \n\t\tpopulation.draw()\n\t\t\n\t\tpygame.display.update()\n\t\tgameWindow.clock.tick(speed)\ngameLoop()\n"
] | [
[
"numpy.random.uniform",
"numpy.sum",
"numpy.exp",
"numpy.random.rand",
"numpy.array",
"numpy.dot",
"numpy.square"
]
] |
Tesla2fox/MPDA-DE | [
"0a27d59ceba16d292fade01d95b1c1f336e5f604"
] | [
"MPDA_decode/instance.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 21 15:49:17 2018\n\n@author: robot\n\"\"\"\nfrom readCfg import *\nimport numpy as np\nfrom enum import Enum\n\nclass TaskModelType(Enum):\n ExpModel = 1\n LineModel = 2\n\n\n\n\nclass Instance(object):\n def __init__(self, insFileName = 'wtf'):\n self.insFileName = insFileName\n readCfg = Read_Cfg(insFileName)\n self.robNum = int(readCfg.getSingleVal('robNum'))\n self.taskNum = int(readCfg.getSingleVal('taskNum'))\n self.threhold = readCfg.getSingleVal('comp_threhold')\n self.robAbiLst = []\n self.robVelLst = []\n self.taskStateLst = []\n self.taskRateLst = []\n readCfg.get('rob_abi',self.robAbiLst)\n readCfg.get('rob_vel',self.robVelLst)\n readCfg.get('tsk_rat',self.taskRateLst)\n readCfg.get('tsk_state',self.taskStateLst)\n self.rob2taskDisMat = np.zeros((self.robNum,self.taskNum))\n disLst = []\n readCfg.get('rob2tskDis',disLst)\n# print(self.rob2taskDisMat)\n for i in range(self.robNum):\n for j in range(self.taskNum):\n# print(i,j)\n# print(i*self.robNum+j)\n# print(disLst[i*self.robNum+j])\n self.rob2taskDisMat[i][j] = disLst[i*self.taskNum+j]\n self.taskDisMat = np.zeros((self.taskNum,self.taskNum))\n disLst = []\n readCfg.get('tskDis',disLst)\n for i in range(self.taskNum):\n for j in range(self.taskNum):\n self.taskDisMat[i][j] = disLst[i*self.taskNum+j]\n # self.decode = DecodeSS(self.insFileName)\n self.taskModelType = TaskModelType.ExpModel\n\n def __str__(self):\n return self.insFileName + '\\n robNum = '+ str(self.robNum) +' task =' +str(self.taskNum)\n def __eq__(self,other):\n if self.insFileName == other.insFileName:\n return True\n else:\n return False\n def __ne__(self,other):\n if self.__eq__(other):\n return False\n return True\n def evaluate(self,encode):\n self.decode.encode = encode\n# makespan = self.decode.decode()\n try:\n makespan = self.decode.decode()\n pass\n except InvalidStateException:\n makespan = sys.float_info.max\n except RobotStuckException:\n makespan = sys.float_info.max\n# except Exception as e:\n# print(e)\n return makespan\n def genNoBackTrackEncode(self,encode):\n resEncode = np.zeros((self.robNum,self.taskNum),dtype =int)\n resEncode[:][:] = -1\n for i in range(self.robNum):\n ind = 0\n for j in range(self.taskNum):\n if encode[i][j] != -1:\n resEncode[i][ind] = encode[i][j]\n ind += 1\n return resEncode\n def calRob2TaskPeriod(self,robID,taskID):\n dis = self.rob2taskDisMat[robID][taskID]\n dis_time = dis/self.robVelLst[robID]\n return dis_time\n def calTask2TaskPeriod(self,robID,taskID1,taskID2):\n dis = self.taskDisMat[taskID1][taskID2]\n period = dis/self.robVelLst[robID]\n return period\n\n\nif __name__ =='__main__':\n\n wtf = Read_Cfg(\"wf\")\n\n # insName = 's100_5_10_max100_2.5_2.5_2.5_1.2_thre0.1_MPDAins.dat'\n # ins = Instance(BaseDir + '//data\\\\' + insName)\n # insName = 's100_5_10_max100_2.5_2.5_2.5_1.2_thre0.1_MPDAins.dat'\n # ins2 = Instance(BaseDir + '//data\\\\' + insName)\n # if ins == ins2:\n # print('asd')\n # print(ins)\n \n \n "
] | [
[
"numpy.zeros"
]
] |
TianjieZhang1993/PINNs | [
"9034ba7f4fef81c24954fa3cbf08a2d4a7fee85a"
] | [
"appendix/discrete_time_identification (Burgers)/Burgers_systematic.py"
] | [
"\"\"\"\n@author: Maziar Raissi\n\"\"\"\n\nimport sys\nsys.path.insert(0, '../../Utilities/')\n\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport scipy.io\n\nnp.random.seed(1234)\ntf.set_random_seed(1234)\n\n\nclass PhysicsInformedNN:\n # Initialize the class\n def __init__(self, x0, u0, x1, u1, layers, dt, lb, ub, q):\n \n self.lb = lb\n self.ub = ub\n \n self.x0 = x0\n self.x1 = x1\n \n self.u0 = u0\n self.u1 = u1\n \n self.layers = layers\n self.dt = dt\n self.q = max(q,1)\n \n # Initialize NN\n self.weights, self.biases = self.initialize_NN(layers)\n \n # Initialize parameters\n self.lambda_1 = tf.Variable([0.0], dtype=tf.float32)\n self.lambda_2 = tf.Variable([-6.0], dtype=tf.float32) \n \n # Load IRK weights\n tmp = np.float32(np.loadtxt('../../Utilities/IRK_weights/Butcher_IRK%d.txt' % (q), ndmin = 2))\n weights = np.reshape(tmp[0:q**2+q], (q+1,q)) \n self.IRK_alpha = weights[0:-1,:]\n self.IRK_beta = weights[-1:,:] \n self.IRK_times = tmp[q**2+q:]\n \n # tf placeholders and graph\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=True))\n \n self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1]))\n self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1]))\n self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1]))\n self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1]))\n self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients \n self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients \n \n self.U0_pred = self.net_U0(self.x0_tf) # N0 x q\n self.U1_pred = self.net_U1(self.x1_tf) # N1 x q\n \n self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \\\n tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred)) \n \n self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, \n method = 'L-BFGS-B', \n options = {'maxiter': 50000,\n 'maxfun': 50000,\n 'maxcor': 50,\n 'maxls': 50,\n 'ftol' : 1.0 * np.finfo(float).eps}) \n \n self.optimizer_Adam = tf.train.AdamOptimizer()\n self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)\n \n init = tf.global_variables_initializer()\n self.sess.run(init)\n \n def initialize_NN(self, layers): \n weights = []\n biases = []\n num_layers = len(layers) \n for l in range(0,num_layers-1):\n W = self.xavier_init(size=[layers[l], layers[l+1]])\n b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)\n weights.append(W)\n biases.append(b) \n return weights, biases\n \n def xavier_init(self, size):\n in_dim = size[0]\n out_dim = size[1] \n xavier_stddev = np.sqrt(2/(in_dim + out_dim))\n return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)\n \n def neural_net(self, X, weights, biases):\n num_layers = len(weights) + 1\n \n H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0\n for l in range(0,num_layers-2):\n W = weights[l]\n b = biases[l]\n H = tf.tanh(tf.add(tf.matmul(H, W), b))\n W = weights[-1]\n b = biases[-1]\n Y = tf.add(tf.matmul(H, W), b)\n return Y\n \n def fwd_gradients_0(self, U, x): \n g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0]\n return tf.gradients(g, self.dummy_x0_tf)[0]\n \n def fwd_gradients_1(self, U, x): \n g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0]\n return tf.gradients(g, self.dummy_x1_tf)[0] \n \n def net_U0(self, x):\n lambda_1 = self.lambda_1\n lambda_2 = tf.exp(self.lambda_2)\n U = self.neural_net(x, self.weights, 
self.biases) \n U_x = self.fwd_gradients_0(U, x)\n U_xx = self.fwd_gradients_0(U_x, x)\n F = -lambda_1*U*U_x + lambda_2*U_xx\n U0 = U - self.dt*tf.matmul(F, self.IRK_alpha.T)\n return U0\n \n def net_U1(self, x):\n lambda_1 = self.lambda_1\n lambda_2 = tf.exp(self.lambda_2)\n U = self.neural_net(x, self.weights, self.biases) \n U_x = self.fwd_gradients_1(U, x)\n U_xx = self.fwd_gradients_1(U_x, x)\n F = -lambda_1*U*U_x + lambda_2*U_xx\n U1 = U + self.dt*tf.matmul(F, (self.IRK_beta - self.IRK_alpha).T)\n return U1\n\n def callback(self, loss):\n print('Loss:', loss)\n \n def train(self, nIter):\n tf_dict = {self.x0_tf: self.x0, self.u0_tf: self.u0, \n self.x1_tf: self.x1, self.u1_tf: self.u1,\n self.dummy_x0_tf: np.ones((self.x0.shape[0], self.q)),\n self.dummy_x1_tf: np.ones((self.x1.shape[0], self.q))}\n \n start_time = time.time()\n for it in range(nIter):\n self.sess.run(self.train_op_Adam, tf_dict)\n \n # Print\n if it % 10 == 0:\n elapsed = time.time() - start_time\n loss_value = self.sess.run(self.loss, tf_dict)\n lambda_1_value = self.sess.run(self.lambda_1)\n lambda_2_value = np.exp(self.sess.run(self.lambda_2))\n print('It: %d, Loss: %.3e, l1: %.3f, l2: %.5f, Time: %.2f' % \n (it, loss_value, lambda_1_value, lambda_2_value, elapsed))\n start_time = time.time()\n \n self.optimizer.minimize(self.sess,\n feed_dict = tf_dict,\n fetches = [self.loss],\n loss_callback = self.callback)\n \n def predict(self, x_star):\n \n U0_star = self.sess.run(self.U0_pred, {self.x0_tf: x_star, self.dummy_x0_tf: np.ones((x_star.shape[0], self.q))}) \n U1_star = self.sess.run(self.U1_pred, {self.x1_tf: x_star, self.dummy_x1_tf: np.ones((x_star.shape[0], self.q))})\n \n return U0_star, U1_star\n\n \ndef main_loop(skip, noise, num_layers, num_neurons):\n \n N0 = 199\n N1 = 201\n \n data = scipy.io.loadmat('../Data/burgers_shock.mat')\n \n t_star = data['t'].flatten()[:,None]\n x_star = data['x'].flatten()[:,None]\n Exact = np.real(data['usol'])\n \n idx_t = 10\n \n idx_x = np.random.choice(Exact.shape[0], N0, replace=False)\n x0 = x_star[idx_x,:]\n u0 = Exact[idx_x,idx_t][:,None]\n u0 = u0 + noise*np.std(u0)*np.random.randn(u0.shape[0], u0.shape[1])\n \n idx_x = np.random.choice(Exact.shape[0], N1, replace=False)\n x1 = x_star[idx_x,:]\n u1 = Exact[idx_x,idx_t + skip][:,None]\n u1 = u1 + noise*np.std(u1)*np.random.randn(u1.shape[0], u1.shape[1])\n \n dt = np.asscalar(t_star[idx_t+skip] - t_star[idx_t]) \n q = int(np.ceil(0.5*np.log(np.finfo(float).eps)/np.log(dt)))\n \n layers = np.concatenate([[1], num_neurons*np.ones(num_layers), [q]]).astype(int).tolist() \n \n # Doman bounds\n lb = x_star.min(0)\n ub = x_star.max(0)\n\n model = PhysicsInformedNN(x0, u0, x1, u1, layers, dt, lb, ub, q)\n model.train(nIter = 50000)\n \n U0_pred, U1_pred = model.predict(x_star) \n \n lambda_1_value = model.sess.run(model.lambda_1)\n lambda_2_value = np.exp(model.sess.run(model.lambda_2))\n \n nu = 0.01/np.pi \n error_lambda_1 = np.abs(lambda_1_value - 1.0)/1.0 *100\n error_lambda_2 = np.abs(lambda_2_value - nu)/nu * 100\n \n print('Error lambda_1: %f%%' % (error_lambda_1))\n print('Error lambda_2: %f%%' % (error_lambda_2))\n \n return error_lambda_1, error_lambda_2\n \n \nif __name__ == \"__main__\": \n \n skip = [20, 40, 60, 80]\n noise = [0.0, 0.01, 0.05, 0.1]\n \n num_layers = [1,2,3,4]\n num_neurons = [10,25,50]\n \n error_lambda_1_table_1 = np.zeros((len(skip), len(noise)))\n error_lambda_2_table_1 = np.zeros((len(skip), len(noise)))\n \n error_lambda_1_table_2 = np.zeros((len(num_layers), len(num_neurons)))\n 
error_lambda_2_table_2 = np.zeros((len(num_layers), len(num_neurons)))\n \n for i in range(len(skip)):\n for j in range(len(noise)):\n error_lambda_1_table_1[i,j], error_lambda_2_table_1[i,j] = main_loop(skip[i], noise[j], num_layers[-1], num_neurons[-1])\n \n for i in range(len(num_layers)):\n for j in range(len(num_neurons)):\n error_lambda_1_table_2[i,j], error_lambda_2_table_2[i,j] = main_loop(skip[-1], noise[0], num_layers[i], num_neurons[j])\n \n \n np.savetxt('./tables/error_lambda_1_table_1.csv', error_lambda_1_table_1, delimiter=' & ', fmt='$%2.3f$', newline=' \\\\\\\\\\n')\n np.savetxt('./tables/error_lambda_2_table_1.csv', error_lambda_2_table_1, delimiter=' & ', fmt='$%2.3f$', newline=' \\\\\\\\\\n')\n\n np.savetxt('./tables/error_lambda_1_table_2.csv', error_lambda_1_table_2, delimiter=' & ', fmt='$%2.3f$', newline=' \\\\\\\\\\n')\n np.savetxt('./tables/error_lambda_2_table_2.csv', error_lambda_2_table_2, delimiter=' & ', fmt='$%2.3f$', newline=' \\\\\\\\\\n')\n\n"
] | [
[
"numpy.ones",
"numpy.savetxt",
"numpy.random.seed",
"tensorflow.matmul",
"numpy.log",
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"numpy.asscalar",
"numpy.reshape",
"numpy.random.choice",
"numpy.abs",
"tensorflow.gradients",
"tensorflow.set_random_seed",
"numpy.std",
"numpy.finfo",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"tensorflow.zeros",
"tensorflow.truncated_normal",
"tensorflow.train.AdamOptimizer",
"numpy.random.randn",
"tensorflow.exp",
"tensorflow.square",
"numpy.sqrt",
"numpy.real",
"numpy.loadtxt"
]
] |
justusschock/tensorboardX | [
"1c16e127f9a737c0b45d0447c20499dec666130c"
] | [
"tensorboardX/summary.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"## Generation of summaries.\n### Class for writing Summaries\n@@FileWriter\n@@FileWriterCache\n### Summary Ops\n@@tensor_summary\n@@scalar\n@@histogram\n@@audio\n@@image\n@@merge\n@@merge_all\n## Utilities\n@@get_summary_description\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport bisect\nimport logging\nimport numpy as np\nimport os\nimport re as _re\n\n# pylint: disable=unused-import\nfrom six import StringIO\nfrom six.moves import range\nfrom .proto.summary_pb2 import Summary\nfrom .proto.summary_pb2 import HistogramProto\nfrom .proto.summary_pb2 import SummaryMetadata\nfrom .proto.tensor_pb2 import TensorProto\nfrom .proto.tensor_shape_pb2 import TensorShapeProto\nfrom .proto.plugin_pr_curve_pb2 import PrCurvePluginData\nfrom .proto.plugin_text_pb2 import TextPluginData\nfrom .proto import layout_pb2\nfrom .x2num import make_np\nfrom .utils import _prepare_video, convert_to_HWC\n\n_INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\\w\\.]')\n\n\ndef _calc_scale_factor(tensor):\n converted = tensor.numpy() if not isinstance(tensor, np.ndarray) else tensor\n return 1 if converted.dtype == np.uint8 else 255\n\n\ndef _clean_tag(name):\n # In the past, the first argument to summary ops was a tag, which allowed\n # arbitrary characters. Now we are changing the first argument to be the node\n # name. This has a number of advantages (users of summary ops now can\n # take advantage of the tf name scope system) but risks breaking existing\n # usage, because a much smaller set of characters are allowed in node names.\n # This function replaces all illegal characters with _s, and logs a warning.\n # It also strips leading slashes from the name.\n if name is not None:\n new_name = _INVALID_TAG_CHARACTERS.sub('_', name)\n new_name = new_name.lstrip('/') # Remove leading slashes\n if new_name != name:\n logging.info(\n 'Summary name %s is illegal; using %s instead.' 
% (name, new_name))\n name = new_name\n return name\n\n\ndef _draw_single_box(image, xmin, ymin, xmax, ymax, display_str, color='black', color_text='black', thickness=2):\n from PIL import ImageDraw, ImageFont\n font = ImageFont.load_default()\n draw = ImageDraw.Draw(image)\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n draw.line([(left, top), (left, bottom), (right, bottom),\n (right, top), (left, top)], width=thickness, fill=color)\n if display_str:\n text_bottom = bottom\n # Reverse list and print from bottom to top.\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle(\n [(left, text_bottom - text_height - 2 * margin),\n (left + text_width, text_bottom)], fill=color\n )\n draw.text(\n (left + margin, text_bottom - text_height - margin),\n display_str, fill=color_text, font=font\n )\n return image\n\n\ndef scalar(name, scalar, collections=None):\n \"\"\"Outputs a `Summary` protocol buffer containing a single scalar value.\n The generated Summary has a Tensor.proto containing the input Tensor.\n Args:\n name: A name for the generated node. Will also serve as the series name in\n TensorBoard.\n tensor: A real numeric Tensor containing a single value.\n collections: Optional list of graph collections keys. The new summary op is\n added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.\n Returns:\n A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf.\n Raises:\n ValueError: If tensor has the wrong shape or type.\n \"\"\"\n name = _clean_tag(name)\n scalar = make_np(scalar)\n assert(scalar.squeeze().ndim == 0), 'scalar should be 0D'\n scalar = float(scalar)\n return Summary(value=[Summary.Value(tag=name, simple_value=scalar)])\n\n\ndef histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts):\n # pylint: disable=line-too-long\n \"\"\"Outputs a `Summary` protocol buffer with a histogram.\n The generated\n [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\n has one summary value containing a histogram for `values`.\n Args:\n name: A name for the generated node. Will also serve as a series name in\n TensorBoard.\n min: A float or int min value\n max: A float or int max value\n num: Int number of values\n sum: Float or int sum of all values\n sum_squares: Float or int sum of squares for all values\n bucket_limits: A numeric `Tensor` with upper value per bucket\n bucket_counts: A numeric `Tensor` with number of values per bucket\n Returns:\n A scalar `Tensor` of type `string`. The serialized `Summary` protocol\n buffer.\n \"\"\"\n hist = HistogramProto(min=min,\n max=max,\n num=num,\n sum=sum,\n sum_squares=sum_squares,\n bucket_limit=bucket_limits,\n bucket=bucket_counts)\n return Summary(value=[Summary.Value(tag=name, histo=hist)])\n\n\ndef histogram(name, values, bins, max_bins=None):\n # pylint: disable=line-too-long\n \"\"\"Outputs a `Summary` protocol buffer with a histogram.\n The generated\n [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\n has one summary value containing a histogram for `values`.\n This op reports an `InvalidArgument` error if any value is not finite.\n Args:\n name: A name for the generated node. Will also serve as a series name in\n TensorBoard.\n values: A real numeric `Tensor`. Any shape. Values to use to\n build the histogram.\n Returns:\n A scalar `Tensor` of type `string`. 
The serialized `Summary` protocol\n buffer.\n \"\"\"\n name = _clean_tag(name)\n values = make_np(values)\n hist = make_histogram(values.astype(float), bins, max_bins)\n return Summary(value=[Summary.Value(tag=name, histo=hist)])\n\n\ndef make_histogram(values, bins, max_bins=None):\n \"\"\"Convert values into a histogram proto using logic from histogram.cc.\"\"\"\n if values.size == 0:\n raise ValueError('The input has no element.')\n values = values.reshape(-1)\n counts, limits = np.histogram(values, bins=bins)\n num_bins = len(counts)\n if max_bins is not None and num_bins > max_bins:\n subsampling = num_bins // max_bins\n subsampling_remainder = num_bins % subsampling\n if subsampling_remainder != 0:\n counts = np.pad(counts, pad_width=[[0, subsampling - subsampling_remainder]],\n mode=\"constant\", constant_values=0)\n counts = counts.reshape(-1, subsampling).sum(axis=-1)\n new_limits = np.empty((counts.size + 1,), limits.dtype)\n new_limits[:-1] = limits[:-1:subsampling]\n new_limits[-1] = limits[-1]\n limits = new_limits\n\n # Find the first and the last bin defining the support of the histogram:\n cum_counts = np.cumsum(np.greater(counts, 0, dtype=np.int32))\n start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side=\"right\")\n start = int(start)\n end = int(end) + 1\n del cum_counts\n\n # Tensorboard only includes the right bin limits. To still have the leftmost limit\n # included, we include an empty bin left.\n # If start == 0, we need to add an empty one left, otherwise we can just include the bin left to the\n # first nonzero-count bin:\n counts = counts[start - 1:end] if start > 0 else np.concatenate([[0], counts[:end]])\n limits = limits[start:end + 1]\n\n if counts.size == 0 or limits.size == 0:\n raise ValueError('The histogram is empty, please file a bug report.')\n\n sum_sq = values.dot(values)\n return HistogramProto(min=values.min(),\n max=values.max(),\n num=len(values),\n sum=values.sum(),\n sum_squares=sum_sq,\n bucket_limit=limits.tolist(),\n bucket=counts.tolist())\n\n\ndef image(tag, tensor, rescale=1, dataformats='NCHW'):\n \"\"\"Outputs a `Summary` protocol buffer with images.\n The summary has up to `max_images` summary values containing images. The\n images are built from `tensor` which must be 3-D with shape `[height, width,\n channels]` and where `channels` can be:\n * 1: `tensor` is interpreted as Grayscale.\n * 3: `tensor` is interpreted as RGB.\n * 4: `tensor` is interpreted as RGBA.\n The `name` in the outputted Summary.Value protobufs is generated based on the\n name, with a suffix depending on the max_outputs setting:\n * If `max_outputs` is 1, the summary value tag is '*name*/image'.\n * If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*name*/image/0', '*name*/image/1', etc.\n Args:\n tag: A name for the generated node. Will also serve as a series name in\n TensorBoard.\n tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width,\n channels]` where `channels` is 1, 3, or 4.\n 'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8).\n The image() function will scale the image values to [0, 255] by applying\n a scale factor of either 1 (uint8) or 255 (float32).\n Returns:\n A scalar `Tensor` of type `string`. 
The serialized `Summary` protocol\n buffer.\n \"\"\"\n tag = _clean_tag(tag)\n tensor = make_np(tensor)\n tensor = convert_to_HWC(tensor, dataformats)\n # Do not assume that user passes in values in [0, 255], use data type to detect\n scale_factor = _calc_scale_factor(tensor)\n tensor = tensor.astype(np.float32)\n tensor = (tensor * scale_factor).astype(np.uint8)\n image = make_image(tensor, rescale=rescale)\n return Summary(value=[Summary.Value(tag=tag, image=image)])\n\n\ndef image_boxes(tag, tensor_image, tensor_boxes, rescale=1, dataformats='CHW'):\n '''Outputs a `Summary` protocol buffer with images.'''\n tensor_image = make_np(tensor_image)\n tensor_image = convert_to_HWC(tensor_image, dataformats)\n tensor_boxes = make_np(tensor_boxes)\n tensor_image = tensor_image.astype(\n np.float32) * _calc_scale_factor(tensor_image)\n image = make_image(tensor_image.astype(np.uint8),\n rescale=rescale,\n rois=tensor_boxes)\n return Summary(value=[Summary.Value(tag=tag, image=image)])\n\n\ndef draw_boxes(disp_image, boxes):\n # xyxy format\n num_boxes = boxes.shape[0]\n list_gt = range(num_boxes)\n for i in list_gt:\n disp_image = _draw_single_box(disp_image,\n boxes[i, 0],\n boxes[i, 1],\n boxes[i, 2],\n boxes[i, 3],\n display_str=None,\n color='Red')\n return disp_image\n\n\ndef make_image(tensor, rescale=1, rois=None):\n \"\"\"Convert an numpy representation image to Image protobuf\"\"\"\n from PIL import Image\n height, width, channel = tensor.shape\n scaled_height = int(height * rescale)\n scaled_width = int(width * rescale)\n image = Image.fromarray(tensor)\n if rois is not None:\n image = draw_boxes(image, rois)\n image = image.resize((scaled_width, scaled_height), Image.ANTIALIAS)\n import io\n output = io.BytesIO()\n image.save(output, format='PNG')\n image_string = output.getvalue()\n output.close()\n return Summary.Image(height=height,\n width=width,\n colorspace=channel,\n encoded_image_string=image_string)\n\n\ndef video(tag, tensor, fps=4):\n tag = _clean_tag(tag)\n tensor = make_np(tensor)\n tensor = _prepare_video(tensor)\n # If user passes in uint8, then we don't need to rescale by 255\n scale_factor = _calc_scale_factor(tensor)\n tensor = tensor.astype(np.float32)\n tensor = (tensor * scale_factor).astype(np.uint8)\n video = make_video(tensor, fps)\n return Summary(value=[Summary.Value(tag=tag, image=video)])\n\n\ndef make_video(tensor, fps):\n try:\n import moviepy\n except ImportError:\n print('add_video needs package moviepy')\n return\n try:\n from moviepy import editor as mpy\n except ImportError:\n print(\"moviepy is installed, but can't import moviepy.editor.\",\n \"Some packages could be missing [imageio, requests]\")\n return\n import tempfile\n\n t, h, w, c = tensor.shape\n\n # encode sequence of images into gif string\n clip = mpy.ImageSequenceClip(list(tensor), fps=fps)\n with tempfile.NamedTemporaryFile() as f:\n filename = f.name + '.gif'\n\n try:\n clip.write_gif(filename, verbose=False, progress_bar=False)\n except TypeError:\n clip.write_gif(filename, verbose=False)\n\n with open(filename, 'rb') as f:\n tensor_string = f.read()\n\n try:\n os.remove(filename)\n except OSError:\n pass\n\n return Summary.Image(height=h, width=w, colorspace=c, encoded_image_string=tensor_string)\n\n\ndef audio(tag, tensor, sample_rate=44100):\n tensor = make_np(tensor)\n tensor = tensor.squeeze()\n if abs(tensor).max() > 1:\n print('warning: audio amplitude out of range, auto clipped.')\n tensor = tensor.clip(-1, 1)\n assert(tensor.ndim == 1), 'input tensor should be 1 
dimensional.'\n\n tensor_list = [int(32767.0 * x) for x in tensor]\n import io\n import wave\n import struct\n fio = io.BytesIO()\n Wave_write = wave.open(fio, 'wb')\n Wave_write.setnchannels(1)\n Wave_write.setsampwidth(2)\n Wave_write.setframerate(sample_rate)\n tensor_enc = b''\n for v in tensor_list:\n tensor_enc += struct.pack('<h', v)\n\n Wave_write.writeframes(tensor_enc)\n Wave_write.close()\n audio_string = fio.getvalue()\n fio.close()\n audio = Summary.Audio(sample_rate=sample_rate,\n num_channels=1,\n length_frames=len(tensor_list),\n encoded_audio_string=audio_string,\n content_type='audio/wav')\n return Summary(value=[Summary.Value(tag=tag, audio=audio)])\n\n\ndef custom_scalars(layout):\n categoriesnames = layout.keys()\n categories = []\n layouts = []\n for k, v in layout.items():\n charts = []\n for chart_name, chart_meatadata in v.items():\n tags = chart_meatadata[1]\n if chart_meatadata[0] == 'Margin':\n assert len(tags) == 3\n mgcc = layout_pb2.MarginChartContent(series=[layout_pb2.MarginChartContent.Series(value=tags[0],\n lower=tags[1],\n upper=tags[2])])\n chart = layout_pb2.Chart(title=chart_name, margin=mgcc)\n else:\n mlcc = layout_pb2.MultilineChartContent(tag=tags)\n chart = layout_pb2.Chart(title=chart_name, multiline=mlcc)\n charts.append(chart)\n categories.append(layout_pb2.Category(title=k, chart=charts))\n\n layout = layout_pb2.Layout(category=categories)\n PluginData = [SummaryMetadata.PluginData(plugin_name='custom_scalars')]\n smd = SummaryMetadata(plugin_data=PluginData)\n tensor = TensorProto(dtype='DT_STRING',\n string_val=[layout.SerializeToString()],\n tensor_shape=TensorShapeProto())\n return Summary(value=[Summary.Value(tag='custom_scalars__config__', tensor=tensor, metadata=smd)])\n\n\ndef text(tag, text):\n import json\n PluginData = [SummaryMetadata.PluginData(\n plugin_name='text', content=TextPluginData(version=0).SerializeToString())]\n smd = SummaryMetadata(plugin_data=PluginData)\n tensor = TensorProto(dtype='DT_STRING',\n string_val=[text.encode(encoding='utf_8')],\n tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]))\n return Summary(value=[Summary.Value(tag=tag + '/text_summary', metadata=smd, tensor=tensor)])\n\n\ndef pr_curve_raw(tag, tp, fp, tn, fn, precision, recall, num_thresholds=127, weights=None):\n if num_thresholds > 127: # weird, value > 127 breaks protobuf\n num_thresholds = 127\n data = np.stack((tp, fp, tn, fn, precision, recall))\n pr_curve_plugin_data = PrCurvePluginData(\n version=0, num_thresholds=num_thresholds).SerializeToString()\n PluginData = [SummaryMetadata.PluginData(\n plugin_name='pr_curves', content=pr_curve_plugin_data)]\n smd = SummaryMetadata(plugin_data=PluginData)\n tensor = TensorProto(dtype='DT_FLOAT',\n float_val=data.reshape(-1).tolist(),\n tensor_shape=TensorShapeProto(\n dim=[TensorShapeProto.Dim(size=data.shape[0]), TensorShapeProto.Dim(size=data.shape[1])]))\n return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])\n\n\ndef pr_curve(tag, labels, predictions, num_thresholds=127, weights=None):\n # weird, value > 127 breaks protobuf\n num_thresholds = min(num_thresholds, 127)\n data = compute_curve(labels, predictions,\n num_thresholds=num_thresholds, weights=weights)\n pr_curve_plugin_data = PrCurvePluginData(\n version=0, num_thresholds=num_thresholds).SerializeToString()\n PluginData = [SummaryMetadata.PluginData(\n plugin_name='pr_curves', content=pr_curve_plugin_data)]\n smd = SummaryMetadata(plugin_data=PluginData)\n tensor = TensorProto(dtype='DT_FLOAT',\n 
float_val=data.reshape(-1).tolist(),\n tensor_shape=TensorShapeProto(\n dim=[TensorShapeProto.Dim(size=data.shape[0]), TensorShapeProto.Dim(size=data.shape[1])]))\n return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])\n\n\n# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py\ndef compute_curve(labels, predictions, num_thresholds=None, weights=None):\n _MINIMUM_COUNT = 1e-7\n\n if weights is None:\n weights = 1.0\n\n # Compute bins of true positives and false positives.\n bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))\n float_labels = labels.astype(np.float)\n histogram_range = (0, num_thresholds - 1)\n tp_buckets, _ = np.histogram(\n bucket_indices,\n bins=num_thresholds,\n range=histogram_range,\n weights=float_labels * weights)\n fp_buckets, _ = np.histogram(\n bucket_indices,\n bins=num_thresholds,\n range=histogram_range,\n weights=(1.0 - float_labels) * weights)\n\n # Obtain the reverse cumulative sum.\n tp = np.cumsum(tp_buckets[::-1])[::-1]\n fp = np.cumsum(fp_buckets[::-1])[::-1]\n tn = fp[0] - fp\n fn = tp[0] - tp\n precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)\n recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)\n return np.stack((tp, fp, tn, fn, precision, recall))\n"
] | [
[
"numpy.cumsum",
"numpy.empty",
"numpy.ceil",
"numpy.searchsorted",
"numpy.histogram",
"numpy.maximum",
"numpy.greater",
"numpy.floor",
"numpy.pad",
"numpy.stack",
"numpy.concatenate"
]
] |
v-morello/iqrm | [
"9f90e43c003af5d248d08b65d52fc88a73e12bbd",
"9f90e43c003af5d248d08b65d52fc88a73e12bbd"
] | [
"iqrm/core.py",
"iqrm/tests/test_iqrm.py"
] | [
"import itertools\nimport numpy as np\n\nfrom collections import defaultdict\n\n\ndef lagged_diff(x, k):\n \"\"\"\n Returns the sequence of x[i] - x[i - k], as an array with the same size as x.\n Boundary conditions are handled as follows:\n x[i] = x[0] if i < 0\n x[i] = x[n-1] if i >= n, where n = len(x)\n \"\"\"\n s = np.roll(x, k)\n if k >= 0:\n s[:k] = x[0]\n else:\n s[k:] = x[-1] # NOTE: lag is negative here\n return x - s\n\n\ndef outlier_mask(x, threshold=3.0):\n \"\"\"\n Returns an outlier mask for array x, based on Tukey's rule and assuming that the inlier\n distribution of x (the distribution of 'good' values) is Gaussian. 'threshold' represents a\n number of Gaussian sigmas.\n \"\"\"\n q1, med, q3 = np.percentile(x, [25, 50, 75])\n std = (q3 - q1) / 1.349\n return (x - med) > threshold * std\n\n\ndef genlags(radius, geofactor=1.5):\n lag = 1\n while lag <= radius:\n yield lag\n yield -lag\n lag = max(int(geofactor * lag), lag + 1)\n\n\ndef iqrm_mask(x, radius=5, threshold=3.0):\n \"\"\"\n Compute the IQRM mask for one-dimensional input data x.\n The input 'x' is expected to represent a per-channel statistic that measures RFI contamination\n in a block of time-frequency data. Any statistic can be used, but an important requirement is\n that larger values must indicate higher levels of RFI contamination.\n\n Parameters\n ----------\n x : list or ndarray\n Input data (1-dimensional)\n radius : int, optional\n Radius in number of elements. If a float is passed, it is truncated. A recommended value\n is 10% of the number of frequency channels\n threshold : float, optional\n Flagging threshold in number of Gaussian sigmas\n\n Returns\n -------\n mask : ndarray\n Boolean mask with the same size as the input 'x', where 'True' denotes an outlier\n votes_cast : dict of sets\n Dictionary of sets, where the keys are input array indices i that cast at least one vote,\n and the values are the set of array indices that received a vote from i.\n \"\"\"\n x = np.asarray(x)\n n = len(x)\n radius = int(radius)\n\n if not radius > 0:\n raise ValueError(\"radius must be > 0\")\n\n threshold = float(threshold)\n if not threshold > 0:\n raise ValueError(\"threshold must be > 0\")\n\n # These data structures both represent a directed graph\n # votes_cast[i] contains the recipients of votes cast by i\n # votes_received[i] contains the casters of votes received by i\n votes_cast = defaultdict(set)\n votes_received = defaultdict(set)\n\n for lag in genlags(radius):\n d = lagged_diff(x, lag)\n m = outlier_mask(d, threshold)\n\n # m[i] = True <=> point j = i - lag cast a vote on i\n # <=> point i received a vote from j = i - lag\n I = np.where(m)[0]\n J = np.clip(I - lag, 0, n - 1)\n\n for i, j in zip(I, J):\n votes_cast[j].add(i)\n votes_received[i].add(j)\n\n mask = np.zeros_like(x, dtype=bool)\n \n # i gets masked by j if both the following conditions are True:\n # 1) j has cast a vote on i\n # 2) j has cast strictly less votes in total than i has received in total\n for i, casters in votes_received.items():\n for j in casters:\n if j in votes_cast and len(votes_cast[j]) < len(votes_received[i]):\n mask[i] = True\n break\n\n return mask, dict(votes_cast)\n",
"import numpy as np\nfrom pytest import raises\n\nfrom iqrm import iqrm_mask\n\n\ndef generate_noise(nchan=1024, seed=0):\n # IMPORTANT: set the random seed for reproducible results\n np.random.seed(seed)\n return np.random.normal(size=nchan)\n\n\ndef generate_noise_with_outlier_range(start, end, nchan=1024, seed=0):\n s = generate_noise(nchan=nchan, seed=seed)\n s[start:end] += 100\n return s\n\n\ndef test_param_checks():\n nchan = 1024\n s = np.zeros(nchan)\n\n with raises(ValueError): # radius must be > 0\n iqrm_mask(s, radius=0)\n\n with raises(ValueError): # threshold must be > 0\n iqrm_mask(s, threshold=0)\n\n\ndef test_masking_noise():\n s = generate_noise()\n\n for radius in range(1, 6):\n mask, __ = iqrm_mask(s, radius=radius, threshold=4.0)\n assert np.alltrue(~mask)\n\n\ndef test_masking_single_outlier():\n nchan = 1024\n indices = [0, 1, 42, 213, 740, 1022, 1023]\n\n for index in indices:\n # NOTE: when using radius = 1, if the either the first or last element are the sole \n # outlier, they won't be detected (the single vote they receive is not valid). \n # We thus start at radius=2.\n for radius in (2, 3, 4, 6, 9):\n s = generate_noise_with_outlier_range(index, index+1, nchan=nchan)\n mask, __ = iqrm_mask(s, radius=radius, threshold=4.0)\n assert mask[index] == True\n\n\ndef test_masking_outlier_range():\n # The idea here is to generate data that looks like a top hat, i.e. noise plus a contiguous\n # range of high ouliers with similar values.\n\n # If the edges of the top-hat lie \"far away\" from the edges of the input array, then we expect\n # all outliers to be masked as long as:\n # max trial lag value > width\n\n # NOTE: for a top-hat that lies at the edge of the input array, the situation is different,\n # and the radius required to mask all outliers is such that:\n # max trial lag value > 2*width\n\n nchan = 1024\n indices = [67, 213, 486, 740, 959]\n trial_lag_sequence = (1, 2, 3, 4, 6, 9, 13)\n\n for index in indices:\n for jj, width in enumerate(trial_lag_sequence[:-1]):\n s = generate_noise_with_outlier_range(index, index+width, nchan=nchan)\n radius = trial_lag_sequence[jj+1]\n mask, __ = iqrm_mask(s, radius=radius, threshold=4.0)\n assert np.alltrue(mask[index:index+width])"
] | [
[
"numpy.zeros_like",
"numpy.roll",
"numpy.asarray",
"numpy.clip",
"numpy.where",
"numpy.percentile"
],
[
"numpy.random.normal",
"numpy.random.seed",
"numpy.alltrue",
"numpy.zeros"
]
] |
giacomov/pyggop | [
"81f0307281d00e367ab632fa52de41a56877b048"
] | [
"pyggop/Tau.py"
] | [
"import numpy as np\nfrom multiprocessing import Pool\n\nfrom grbod import *\n\nimport os, sys\nimport pickle\n\nimport scipy.interpolate\nfrom math import log10\n\nimport matplotlib.pyplot as plt\n\nfrom pyggop.ParallelPool import ParallelPool\n\n#This is the actual computation\ndef func(DRbar, R_0, b, m, a, xx, yy):\n \n R0_hat = 1.0 / ( 1.0 + 10**xx ) # = R_0/R_t0\n \n DR_hat = DRbar * R0_hat / R_0\n \n x = ( 10**yy )**2\n \n tau = tau_integral(x, R0_hat, DR_hat, \n b, m, a,\n reg={'Re':0,'Rt':1.e-4},\n verbose=True)\n return tau\n\ndef data_stream(DRbar, R_0, b, m, a, XX, YY):\n for idx, xx in np.ndenumerate(XX):\n yield idx, (DRbar, R_0, b, m, a, xx, YY[idx])\n\ndef proxy(args):\n return args[0], func(*args[1])\n\nclass Tau( object ):\n \n def __init__(self, m, b, a, DRbar, R_0=1.0, tau_star=1.0 ):\n \n self.m = float(m)\n self.b = float(b)\n self.a = float(a)\n self.DRbar = float( DRbar )\n self.R_0 = R_0\n self.tau_star = tau_star\n \n self.name = self._getUniqueName()\n \n self.loadLookupTable()\n \n def _getUniqueName(self):\n \n return \"%s-%s-%.2f-%.2g-%s\" % (self.m, self.b, self.a, \n self.DRbar, self.tau_star)\n \n def loadLookupTable(self):\n \n if not os.path.exists( '%s.pkl' % self.name ):\n \n #Lookup table does not exists. Create it\n \n self.computeLookupTable()\n \n #Load the lookup table\n results = pickle.load(open('%s.pkl'% self.name,'r')) \n \n \n #Get the extremes for the interpolation\n \n self.minX,self.maxX = results['X'].min(),results['X'].max()\n self.minY,self.maxY = results['Y'].min(),results['Y'].max()\n \n #print results['comment']\n \n self.tau_interp2 = scipy.interpolate.interp2d( results['X'],\n results['Y'],\n results['Z'].transpose(),\n bounds_error=True)\n \n def compute(self, XX, YY):\n \n result = np.zeros(shape=(XX.shape))\n \n pool = ParallelPool( )\n \n pool_res = pool.map(proxy, data_stream(self.DRbar, self.R_0, \n self.b, self.m, self.a,\n XX, YY))\n \n pool.close()\n \n for k,v in pool_res:\n \n result[k] = v\n \n return result\n \n def computeLookupTable( self, plot=False ):\n \n X = np.linspace(-11, 3, 50)#log10(R_t0/R0-1)\n \n Y = np.concatenate((np.arange(-6, -4,1 / 3.0),\n np.arange(-4, -1,1/ 3.0), \n np.arange(-1, -0.04, 0.1/ 8.0),\n np.arange(-0.04, 0.08, 0.01 / 8.0), \n np.arange(0.08, 0.9, 0.1/ 4.0),\n np.arange(1, 2.2,0.2/ 3.0)))\n \n #Y = np.concatenate((np.arange(-4, -1,1 / 2.0), \n # np.arange(-1, -0.04, 0.1 / 2.0),\n # np.arange(-0.04, 0.08, 0.02 / 2.0 ), \n # np.arange(0.08, 0.9, 0.1 / 2.0),\n # np.arange(1, 2.2,0.2 / 2.0)))\n \n XX, YY = np.meshgrid(X, Y, indexing='ij', sparse=False, copy=True)\n \n Z = self.compute(XX, YY)\n \n idx = np.isfinite(Z)\n \n Z[~idx] = 1e-30\n \n idx = (Z <= 0)\n Z[idx] = 1e-30\n \n print(\"Zmin = %s, Zmax = %s\" %(Z.min(),Z.max()))\n \n if plot:\n \n plt.figure(1)\n plt.contourf(X, Y, np.log10(Z.transpose()), 20)\n plt.colorbar()\n plt.savefig(\"interpolation_data.png\")\n \n final = {'X':X, 'Y':Y, 'Z':np.log10(Z)}\n final['comment'] = \"X = log10(R_t0/R0-1)\\nY = log10(gth_t0) = log10(sqrt(x))\\nZ = log10(tau_integral)\"\n \n pickle.dump(final, open( '%s.pkl' % self.name, 'w' ))\n \n def __call__(self, X, Y):\n \n try:\n \n val = self.tau_interp2( X, Y )\n #val = log10( func(self.DRbar, self.R_0, self.b, self.m, self.a, X, Y) )\n \n except:\n \n msg = (\"Request (X,Y) = (%s, %s) could not be satistfied. 
\" %(X,Y))\n msg += (\"Interpolation range is %s < X < %s , %s < Y < %s\" %(self.minX,self.maxX,self.minY,self.maxY))\n \n sys.stderr.write(msg)\n sys.stderr.write(\"\\n\")\n \n raise ValueError(msg)\n \n return val\n \n"
] | [
[
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.ndenumerate",
"numpy.arange",
"numpy.log10",
"matplotlib.pyplot.colorbar",
"numpy.meshgrid",
"numpy.linspace",
"numpy.isfinite"
]
] |
schlevik/EGG | [
"428d5aed3eb6fb0296f6856fb77b0a1cdceb33f1"
] | [
"egg/zoo/basic_games/play.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nimport egg.core as core\nfrom egg.core import Callback, Interaction, PrintValidationEvents\nfrom egg.zoo.basic_games.architectures import DiscriReceiver, RecoReceiver, Sender\nfrom egg.zoo.basic_games.data_readers import AttValDiscriDataset, AttValRecoDataset\n\n\n# the following section specifies parameters that are specific to our games: we will also inherit the\n# standard EGG parameters from https://github.com/facebookresearch/EGG/blob/master/egg/core/util.py\ndef get_params(params):\n parser = argparse.ArgumentParser()\n # arguments controlling the game type\n parser.add_argument(\n \"--game_type\",\n type=str,\n default=\"reco\",\n help=\"Selects whether to play a reco(nstruction) or discri(mination) game (default: reco)\",\n )\n # arguments concerning the input data and how they are processed\n parser.add_argument(\n \"--train_data\", type=str, default=None, help=\"Path to the train data\"\n )\n parser.add_argument(\n \"--validation_data\", type=str, default=None, help=\"Path to the validation data\"\n )\n # (the following is only used in the reco game)\n parser.add_argument(\n \"--n_attributes\",\n type=int,\n default=None,\n help=\"Number of attributes in Sender input (must match data set, and it is only used in reco game)\",\n )\n parser.add_argument(\n \"--n_values\",\n type=int,\n default=None,\n help=\"Number of values for each attribute (must match data set)\",\n )\n parser.add_argument(\n \"--validation_batch_size\",\n type=int,\n default=0,\n help=\"Batch size when processing validation data, whereas training data batch_size is controlled by batch_size (default: same as training data batch size)\",\n )\n # arguments concerning the training method\n parser.add_argument(\n \"--mode\",\n type=str,\n default=\"rf\",\n help=\"Selects whether Reinforce or Gumbel-Softmax relaxation is used for training {rf, gs} (default: rf)\",\n )\n parser.add_argument(\n \"--temperature\",\n type=float,\n default=1.0,\n help=\"GS temperature for the sender, only relevant in Gumbel-Softmax (gs) mode (default: 1.0)\",\n )\n parser.add_argument(\n \"--sender_entropy_coeff\",\n type=float,\n default=1e-1,\n help=\"Reinforce entropy regularization coefficient for Sender, only relevant in Reinforce (rf) mode (default: 1e-1)\",\n )\n # arguments concerning the agent architectures\n parser.add_argument(\n \"--sender_cell\",\n type=str,\n default=\"rnn\",\n help=\"Type of the cell used for Sender {rnn, gru, lstm} (default: rnn)\",\n )\n parser.add_argument(\n \"--receiver_cell\",\n type=str,\n default=\"rnn\",\n help=\"Type of the cell used for Receiver {rnn, gru, lstm} (default: rnn)\",\n )\n parser.add_argument(\n \"--sender_hidden\",\n type=int,\n default=10,\n help=\"Size of the hidden layer of Sender (default: 10)\",\n )\n parser.add_argument(\n \"--receiver_hidden\",\n type=int,\n default=10,\n help=\"Size of the hidden layer of Receiver (default: 10)\",\n )\n parser.add_argument(\n \"--sender_embedding\",\n type=int,\n default=10,\n help=\"Output dimensionality of the layer that embeds symbols produced at previous step in Sender (default: 10)\",\n )\n parser.add_argument(\n \"--receiver_embedding\",\n type=int,\n default=10,\n help=\"Output dimensionality of the layer that embeds the message 
symbols for Receiver (default: 10)\",\n )\n # arguments controlling the script output\n parser.add_argument(\n \"--print_validation_events\",\n default=False,\n action=\"store_true\",\n help=\"If this flag is passed, at the end of training the script prints the input validation data, the corresponding messages produced by the Sender, and the output probabilities produced by the Receiver (default: do not print)\",\n )\n args = core.init(parser, params)\n return args\n\n\ndef main(params):\n opts = get_params(params)\n if opts.validation_batch_size == 0:\n opts.validation_batch_size = opts.batch_size\n print(opts, flush=True)\n\n # the following if statement controls aspects specific to the two game tasks: loss, input data and architecture of the Receiver\n # (the Sender is identical in both cases, mapping a single input attribute-value vector to a variable-length message)\n if opts.game_type == \"discri\":\n # the game object we will encounter below takes as one of its mandatory arguments a loss: a loss in EGG is expected to take as arguments the sender input,\n # the message, the Receiver input, the Receiver output and the labels (although some of these elements might not actually be used by a particular loss);\n # together with the actual loss computation, the loss function can return a dictionary with other auxiliary statistics: in this case, accuracy\n def loss(\n _sender_input,\n _message,\n _receiver_input,\n receiver_output,\n labels,\n _aux_input,\n ):\n # in the discriminative case, accuracy is computed by comparing the index with highest score in Receiver output (a distribution of unnormalized\n # probabilities over target poisitions) and the corresponding label read from input, indicating the ground-truth position of the target\n acc = (receiver_output.argmax(dim=1) == labels).detach().float()\n # similarly, the loss computes cross-entropy between the Receiver-produced target-position probability distribution and the labels\n loss = F.cross_entropy(receiver_output, labels, reduction=\"none\")\n return loss, {\"acc\": acc}\n\n # the input data are read into DataLodaer objects, which are pytorch constructs implementing standard data processing functionalities, such as shuffling\n # and batching\n # within our games, we implement dataset classes, such as AttValDiscriDataset, to read the input text files and convert the information they contain\n # into the form required by DataLoader\n # look at the definition of the AttValDiscrDataset (the class to read discrimination game data) in data_readers.py for further details\n # note that, for the training dataset, we first instantiate the AttValDiscriDataset object and then feed it to DataLoader, whereas for the\n # validation data (confusingly called \"test\" data due to code heritage inertia) we directly declare the AttValDiscriDataset when instantiating\n # DataLoader: the reason for this difference is that we need the train_ds object to retrieve the number of features of the input vectors\n train_ds = AttValDiscriDataset(path=opts.train_data, n_values=opts.n_values)\n train_loader = DataLoader(\n train_ds, batch_size=opts.batch_size, shuffle=True, num_workers=1\n )\n test_loader = DataLoader(\n AttValDiscriDataset(path=opts.validation_data, n_values=opts.n_values),\n batch_size=opts.validation_batch_size,\n shuffle=False,\n num_workers=1,\n )\n # note that the number of features retrieved here concerns inputs after they are converted to 1-hot vectors\n n_features = train_ds.get_n_features()\n # we define here the core of the Receiver 
for the discriminative game, see the architectures.py file for details:\n # note that this will be embedded in a wrapper below to define the full agent\n receiver = DiscriReceiver(n_features=n_features, n_hidden=opts.receiver_hidden)\n\n else: # reco game\n\n def loss(\n sender_input, _message, _receiver_input, receiver_output, labels, _aux_input\n ):\n # in the case of the recognition game, for each attribute we compute a different cross-entropy score\n # based on comparing the probability distribution produced by the Receiver over the values of each attribute\n # with the corresponding ground truth, and then averaging across attributes\n # accuracy is instead computed by considering as a hit only cases where, for each attribute, the Receiver\n # assigned the largest probability to the correct value\n # most of this function consists of the usual pytorch madness needed to reshape tensors in order to perform these computations\n n_attributes = opts.n_attributes\n n_values = opts.n_values\n batch_size = sender_input.size(0)\n receiver_output = receiver_output.view(batch_size * n_attributes, n_values)\n receiver_guesses = receiver_output.argmax(dim=1)\n correct_samples = (\n (receiver_guesses == labels.view(-1))\n .view(batch_size, n_attributes)\n .detach()\n )\n acc = (torch.sum(correct_samples, dim=-1) == n_attributes).float()\n labels = labels.view(batch_size * n_attributes)\n loss = F.cross_entropy(receiver_output, labels, reduction=\"none\")\n loss = loss.view(batch_size, -1).mean(dim=1)\n return loss, {\"acc\": acc}\n\n # again, see data_readers.py in this directory for the AttValRecoDataset data reading class\n train_loader = DataLoader(\n AttValRecoDataset(\n path=opts.train_data,\n n_attributes=opts.n_attributes,\n n_values=opts.n_values,\n ),\n batch_size=opts.batch_size,\n shuffle=True,\n num_workers=1,\n )\n test_loader = DataLoader(\n AttValRecoDataset(\n path=opts.validation_data,\n n_attributes=opts.n_attributes,\n n_values=opts.n_values,\n ),\n batch_size=opts.validation_batch_size,\n shuffle=False,\n num_workers=1,\n )\n # the number of features for the Receiver (input) and the Sender (output) is given by n_attributes*n_values because\n # they are fed/produce 1-hot representations of the input vectors\n n_features = opts.n_attributes * opts.n_values\n # we define here the core of the receiver for the discriminative game, see the architectures.py file for details\n # this will be embedded in a wrapper below to define the full architecture\n receiver = RecoReceiver(n_features=n_features, n_hidden=opts.receiver_hidden)\n\n # we are now outside the block that defined game-type-specific aspects of the games: note that the core Sender architecture\n # (see architectures.py for details) is shared by the two games (it maps an input vector to a hidden layer that will be use to initialize\n # the message-producing RNN): this will also be embedded in a wrapper below to define the full architecture\n sender = Sender(n_hidden=opts.sender_hidden, n_features=n_features)\n\n # now, we instantiate the full sender and receiver architectures, and connect them and the loss into a game object\n # the implementation differs slightly depending on whether communication is optimized via Gumbel-Softmax ('gs') or Reinforce ('rf', default)\n if opts.mode.lower() == \"gs\":\n # in the following lines, we embed the Sender and Receiver architectures into standard EGG wrappers that are appropriate for Gumbel-Softmax optimization\n # the Sender wrapper takes the hidden layer produced by the core agent 
architecture we defined above when processing input, and uses it to initialize\n # the RNN that generates the message\n sender = core.RnnSenderGS(\n sender,\n vocab_size=opts.vocab_size,\n embed_dim=opts.sender_embedding,\n hidden_size=opts.sender_hidden,\n cell=opts.sender_cell,\n max_len=opts.max_len,\n temperature=opts.temperature,\n )\n # the Receiver wrapper takes the symbol produced by the Sender at each step (more precisely, in Gumbel-Softmax mode, a function of the overall probability\n # of non-eos symbols upt to the step is used), maps it to a hidden layer through a RNN, and feeds this hidden layer to the\n # core Receiver architecture we defined above (possibly with other Receiver input, as determined by the core architecture) to generate the output\n receiver = core.RnnReceiverGS(\n receiver,\n vocab_size=opts.vocab_size,\n embed_dim=opts.receiver_embedding,\n hidden_size=opts.receiver_hidden,\n cell=opts.receiver_cell,\n )\n game = core.SenderReceiverRnnGS(sender, receiver, loss)\n # callback functions can be passed to the trainer object (see below) to operate at certain steps of training and validation\n # for example, the TemperatureUpdater (defined in callbacks.py in the core directory) will update the Gumbel-Softmax temperature hyperparameter\n # after each epoch\n callbacks = [core.TemperatureUpdater(agent=sender, decay=0.9, minimum=0.1)]\n else: # NB: any other string than gs will lead to rf training!\n # here, the interesting thing to note is that we use the same core architectures we defined above, but now we embed them in wrappers that are suited to\n # Reinforce-based optmization\n sender = core.RnnSenderReinforce(\n sender,\n vocab_size=opts.vocab_size,\n embed_dim=opts.sender_embedding,\n hidden_size=opts.sender_hidden,\n cell=opts.sender_cell,\n max_len=opts.max_len,\n )\n receiver = core.RnnReceiverDeterministic(\n receiver,\n vocab_size=opts.vocab_size,\n embed_dim=opts.receiver_embedding,\n hidden_size=opts.receiver_hidden,\n cell=opts.receiver_cell,\n )\n game = core.SenderReceiverRnnReinforce(\n sender,\n receiver,\n loss,\n sender_entropy_coeff=opts.sender_entropy_coeff,\n receiver_entropy_coeff=0,\n )\n callbacks = []\n\n # we are almost ready to train: we define here an optimizer calling standard pytorch functionality\n optimizer = core.build_optimizer(game.parameters())\n # in the following statement, we finally instantiate the trainer object with all the components we defined (the game, the optimizer, the data\n # and the callbacks)\n if opts.print_validation_events == True:\n # we add a callback that will print loss and accuracy after each training and validation pass (see ConsoleLogger in callbacks.py in core directory)\n # if requested by the user, we will also print a detailed log of the validation pass after full training: look at PrintValidationEvents in\n # language_analysis.py (core directory)\n trainer = core.Trainer(\n game=game,\n optimizer=optimizer,\n train_data=train_loader,\n validation_data=test_loader,\n callbacks=callbacks\n + [\n core.ConsoleLogger(print_train_loss=True, as_json=True),\n core.PrintValidationEvents(n_epochs=opts.n_epochs),\n ],\n )\n else:\n trainer = core.Trainer(\n game=game,\n optimizer=optimizer,\n train_data=train_loader,\n validation_data=test_loader,\n callbacks=callbacks\n + [core.ConsoleLogger(print_train_loss=True, as_json=True)],\n )\n\n # and finally we train!\n trainer.train(n_epochs=opts.n_epochs)\n\n\nif __name__ == \"__main__\":\n import sys\n\n main(sys.argv[1:])\n"
] | [
[
"torch.sum",
"torch.utils.data.DataLoader",
"torch.nn.functional.cross_entropy"
]
] |
kasimte/Pytorch-pensieve | [
"e550d9c36ed35b49592955fefbc17d1424d37b3b"
] | [
"test/model.py"
] | [
"from __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils import norm_col_init, weights_init\n\n# class agentNET(torch.nn.Module):\n# def __init__(self, num_inputs = 1, num_outputs = 6):\n# super(agentNET, self).__init__()\n#\n# self.conv1 = nn.Conv1d(num_inputs, 16, 3, stride=1, padding=1)\n# self.conv2 = nn.Conv1d(16, 16, 2, stride=1)\n# self.conv3 = nn.Conv1d(16, 8, 2, stride=1)\n#\n# self.lstm = nn.LSTMCell(32, 20)\n# self.fc1 = nn.Linear(20, 10)\n#\n# self.critic_linear = nn.Linear(10, 1)\n# self.actor_linear = nn.Linear(10, num_outputs)\n#\n# self.apply(weights_init)\n# self.actor_linear.weight.data = norm_col_init(\n# self.actor_linear.weight.data, 0.01)\n# self.actor_linear.bias.data.fill_(0)\n#\n# self.critic_linear.weight.data = norm_col_init(\n# self.critic_linear.weight.data, 1.0)\n# self.critic_linear.bias.data.fill_(0)\n#\n# self.fc1.weight.data = norm_col_init(\n# self.fc1.weight.data, 1.0)\n# self.fc1.bias.data.fill_(0)\n#\n# self.lstm.bias_ih.data.fill_(0)\n# self.lstm.bias_hh.data.fill_(0)\n#\n# self.train()\n#\n# def forward(self, inputs):\n# inputs, (hx, cx) = inputs\n# x = F.elu(self.conv1(inputs))\n# x = F.elu(self.conv2(x))\n# x = F.elu(self.conv3(x))\n#\n# x = x.view(x.size(0), -1)\n#\n# hx, cx = self.lstm(x, (hx, cx))\n#\n# x = F.elu(self.fc1(hx))\n#\n# return self.critic_linear(x), self.actor_linear(x), (hx, cx)\n\nclass agentNET(torch.nn.Module):\n def __init__(self, num_inputs = 1, num_outputs = 6, S_LEN = 8):\n super(agentNET, self).__init__()\n\n self.conv1 = nn.Conv2d(num_inputs, 32, (6, 3), stride=1)\n self.conv2 = nn.Conv2d(32, 64, (1, 3), stride=1)\n self.conv3 = nn.Conv2d(64, 128, (1, 2), stride=1)\n\n self.lstm = nn.LSTMCell(128 * (S_LEN - 2 -2 -1), 96)\n self.fc1 = nn.Linear(96, 48)\n self.fc2 = nn.Linear(48, 24)\n\n self.critic_linear = nn.Linear(24, 1)\n self.actor_linear = nn.Linear(24, num_outputs)\n\n self.apply(weights_init)\n self.actor_linear.weight.data = norm_col_init(\n self.actor_linear.weight.data, 0.01)\n self.actor_linear.bias.data.fill_(0)\n\n self.critic_linear.weight.data = norm_col_init(\n self.critic_linear.weight.data, 1.0)\n self.critic_linear.bias.data.fill_(0)\n\n self.fc1.weight.data = norm_col_init(\n self.fc1.weight.data, 1.0)\n self.fc1.bias.data.fill_(0)\n\n self.fc2.weight.data = norm_col_init(\n self.fc2.weight.data, 1.0)\n self.fc2.bias.data.fill_(0)\n\n self.lstm.bias_ih.data.fill_(0)\n self.lstm.bias_hh.data.fill_(0)\n\n self.train()\n\n def forward(self, inputs):\n inputs, (hx, cx) = inputs\n x = F.relu(self.conv1(inputs))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n\n x = x.view(x.size(0), -1)\n\n hx, cx = self.lstm(x, (hx, cx))\n\n x = F.relu(self.fc1(hx))\n x = F.relu(self.fc2(x))\n\n return self.critic_linear(x), self.actor_linear(x), (hx, cx)"
] | [
[
"torch.nn.LSTMCell",
"torch.nn.Conv2d",
"torch.nn.Linear"
]
] |
nla-group/classix | [
"8584162e4c89ba05a62faf1e20104768cf5bb43c"
] | [
"exp/run_scale_lx.py"
] | [
"import sklearn.datasets as data\nfrom classix import CLASSIX\nimport matplotlib.pyplot as plt\n\ndef rn_scale_explore():\n plt.style.use('bmh')\n\n TOL = 0.1 \n random_state = 1\n moons, _ = data.make_moons(n_samples=1000, noise=0.05, random_state=random_state)\n blobs, _ = data.make_blobs(n_samples=1500, centers=[(-0.85,2.75), (1.75,2.25)], cluster_std=0.5, random_state=random_state)\n X = np.vstack([blobs, moons])\n\n for scale in np.arange(1, 3.3, 0.1):\n clx = CLASSIX(sorting='pca', radius=TOL, group_merging='distance', verbose=0)\n clx.fit_transform(X)\n clx.visualize_linkage(scale=scale, figsize=(8,8), labelsize=24, path='img')\n\n\n for tol in np.arange(0.1, 1.3, 0.1):\n clx = CLASSIX(sorting='pca', radius=tol, group_merging='distance', verbose=0)\n clx.fit_transform(X)\n clx.visualize_linkage(scale=1.5, figsize=(8,8), labelsize=24, plot_boundary=True, path='img')\n \n "
] | [
[
"matplotlib.pyplot.style.use",
"sklearn.datasets.make_moons",
"sklearn.datasets.make_blobs"
]
] |
RSLancs/Extracting_plant_names_and_collocates_from_historical_texts | [
"c8d6746978786ca4b83dc550114700530077c543"
] | [
"merge_geo_with collocates.py"
] | [
"##python27\r\n\r\nfrom pprint import pprint\r\nimport pandas as pd\r\n\r\n\r\n##..............open manually merged geoparsed results\r\ngeo = pd.read_csv('./data/merger_xml_extracted_geoparsed_collocates.csv')\r\ngeo = [tuple(x) for x in geo.values] # df to list\r\nprint(geo[1])\r\n\r\n\r\n##..........open collocate results....\r\ncollocate = pd.read_csv('./data/INDEXED_no_overlaps-abrev-dups_collocate_results_15.01.19.csv')\r\ncollocate = [tuple(x) for x in collocate.values] # df to list\r\nprint(collocate[1])\r\n\r\n\r\n#............merge results........................\r\n\r\nmerged = []\r\nfor ig in geo:\r\n\tfor ic in collocate:\r\n\t\tif ig[0] == ic[0]:\r\n\t\t\tmerged.append([ic[0],ic[2],ic[3],ic[4],ic[5],ic[6],ic[7],ig[0],ig[3],ig[5],ig[6]])\r\n\r\nmy_df = pd.DataFrame(merged) # transform result list to dataframe\r\n\r\nmy_df.columns = ['para_index',\r\n\t\t\t\t\t'text',\r\n\t\t\t\t\t'year',\r\n\t\t\t\t\t'spc_acc',\r\n\t\t\t\t\t'spc_syn',\r\n\t\t\t\t\t'find_index',\r\n\t\t\t\t\t'window', \r\n\t\t\t\t\t'geo_para_index',\r\n\t\t\t\t\t'standoff_loc_word',\r\n\t\t\t\t\t'lat',\r\n\t\t\t\t\t'lon' ] # add column labels\r\n\r\na = my_df.to_csv('./data/geo_locations_collocate_merger.csv')"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
ishine/tf-kaldi-speaker-master | [
"4b93110c4aa54f4764c58d9ffef3aec2efce39db"
] | [
"egs/voxceleb/sre/backend/tools/s-norm-get-enroll.py"
] | [
"import os\r\nimport numpy as np\r\nimport logging\r\nimport argparse\r\nimport sys\r\n\r\nlogger = logging.getLogger('s-norm score.')\r\nlogger.setLevel(logging.INFO)\r\nhandler = logging.StreamHandler(sys.stdout)\r\nhandler.setLevel(logging.INFO)\r\nformatter = logging.Formatter(\"%(asctime)s [%(pathname)s:%(lineno)s - \"\r\n \"%(funcName)s - %(levelname)s ] %(message)s\")\r\nhandler.setFormatter(formatter)\r\nlogger.addHandler(handler)\r\nlogger.info('Starting s-norm')\r\n\r\ndef get_args():\r\n \"\"\"\r\n get args from stdin.\r\n \"\"\"\r\n\r\n parser = argparse.ArgumentParser(description='snorm score.', formatter_class=argparse.ArgumentDefaultsHelpFormatter,\r\n conflict_handler='resolve')\r\n\r\n parser.add_argument('--score-file', dest='score_file', type=str, help='score file')\r\n parser.add_argument('--enroll-file', dest='enroll_file', type=str, help='score file')\r\n\r\n args = parser.parse_args()\r\n\r\n return args\r\n\r\ndef write_snorm(enroll_file, model_line, means_l, stds_l):\r\n f_snorm = open(enroll_file, 'w')\r\n \r\n len_model = len(model_line)\r\n \r\n \r\n for index in range(len_model):\r\n f_snorm.write(model_line[index] + ' ')\r\n f_snorm.write(str(means_l[index]))\r\n f_snorm.write(' ')\r\n f_snorm.write(str(stds_l[index]))\r\n f_snorm.write('\\n')\r\n \r\n f_snorm.close()\r\n\r\n\r\ndef snorm(args):\r\n score_file = args.score_file\r\n enroll_file = args.enroll_file\r\n\r\n f_score = open(score_file, 'r')\r\n score_lines = f_score.readlines()\r\n\r\n score_lines = [line.strip() for line in score_lines]\r\n score_lines = [line.split('|') for line in score_lines if line != '']\r\n\r\n model_line = score_lines[0].copy()\r\n del model_line[0]\r\n scores = np.delete(score_lines, 0, axis=0)\r\n test_line = [var[0] for var in scores]\r\n scores = np.delete(scores, 0, axis=1)\r\n\r\n leng, wid = scores.shape\r\n\r\n scores = [float(score) for ss in scores for score in ss]\r\n scores = np.array(scores)\r\n scores.shape = leng, wid\r\n\r\n \r\n\r\n snorm_scores = np.zeros((leng, wid))\r\n\r\n means_w = np.zeros(wid)\r\n stds_w = np.zeros(wid)\r\n\r\n\r\n for ww in range(wid):\r\n score_ww = scores[:,ww].copy()\r\n score_ww.sort()\r\n for i in range(leng):\r\n if score_ww[i] != -1000.0:\r\n break\r\n score_ww = score_ww[-int(leng*0.3):]\r\n #print(ww)\r\n means_w[ww] = np.mean(score_ww)\r\n stds_w[ww] = np.std(score_ww, ddof=1)\r\n del score_ww\r\n\r\n write_snorm(enroll_file, model_line, means_w, stds_w)\r\n \r\n f_score.close()\r\n \r\n\r\ndef main():\r\n args = get_args()\r\n snorm(args)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
] | [
[
"numpy.zeros",
"numpy.delete",
"numpy.array",
"numpy.std",
"numpy.mean"
]
] |
QB3/sparse-ho-qbe | [
"73358caeff2ff08ca4e88af419e7dae753d43ea9"
] | [
"sparse_ho/implicit_forward.py"
] | [
"import numpy as np\nfrom scipy.sparse import issparse\nfrom sparse_ho.forward import get_beta_jac_iterdiff\n\n\nclass ImplicitForward():\n def __init__(\n self, criterion, tol_jac=1e-3, n_iter=100, n_iter_jac=100,\n use_sk=False, verbose=False):\n self.criterion = criterion\n self.n_iter = n_iter\n self.tol_jac = tol_jac\n self.n_iter_jac = n_iter_jac\n self.use_sk = use_sk\n self.verbose = verbose\n\n def get_beta_jac_v(\n self, X, y, log_alpha, model, get_v, mask0=None, dense0=None,\n quantity_to_warm_start=None, max_iter=1000, tol=1e-3,\n compute_jac=False, backward=False, full_jac_v=False):\n\n mask, dense, jac = get_beta_jac_fast_iterdiff(\n X, y, log_alpha, self.criterion.X_val, self.criterion.y_val,\n get_v, mask0=mask0, dense0=dense0,\n jac0=quantity_to_warm_start,\n # tol_jac=self.tol_jac,\n tol_jac=tol, use_sk=self.use_sk,\n tol=tol, niter_jac=self.n_iter_jac, model=model,\n max_iter=self.criterion.model.max_iter, verbose=self.verbose)\n\n jac_v = model.get_jac_v(mask, dense, jac, get_v)\n if full_jac_v:\n jac_v = model.get_full_jac_v(mask, jac_v, X.shape[1])\n\n return mask, dense, jac_v, jac\n\n def get_val_grad(\n self, log_alpha, mask0=None, dense0=None, beta_star=None,\n jac0=None, max_iter=1000, tol=1e-3, compute_jac=True,\n backward=False):\n return self.criterion.get_val_grad(\n log_alpha, self.get_beta_jac_v, max_iter=max_iter, tol=tol,\n compute_jac=compute_jac, backward=backward)\n\n def get_val(\n self, log_alpha, mask0=None, dense0=None, beta_star=None,\n jac0=None, max_iter=1000, tol=1e-3, compute_jac=True,\n backward=False):\n return self.criterion.get_val(\n log_alpha, self.get_beta_jac_v, max_iter=max_iter, tol=tol,\n compute_jac=compute_jac, backward=backward)\n\n\ndef get_beta_jac_fast_iterdiff(\n X, y, log_alpha, X_val, y_val, get_v, model, mask0=None, dense0=None, jac0=None, tol=1e-3, max_iter=1000, niter_jac=1000, tol_jac=1e-6, use_sk=False, verbose=False):\n n_samples, n_features = X.shape\n\n mask, dense, _ = get_beta_jac_iterdiff(\n X, y, log_alpha, mask0=mask0, dense0=dense0, jac0=jac0, tol=tol,\n max_iter=max_iter, compute_jac=False, model=model, use_sk=use_sk,\n verbose=verbose)\n\n dbeta0_new = model._init_dbeta0(mask, mask0, jac0)\n reduce_alpha = model._reduce_alpha(np.exp(log_alpha), mask)\n\n v = None\n _, r = model._init_beta_r(X, y, mask, dense)\n jac = get_only_jac(\n model.reduce_X(mask), model.reduce_y(mask), r, reduce_alpha, model.sign(dense), v,\n dbeta=dbeta0_new, niter_jac=niter_jac, tol_jac=tol_jac, model=model, mask=mask, dense=dense, verbose=verbose)\n\n return mask, dense, jac\n\n\ndef get_only_jac(\n Xs, y, r, alpha, sign_beta, v, dbeta=None, niter_jac=100, tol_jac=1e-4, model=\"lasso\", mask=None, dense=None, verbose=False):\n n_samples, n_features = Xs.shape\n\n is_sparse = issparse(Xs)\n L = model.get_L(Xs, is_sparse)\n\n objs = []\n\n if dbeta is None:\n model._init_dbeta(n_features)\n # if model == \"lasso\":\n # dbeta = np.zeros(n_features)\n # if model == \"mcp\":\n # dbeta = np.zeros((n_features, 2))\n # elif model == \"wlasso\":\n # dbeta = np.zeros((n_features, n_features))\n else:\n dbeta = dbeta.copy()\n\n dr = model._init_dr(dbeta, Xs, y, mask)\n for i in range(niter_jac):\n if verbose:\n print(\"%i -st iterations over %i\" % (i, niter_jac))\n if is_sparse:\n model._update_only_jac_sparse(\n Xs.data, Xs.indptr, Xs.indices, y, n_samples,\n n_features, dbeta, r, dr, L, alpha, sign_beta)\n else:\n model._update_only_jac(\n Xs, y, r, dbeta, dr, L, alpha, sign_beta, mask)\n\n objs.append(\n model.get_jac_obj(Xs, y, 
sign_beta, dbeta, r, dr, alpha, mask))\n\n # m1 = norm(- v.T @ Xs.T @ dr + sign_beta * n_samples * alpha)\n # m2 = tol_jac * np.sqrt(n_features) * n_samples * alpha * norm(v)\n # crit = m1 <= m2\n # print(\"m1 %.2f\", m1)\n # print(\"m2 %.2f\", m2)\n # print(\"m1 = %f\" % norm(v @ (dbeta - dbeta_old)))\n # print(\"tol_crit %f\" % tol_crit)\n # if norm(v @ (dbeta - dbeta_old)) < tol_crit:\n # if norm((dbeta - dbeta_old)) < tol_jac * norm(dbeta):\n # crit =\n print('jac obj', objs[-1])\n if i > 1 and np.abs(objs[-2] - objs[-1]) < np.abs(objs[-1]) * tol_jac:\n break\n # dbeta_old = dbeta.copy()\n # dr_old = dr.copy()\n\n return dbeta\n"
] | [
[
"scipy.sparse.issparse",
"numpy.abs",
"numpy.exp"
]
] |
saethlin/yt | [
"992ae71974dca933346e91008c5a50f43a0a350e"
] | [
"yt/geometry/oct_geometry_handler.py"
] | [
"\"\"\"\nOctree geometry handler\n\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.geometry.geometry_handler import Index\nfrom yt.fields.field_detector import FieldDetector\n\n\nclass OctreeIndex(Index):\n \"\"\"The Index subclass for oct AMR datasets\"\"\"\n def _setup_geometry(self):\n mylog.debug(\"Initializing Octree Geometry Handler.\")\n self._initialize_oct_handler()\n\n def get_smallest_dx(self):\n \"\"\"\n Returns (in code units) the smallest cell size in the simulation.\n \"\"\"\n return (self.dataset.domain_width /\n (self.dataset.domain_dimensions * 2**(self.max_level))).min()\n\n def convert(self, unit):\n return self.dataset.conversion_factors[unit]\n\n def _add_mesh_sampling_particle_field(self, deposit_field, ftype, ptype):\n units = self.ds.field_info[ftype, deposit_field].units\n take_log = self.ds.field_info[ftype, deposit_field].take_log\n field_name = \"cell_%s_%s\" % (ftype, deposit_field)\n\n def _cell_index(field, data):\n # Get the position of the particles\n pos = data[ptype, \"particle_position\"]\n Npart = pos.shape[0]\n ret = np.zeros(Npart)\n tmp = np.zeros(Npart)\n\n if isinstance(data, FieldDetector):\n return ret\n\n remaining = np.ones(Npart, dtype=bool)\n Nremaining = Npart\n\n Nobjs = len(data._current_chunk.objs)\n Nbits = int(np.ceil(np.log2(Nobjs)))\n\n for i, obj in enumerate(data._current_chunk.objs):\n if Nremaining == 0:\n break\n icell = obj['index', 'ones'].T.reshape(-1).astype(np.int64).cumsum().value - 1\n mesh_data = ((icell << Nbits) + i).astype(np.float64)\n # Access the mesh data and attach them to their particles\n tmp[:Nremaining] = obj.mesh_sampling_particle_field(pos[remaining], mesh_data)\n\n ret[remaining] = tmp[:Nremaining]\n\n remaining[remaining] = np.isnan(tmp[:Nremaining])\n Nremaining = remaining.sum()\n\n return data.ds.arr(ret.astype(np.float64), input_units='1')\n\n def _mesh_sampling_particle_field(field, data):\n \"\"\"\n Create a grid field for particle quantities using given method.\n \"\"\"\n ones = data[ptype, 'particle_ones']\n\n # Access \"cell_index\" field\n Npart = ones.shape[0]\n ret = np.zeros(Npart)\n cell_index = np.array(data[ptype, 'cell_index'], np.int64)\n\n if isinstance(data, FieldDetector):\n return ret\n\n # The index of the obj is stored on the first bits\n Nobjs = len(data._current_chunk.objs)\n Nbits = int(np.ceil(np.log2(Nobjs)))\n icell = cell_index >> Nbits\n iobj = cell_index - (icell << Nbits)\n for i, subset in enumerate(data._current_chunk.objs):\n mask = (iobj == i)\n\n subset.field_parameters = data.field_parameters\n\n cell_data = subset[ftype, deposit_field].T.reshape(-1)\n\n ret[mask] = cell_data[icell[mask]]\n\n return data.ds.arr(ret, input_units=cell_data.units)\n\n if (ptype, 'cell_index') not in self.ds.derived_field_list:\n self.ds.add_field(\n (ptype, 'cell_index'),\n function=_cell_index,\n sampling_type=\"particle\",\n units='1')\n\n self.ds.add_field(\n (ptype, field_name),\n function=_mesh_sampling_particle_field,\n sampling_type=\"particle\",\n units=units,\n take_log=take_log)\n\n"
] | [
[
"numpy.ones",
"numpy.log2",
"numpy.zeros",
"numpy.isnan",
"numpy.array"
]
] |
stiphyMT/plantcv | [
"b51b545f3aef0742a2d250c75cc998ba6c9e57b2"
] | [
"plantcv/plantcv/auto_crop.py"
] | [
"# Resize image\n\nimport os\nimport cv2\nimport numpy as np\nfrom plantcv.plantcv._debug import _debug\nfrom plantcv.plantcv import params\nfrom plantcv.plantcv import fatal_error\n\n\ndef auto_crop(img, obj, padding_x=0, padding_y=0, color='black'):\n \"\"\"\n Resize image.\n\n Inputs:\n img = RGB or grayscale image data\n obj = contours\n padding_x = integer or tuple to add padding the x direction\n padding_y = integer or tuple to add padding the y direction\n color = either 'black', 'white', or 'image'\n\n Returns:\n cropped = cropped image\n\n :param img: numpy.ndarray\n :param obj: list\n :param padding_x: int\n :param padding_y: int\n :param color: str\n :return cropped: numpy.ndarray\n \"\"\"\n\n params.device += 1\n img_copy = np.copy(img)\n img_copy2 = np.copy(img)\n\n # Get the height and width of the reference image\n height, width = np.shape(img)[:2]\n\n x, y, w, h = cv2.boundingRect(obj)\n cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 5)\n\n crop_img = img[y:y + h, x:x + w]\n\n if type(padding_x) is int and type(padding_y) is int:\n offsetx_left = int(np.rint(padding_x))\n offsetx_right = int(np.rint(padding_x))\n offsety_top = int(np.rint(padding_y))\n offsety_bottom = int(np.rint(padding_y))\n\n elif type(padding_x) is tuple and type(padding_y) is tuple:\n offsetx_left = padding_x[0]\n offsetx_right = padding_x[1]\n offsety_top = padding_y[0]\n offsety_bottom = padding_y[1]\n\n else:\n fatal_error('Both padding_x and padding_x parameters must be either int or tuple.')\n\n if color.upper() == 'BLACK':\n colorval = (0, 0, 0)\n cropped = cv2.copyMakeBorder(crop_img, offsety_top, offsety_bottom, offsetx_left,\n offsetx_right, cv2.BORDER_CONSTANT, value=colorval)\n elif color.upper() == 'WHITE':\n colorval = (255, 255, 255)\n cropped = cv2.copyMakeBorder(crop_img, offsety_top, offsety_bottom, offsetx_left,\n offsetx_right, cv2.BORDER_CONSTANT, value=colorval)\n elif color.upper() == 'IMAGE':\n # Check whether the ROI is correctly bounded inside the image\n if x - offsetx_right < 0 or y - offsety_top < 0 or x + w + offsetx_right > width or y + h + offsety_bottom > height:\n cropped = img_copy2[y:y + h, x:x + w]\n else:\n # If padding is the image, crop the image with a buffer rather than cropping and adding a buffer\n cropped = img_copy2[y - offsety_top:y + h + offsety_bottom, x - offsetx_left:x + w + offsetx_right]\n else:\n fatal_error('Color was provided but ' + str(color) + ' is not \"white\", \"black\", or \"image\"!')\n\n if len(np.shape(img_copy)) == 3:\n cmap = None\n else:\n cmap = 'gray'\n\n _debug(visual=img_copy,\n filename=os.path.join(params.debug_outdir, str(params.device) + \"_crop_area.png\"),\n cmap=cmap)\n _debug(visual=cropped,\n filename=os.path.join(params.debug_outdir, str(params.device) + \"_auto_cropped.png\"),\n cmap=cmap)\n\n return cropped\n"
] | [
[
"numpy.rint",
"numpy.shape",
"numpy.copy"
]
] |
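A short usage sketch for the `auto_crop` shown above; the synthetic image, the contour extraction, and the expected output shape are illustrative assumptions rather than part of the original file:

```python
# Usage sketch: crop a synthetic object with 10 px of black padding on each side.
import cv2
import numpy as np
from plantcv.plantcv.auto_crop import auto_crop  # module path as listed in this row

img = np.zeros((200, 200, 3), dtype=np.uint8)
cv2.rectangle(img, (60, 80), (140, 160), (255, 255, 255), -1)  # one white "object"

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
contours = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

cropped = auto_crop(img, contours[0], padding_x=10, padding_y=10, color='black')
print(cropped.shape)  # expected (101, 101, 3): the 81x81 bounding box plus the padding
```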
Yurlungur/FLRW | [
"15424d2304e44d0e38110b655c5c28a6aeb34147"
] | [
"plot_all_variables_big_a.py"
] | [
"#!/usr/bin/env python2\n\n# Author: Jonah Miller ([email protected])\n# Time-stamp: <2013-12-14 16:50:20 (jonah)>\n\n# This is a companion program to my FLRW simulator. It takes a data\n# file and generates a plot of the scale factor, its derivative, the\n# density, and the pressure of the matter.\n# Call the program with\n# python2 plot_all_variables.py filename.dat\n\n# Imports\n# ----------------------------------------------------------------------\nimport numpy as np\nimport scipy as sp\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport sys, os\n# ----------------------------------------------------------------------\n\n\n# Parameters for plots\n# ----------------------------------------------------------------------\nxlabel = \"Cosmological time. (geometrized units)\"\nmy_linewidth = 5\nfontsize = 20\na_rescaling = 7000\n# ----------------------------------------------------------------------\n\n\ndef load_data(filename):\n \"\"\"\n Takes a file name as a string and extracts the simulation data\n from it. Returns a tuple of arrays:\n (times,a_values,b_values,rho_values,p_values)\n \"\"\"\n with open(filename,'r') as f:\n data = np.loadtxt(filename).transpose()\n times = data[0]\n a_values = a_rescaling * data[1]\n a_offset = a_values[0]\n a_values -= a_offset\n rho_values = data[2]\n p_values = data[3]\n return times,a_values,rho_values,p_values,a_offset\n\ndef plot_data(times,a_values,rho_values,p_values,a_offset,filename):\n \"\"\"\n Takes the times,a_values,b_values,rho_values, and p_values\n and makes a nice plot out of them. Takes labels, etc. into account.\n \"\"\"\n mpl.rcParams.update({'font.size': fontsize})\n lines = [plt.plot(times,y_set,linewidth=my_linewidth)\n for y_set in [a_values,rho_values,p_values]]\n plt.legend([\"{}a - {}\".format(a_rescaling,a_offset),r'$\\rho$',\"p\"],loc=9)\n plt.xlabel(xlabel)\n title_list = filename.split('.')[0].split('_')\n title_list[1] = \"universe:\"\n title = reduce(lambda x,y: \"{} {}\".format(x,y),title_list)\n plt.title(title)\n plt.show()\n return\n\ndef plot_file(filename):\n \"Plots the data in a file.\"\n times,a_values,rho_values,p_values,a_offset = load_data(filename)\n plot_data(times,a_values,rho_values,p_values,a_offset,filename)\n return\n\nif __name__ == \"__main__\":\n for filename in sys.argv[1:]:\n plot_file(filename)\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.rcParams.update",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.loadtxt"
]
] |
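The plotting script above expects a whitespace-delimited file whose columns are cosmological time, scale factor, density, and pressure. A minimal sketch that writes such a file with toy pressureless-dust values and then hands it to the script (the file name and values are made up):

```python
# Write a four-column data file in the layout load_data() expects: t, a, rho, p.
import numpy as np

t = np.linspace(0.01, 1.0, 200)
a = t ** (2.0 / 3.0)        # toy matter-dominated scale factor
rho = a ** -3.0             # toy density falling off as a^-3
p = np.zeros_like(t)        # pressureless dust

np.savetxt("flat_universe_dust.dat", np.column_stack([t, a, rho, p]))

# Then, following the script's own usage note:
#   python2 plot_all_variables_big_a.py flat_universe_dust.dat
```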
MaxXSoft/ZexGP | [
"c01d68d134990c0d18f30f12d93855ba5ffcbe29"
] | [
"examples/funcfit/__main__.py"
] | [
"'''\nAn example of fitting a function with ZexGP.\n'''\n\nfrom zexgp.kernel import Kernel\nfrom os import path\nfrom sys import float_info as fi\nfrom matplotlib import pyplot as plt\n\n\n# some necessary global variables\nfunc_val = []\ndomain = []\n\n\ndef init_func_val():\n '''\n Initialize function value.\n '''\n def func(x): return 1 / (1 + 25 * x ** 2)\n for i in range(100):\n x = (i - 50) / 50\n func_val.append(func(x))\n domain.append(x)\n\n\ndef get_int(i):\n '''\n Get a function that returns a specific integer.\n '''\n return lambda: i\n\n\ndef pow(x, y):\n try:\n return float(x ** y)\n except (OverflowError, ZeroDivisionError):\n return fi.max\n except TypeError:\n return float('nan')\n\n\ndef fitness(tree):\n '''\n Fitness function.\n '''\n sum = 0\n for i in range(100):\n x = (i - 50) / 50\n ans = tree.eval(x)\n exp = func_val[i]\n sum += abs(ans - exp)\n return 1 / (sum + 1) if sum == sum else 0\n\n\n# create kernel and load settings from disk\nk = Kernel()\nk.load_conf(path.dirname(__file__) + '/config.json')\nk.conf['maxRuns'] = 3\n\n# add functions\nk.add('+', func=lambda x, y: x + y)\nk.add('-', func=lambda x, y: x - y)\nk.add('*', func=lambda x, y: x * y)\nk.add('/', func=lambda x, y: x / y if y else fi.max)\nk.add('^', func=pow)\n\n# add terminals\nk.add('x', arg_index=0)\nfor i in range(1, 4):\n k.add(str(i), func=get_int(i))\n\n# set fitness function\nk.set_fitness(fitness)\n\n# run & print results\ninit_func_val()\nresults = k.run(jobs=4)\nfor i in results:\n print(i)\n\n# draw plots\nplt.figure()\nplt.subplot(221)\nplt.plot(domain, func_val, c='g')\nif results[0]:\n plt.subplot(222)\n plt.plot(domain, [results[0].eval(x) for x in domain])\nif results[1]:\n plt.subplot(223)\n plt.plot(domain, [results[1].eval(x) for x in domain])\nif results[2]:\n plt.subplot(224)\n plt.plot(domain, [results[2].eval(x) for x in domain])\nplt.suptitle('Results')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.plot"
]
] |
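The fitness function above samples the Runge function 1/(1 + 25x^2) at 100 points and maps a candidate's summed absolute error to 1/(error + 1). A standalone sketch of that scoring rule with hand-written candidates in place of ZexGP trees:

```python
# Standalone sketch of the fitness measure used in the funcfit example.
target = lambda x: 1.0 / (1.0 + 25.0 * x ** 2)

def fitness_of(candidate):
    err = 0.0
    for i in range(100):
        x = (i - 50) / 50
        err += abs(candidate(x) - target(x))
    return 1.0 / (err + 1.0) if err == err else 0.0  # NaN guard, mirroring `sum == sum`

print(fitness_of(target))                  # a perfect candidate scores 1.0
print(fitness_of(lambda x: 1.0 - x * x))   # a crude polynomial scores lower
```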
algebra2k/terrier | [
"8b6f4b0b0c30dc94411f197e610f634ce0ab5b0b"
] | [
"script/model/model.py"
] | [
"#!/usr/bin/env python3\n\nimport numpy as np\n\nimport lightgbm as lgb\n\nfrom sklearn import linear_model\nfrom sklearn import kernel_ridge\nfrom sklearn import ensemble\nfrom sklearn import preprocessing\nfrom sklearn import neural_network\nfrom sklearn import multioutput\n\n# import warnings filter\nfrom warnings import simplefilter\n\n# ignore all future warnings\nsimplefilter(action='ignore', category=FutureWarning)\n\n_LOGTRANS_EPS = 1e-4\n\n\ndef _get_base_ml_model(method):\n regressor = None\n if method == 'lr':\n regressor = linear_model.LinearRegression()\n if method == 'huber':\n regressor = linear_model.HuberRegressor(max_iter=100)\n regressor = multioutput.MultiOutputRegressor(regressor)\n if method == 'kr':\n regressor = kernel_ridge.KernelRidge(kernel='rbf')\n if method == 'rf':\n regressor = ensemble.RandomForestRegressor(n_estimators=50, n_jobs=8)\n if method == 'gbm':\n regressor = lgb.LGBMRegressor(max_depth=20, num_leaves=5000, n_estimators=100, min_child_samples=5,\n random_state=42)\n regressor = multioutput.MultiOutputRegressor(regressor)\n if method == 'nn':\n regressor = neural_network.MLPRegressor(hidden_layer_sizes=(25, 25), early_stopping=True,\n max_iter=1000000, alpha=0.01)\n\n return regressor\n\n\nclass Model:\n \"\"\"\n The class that wraps around standard ML libraries.\n With the implementation for different normalization handlings\n \"\"\"\n\n def __init__(self, method, normalize=True, log_transform=True, modeling_transformer=None):\n \"\"\"\n\n :param method: which ML method to use\n :param normalize: whether to perform standard normalization on data (both x and y)\n :param log_transform: whether to perform log transformation on data (both x and y)\n :param modeling_transformer: the customized data transformer (a pair of functions with the first for training\n and second for predict)\n \"\"\"\n self._base_model = _get_base_ml_model(method)\n self._normalize = normalize\n self._log_transform = log_transform\n self._xscaler = preprocessing.StandardScaler()\n self._yscaler = preprocessing.StandardScaler()\n self._modeling_transformer = modeling_transformer\n\n def train(self, x, y):\n if self._modeling_transformer is not None:\n y = self._modeling_transformer[0](x, y)\n\n if self._log_transform:\n x = np.log(x + _LOGTRANS_EPS)\n y = np.log(y + _LOGTRANS_EPS)\n\n if self._normalize:\n x = self._xscaler.fit_transform(x)\n y = self._yscaler.fit_transform(y)\n\n self._base_model.fit(x, y)\n\n def predict(self, x):\n original_x = x\n\n # transform the features\n if self._log_transform:\n x = np.log(x + _LOGTRANS_EPS)\n if self._normalize:\n x = self._xscaler.transform(x)\n\n # make prediction\n y = self._base_model.predict(x)\n\n # transform the y back\n if self._normalize:\n y = self._yscaler.inverse_transform(y)\n if self._log_transform == 1:\n y = np.exp(y) - _LOGTRANS_EPS\n y = np.clip(y, 0, None)\n\n if self._modeling_transformer is not None:\n y = self._modeling_transformer[1](original_x, y)\n\n return y\n"
] | [
[
"sklearn.kernel_ridge.KernelRidge",
"sklearn.linear_model.LinearRegression",
"numpy.exp",
"sklearn.multioutput.MultiOutputRegressor",
"sklearn.ensemble.RandomForestRegressor",
"numpy.log",
"numpy.clip",
"sklearn.linear_model.HuberRegressor",
"sklearn.preprocessing.StandardScaler",
"sklearn.neural_network.MLPRegressor"
]
] |
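A usage sketch for the `Model` wrapper above, assuming the file is importable as `model` (matching its path script/model/model.py) and that its dependencies (scikit-learn, lightgbm) are installed; the synthetic data below is made up and kept positive so the log transform is well defined:

```python
# Train/predict round trip through the log-transform + standardization wrapper.
import numpy as np
from model import Model  # assumed import of the file shown in this row

rng = np.random.RandomState(0)
x = rng.uniform(1.0, 10.0, size=(200, 3))                 # positive features
y = np.column_stack([x[:, 0] * x[:, 1], x[:, 2] ** 2])    # two positive targets

m = Model('rf', normalize=True, log_transform=True)
m.train(x, y)
pred = m.predict(x[:5])
print(pred.shape)  # (5, 2); predictions are mapped back through exp() and clipped at 0
```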
shivammaniharsahu/django_api | [
"6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797",
"6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797"
] | [
"Lib/site-packages/tensorflow_core/python/ops/gen_functional_ops.py",
"Lib/site-packages/tensorflow_core/_api/v1/compat/v1/experimental/__init__.py"
] | [
"\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\n\"\"\"\n\nimport collections as _collections\nimport six as _six\n\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import errors as _errors\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\nfrom tensorflow.python.util import dispatch as _dispatch\nfrom tensorflow.python.util.tf_export import tf_export\nfrom tensorflow.python.util.tf_export import kwarg_only as _kwarg_only\nfrom tensorflow.tools.docs import doc_controls as _doc_controls\n\n\ndef case(branch_index, input, Tout, branches, output_shapes=[], name=None):\n r\"\"\"An n-way switch statement which calls a single branch function.\n\n An n-way switch statement, implementing the following:\n ```\n switch (branch_index) {\n case 0:\n output = branches[0](input);\n break;\n case 1:\n output = branches[1](input);\n break;\n ...\n case [[nbranches-1]]:\n default:\n output = branches[nbranches-1](input);\n break;\n }\n ```\n\n Args:\n branch_index: A `Tensor` of type `int32`.\n The branch selector, an int32 Tensor.\n input: A list of `Tensor` objects.\n A list of input tensors passed to the branch function.\n Tout: A list of `tf.DTypes`. A list of output types.\n branches: A list of functions decorated with @Defun that has length `>= 1`.\n A list of functions each of which takes 'inputs' and returns a list of\n tensors, whose types are the same as what every other branch returns.\n output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). 
Defaults to `[]`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name, \"Case\",\n name, _ctx._post_execution_callbacks, branch_index, input, \"Tout\",\n Tout, \"branches\", branches, \"output_shapes\", output_shapes)\n return _result\n except _core._FallbackException:\n try:\n return case_eager_fallback(\n branch_index, input, Tout=Tout, branches=branches,\n output_shapes=output_shapes, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'case' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if not isinstance(branches, (list, tuple)):\n raise TypeError(\n \"Expected list for 'branches' argument to \"\n \"'case' Op, not %r.\" % branches)\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'case' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"Case\", branch_index=branch_index, input=input, Tout=Tout,\n branches=branches, output_shapes=output_shapes, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"),\n \"branches\", _op.get_attr(\"branches\"), \"output_shapes\",\n _op.get_attr(\"output_shapes\"))\n _execute.record_gradient(\n \"Case\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef Case(branch_index, input, Tout, branches, output_shapes=[], name=None):\n return case(branch_index=branch_index, input=input, Tout=Tout, branches=branches, output_shapes=output_shapes, name=name)\nCase.__doc__ = case.__doc__\nCase = _doc_controls.do_not_generate_docs(_kwarg_only(Case))\ntf_export(\"raw_ops.Case\")(Case)\n\n\ndef case_eager_fallback(branch_index, input, Tout, branches, output_shapes=[], name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function case\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'case' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if not isinstance(branches, (list, tuple)):\n raise TypeError(\n \"Expected list for 'branches' argument to \"\n \"'case' Op, not %r.\" % branches)\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'case' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n branch_index = _ops.convert_to_tensor(branch_index, 
_dtypes.int32)\n _inputs_flat = [branch_index] + list(input)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"branches\", branches,\n \"output_shapes\", output_shapes)\n _result = _execute.execute(b\"Case\", len(Tout), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"Case\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef fake_param(dtype, shape, name=None):\n r\"\"\" This op is used as a placeholder in If branch functions. It doesn't provide a\n valid output when run, so must either be removed (e.g. replaced with a\n function input) or guaranteed not to be used (e.g. if mirroring an\n intermediate output needed for the gradient computation of the other branch).\n\n Args:\n dtype: A `tf.DType`. The type of the output.\n shape: A `tf.TensorShape` or list of `ints`.\n The purported shape of the output. This is only used for shape inference;\n the output will not necessarily have this shape. Can be a partial shape.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `dtype`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"FakeParam\", name, _ctx._post_execution_callbacks, \"dtype\", dtype,\n \"shape\", shape)\n return _result\n except _core._FallbackException:\n try:\n return fake_param_eager_fallback(\n dtype=dtype, shape=shape, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n dtype = _execute.make_type(dtype, \"dtype\")\n shape = _execute.make_shape(shape, \"shape\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"FakeParam\", dtype=dtype, shape=shape, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"dtype\", _op._get_attr_type(\"dtype\"), \"shape\",\n _op.get_attr(\"shape\"))\n _execute.record_gradient(\n \"FakeParam\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\ndef FakeParam(dtype, shape, name=None):\n return fake_param(dtype=dtype, shape=shape, name=name)\nFakeParam.__doc__ = fake_param.__doc__\nFakeParam = _doc_controls.do_not_generate_docs(_kwarg_only(FakeParam))\ntf_export(\"raw_ops.FakeParam\")(FakeParam)\n\n\ndef fake_param_eager_fallback(dtype, shape, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function fake_param\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n dtype = _execute.make_type(dtype, \"dtype\")\n shape = _execute.make_shape(shape, \"shape\")\n _inputs_flat = []\n _attrs = (\"dtype\", dtype, \"shape\", shape)\n _result = _execute.execute(b\"FakeParam\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"FakeParam\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef _for(start, limit, delta, input, body, name=None):\n r\"\"\" ```python\n output = input;\n for i in range(start, limit, delta)\n output = body(i, output);\n ```\n\n Args:\n start: A `Tensor` of type `int32`. The lower bound. An int32\n limit: A `Tensor` of type `int32`. The upper bound. An int32\n delta: A `Tensor` of type `int32`. The increment. 
An int32\n input: A list of `Tensor` objects.\n A list of input tensors whose types are T.\n body: A function decorated with @Defun.\n A function that takes a list of tensors (int32, T) and returns another\n list of tensors (T).\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects. Has the same type as `input`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name, \"For\",\n name, _ctx._post_execution_callbacks, start, limit, delta, input,\n \"body\", body)\n return _result\n except _core._FallbackException:\n try:\n return _for_eager_fallback(\n start, limit, delta, input, body=body, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n _, _, _op = _op_def_lib._apply_op_helper(\n \"For\", start=start, limit=limit, delta=delta, input=input, body=body,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"body\", _op.get_attr(\"body\"))\n _execute.record_gradient(\n \"For\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef For(start, limit, delta, input, body, name=None):\n return _for(start=start, limit=limit, delta=delta, input=input, body=body, name=name)\nFor.__doc__ = _for.__doc__\nFor = _doc_controls.do_not_generate_docs(_kwarg_only(For))\ntf_export(\"raw_ops.For\")(For)\n\n\ndef _for_eager_fallback(start, limit, delta, input, body, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function _for\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n start = _ops.convert_to_tensor(start, _dtypes.int32)\n limit = _ops.convert_to_tensor(limit, _dtypes.int32)\n delta = _ops.convert_to_tensor(delta, _dtypes.int32)\n _inputs_flat = [start, limit, delta] + list(input)\n _attrs = (\"T\", _attr_T, \"body\", body)\n _result = _execute.execute(b\"For\", len(input), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"For\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef _if(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None):\n r\"\"\"output = cond ? then_branch(input) : else_branch(input)\n\n Args:\n cond: A `Tensor`.\n A Tensor. If the tensor is a scalar of non-boolean type, the\n scalar is converted to a boolean according to the\n following rule: if the scalar is a numerical value, non-zero means\n `True` and zero means False; if the scalar is a string, non-empty\n means `True` and empty means `False`. If the tensor is not a scalar,\n being empty means False and being non-empty means True.\n input: A list of `Tensor` objects. A list of input tensors.\n Tout: A list of `tf.DTypes`. 
A list of output types.\n then_branch: A function decorated with @Defun.\n A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns.\n else_branch: A function decorated with @Defun.\n A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what then_branch returns.\n output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name, \"If\", name,\n _ctx._post_execution_callbacks, cond, input, \"Tout\", Tout,\n \"then_branch\", then_branch, \"else_branch\", else_branch,\n \"output_shapes\", output_shapes)\n return _result\n except _core._FallbackException:\n try:\n return _if_eager_fallback(\n cond, input, Tout=Tout, then_branch=then_branch,\n else_branch=else_branch, output_shapes=output_shapes, name=name,\n ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'if' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'if' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"If\", cond=cond, input=input, Tout=Tout, then_branch=then_branch,\n else_branch=else_branch, output_shapes=output_shapes, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"Tcond\", _op._get_attr_type(\"Tcond\"), \"Tin\", _op.get_attr(\"Tin\"),\n \"Tout\", _op.get_attr(\"Tout\"), \"then_branch\",\n _op.get_attr(\"then_branch\"), \"else_branch\",\n _op.get_attr(\"else_branch\"), \"output_shapes\",\n _op.get_attr(\"output_shapes\"))\n _execute.record_gradient(\n \"If\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef If(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None):\n return _if(cond=cond, input=input, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes, name=name)\nIf.__doc__ = _if.__doc__\nIf = _doc_controls.do_not_generate_docs(_kwarg_only(If))\ntf_export(\"raw_ops.If\")(If)\n\n\ndef _if_eager_fallback(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function _if\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'if' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise 
TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'if' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], _ctx)\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = [cond] + list(input)\n _attrs = (\"Tcond\", _attr_Tcond, \"Tin\", _attr_Tin, \"Tout\", Tout,\n \"then_branch\", then_branch, \"else_branch\", else_branch, \"output_shapes\",\n output_shapes)\n _result = _execute.execute(b\"If\", len(Tout), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"If\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef partitioned_call(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None):\n r\"\"\"returns `f(inputs)`, where `f`'s body is placed and partitioned.\n\n Args:\n args: A list of `Tensor` objects. A list of input tensors.\n Tout: A list of `tf.DTypes`. A list of output types.\n f: A function decorated with @Defun.\n A function that takes 'args', a list of tensors, and returns 'output',\n another list of tensors. Input and output types are specified by 'Tin'\n and 'Tout'. The function body of f will be placed and partitioned across\n devices, setting this op apart from the regular Call op.\n config: An optional `string`. Defaults to `\"\"`.\n config_proto: An optional `string`. Defaults to `\"\"`.\n executor_type: An optional `string`. Defaults to `\"\"`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"PartitionedCall\", name, _ctx._post_execution_callbacks, args, \"Tout\",\n Tout, \"f\", f, \"config\", config, \"config_proto\", config_proto,\n \"executor_type\", executor_type)\n return _result\n except _core._FallbackException:\n try:\n return partitioned_call_eager_fallback(\n args, Tout=Tout, f=f, config=config, config_proto=config_proto,\n executor_type=executor_type, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'partitioned_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if config is None:\n config = \"\"\n config = _execute.make_str(config, \"config\")\n if config_proto is None:\n config_proto = \"\"\n config_proto = _execute.make_str(config_proto, \"config_proto\")\n if executor_type is None:\n executor_type = \"\"\n executor_type = _execute.make_str(executor_type, \"executor_type\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"PartitionedCall\", args=args, Tout=Tout, f=f, config=config,\n config_proto=config_proto,\n executor_type=executor_type, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"), \"f\",\n _op.get_attr(\"f\"), \"config\", _op.get_attr(\"config\"),\n \"config_proto\", 
_op.get_attr(\"config_proto\"), \"executor_type\",\n _op.get_attr(\"executor_type\"))\n _execute.record_gradient(\n \"PartitionedCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef PartitionedCall(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None):\n return partitioned_call(args=args, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type, name=name)\nPartitionedCall.__doc__ = partitioned_call.__doc__\nPartitionedCall = _doc_controls.do_not_generate_docs(_kwarg_only(PartitionedCall))\ntf_export(\"raw_ops.PartitionedCall\")(PartitionedCall)\n\n\ndef partitioned_call_eager_fallback(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function partitioned_call\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'partitioned_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if config is None:\n config = \"\"\n config = _execute.make_str(config, \"config\")\n if config_proto is None:\n config_proto = \"\"\n config_proto = _execute.make_str(config_proto, \"config_proto\")\n if executor_type is None:\n executor_type = \"\"\n executor_type = _execute.make_str(executor_type, \"executor_type\")\n _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)\n _inputs_flat = list(args)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"f\", f, \"config\", config,\n \"config_proto\", config_proto, \"executor_type\", executor_type)\n _result = _execute.execute(b\"PartitionedCall\", len(Tout),\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"PartitionedCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef remote_call(target, args, Tout, f, name=None):\n r\"\"\"Runs function `f` on a remote device indicated by `target`.\n\n Args:\n target: A `Tensor` of type `string`.\n A fully specified device name where we want to run the function.\n args: A list of `Tensor` objects. A list of arguments for the function.\n Tout: A list of `tf.DTypes` that has length `>= 1`.\n The type list for the return values.\n f: A function decorated with @Defun. 
The function to run remotely.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"RemoteCall\", name, _ctx._post_execution_callbacks, target, args,\n \"Tout\", Tout, \"f\", f)\n return _result\n except _core._FallbackException:\n try:\n return remote_call_eager_fallback(\n target, args, Tout=Tout, f=f, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'remote_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"RemoteCall\", target=target, args=args, Tout=Tout, f=f, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"), \"f\",\n _op.get_attr(\"f\"))\n _execute.record_gradient(\n \"RemoteCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef RemoteCall(target, args, Tout, f, name=None):\n return remote_call(target=target, args=args, Tout=Tout, f=f, name=name)\nRemoteCall.__doc__ = remote_call.__doc__\nRemoteCall = _doc_controls.do_not_generate_docs(_kwarg_only(RemoteCall))\ntf_export(\"raw_ops.RemoteCall\")(RemoteCall)\n\n\ndef remote_call_eager_fallback(target, args, Tout, f, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function remote_call\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'remote_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)\n target = _ops.convert_to_tensor(target, _dtypes.string)\n _inputs_flat = [target] + list(args)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"f\", f)\n _result = _execute.execute(b\"RemoteCall\", len(Tout), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"RemoteCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef stateful_partitioned_call(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None):\n r\"\"\"returns `f(inputs)`, where `f`'s body is placed and partitioned.\n\n Args:\n args: A list of `Tensor` objects. A list of input tensors.\n Tout: A list of `tf.DTypes`. A list of output types.\n f: A function decorated with @Defun.\n A function that takes 'args', a list of tensors, and returns 'output',\n another list of tensors. Input and output types are specified by 'Tin'\n and 'Tout'. The function body of f will be placed and partitioned across\n devices, setting this op apart from the regular Call op. This op is\n stateful.\n config: An optional `string`. Defaults to `\"\"`.\n config_proto: An optional `string`. Defaults to `\"\"`.\n executor_type: An optional `string`. 
Defaults to `\"\"`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"StatefulPartitionedCall\", name, _ctx._post_execution_callbacks, args,\n \"Tout\", Tout, \"f\", f, \"config\", config, \"config_proto\", config_proto,\n \"executor_type\", executor_type)\n return _result\n except _core._FallbackException:\n try:\n return stateful_partitioned_call_eager_fallback(\n args, Tout=Tout, f=f, config=config, config_proto=config_proto,\n executor_type=executor_type, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'stateful_partitioned_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if config is None:\n config = \"\"\n config = _execute.make_str(config, \"config\")\n if config_proto is None:\n config_proto = \"\"\n config_proto = _execute.make_str(config_proto, \"config_proto\")\n if executor_type is None:\n executor_type = \"\"\n executor_type = _execute.make_str(executor_type, \"executor_type\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatefulPartitionedCall\", args=args, Tout=Tout, f=f, config=config,\n config_proto=config_proto,\n executor_type=executor_type, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"), \"f\",\n _op.get_attr(\"f\"), \"config\", _op.get_attr(\"config\"),\n \"config_proto\", _op.get_attr(\"config_proto\"), \"executor_type\",\n _op.get_attr(\"executor_type\"))\n _execute.record_gradient(\n \"StatefulPartitionedCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef StatefulPartitionedCall(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None):\n return stateful_partitioned_call(args=args, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type, name=name)\nStatefulPartitionedCall.__doc__ = stateful_partitioned_call.__doc__\nStatefulPartitionedCall = _doc_controls.do_not_generate_docs(_kwarg_only(StatefulPartitionedCall))\ntf_export(\"raw_ops.StatefulPartitionedCall\")(StatefulPartitionedCall)\n\n\ndef stateful_partitioned_call_eager_fallback(args, Tout, f, config=\"\", config_proto=\"\", executor_type=\"\", name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateful_partitioned_call\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'stateful_partitioned_call' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if config is None:\n config = \"\"\n config = _execute.make_str(config, \"config\")\n if config_proto is None:\n config_proto = \"\"\n config_proto = _execute.make_str(config_proto, \"config_proto\")\n if executor_type is None:\n executor_type = \"\"\n 
executor_type = _execute.make_str(executor_type, \"executor_type\")\n _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)\n _inputs_flat = list(args)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"f\", f, \"config\", config,\n \"config_proto\", config_proto, \"executor_type\", executor_type)\n _result = _execute.execute(b\"StatefulPartitionedCall\", len(Tout),\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"StatefulPartitionedCall\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef stateless_if(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None):\n r\"\"\"output = cond ? then_branch(input) : else_branch(input)\n\n Args:\n cond: A `Tensor`.\n A Tensor. If the tensor is a scalar of non-boolean type, the\n scalar is converted to a boolean according to the\n following rule: if the scalar is a numerical value, non-zero means\n `True` and zero means False; if the scalar is a string, non-empty\n means `True` and empty means `False`. If the tensor is not a scalar,\n being empty means False and being non-empty means True.\n\n This should only be used when the if then/else body functions do not\n have stateful ops.\n input: A list of `Tensor` objects. A list of input tensors.\n Tout: A list of `tf.DTypes`. A list of output types.\n then_branch: A function decorated with @Defun.\n A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns.\n else_branch: A function decorated with @Defun.\n A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what then_branch returns.\n output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"StatelessIf\", name, _ctx._post_execution_callbacks, cond, input,\n \"Tout\", Tout, \"then_branch\", then_branch, \"else_branch\", else_branch,\n \"output_shapes\", output_shapes)\n return _result\n except _core._FallbackException:\n try:\n return stateless_if_eager_fallback(\n cond, input, Tout=Tout, then_branch=then_branch,\n else_branch=else_branch, output_shapes=output_shapes, name=name,\n ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'stateless_if' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'stateless_if' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatelessIf\", cond=cond, input=input, Tout=Tout,\n then_branch=then_branch, else_branch=else_branch,\n 
output_shapes=output_shapes, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"Tcond\", _op._get_attr_type(\"Tcond\"), \"Tin\", _op.get_attr(\"Tin\"),\n \"Tout\", _op.get_attr(\"Tout\"), \"then_branch\",\n _op.get_attr(\"then_branch\"), \"else_branch\",\n _op.get_attr(\"else_branch\"), \"output_shapes\",\n _op.get_attr(\"output_shapes\"))\n _execute.record_gradient(\n \"StatelessIf\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef StatelessIf(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None):\n return stateless_if(cond=cond, input=input, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes, name=name)\nStatelessIf.__doc__ = stateless_if.__doc__\nStatelessIf = _doc_controls.do_not_generate_docs(_kwarg_only(StatelessIf))\ntf_export(\"raw_ops.StatelessIf\")(StatelessIf)\n\n\ndef stateless_if_eager_fallback(cond, input, Tout, then_branch, else_branch, output_shapes=[], name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateless_if\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'stateless_if' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'stateless_if' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], _ctx)\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = [cond] + list(input)\n _attrs = (\"Tcond\", _attr_Tcond, \"Tin\", _attr_Tin, \"Tout\", Tout,\n \"then_branch\", then_branch, \"else_branch\", else_branch, \"output_shapes\",\n output_shapes)\n _result = _execute.execute(b\"StatelessIf\", len(Tout), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"StatelessIf\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef stateless_while(input, cond, body, name=None):\n r\"\"\"output = input; While (Cond(output)) { output = Body(output) }\n\n Args:\n input: A list of `Tensor` objects.\n A list of input tensors whose types are T.\n cond: A function decorated with @Defun.\n A function takes 'input' and returns a tensor. If the tensor is\n a scalar of non-boolean, the scalar is converted to a boolean\n according to the following rule: if the scalar is a numerical\n value, non-zero means True and zero means False; if the scalar is\n a string, non-empty means True and empty means False. If the\n tensor is not a scalar, non-emptiness means True and False\n otherwise.\n\n This should only be used when the while condition and body functions\n do not have stateful ops.\n body: A function decorated with @Defun.\n A function that takes a list of tensors and returns another\n list of tensors. Both lists have the same types as specified\n by T.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects. 
Has the same type as `input`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"StatelessWhile\", name, _ctx._post_execution_callbacks, input, \"cond\",\n cond, \"body\", body)\n return _result\n except _core._FallbackException:\n try:\n return stateless_while_eager_fallback(\n input, cond=cond, body=body, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatelessWhile\", input=input, cond=cond, body=body, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"cond\", _op.get_attr(\"cond\"), \"body\",\n _op.get_attr(\"body\"))\n _execute.record_gradient(\n \"StatelessWhile\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef StatelessWhile(input, cond, body, name=None):\n return stateless_while(input=input, cond=cond, body=body, name=name)\nStatelessWhile.__doc__ = stateless_while.__doc__\nStatelessWhile = _doc_controls.do_not_generate_docs(_kwarg_only(StatelessWhile))\ntf_export(\"raw_ops.StatelessWhile\")(StatelessWhile)\n\n\ndef stateless_while_eager_fallback(input, cond, body, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateless_while\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = list(input)\n _attrs = (\"T\", _attr_T, \"cond\", cond, \"body\", body)\n _result = _execute.execute(b\"StatelessWhile\", len(input),\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"StatelessWhile\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef symbolic_gradient(input, Tout, f, name=None):\n r\"\"\"Computes the gradient function for function f via backpropagation.\n\n Args:\n input: A list of `Tensor` objects. a list of input tensors of size N + M;\n Tout: A list of `tf.DTypes` that has length `>= 1`.\n the type list for the input list.\n f: A function decorated with @Defun.\n The function we want to compute the gradient for.\n\n The function 'f' must be a numerical function which takes N inputs and\n produces M outputs. Its gradient function 'g', which is computed by\n this SymbolicGradient op is a function taking N + M inputs and\n produces N outputs.\n\n I.e. if we have\n (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),\n then, g is\n (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,\n dL/dy1, dL/dy2, ..., dL/dy_M),\n\n where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the\n loss function). 
dL/dx_i is the partial derivative of L with respect\n to x_i.\n\n (Needs some math expert to say the comment above better.)\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects of type `Tout`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"SymbolicGradient\", name, _ctx._post_execution_callbacks, input,\n \"Tout\", Tout, \"f\", f)\n return _result\n except _core._FallbackException:\n try:\n return symbolic_gradient_eager_fallback(\n input, Tout=Tout, f=f, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'symbolic_gradient' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n _, _, _op = _op_def_lib._apply_op_helper(\n \"SymbolicGradient\", input=input, Tout=Tout, f=f, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"Tin\", _op.get_attr(\"Tin\"), \"Tout\", _op.get_attr(\"Tout\"), \"f\",\n _op.get_attr(\"f\"))\n _execute.record_gradient(\n \"SymbolicGradient\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef SymbolicGradient(input, Tout, f, name=None):\n return symbolic_gradient(input=input, Tout=Tout, f=f, name=name)\nSymbolicGradient.__doc__ = symbolic_gradient.__doc__\nSymbolicGradient = _doc_controls.do_not_generate_docs(_kwarg_only(SymbolicGradient))\ntf_export(\"raw_ops.SymbolicGradient\")(SymbolicGradient)\n\n\ndef symbolic_gradient_eager_fallback(input, Tout, f, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function symbolic_gradient\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if not isinstance(Tout, (list, tuple)):\n raise TypeError(\n \"Expected list for 'Tout' argument to \"\n \"'symbolic_gradient' Op, not %r.\" % Tout)\n Tout = [_execute.make_type(_t, \"Tout\") for _t in Tout]\n _attr_Tin, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = list(input)\n _attrs = (\"Tin\", _attr_Tin, \"Tout\", Tout, \"f\", f)\n _result = _execute.execute(b\"SymbolicGradient\", len(Tout),\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"SymbolicGradient\", _inputs_flat, _attrs, _result, name)\n return _result\n\n\ndef _while(input, cond, body, output_shapes=[], parallel_iterations=10, name=None):\n r\"\"\"output = input; While (Cond(output)) { output = Body(output) }\n\n Args:\n input: A list of `Tensor` objects.\n A list of input tensors whose types are T.\n cond: A function decorated with @Defun.\n A function takes 'input' and returns a tensor. If the tensor is\n a scalar of non-boolean, the scalar is converted to a boolean\n according to the following rule: if the scalar is a numerical\n value, non-zero means True and zero means False; if the scalar is\n a string, non-empty means True and empty means False. 
If the\n tensor is not a scalar, non-emptiness means True and False\n otherwise.\n body: A function decorated with @Defun.\n A function that takes a list of tensors and returns another\n list of tensors. Both lists have the same types as specified\n by T.\n output_shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.\n parallel_iterations: An optional `int`. Defaults to `10`.\n name: A name for the operation (optional).\n\n Returns:\n A list of `Tensor` objects. Has the same type as `input`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name, \"While\",\n name, _ctx._post_execution_callbacks, input, \"cond\", cond, \"body\",\n body, \"output_shapes\", output_shapes, \"parallel_iterations\",\n parallel_iterations)\n return _result\n except _core._FallbackException:\n try:\n return _while_eager_fallback(\n input, cond=cond, body=body, output_shapes=output_shapes,\n parallel_iterations=parallel_iterations, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'while' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n if parallel_iterations is None:\n parallel_iterations = 10\n parallel_iterations = _execute.make_int(parallel_iterations, \"parallel_iterations\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"While\", input=input, cond=cond, body=body,\n output_shapes=output_shapes,\n parallel_iterations=parallel_iterations, name=name)\n _result = _op.outputs[:]\n if not _result:\n return _op\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"cond\", _op.get_attr(\"cond\"), \"body\",\n _op.get_attr(\"body\"), \"output_shapes\",\n _op.get_attr(\"output_shapes\"), \"parallel_iterations\",\n _op.get_attr(\"parallel_iterations\"))\n _execute.record_gradient(\n \"While\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef While(input, cond, body, output_shapes=[], parallel_iterations=10, name=None):\n return _while(input=input, cond=cond, body=body, output_shapes=output_shapes, parallel_iterations=parallel_iterations, name=name)\nWhile.__doc__ = _while.__doc__\nWhile = _doc_controls.do_not_generate_docs(_kwarg_only(While))\ntf_export(\"raw_ops.While\")(While)\n\n\ndef _while_eager_fallback(input, cond, body, output_shapes=[], parallel_iterations=10, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function _while\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if output_shapes is None:\n output_shapes = []\n if not isinstance(output_shapes, (list, tuple)):\n raise TypeError(\n \"Expected list for 'output_shapes' argument to \"\n \"'while' Op, not %r.\" % output_shapes)\n output_shapes = [_execute.make_shape(_s, \"output_shapes\") for _s in output_shapes]\n if parallel_iterations is None:\n parallel_iterations = 10\n parallel_iterations = 
_execute.make_int(parallel_iterations, \"parallel_iterations\")\n _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\n _inputs_flat = list(input)\n _attrs = (\"T\", _attr_T, \"cond\", cond, \"body\", body, \"output_shapes\",\n output_shapes, \"parallel_iterations\", parallel_iterations)\n _result = _execute.execute(b\"While\", len(input), inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"While\", _inputs_flat, _attrs, _result, name)\n return _result\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"Case\"\n# input_arg {\n# name: \"branch_index\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"branches\"\n# type: \"list(func)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"output_shapes\"\n# type: \"list(shape)\"\n# default_value {\n# list {\n# }\n# }\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"FakeParam\"\n# output_arg {\n# name: \"output\"\n# type_attr: \"dtype\"\n# }\n# attr {\n# name: \"dtype\"\n# type: \"type\"\n# }\n# attr {\n# name: \"shape\"\n# type: \"shape\"\n# }\n# }\n# op {\n# name: \"For\"\n# input_arg {\n# name: \"start\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"limit\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"delta\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"T\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"body\"\n# type: \"func\"\n# }\n# }\n# op {\n# name: \"If\"\n# input_arg {\n# name: \"cond\"\n# type_attr: \"Tcond\"\n# }\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tcond\"\n# type: \"type\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"then_branch\"\n# type: \"func\"\n# }\n# attr {\n# name: \"else_branch\"\n# type: \"func\"\n# }\n# attr {\n# name: \"output_shapes\"\n# type: \"list(shape)\"\n# default_value {\n# list {\n# }\n# }\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"PartitionedCall\"\n# input_arg {\n# name: \"args\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"f\"\n# type: \"func\"\n# }\n# attr {\n# name: \"config\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# attr {\n# name: \"config_proto\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# attr {\n# name: \"executor_type\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# }\n# op {\n# name: \"RemoteCall\"\n# input_arg {\n# name: \"target\"\n# type: DT_STRING\n# }\n# input_arg {\n# name: \"args\"\n# type_list_attr: \"Tin\"\n# }\n# 
output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"f\"\n# type: \"func\"\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"StatefulPartitionedCall\"\n# input_arg {\n# name: \"args\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"f\"\n# type: \"func\"\n# }\n# attr {\n# name: \"config\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# attr {\n# name: \"config_proto\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# attr {\n# name: \"executor_type\"\n# type: \"string\"\n# default_value {\n# s: \"\"\n# }\n# }\n# is_stateful: true\n# }\n# op {\n# name: \"StatelessIf\"\n# input_arg {\n# name: \"cond\"\n# type_attr: \"Tcond\"\n# }\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tcond\"\n# type: \"type\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"then_branch\"\n# type: \"func\"\n# }\n# attr {\n# name: \"else_branch\"\n# type: \"func\"\n# }\n# attr {\n# name: \"output_shapes\"\n# type: \"list(shape)\"\n# default_value {\n# list {\n# }\n# }\n# }\n# }\n# op {\n# name: \"StatelessWhile\"\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"T\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"cond\"\n# type: \"func\"\n# }\n# attr {\n# name: \"body\"\n# type: \"func\"\n# }\n# }\n# op {\n# name: \"SymbolicGradient\"\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"Tin\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"Tout\"\n# }\n# attr {\n# name: \"Tin\"\n# type: \"list(type)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"Tout\"\n# type: \"list(type)\"\n# has_minimum: true\n# minimum: 1\n# }\n# attr {\n# name: \"f\"\n# type: \"func\"\n# }\n# }\n# op {\n# name: \"While\"\n# input_arg {\n# name: \"input\"\n# type_list_attr: \"T\"\n# }\n# output_arg {\n# name: \"output\"\n# type_list_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"list(type)\"\n# has_minimum: true\n# }\n# attr {\n# name: \"cond\"\n# type: \"func\"\n# }\n# attr {\n# name: \"body\"\n# type: \"func\"\n# }\n# attr {\n# name: \"output_shapes\"\n# type: \"list(shape)\"\n# default_value {\n# list {\n# }\n# }\n# }\n# attr {\n# name: \"parallel_iterations\"\n# type: \"int\"\n# default_value {\n# i: 10\n# }\n# }\n# is_stateful: true\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\242\\001\\n\\004Case\\022\\020\\n\\014branch_index\\030\\003\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\032\\n\\010branches\\022\\nlist(func)(\\0010\\001\\\" 
\\n\\routput_shapes\\022\\013list(shape)\\032\\002\\n\\000\\210\\001\\001\\n;\\n\\tFakeParam\\032\\017\\n\\006output\\\"\\005dtype\\\"\\r\\n\\005dtype\\022\\004type\\\"\\016\\n\\005shape\\022\\005shape\\n`\\n\\003For\\022\\t\\n\\005start\\030\\003\\022\\t\\n\\005limit\\030\\003\\022\\t\\n\\005delta\\030\\003\\022\\n\\n\\005input2\\001T\\032\\013\\n\\006output2\\001T\\\"\\021\\n\\001T\\022\\nlist(type)(\\001\\\"\\014\\n\\004body\\022\\004func\\n\\272\\001\\n\\002If\\022\\r\\n\\004cond\\\"\\005Tcond\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\r\\n\\005Tcond\\022\\004type\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\023\\n\\013then_branch\\022\\004func\\\"\\023\\n\\013else_branch\\022\\004func\\\" \\n\\routput_shapes\\022\\013list(shape)\\032\\002\\n\\000\\210\\001\\001\\n\\263\\001\\n\\017PartitionedCall\\022\\013\\n\\004args2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\t\\n\\001f\\022\\004func\\\"\\024\\n\\006config\\022\\006string\\032\\002\\022\\000\\\"\\032\\n\\014config_proto\\022\\006string\\032\\002\\022\\000\\\"\\033\\n\\rexecutor_type\\022\\006string\\032\\002\\022\\000\\nr\\n\\nRemoteCall\\022\\n\\n\\006target\\030\\007\\022\\013\\n\\004args2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\025\\n\\003Tin\\022\\nlist(type)(\\0010\\001\\\"\\026\\n\\004Tout\\022\\nlist(type)(\\0010\\001\\\"\\t\\n\\001f\\022\\004func\\210\\001\\001\\n\\276\\001\\n\\027StatefulPartitionedCall\\022\\013\\n\\004args2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\t\\n\\001f\\022\\004func\\\"\\024\\n\\006config\\022\\006string\\032\\002\\022\\000\\\"\\032\\n\\014config_proto\\022\\006string\\032\\002\\022\\000\\\"\\033\\n\\rexecutor_type\\022\\006string\\032\\002\\022\\000\\210\\001\\001\\n\\300\\001\\n\\013StatelessIf\\022\\r\\n\\004cond\\\"\\005Tcond\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\r\\n\\005Tcond\\022\\004type\\\"\\023\\n\\003Tin\\022\\nlist(type)(\\001\\\"\\024\\n\\004Tout\\022\\nlist(type)(\\001\\\"\\023\\n\\013then_branch\\022\\004func\\\"\\023\\n\\013else_branch\\022\\004func\\\" \\n\\routput_shapes\\022\\013list(shape)\\032\\002\\n\\000\\nX\\n\\016StatelessWhile\\022\\n\\n\\005input2\\001T\\032\\013\\n\\006output2\\001T\\\"\\021\\n\\001T\\022\\nlist(type)(\\001\\\"\\014\\n\\004cond\\022\\004func\\\"\\014\\n\\004body\\022\\004func\\nj\\n\\020SymbolicGradient\\022\\014\\n\\005input2\\003Tin\\032\\016\\n\\006output2\\004Tout\\\"\\025\\n\\003Tin\\022\\nlist(type)(\\0010\\001\\\"\\026\\n\\004Tout\\022\\nlist(type)(\\0010\\001\\\"\\t\\n\\001f\\022\\004func\\n\\224\\001\\n\\005While\\022\\n\\n\\005input2\\001T\\032\\013\\n\\006output2\\001T\\\"\\021\\n\\001T\\022\\nlist(type)(\\001\\\"\\014\\n\\004cond\\022\\004func\\\"\\014\\n\\004body\\022\\004func\\\" \\n\\routput_shapes\\022\\013list(shape)\\032\\002\\n\\000\\\"\\036\\n\\023parallel_iterations\\022\\003int\\032\\002\\030\\n\\210\\001\\001\")\n",
"# This file is MACHINE GENERATED! Do not edit.\n# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.\n\"\"\"Public API for tf.experimental namespace.\n\"\"\"\n\nfrom __future__ import print_function as _print_function\n\nimport sys as _sys\n\nfrom tensorflow.python.eager.context import function_executor_type\n\ndel _print_function\n\nfrom tensorflow.python.util import module_wrapper as _module_wrapper\n\nif not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):\n _sys.modules[__name__] = _module_wrapper.TFModuleWrapper(\n _sys.modules[__name__], \"compat.v1.experimental\", public_apis=None, deprecation=False,\n has_lite=False)\n"
] | [
[
"tensorflow.python.eager.execute.args_to_matching_eager",
"tensorflow.python.eager.execute.make_type",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.eager.execute.make_int",
"tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute",
"tensorflow.python.eager.execute.make_str",
"tensorflow.python.eager.execute.make_shape",
"tensorflow.python.framework.op_def_library.OpDefLibrary",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.execute.execute",
"tensorflow.python.eager.execute.convert_to_mixed_eager_tensors",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.util.tf_export.kwarg_only"
],
[
"tensorflow.python.util.module_wrapper.TFModuleWrapper"
]
] |
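
The record above is TensorFlow's machine-generated Python wrapper for the raw `While` op, together with the registered op definitions it ships with. As an illustration only, and not part of the record, the sketch below exercises the same control-flow primitive through the public `tf.while_loop` API, which lowers to this op when a graph is traced; it assumes a TensorFlow 2.x installation.

```python
# Illustrative sketch: the public tf.while_loop front end for the raw While op above.
import tensorflow as tf

i0 = tf.constant(0)
total0 = tf.constant(0)

cond = lambda i, total: i < 10                  # loop while i < 10
body = lambda i, total: (i + 1, total + i)      # return the updated loop variables

i_final, total_final = tf.while_loop(
    cond, body, loop_vars=(i0, total0), parallel_iterations=10)
print(int(total_final))  # 45
```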
linusidom/rl-workshop | [
"dbb2ca8b9a5330042a30655ee64c3a4be241d692"
] | [
"solutions/networks.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.autograd.variable import Variable\n\nclass QNetwork(nn.Module):\n def __init__(self, state_size, action_size, nb_hidden, seed=1412):\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.model = nn.Sequential(\n nn.Linear(state_size, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5)\n nn.Linear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n nn.Linear(nb_hidden, action_size)\n )\n def forward(self, state):\n x = self.model(state)\n return(x)\n\nclass DuelingNetwork(nn.Module):\n def __init__(self, state_size, action_size, nb_hidden, seed=1412):\n super(DuelingNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n \n self.feature = nn.Sequential(\n nn.Linear(state_size, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5)\n )\n \n self.advantage = nn.Sequential(\n nn.Linear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n nn.Linear(nb_hidden, action_size)\n )\n \n self.value = nn.Sequential(\n nn.Linear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n nn.Linear(nb_hidden, 1)\n )\n\n def forward(self, state):\n x = self.feature(state)\n adv = self.advantage(x)\n val = self.value(x)\n result = adv + val - adv.mean() \n return(result)\n\n\nclass NoisyLinear(nn.Module):\n def __init__(self, in_features, out_features, std_init=0.1):\n super(NoisyLinear, self).__init__()\n \n self.in_features = in_features\n self.out_features = out_features\n self.std_init = std_init\n \n self.W_mu = nn.Parameter(torch.FloatTensor(out_features, in_features))\n self.W_sigma = nn.Parameter(torch.FloatTensor(out_features, in_features))\n self.register_buffer('W_epsilon', torch.FloatTensor(out_features, in_features))\n \n self.b_mu = nn.Parameter(torch.FloatTensor(out_features))\n self.b_sigma = nn.Parameter(torch.FloatTensor(out_features))\n self.register_buffer('b_epsilon', torch.FloatTensor(out_features))\n \n self.init_parameters()\n self.reset_noise()\n \n def forward(self, x):\n if self.training: \n W = self.W_mu + self.W_sigma * Variable(self.W_epsilon)\n b = self.b_mu + self.b_sigma * Variable(self.b_epsilon)\n else:\n W = self.W_mu\n b = self.b_mu\n result = F.linear(x, W, b)\n return(result)\n \n def init_parameters(self):\n mu_range = 1 / self.in_features**(1/2)\n \n self.W_mu.data.uniform_(-mu_range, mu_range)\n self.W_sigma.data.fill_(self.std_init / (self.in_features)**(1/2))\n \n self.b_mu.data.uniform_(-mu_range, mu_range)\n self.b_sigma.data.fill_(self.std_init / (self.in_features)**(1/2))\n \n def reset_noise(self):\n epsilon_in = self.f_noise(self.in_features)\n epsilon_out = self.f_noise(self.out_features)\n \n self.W_epsilon.copy_(epsilon_out.ger(epsilon_in))\n self.b_epsilon.copy_(epsilon_out)\n \n def f_noise(self, size):\n x = torch.randn(size)\n x = x.sign() * (x.abs().sqrt())\n return(x)\n \nclass NoisyDuelingNetwork(nn.Module):\n def __init__(self, state_size, action_size, nb_hidden, seed=1412):\n super(NoisyDuelingNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.feature = nn.Sequential(\n nn.Linear(state_size, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5)\n )\n \n self.advantage = nn.Sequential(\n NoisyLinear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n NoisyLinear(nb_hidden, action_size)\n )\n \n 
self.value = nn.Sequential(\n NoisyLinear(nb_hidden, nb_hidden),\n nn.ReLU(),\n# nn.BatchNorm1d(nb_hidden),\n# nn.Dropout(0.5),\n NoisyLinear(nb_hidden, 1)\n )\n\n def forward(self, state):\n x = self.feature(state)\n adv = self.advantage(x)\n val = self.value(x)\n result = adv + val - adv.mean() \n return(result)\n \n def reset_noise(self):\n# self._modules['feature'][0].reset_noise()\n self._modules['advantage'][0].reset_noise()\n self._modules['advantage'][2].reset_noise()\n self._modules['value'][0].reset_noise()\n self._modules['value'][2].reset_noise()"
] | [
[
"torch.FloatTensor",
"torch.autograd.variable.Variable",
"torch.nn.Linear",
"torch.nn.functional.linear",
"torch.randn",
"torch.manual_seed",
"torch.nn.ReLU"
]
] |
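
A minimal usage sketch for the networks defined in the record above; it assumes the record's `solutions/networks.py` is importable as `networks` (a hypothetical import path) and that PyTorch is installed.

```python
# Hypothetical usage of the record's QNetwork / DuelingNetwork (import path assumed).
import torch
from networks import QNetwork, DuelingNetwork

state_size, action_size, nb_hidden = 8, 4, 64
qnet = QNetwork(state_size, action_size, nb_hidden)
duel = DuelingNetwork(state_size, action_size, nb_hidden)

states = torch.randn(32, state_size)       # a batch of 32 states
q_values = qnet(states)                    # shape (32, action_size)
duel_q = duel(states)                      # advantage + value - advantage.mean()
print(q_values.shape, duel_q.shape, q_values.argmax(dim=1).shape)
```

The dueling head combines a state value with per-action advantages and subtracts the mean advantage, which keeps the two streams identifiable.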
ky-zhang/utils | [
"f1c9d2580db5ef0f0291ae77312b3d538f292a12"
] | [
"plot/plot_folder.py"
] | [
"import os\nimport numpy\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport argparse\nfrom PIL import Image\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-input', default = '', help = 'input file folder')\nparser.add_argument('-label', default = '', help = 'label file')\nparser.add_argument('-output', default = '', help = 'output file folder')\nparser.add_argument('-range', default = '', help = 'axis range')\n\nargs = parser.parse_args()\n\nlabel = []\nif args.label != '':\n for line in open(args.label):\n label.append(line.strip())\n\nfiles = os.listdir(args.input)\nfor file in files:\n filepath = os.path.join(\"%s%s\" %(args.input, file))\n out_png_path = os.path.join(\"%s%s\" %(args.output, file))\n out_png_path = out_png_path.replace(\"txt\", \"png\")\n N = M = 0\n all_data = {}\n for i, line in enumerate(open(filepath)):\n vec = line.strip().split(' ')\n if i == 0:\n N = int(vec[0])\n M = int(vec[1])\n elif i <= N:\n if args.label == '':\n label.append(0)\n all_data.setdefault(label[i-1], []).append((float(vec[-2]), float(vec[-1])))\n\n colors = plt.cm.rainbow(numpy.linspace(0, 1, len(all_data)))\n\n for color, ll in zip(colors, sorted(all_data.keys())):\n x = [t[0] for t in all_data[ll]]\n y = [t[1] for t in all_data[ll]]\n plt.plot(x, y, '.', color = color, markersize = 1)\n if args.range != '':\n l = abs(float(args.range))\n plt.xlim(-l, l)\n plt.ylim(-l, l)\n # 坐标轴\n plt.axis('off')\n plt.savefig(out_png_path, dpi = 300)\n plt.close(\"all\")\n\n # 背景透明\n # img = Image.open(out_png_path)\n # img = img.convert(\"RGBA\")\n # datas = img.getdata()\n # newData = list()\n # for item in datas:\n # if item[0] >220 and item[1] > 220 and item[2] > 220:\n # newData.append(( 255, 255, 255, 0))\n # else:\n # newData.append(item)\n \n # img.putdata(newData)\n # img.save(out_png_path,\"PNG\")\n\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"matplotlib.pyplot.plot"
]
] |
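
The script above colours scatter points by label with `plt.cm.rainbow` and writes headless PNGs. A self-contained sketch of that core loop, using synthetic data in place of the record's input files:

```python
# Per-label colouring pattern from the record, with synthetic 2-D embeddings.
import numpy as np
import matplotlib
matplotlib.use('Agg')          # headless backend, as in the record
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
all_data = {label: rng.normal(loc=3 * i, size=(50, 2))
            for i, label in enumerate(['a', 'b', 'c'])}

colors = plt.cm.rainbow(np.linspace(0, 1, len(all_data)))
for color, label in zip(colors, sorted(all_data)):
    xy = all_data[label]
    plt.plot(xy[:, 0], xy[:, 1], '.', color=color, markersize=1)

plt.axis('off')
plt.savefig('embedding.png', dpi=300)
plt.close('all')
```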
algteam/spacy_zh_model | [
"0b0cba1a3964aa426e5f96087849c90e69e2a89d"
] | [
"examples/keras_parikh_entailment/spacy_hook.py"
] | [
"import numpy as np\r\nfrom keras.models import model_from_json\r\n\r\ntry:\r\n import cPickle as pickle\r\nexcept ImportError:\r\n import pickle\r\n\r\n\r\nclass KerasSimilarityShim(object):\r\n entailment_types = [\"entailment\", \"contradiction\", \"neutral\"]\r\n\r\n @classmethod\r\n def load(cls, path, nlp, max_length=100, get_features=None):\r\n \r\n if get_features is None:\r\n get_features = get_word_ids\r\n \r\n with (path / 'config.json').open() as file_:\r\n model = model_from_json(file_.read())\r\n with (path / 'model').open('rb') as file_:\r\n weights = pickle.load(file_)\r\n \r\n embeddings = get_embeddings(nlp.vocab)\r\n weights.insert(1, embeddings)\r\n model.set_weights(weights)\r\n\r\n return cls(model, get_features=get_features, max_length=max_length)\r\n\r\n def __init__(self, model, get_features=None, max_length=100):\r\n self.model = model\r\n self.get_features = get_features\r\n self.max_length = max_length\r\n\r\n def __call__(self, doc):\r\n doc.user_hooks['similarity'] = self.predict\r\n doc.user_span_hooks['similarity'] = self.predict\r\n\r\n return doc\r\n\r\n def predict(self, doc1, doc2):\r\n x1 = self.get_features([doc1], max_length=self.max_length)\r\n x2 = self.get_features([doc2], max_length=self.max_length)\r\n scores = self.model.predict([x1, x2])\r\n\r\n return self.entailment_types[scores.argmax()], scores.max()\r\n\r\n\r\ndef get_embeddings(vocab, nr_unk=100):\r\n # the extra +1 is for a zero vector representing sentence-final padding\r\n num_vectors = max(lex.rank for lex in vocab) + 2 \r\n \r\n # create random vectors for OOV tokens\r\n oov = np.random.normal(size=(nr_unk, vocab.vectors_length))\r\n oov = oov / oov.sum(axis=1, keepdims=True)\r\n \r\n vectors = np.zeros((num_vectors + nr_unk, vocab.vectors_length), dtype='float32')\r\n vectors[1:(nr_unk + 1), ] = oov\r\n for lex in vocab:\r\n if lex.has_vector and lex.vector_norm > 0:\r\n vectors[nr_unk + lex.rank + 1] = lex.vector / lex.vector_norm \r\n\r\n return vectors\r\n\r\n\r\ndef get_word_ids(docs, max_length=100, nr_unk=100):\r\n Xs = np.zeros((len(docs), max_length), dtype='int32')\r\n \r\n for i, doc in enumerate(docs):\r\n for j, token in enumerate(doc):\r\n if j == max_length:\r\n break\r\n if token.has_vector:\r\n Xs[i, j] = token.rank + nr_unk + 1\r\n else:\r\n Xs[i, j] = token.rank % nr_unk + 1\r\n return Xs\r\n"
] | [
[
"numpy.random.normal",
"numpy.zeros"
]
] |
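
A small sketch of the word-id scheme used by `get_word_ids` in the record above: in-vocabulary tokens map to `rank + nr_unk + 1`, out-of-vocabulary tokens are hashed into the reserved range `1..nr_unk`, and index 0 is padding. Plain named tuples stand in for spaCy tokens here, so the snippet runs without a model.

```python
# Word-id scheme from get_word_ids, with stand-ins for spaCy tokens.
import numpy as np
from collections import namedtuple

Token = namedtuple('Token', ['rank', 'has_vector'])    # stand-in for spacy.tokens.Token
doc = [Token(rank=3, has_vector=True),                 # known word  -> rank + nr_unk + 1
       Token(rank=157, has_vector=False)]              # OOV word    -> hashed into 1..nr_unk

max_length, nr_unk = 5, 100
ids = np.zeros((1, max_length), dtype='int32')         # zeros are sentence-final padding
for j, token in enumerate(doc[:max_length]):
    ids[0, j] = token.rank + nr_unk + 1 if token.has_vector else token.rank % nr_unk + 1
print(ids)   # known word -> 104, OOV word -> 58, remaining slots -> 0
```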
nabeelyousfi/MyoEmg | [
"c819712d93bfb58828b36669e55cd4d77453c1cf"
] | [
"train.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 29 13:39:13 2018\n\n@author: Hassan Yousuf & Nabeel Hussain\n\"\"\"\nfrom __future__ import print_function\nimport sklearn.ensemble\nfrom sklearn import metrics\nfrom myo import init, Hub, DeviceListener, StreamEmg\nfrom time import sleep\nimport numpy as np\nimport threading\nimport collections\nimport _pickle as cPickle\n\n# Complete code for training and predicting EMG data in Python using RandomForestClassifier via Myo Armband 2\n\ndef unison_shuffled_copies(a, b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]\n\ndef rms(array):\n n = len(array)\n sum = 0\n for a in array:\n sum =+ a*a\n return np.sqrt((1/float(n))*sum)\n\ndef iav(array):\n sum = 0\n for a in array:\n sum += np.abs(a)\n return sum\n\ndef ssi(array):\n sum = 0\n for a in array:\n sum += a*a\n return sum\n\ndef var(array):\n n = len(array)\n sum = 0\n for a in array:\n sum += a*a\n return ((1/float(n-1))*sum)\n\ndef tm3(array):\n n = len(array)\n print('n : ', n)\n sum = 0\n for a in array:\n sum =+ a*a*a\n return np.power((1/float(n))*sum,1/float(3))\n\ndef wl(array):\n sum = 0\n for a in range(0,len(array)-1):\n sum =+ array[a+1] - array[a]\n return sum\n\ndef aac(array):\n n = len(array)\n sum = 0\n for a in range(0,n-1):\n sum =+ array[0+1] - array[0]\n return sum/float(n)\n\n\ndef featurize(array):\n n = []\n for a in array:\n n.append(rms(a))\n return n\n\nstatus = 0\nX = []\n\nclass Listener(DeviceListener):\n def __init__(self, queue_size=1):\n self.lock = threading.Lock()\n self.emg_data_queue = collections.deque(maxlen=queue_size)\n self.ori_data_queue = collections.deque(maxlen=queue_size)\n\n def on_connect(self, myo, timestamp, firmware_version):\n myo.set_stream_emg(StreamEmg.enabled)\n\n def on_emg_data(self, myo, timestamp, emg):\n if(status):\n X.append(np.asarray(emg))\n\n def on_orientation_data(self, myo, timestamp, quat):\n # print(\"Orientation:\", quat.x, quat.y, quat.z, quat.w)\n with self.lock:\n self.ori_data_queue.append(quat)\n\n def get_ori_data(self):\n with self.lock:\n return list(self.ori_data_queue)\n\ninit()\nhub = Hub()\nlistener = Listener()\nhub.run(1000, listener)\n\nstatus = 9999\n\nsleep(1)\n\nmyX = []\n\nreq_iter = 20\ntrain_1 = []\ntrain_2 = []\ntrain_3 = []\ntrain_4 = []\ntrain_5 = []\n\nges1 = ['Rock', 'Paper', 'Scissors', 'Lizard', 'Spock']\nges2 = ['Number 1', 'Number 2', 'Number 3', 'Number 4', 'Number 5']\nges = [\"Spread Fingers\", \"Wave Out\", \"Wave In\", \"Fist\", \"Rest\"]\n\nfor a in range(1,4):\n print(\"\\nGesture -- \", ges[0],\" : Ready?\")\n input(\"Press Enter to continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_1.append(np.asarray(X))\n X = []\n if len(train_1) > a*req_iter:\n break\n\n print(\"\\nGesture -- \", ges[1],\" : Ready?\")\n input(\"Press Enter to continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_2.append(np.asarray(X))\n X = []\n if len(train_2) > a*req_iter:\n break\n\n print(\"\\nGesture -- \", ges[2],\" : Ready?\")\n input(\"Press Enter to continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_3.append(np.asarray(X))\n X = []\n if len(train_3) > a*req_iter:\n break\n\n print(\"\\nGesture -- \", ges[3],\" : Ready?\")\n input(\"Press Enter to continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_4.append(np.asarray(X))\n X = []\n if len(train_4) > a*req_iter:\n break\n\n print(\"\\nGesture -- \", ges[4],\" : Ready?\")\n input(\"Press Enter to 
continue...\")\n X = []\n while(1):\n if len(X) > 20:\n # print(X[-1])\n train_5.append(np.asarray(X))\n X = []\n if len(train_5) > a*req_iter:\n break\n\ntrain_x = []\ntrain_y = []\n\nfor a in train_1:\n train_x.append(np.asarray(a))\n train_y.append(1)\n\nfor a in train_2:\n train_x.append(np.asarray(a))\n train_y.append(2)\n\nfor a in train_3:\n train_x.append(np.asarray(a))\n train_y.append(3)\n\nfor a in train_4:\n train_x.append(np.asarray(a))\n train_y.append(4)\n\nfor a in train_5:\n train_x.append(np.asarray(a))\n train_y.append(5)\n\ntrain_x_f = []\n\nfor a in train_x:\n x_f_h = []\n for b in range(0,8):\n x_f_h.append(rms(a[:, b]))\n x_f_h.append(iav(a[:, b]))\n x_f_h.append(ssi(a[:, b]))\n x_f_h.append(var(a[:, b]))\n # x_f_h.append(tm3(a[:, b]))\n x_f_h.append(wl(a[:, b]))\n x_f_h.append(aac(a[:, b]))\n train_x_f.append(x_f_h)\n\n# print(len(train_x_f), len(train_x))\nclf = sklearn.ensemble.AdaBoostClassifier(n_estimators=7, learning_rate=1) #, random_state=np.random.randint(0,9))\nclf2 = sklearn.ensemble.RandomForestClassifier()\nclf3 = sklearn.ensemble.RandomForestClassifier(n_estimators=25)\n\nclf.fit(train_x_f, train_y)\nclf2.fit(train_x_f, train_y)\nclf3.fit(train_x_f, train_y)\n\ny_i = clf.predict(train_x_f)\nprint('SkLearn : ', metrics.accuracy_score(train_y, y_i))\n\nprint(train_x_f[0])\n\nprint(\"Training Complete!\")\n\nwith open('META001.pkl', 'wb') as fid:\n cPickle.dump(clf, fid)\n\nwith open('META002.pkl', 'wb') as fid:\n cPickle.dump(clf2, fid)\n\nwith open('META003.pkl', 'wb') as fid:\n cPickle.dump(clf3, fid)\nsleep(1)\nhub.shutdown()"
] | [
[
"sklearn.metrics.accuracy_score",
"numpy.abs",
"numpy.asarray"
]
] |
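
The record above extracts per-channel time-domain features (RMS, IAV, SSI, VAR, WL, AAC) from 8-channel EMG windows and fits sklearn classifiers. Note that its `rms`, `tm3`, `wl` and `aac` use `sum =+ ...` (plain assignment) rather than `+=`, so each effectively sees only the last element. The sketch below uses the conventional feature definitions on synthetic windows; all data here are made up.

```python
# Windowed EMG featurisation + classifier fit, with conventional feature formulas
# and synthetic data (the record reads live Myo armband data instead).
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def window_features(window):                  # window: (n_samples, 8) EMG block
    feats = []
    for ch in window.T:                       # per-channel features, as in the record
        diffs = np.diff(ch)
        feats += [np.sqrt(np.mean(ch ** 2)),  # RMS
                  np.sum(np.abs(ch)),         # IAV
                  np.sum(ch ** 2),            # SSI
                  np.var(ch, ddof=1),         # VAR
                  np.sum(np.abs(diffs)),      # WL (waveform length)
                  np.mean(np.abs(diffs))]     # AAC (average amplitude change)
    return feats

rng = np.random.default_rng(0)
X = [window_features(rng.normal(size=(21, 8))) for _ in range(100)]
y = rng.integers(1, 6, size=100)              # five gesture labels, as in the record
clf = RandomForestClassifier(n_estimators=25).fit(X, y)
print(clf.score(X, y))
```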
mcstro/natural-neighbor-interpolation | [
"76ba7bb50c84aef35e993902c46824e5991df45d"
] | [
"tests/test_api.py"
] | [
"import scipy.interpolate\nimport numpy as np\nimport pytest\n\nimport naturalneighbor\n\n\[email protected](\"grid_ranges\", [\n [[0, 4, 0.6], [-3, 3, 1.0], [0, 1, 3]],\n [[0, 2, 1], [0, 2, 1j], [0, 2, 2j]],\n [[0, 2, 1 + 1j], [0, 2, -10j], [0, 2, 2j]],\n [[0, 2, 1 + 1j], [0, 2, -0.9j], [0, 2, 2.1j]],\n])\ndef test_output_size_matches_scipy(grid_ranges):\n points = np.random.rand(10, 3)\n values = np.random.rand(10)\n\n mesh_grids = tuple(np.mgrid[\n grid_ranges[0][0]:grid_ranges[0][1]:grid_ranges[0][2],\n grid_ranges[1][0]:grid_ranges[1][1]:grid_ranges[1][2],\n grid_ranges[2][0]:grid_ranges[2][1]:grid_ranges[2][2],\n ])\n\n scipy_result = scipy.interpolate.griddata(points, values, mesh_grids)\n nn_result = naturalneighbor.griddata(points, values, grid_ranges)\n\n assert scipy_result.shape == nn_result.shape\n"
] | [
[
"numpy.random.rand"
]
] |
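
The test above only checks that `naturalneighbor.griddata` and `scipy.interpolate.griddata` produce arrays of the same shape for `[start, stop, step]` grid ranges. An illustrative run of that comparison, assuming both SciPy and the `naturalneighbor` package from the record are installed:

```python
# The same [start, stop, step] ranges drive np.mgrid (for scipy) and naturalneighbor.
import numpy as np
import scipy.interpolate
import naturalneighbor   # assumes the package exercised by the record is installed

points = np.random.rand(10, 3)
values = np.random.rand(10)
grid_ranges = [[0, 4, 0.6], [-3, 3, 1.0], [0, 1, 3]]

mesh_grids = tuple(np.mgrid[tuple(slice(lo, hi, step) for lo, hi, step in grid_ranges)])
scipy_result = scipy.interpolate.griddata(points, values, mesh_grids)
nn_result = naturalneighbor.griddata(points, values, grid_ranges)
print(scipy_result.shape, nn_result.shape)   # the shapes should match
```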
Ahtkom/hello-world | [
"3a81bd25713513836c9242fe943b171ff731cfce"
] | [
"2021_1/sql_principle/prac_1/src/upload_geodata.py"
] | [
"import numpy as np\nimport pandas as pd\nimport psycopg2, sys\n\nhostname, user, dbname, passward = sys.argv[1:5]\n\n\ndef load_data():\n lon0, lat0 = 115.8, 29.4\n x = pd.read_excel('../data/x.xlsx', header=0, index_col=0).values\n y = pd.read_excel('../data/y.xlsx', header=0, index_col=0).values\n \n lon = lon0 + 360*x/np.pi/6371\n lat = lat0 + 180*y/np.pi/6371\n lon_list, lat_list = [], []\n\n for i in range(lon.shape[0]):\n lon_list.append([x for x in lon[i,:] if not np.isnan(x)])\n lat_list.append([x for x in lat[i,:] if not np.isnan(x)])\n\n return lon_list, lat_list\n\n \nif __name__ == '__main__':\n conn = psycopg2.connect(host=hostname, port=5432, user=user, \n dbname=dbname, password=passward)\n cursor = conn.cursor()\n \n lon_list, lat_list = load_data()\n flag, sep_list = 1, [0]\n for i in range(len(lon_list)):\n for j in range(len(lon_list[i])):\n cursor.execute(\"insert into my_point (point_id,lon,lat) values (%s,%s,%s)\",\n (flag, lon_list[i][j], lat_list[i][j]))\n flag += 1\n sep_list.append(flag-1)\n print(i+1, '/', len(lon_list), 'num:', len(lon_list[i]))\n # if i == 10:\n # break\n for i in range(len(sep_list)-1):\n cursor.execute(\"insert into my_line (line_id,point_list) values (%s,%s)\",\n (i+1, list(range(sep_list[i]+1,sep_list[i+1]+1))))\n\n # conn.commit()\n conn.close()\n\n"
] | [
[
"pandas.read_excel",
"numpy.isnan"
]
] |
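
A self-contained sketch of the coordinate conversion and NaN filtering performed by `load_data()` in the record above, with a small synthetic array standing in for the Excel files and no database access; `lon0`, `lat0` and the 6371 km Earth radius are taken from the record.

```python
# Offset-to-lon/lat conversion and NaN filtering from load_data(), on synthetic data.
import numpy as np

lon0, lat0, R = 115.8, 29.4, 6371          # reference point and Earth radius (km), as above
x = np.array([[1.0, 2.0, np.nan],          # per-row offsets; NaN marks missing points
              [0.5, np.nan, np.nan]])
y = np.array([[0.3, 0.6, np.nan],
              [1.1, np.nan, np.nan]])

lon = lon0 + 360 * x / np.pi / R           # the record's conversion to degrees
lat = lat0 + 180 * y / np.pi / R

lon_list = [[v for v in row if not np.isnan(v)] for row in lon]
lat_list = [[v for v in row if not np.isnan(v)] for row in lat]
print(lon_list[0], lat_list[0])
```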
rli596/manim | [
"e147a9fc6c117332221e42437481f3efba76499a"
] | [
"manim/scene/scene.py"
] | [
"\"\"\"Basic canvas for animations.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\"Scene\"]\n\nimport copy\nimport datetime\nimport inspect\nimport platform\nimport random\nimport threading\nimport time\nimport types\nfrom queue import Queue\nfrom typing import Callable\n\nimport srt\n\nfrom manim.scene.section import DefaultSectionType\n\ntry:\n import dearpygui.dearpygui as dpg\n\n dearpygui_imported = True\nexcept ImportError:\n dearpygui_imported = False\nimport numpy as np\nfrom tqdm import tqdm\nfrom watchdog.events import FileSystemEventHandler\nfrom watchdog.observers import Observer\n\nfrom manim.mobject.opengl.opengl_mobject import OpenGLPoint\n\nfrom .. import config, logger\nfrom ..animation.animation import Animation, Wait, prepare_animation\nfrom ..camera.camera import Camera\nfrom ..constants import *\nfrom ..gui.gui import configure_pygui\nfrom ..renderer.cairo_renderer import CairoRenderer\nfrom ..renderer.opengl_renderer import OpenGLRenderer\nfrom ..renderer.shader import Object3D\nfrom ..utils import opengl, space_ops\nfrom ..utils.exceptions import EndSceneEarlyException, RerunSceneException\nfrom ..utils.family import extract_mobject_family_members\nfrom ..utils.family_ops import restructure_list_to_exclude_certain_family_members\nfrom ..utils.file_ops import open_media_file\nfrom ..utils.iterables import list_difference_update, list_update\n\n\nclass RerunSceneHandler(FileSystemEventHandler):\n \"\"\"A class to handle rerunning a Scene after the input file is modified.\"\"\"\n\n def __init__(self, queue):\n super().__init__()\n self.queue = queue\n\n def on_modified(self, event):\n self.queue.put((\"rerun_file\", [], {}))\n\n\nclass Scene:\n \"\"\"A Scene is the canvas of your animation.\n\n The primary role of :class:`Scene` is to provide the user with tools to manage\n mobjects and animations. Generally speaking, a manim script consists of a class\n that derives from :class:`Scene` whose :meth:`Scene.construct` method is overridden\n by the user's code.\n\n Mobjects are displayed on screen by calling :meth:`Scene.add` and removed from\n screen by calling :meth:`Scene.remove`. All mobjects currently on screen are kept\n in :attr:`Scene.mobjects`. Animations are played by calling :meth:`Scene.play`.\n\n A :class:`Scene` is rendered internally by calling :meth:`Scene.render`. This in\n turn calls :meth:`Scene.setup`, :meth:`Scene.construct`, and\n :meth:`Scene.tear_down`, in that order.\n\n It is not recommended to override the ``__init__`` method in user Scenes. For code\n that should be ran before a Scene is rendered, use :meth:`Scene.setup` instead.\n\n Examples\n --------\n Override the :meth:`Scene.construct` method with your code.\n\n .. 
code-block:: python\n\n class MyScene(Scene):\n def construct(self):\n self.play(Write(Text(\"Hello World!\")))\n\n \"\"\"\n\n def __init__(\n self,\n renderer=None,\n camera_class=Camera,\n always_update_mobjects=False,\n random_seed=None,\n skip_animations=False,\n ):\n self.camera_class = camera_class\n self.always_update_mobjects = always_update_mobjects\n self.random_seed = random_seed\n self.skip_animations = skip_animations\n\n self.animations = None\n self.stop_condition = None\n self.moving_mobjects = []\n self.static_mobjects = []\n self.time_progression = None\n self.duration = None\n self.last_t = None\n self.queue = Queue()\n self.skip_animation_preview = False\n self.meshes = []\n self.camera_target = ORIGIN\n self.widgets = []\n self.dearpygui_imported = dearpygui_imported\n self.updaters = []\n self.point_lights = []\n self.ambient_light = None\n self.key_to_function_map = {}\n self.mouse_press_callbacks = []\n self.interactive_mode = False\n\n if config.renderer == \"opengl\":\n # Items associated with interaction\n self.mouse_point = OpenGLPoint()\n self.mouse_drag_point = OpenGLPoint()\n if renderer is None:\n renderer = OpenGLRenderer()\n\n if renderer is None:\n self.renderer = CairoRenderer(\n camera_class=self.camera_class,\n skip_animations=self.skip_animations,\n )\n else:\n self.renderer = renderer\n self.renderer.init_scene(self)\n\n self.mobjects = []\n # TODO, remove need for foreground mobjects\n self.foreground_mobjects = []\n if self.random_seed is not None:\n random.seed(self.random_seed)\n np.random.seed(self.random_seed)\n\n @property\n def camera(self):\n return self.renderer.camera\n\n def __deepcopy__(self, clone_from_id):\n cls = self.__class__\n result = cls.__new__(cls)\n clone_from_id[id(self)] = result\n for k, v in self.__dict__.items():\n if k in [\"renderer\", \"time_progression\"]:\n continue\n if k == \"camera_class\":\n setattr(result, k, v)\n setattr(result, k, copy.deepcopy(v, clone_from_id))\n result.mobject_updater_lists = []\n\n # Update updaters\n for mobject in self.mobjects:\n cloned_updaters = []\n for updater in mobject.updaters:\n # Make the cloned updater use the cloned Mobjects as free variables\n # rather than the original ones. 
Analyzing function bytecode with the\n # dis module will help in understanding this.\n # https://docs.python.org/3/library/dis.html\n # TODO: Do the same for function calls recursively.\n free_variable_map = inspect.getclosurevars(updater).nonlocals\n cloned_co_freevars = []\n cloned_closure = []\n for free_variable_name in updater.__code__.co_freevars:\n free_variable_value = free_variable_map[free_variable_name]\n\n # If the referenced variable has not been cloned, raise.\n if id(free_variable_value) not in clone_from_id:\n raise Exception(\n f\"{free_variable_name} is referenced from an updater \"\n \"but is not an attribute of the Scene, which isn't \"\n \"allowed.\",\n )\n\n # Add the cloned object's name to the free variable list.\n cloned_co_freevars.append(free_variable_name)\n\n # Add a cell containing the cloned object's reference to the\n # closure list.\n cloned_closure.append(\n types.CellType(clone_from_id[id(free_variable_value)]),\n )\n\n cloned_updater = types.FunctionType(\n updater.__code__.replace(co_freevars=tuple(cloned_co_freevars)),\n updater.__globals__,\n updater.__name__,\n updater.__defaults__,\n tuple(cloned_closure),\n )\n cloned_updaters.append(cloned_updater)\n mobject_clone = clone_from_id[id(mobject)]\n mobject_clone.updaters = cloned_updaters\n if len(cloned_updaters) > 0:\n result.mobject_updater_lists.append((mobject_clone, cloned_updaters))\n return result\n\n def render(self, preview=False):\n \"\"\"\n Renders this Scene.\n\n Parameters\n ---------\n preview : bool\n If true, opens scene in a file viewer.\n \"\"\"\n self.setup()\n try:\n self.construct()\n except EndSceneEarlyException:\n pass\n except RerunSceneException as e:\n self.remove(*self.mobjects)\n self.renderer.clear_screen()\n self.renderer.num_plays = 0\n return True\n self.tear_down()\n # We have to reset these settings in case of multiple renders.\n self.renderer.scene_finished(self)\n\n # Show info only if animations are rendered or to get image\n if (\n self.renderer.num_plays\n or config[\"format\"] == \"png\"\n or config[\"save_last_frame\"]\n ):\n logger.info(\n f\"Rendered {str(self)}\\nPlayed {self.renderer.num_plays} animations\",\n )\n\n # If preview open up the render after rendering.\n if preview:\n config[\"preview\"] = True\n\n if config[\"preview\"] or config[\"show_in_file_browser\"]:\n open_media_file(self.renderer.file_writer)\n\n def setup(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are commonly subclassed, and have some common setup\n involved before the construct method is called.\n \"\"\"\n pass\n\n def tear_down(self):\n \"\"\"\n This is meant to be implemented by any scenes which\n are commonly subclassed, and have some common method\n to be invoked before the scene ends.\n \"\"\"\n pass\n\n def construct(self):\n \"\"\"Add content to the Scene.\n\n From within :meth:`Scene.construct`, display mobjects on screen by calling\n :meth:`Scene.add` and remove them from screen by calling :meth:`Scene.remove`.\n All mobjects currently on screen are kept in :attr:`Scene.mobjects`. Play\n animations by calling :meth:`Scene.play`.\n\n Notes\n -----\n Initialization code should go in :meth:`Scene.setup`. Termination code should\n go in :meth:`Scene.tear_down`.\n\n Examples\n --------\n A typical manim script includes a class derived from :class:`Scene` with an\n overridden :meth:`Scene.contruct` method:\n\n .. 
code-block:: python\n\n class MyScene(Scene):\n def construct(self):\n self.play(Write(Text(\"Hello World!\")))\n\n See Also\n --------\n :meth:`Scene.setup`\n :meth:`Scene.render`\n :meth:`Scene.tear_down`\n\n \"\"\"\n pass # To be implemented in subclasses\n\n def next_section(\n self,\n name: str = \"unnamed\",\n type: str = DefaultSectionType.NORMAL,\n skip_animations: bool = False,\n ) -> None:\n \"\"\"Create separation here; the last section gets finished and a new one gets created.\n ``skip_animations`` skips the rendering of all animations in this section.\n Refer to :doc:`the documentation</tutorials/a_deeper_look>` on how to use sections.\n \"\"\"\n self.renderer.file_writer.next_section(name, type, skip_animations)\n\n def __str__(self):\n return self.__class__.__name__\n\n def get_attrs(self, *keys):\n \"\"\"\n Gets attributes of a scene given the attribute's identifier/name.\n\n Parameters\n ----------\n *keys : str\n Name(s) of the argument(s) to return the attribute of.\n\n Returns\n -------\n list\n List of attributes of the passed identifiers.\n \"\"\"\n return [getattr(self, key) for key in keys]\n\n def update_mobjects(self, dt):\n \"\"\"\n Begins updating all mobjects in the Scene.\n\n Parameters\n ----------\n dt: int or float\n Change in time between updates. Defaults (mostly) to 1/frames_per_second\n \"\"\"\n for mobject in self.mobjects:\n mobject.update(dt)\n\n def update_meshes(self, dt):\n for obj in self.meshes:\n for mesh in obj.get_family():\n mesh.update(dt)\n\n def update_self(self, dt):\n for func in self.updaters:\n func(dt)\n\n def should_update_mobjects(self) -> bool:\n \"\"\"\n Returns True if the mobjects of this scene should be updated.\n\n In particular, this checks whether\n\n - the :attr:`always_update_mobjects` attribute of :class:`.Scene`\n is set to ``True``,\n - the :class:`.Scene` itself has time-based updaters attached,\n - any mobject in this :class:`.Scene` has time-based updaters attached.\n\n This is only called when a single Wait animation is played.\n \"\"\"\n wait_animation = self.animations[0]\n if wait_animation.is_static_wait is None:\n should_update = (\n self.always_update_mobjects\n or self.updaters\n or any(\n [\n mob.has_time_based_updater()\n for mob in self.get_mobject_family_members()\n ],\n )\n )\n wait_animation.is_static_wait = not should_update\n return not wait_animation.is_static_wait\n\n def get_top_level_mobjects(self):\n \"\"\"\n Returns all mobjects which are not submobjects.\n\n Returns\n -------\n list\n List of top level mobjects.\n \"\"\"\n # Return only those which are not in the family\n # of another mobject from the scene\n families = [m.get_family() for m in self.mobjects]\n\n def is_top_level(mobject):\n num_families = sum((mobject in family) for family in families)\n return num_families == 1\n\n return list(filter(is_top_level, self.mobjects))\n\n def get_mobject_family_members(self):\n \"\"\"\n Returns list of family-members of all mobjects in scene.\n If a Circle() and a VGroup(Rectangle(),Triangle()) were added,\n it returns not only the Circle(), Rectangle() and Triangle(), but\n also the VGroup() object.\n\n Returns\n -------\n list\n List of mobject family members.\n \"\"\"\n if config.renderer == \"opengl\":\n family_members = []\n for mob in self.mobjects:\n family_members.extend(mob.get_family())\n return family_members\n else:\n return extract_mobject_family_members(\n self.mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n )\n\n def add(self, *mobjects):\n \"\"\"\n Mobjects will be 
displayed, from background to\n foreground in the order with which they are added.\n\n Parameters\n ---------\n *mobjects : Mobject\n Mobjects to add.\n\n Returns\n -------\n Scene\n The same scene after adding the Mobjects in.\n\n \"\"\"\n if config.renderer == \"opengl\":\n new_mobjects = []\n new_meshes = []\n for mobject_or_mesh in mobjects:\n if isinstance(mobject_or_mesh, Object3D):\n new_meshes.append(mobject_or_mesh)\n else:\n new_mobjects.append(mobject_or_mesh)\n self.remove(*new_mobjects)\n self.mobjects += new_mobjects\n self.remove(*new_meshes)\n self.meshes += new_meshes\n else:\n mobjects = [*mobjects, *self.foreground_mobjects]\n self.restructure_mobjects(to_remove=mobjects)\n self.mobjects += mobjects\n if self.moving_mobjects is not None:\n self.restructure_mobjects(\n to_remove=mobjects,\n mobject_list_name=\"moving_mobjects\",\n )\n self.moving_mobjects += mobjects\n return self\n\n def add_mobjects_from_animations(self, animations):\n curr_mobjects = self.get_mobject_family_members()\n for animation in animations:\n if animation.is_introducer():\n continue\n # Anything animated that's not already in the\n # scene gets added to the scene\n mob = animation.mobject\n if mob is not None and mob not in curr_mobjects:\n self.add(mob)\n curr_mobjects += mob.get_family()\n\n def remove(self, *mobjects):\n \"\"\"\n Removes mobjects in the passed list of mobjects\n from the scene and the foreground, by removing them\n from \"mobjects\" and \"foreground_mobjects\"\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobjects to remove.\n \"\"\"\n if config.renderer == \"opengl\":\n mobjects_to_remove = []\n meshes_to_remove = set()\n for mobject_or_mesh in mobjects:\n if isinstance(mobject_or_mesh, Object3D):\n meshes_to_remove.add(mobject_or_mesh)\n else:\n mobjects_to_remove.append(mobject_or_mesh)\n self.mobjects = restructure_list_to_exclude_certain_family_members(\n self.mobjects,\n mobjects_to_remove,\n )\n self.meshes = list(\n filter(lambda mesh: mesh not in set(meshes_to_remove), self.meshes),\n )\n return self\n else:\n for list_name in \"mobjects\", \"foreground_mobjects\":\n self.restructure_mobjects(mobjects, list_name, False)\n return self\n\n def add_updater(self, func):\n self.updaters.append(func)\n\n def remove_updater(self, func):\n self.updaters = [f for f in self.updaters if f is not func]\n\n def restructure_mobjects(\n self,\n to_remove,\n mobject_list_name=\"mobjects\",\n extract_families=True,\n ):\n \"\"\"\n tl:wr\n If your scene has a Group(), and you removed a mobject from the Group,\n this dissolves the group and puts the rest of the mobjects directly\n in self.mobjects or self.foreground_mobjects.\n\n In cases where the scene contains a group, e.g. Group(m1, m2, m3), but one\n of its submobjects is removed, e.g. scene.remove(m1), the list of mobjects\n will be edited to contain other submobjects, but not m1, e.g. 
it will now\n insert m2 and m3 to where the group once was.\n\n Parameters\n ----------\n to_remove : Mobject\n The Mobject to remove.\n\n mobject_list_name : str, optional\n The list of mobjects (\"mobjects\", \"foreground_mobjects\" etc) to remove from.\n\n extract_families : bool, optional\n Whether the mobject's families should be recursively extracted.\n\n Returns\n -------\n Scene\n The Scene mobject with restructured Mobjects.\n \"\"\"\n if extract_families:\n to_remove = extract_mobject_family_members(\n to_remove,\n use_z_index=self.renderer.camera.use_z_index,\n )\n _list = getattr(self, mobject_list_name)\n new_list = self.get_restructured_mobject_list(_list, to_remove)\n setattr(self, mobject_list_name, new_list)\n return self\n\n def get_restructured_mobject_list(self, mobjects, to_remove):\n \"\"\"\n Given a list of mobjects and a list of mobjects to be removed, this\n filters out the removable mobjects from the list of mobjects.\n\n Parameters\n ----------\n\n mobjects : list\n The Mobjects to check.\n\n to_remove : list\n The list of mobjects to remove.\n\n Returns\n -------\n list\n The list of mobjects with the mobjects to remove removed.\n \"\"\"\n\n new_mobjects = []\n\n def add_safe_mobjects_from_list(list_to_examine, set_to_remove):\n for mob in list_to_examine:\n if mob in set_to_remove:\n continue\n intersect = set_to_remove.intersection(mob.get_family())\n if intersect:\n add_safe_mobjects_from_list(mob.submobjects, intersect)\n else:\n new_mobjects.append(mob)\n\n add_safe_mobjects_from_list(mobjects, set(to_remove))\n return new_mobjects\n\n # TODO, remove this, and calls to this\n def add_foreground_mobjects(self, *mobjects):\n \"\"\"\n Adds mobjects to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n *mobjects : Mobject\n The Mobjects to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects added.\n \"\"\"\n self.foreground_mobjects = list_update(self.foreground_mobjects, mobjects)\n self.add(*mobjects)\n return self\n\n def add_foreground_mobject(self, mobject):\n \"\"\"\n Adds a single mobject to the foreground, and internally to the list\n foreground_mobjects, and mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The Mobject to add to the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject added.\n \"\"\"\n return self.add_foreground_mobjects(mobject)\n\n def remove_foreground_mobjects(self, *to_remove):\n \"\"\"\n Removes mobjects from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n *to_remove : Mobject\n The mobject(s) to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobjects removed.\n \"\"\"\n self.restructure_mobjects(to_remove, \"foreground_mobjects\")\n return self\n\n def remove_foreground_mobject(self, mobject):\n \"\"\"\n Removes a single mobject from the foreground, and internally from the list\n foreground_mobjects.\n\n Parameters\n ----------\n mobject : Mobject\n The mobject to remove from the foreground.\n\n Returns\n ------\n Scene\n The Scene, with the foreground mobject removed.\n \"\"\"\n return self.remove_foreground_mobjects(mobject)\n\n def bring_to_front(self, *mobjects):\n \"\"\"\n Adds the passed mobjects to the scene again,\n pushing them to he front of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to bring to the front of the scene.\n\n Returns\n ------\n Scene\n The 
Scene, with the mobjects brought to the front\n of the scene.\n \"\"\"\n self.add(*mobjects)\n return self\n\n def bring_to_back(self, *mobjects):\n \"\"\"\n Removes the mobject from the scene and\n adds them to the back of the scene.\n\n Parameters\n ----------\n *mobjects : Mobject\n The mobject(s) to push to the back of the scene.\n\n Returns\n ------\n Scene\n The Scene, with the mobjects pushed to the back\n of the scene.\n \"\"\"\n self.remove(*mobjects)\n self.mobjects = list(mobjects) + self.mobjects\n return self\n\n def clear(self):\n \"\"\"\n Removes all mobjects present in self.mobjects\n and self.foreground_mobjects from the scene.\n\n Returns\n ------\n Scene\n The Scene, with all of its mobjects in\n self.mobjects and self.foreground_mobjects\n removed.\n \"\"\"\n self.mobjects = []\n self.foreground_mobjects = []\n return self\n\n def get_moving_mobjects(self, *animations):\n \"\"\"\n Gets all moving mobjects in the passed animation(s).\n\n Parameters\n ----------\n *animations : Animation\n The animations to check for moving mobjects.\n\n Returns\n ------\n list\n The list of mobjects that could be moving in\n the Animation(s)\n \"\"\"\n # Go through mobjects from start to end, and\n # as soon as there's one that needs updating of\n # some kind per frame, return the list from that\n # point forward.\n animation_mobjects = [anim.mobject for anim in animations]\n mobjects = self.get_mobject_family_members()\n for i, mob in enumerate(mobjects):\n update_possibilities = [\n mob in animation_mobjects,\n len(mob.get_family_updaters()) > 0,\n mob in self.foreground_mobjects,\n ]\n if any(update_possibilities):\n return mobjects[i:]\n return []\n\n def get_moving_and_static_mobjects(self, animations):\n all_mobjects = list_update(self.mobjects, self.foreground_mobjects)\n all_mobject_families = extract_mobject_family_members(\n all_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n only_those_with_points=True,\n )\n moving_mobjects = self.get_moving_mobjects(*animations)\n all_moving_mobject_families = extract_mobject_family_members(\n moving_mobjects,\n use_z_index=self.renderer.camera.use_z_index,\n )\n static_mobjects = list_difference_update(\n all_mobject_families,\n all_moving_mobject_families,\n )\n return all_moving_mobject_families, static_mobjects\n\n def compile_animations(self, *args, **kwargs):\n \"\"\"\n Creates _MethodAnimations from any _AnimationBuilders and updates animation\n kwargs with kwargs passed to play().\n\n Parameters\n ----------\n *args : Tuple[:class:`Animation`]\n Animations to be played.\n **kwargs\n Configuration for the call to play().\n\n Returns\n -------\n Tuple[:class:`Animation`]\n Animations to be played.\n \"\"\"\n animations = []\n for arg in args:\n try:\n animations.append(prepare_animation(arg))\n except TypeError:\n if inspect.ismethod(arg):\n raise TypeError(\n \"Passing Mobject methods to Scene.play is no longer\"\n \" supported. 
Use Mobject.animate instead.\",\n )\n else:\n raise TypeError(\n f\"Unexpected argument {arg} passed to Scene.play().\",\n )\n\n for animation in animations:\n for k, v in kwargs.items():\n setattr(animation, k, v)\n\n return animations\n\n def _get_animation_time_progression(self, animations, duration):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Uses :func:`~.get_time_progression` to obtain a\n CommandLine ProgressBar whose ``fill_time`` is\n dependent on the qualities of the passed Animation,\n\n Parameters\n ----------\n animations : List[:class:`~.Animation`, ...]\n The list of animations to get\n the time progression for.\n\n duration : int or float\n duration of wait time\n\n Returns\n -------\n time_progression\n The CommandLine Progress Bar.\n \"\"\"\n if len(animations) == 1 and isinstance(animations[0], Wait):\n stop_condition = animations[0].stop_condition\n if stop_condition is not None:\n time_progression = self.get_time_progression(\n duration,\n f\"Waiting for {stop_condition.__name__}\",\n n_iterations=-1, # So it doesn't show % progress\n override_skip_animations=True,\n )\n else:\n time_progression = self.get_time_progression(\n duration,\n f\"Waiting {self.renderer.num_plays}\",\n )\n else:\n time_progression = self.get_time_progression(\n duration,\n \"\".join(\n [\n f\"Animation {self.renderer.num_plays}: \",\n str(animations[0]),\n (\", etc.\" if len(animations) > 1 else \"\"),\n ],\n ),\n )\n return time_progression\n\n def get_time_progression(\n self,\n run_time,\n description,\n n_iterations=None,\n override_skip_animations=False,\n ):\n \"\"\"\n You will hardly use this when making your own animations.\n This method is for Manim's internal use.\n\n Returns a CommandLine ProgressBar whose ``fill_time``\n is dependent on the ``run_time`` of an animation,\n the iterations to perform in that animation\n and a bool saying whether or not to consider\n the skipped animations.\n\n Parameters\n ----------\n run_time : float\n The ``run_time`` of the animation.\n\n n_iterations : int, optional\n The number of iterations in the animation.\n\n override_skip_animations : bool, optional\n Whether or not to show skipped animations in the progress bar.\n\n Returns\n -------\n time_progression\n The CommandLine Progress Bar.\n \"\"\"\n if self.renderer.skip_animations and not override_skip_animations:\n times = [run_time]\n else:\n step = 1 / config[\"frame_rate\"]\n times = np.arange(0, run_time, step)\n time_progression = tqdm(\n times,\n desc=description,\n total=n_iterations,\n leave=config[\"progress_bar\"] == \"leave\",\n ascii=True if platform.system() == \"Windows\" else None,\n disable=config[\"progress_bar\"] == \"none\",\n )\n return time_progression\n\n def get_run_time(self, animations):\n \"\"\"\n Gets the total run time for a list of animations.\n\n Parameters\n ----------\n animations : List[:class:`Animation`, ...]\n A list of the animations whose total\n ``run_time`` is to be calculated.\n\n Returns\n -------\n float\n The total ``run_time`` of all of the animations in the list.\n \"\"\"\n\n if len(animations) == 1 and isinstance(animations[0], Wait):\n if animations[0].stop_condition is not None:\n return 0\n else:\n return animations[0].duration\n\n else:\n return np.max([animation.run_time for animation in animations])\n\n def play(\n self,\n *args,\n subcaption=None,\n subcaption_duration=None,\n subcaption_offset=0,\n **kwargs,\n ):\n r\"\"\"Plays an animation in this scene.\n\n 
Parameters\n ----------\n\n args\n Animations to be played.\n subcaption\n The content of the external subcaption that should\n be added during the animation.\n subcaption_duration\n The duration for which the specified subcaption is\n added. If ``None`` (the default), the run time of the\n animation is taken.\n subcaption_offset\n An offset (in seconds) for the start time of the\n added subcaption.\n kwargs\n All other keywords are passed to the renderer.\n\n \"\"\"\n start_time = self.renderer.time\n self.renderer.play(self, *args, **kwargs)\n run_time = self.renderer.time - start_time\n if subcaption:\n if subcaption_duration is None:\n subcaption_duration = run_time\n # The start of the subcaption needs to be offset by the\n # run_time of the animation because it is added after\n # the animation has already been played (and Scene.renderer.time\n # has already been updated).\n self.add_subcaption(\n content=subcaption,\n duration=subcaption_duration,\n offset=-run_time + subcaption_offset,\n )\n\n def wait(\n self,\n duration: float = DEFAULT_WAIT_TIME,\n stop_condition: Callable[[], bool] | None = None,\n frozen_frame: bool | None = None,\n ):\n \"\"\"Plays a \"no operation\" animation.\n\n Parameters\n ----------\n duration\n The run time of the animation.\n stop_condition\n A function without positional arguments that is evaluated every time\n a frame is rendered. The animation only stops when the return value\n of the function is truthy. Overrides any value passed to ``duration``.\n frozen_frame\n If True, updater functions are not evaluated, and the animation outputs\n a frozen frame. If False, updater functions are called and frames\n are rendered as usual. If None (the default), the scene tries to\n determine whether or not the frame is frozen on its own.\n\n See also\n --------\n :class:`.Wait`, :meth:`.should_mobjects_update`\n \"\"\"\n self.play(\n Wait(\n run_time=duration,\n stop_condition=stop_condition,\n frozen_frame=frozen_frame,\n )\n )\n\n def pause(self, duration: float = DEFAULT_WAIT_TIME):\n \"\"\"Pauses the scene (i.e., displays a frozen frame).\n\n This is an alias for :meth:`.wait` with ``frozen_frame``\n set to ``True``.\n\n Parameters\n ----------\n duration\n The duration of the pause.\n\n See also\n --------\n :meth:`.wait`, :class:`.Wait`\n \"\"\"\n self.wait(duration=duration, frozen_frame=True)\n\n def wait_until(self, stop_condition, max_time=60):\n \"\"\"\n Like a wrapper for wait().\n You pass a function that determines whether to continue waiting,\n and a max wait time if that is never fulfilled.\n\n Parameters\n ----------\n stop_condition : function\n The function whose boolean return value determines whether to continue waiting\n\n max_time : int or float, optional\n The maximum wait time in seconds, if the stop_condition is never fulfilled.\n \"\"\"\n self.wait(max_time, stop_condition=stop_condition)\n\n def compile_animation_data(self, *animations: Animation, **play_kwargs):\n \"\"\"Given a list of animations, compile the corresponding\n static and moving mobjects, and gather the animation durations.\n\n This also begins the animations.\n\n Parameters\n ----------\n skip_rendering : bool, optional\n Whether the rendering should be skipped, by default False\n\n Returns\n -------\n self, None\n None if there is nothing to play, or self otherwise.\n \"\"\"\n # NOTE TODO : returns statement of this method are wrong. 
It should return nothing, as it makes a little sense to get any information from this method.\n # The return are kept to keep webgl renderer from breaking.\n if len(animations) == 0:\n raise ValueError(\"Called Scene.play with no animations\")\n\n self.animations = self.compile_animations(*animations, **play_kwargs)\n self.add_mobjects_from_animations(self.animations)\n\n self.last_t = 0\n self.stop_condition = None\n self.moving_mobjects = []\n self.static_mobjects = []\n\n if len(self.animations) == 1 and isinstance(self.animations[0], Wait):\n if self.should_update_mobjects():\n self.update_mobjects(dt=0) # Any problems with this?\n self.stop_condition = self.animations[0].stop_condition\n else:\n self.duration = self.animations[0].duration\n # Static image logic when the wait is static is done by the renderer, not here.\n self.animations[0].is_static_wait = True\n return None\n elif config.renderer != \"opengl\":\n # Paint all non-moving objects onto the screen, so they don't\n # have to be rendered every frame\n (\n self.moving_mobjects,\n self.static_mobjects,\n ) = self.get_moving_and_static_mobjects(self.animations)\n self.duration = self.get_run_time(self.animations)\n return self\n\n def begin_animations(self) -> None:\n \"\"\"Start the animations of the scene.\"\"\"\n for animation in self.animations:\n animation._setup_scene(self)\n animation.begin()\n\n def is_current_animation_frozen_frame(self) -> bool:\n \"\"\"Returns whether the current animation produces a static frame (generally a Wait).\"\"\"\n return (\n isinstance(self.animations[0], Wait)\n and len(self.animations) == 1\n and self.animations[0].is_static_wait\n )\n\n def play_internal(self, skip_rendering=False):\n \"\"\"\n This method is used to prep the animations for rendering,\n apply the arguments and parameters required to them,\n render them, and write them to the video file.\n\n Parameters\n ----------\n args\n Animation or mobject with mobject method and params\n kwargs\n named parameters affecting what was passed in ``args``,\n e.g. 
``run_time``, ``lag_ratio`` and so on.\n \"\"\"\n self.duration = self.get_run_time(self.animations)\n self.time_progression = self._get_animation_time_progression(\n self.animations,\n self.duration,\n )\n for t in self.time_progression:\n self.update_to_time(t)\n if not skip_rendering and not self.skip_animation_preview:\n self.renderer.render(self, t, self.moving_mobjects)\n if self.stop_condition is not None and self.stop_condition():\n self.time_progression.close()\n break\n\n for animation in self.animations:\n animation.finish()\n animation.clean_up_from_scene(self)\n if not self.renderer.skip_animations:\n self.update_mobjects(0)\n self.renderer.static_image = None\n # Closing the progress bar at the end of the play.\n self.time_progression.close()\n\n def check_interactive_embed_is_valid(self):\n if config[\"force_window\"]:\n return True\n if self.skip_animation_preview:\n logger.warning(\n \"Disabling interactive embed as 'skip_animation_preview' is enabled\",\n )\n return False\n elif config[\"write_to_movie\"]:\n logger.warning(\"Disabling interactive embed as 'write_to_movie' is enabled\")\n return False\n elif config[\"format\"]:\n logger.warning(\n \"Disabling interactive embed as '--format' is set as \"\n + config[\"format\"],\n )\n return False\n elif not self.renderer.window:\n logger.warning(\"Disabling interactive embed as no window was created\")\n return False\n elif config.dry_run:\n logger.warning(\"Disabling interactive embed as dry_run is enabled\")\n return False\n return True\n\n def interactive_embed(self):\n \"\"\"\n Like embed(), but allows for screen interaction.\n \"\"\"\n if not self.check_interactive_embed_is_valid():\n return\n self.interactive_mode = True\n\n def ipython(shell, namespace):\n import manim.opengl\n\n def load_module_into_namespace(module, namespace):\n for name in dir(module):\n namespace[name] = getattr(module, name)\n\n load_module_into_namespace(manim, namespace)\n load_module_into_namespace(manim.opengl, namespace)\n\n def embedded_rerun(*args, **kwargs):\n self.queue.put((\"rerun_keyboard\", args, kwargs))\n shell.exiter()\n\n namespace[\"rerun\"] = embedded_rerun\n\n shell(local_ns=namespace)\n self.queue.put((\"exit_keyboard\", [], {}))\n\n def get_embedded_method(method_name):\n return lambda *args, **kwargs: self.queue.put((method_name, args, kwargs))\n\n local_namespace = inspect.currentframe().f_back.f_locals\n for method in (\"play\", \"wait\", \"add\", \"remove\"):\n embedded_method = get_embedded_method(method)\n # Allow for calling scene methods without prepending 'self.'.\n local_namespace[method] = embedded_method\n\n from IPython.terminal.embed import InteractiveShellEmbed\n from traitlets.config import Config\n\n cfg = Config()\n cfg.TerminalInteractiveShell.confirm_exit = False\n shell = InteractiveShellEmbed(config=cfg)\n\n keyboard_thread = threading.Thread(\n target=ipython,\n args=(shell, local_namespace),\n )\n # run as daemon to kill thread when main thread exits\n if not shell.pt_app:\n keyboard_thread.daemon = True\n keyboard_thread.start()\n\n if self.dearpygui_imported and config[\"enable_gui\"]:\n if not dpg.is_dearpygui_running():\n gui_thread = threading.Thread(\n target=configure_pygui,\n args=(self.renderer, self.widgets),\n kwargs={\"update\": False},\n )\n gui_thread.start()\n else:\n configure_pygui(self.renderer, self.widgets, update=True)\n\n self.camera.model_matrix = self.camera.default_model_matrix\n\n self.interact(shell, keyboard_thread)\n\n def interact(self, shell, keyboard_thread):\n 
event_handler = RerunSceneHandler(self.queue)\n file_observer = Observer()\n file_observer.schedule(event_handler, config[\"input_file\"], recursive=True)\n file_observer.start()\n\n self.quit_interaction = False\n keyboard_thread_needs_join = shell.pt_app is not None\n assert self.queue.qsize() == 0\n\n last_time = time.time()\n while not (self.renderer.window.is_closing or self.quit_interaction):\n if not self.queue.empty():\n tup = self.queue.get_nowait()\n if tup[0].startswith(\"rerun\"):\n # Intentionally skip calling join() on the file thread to save time.\n if not tup[0].endswith(\"keyboard\"):\n if shell.pt_app:\n shell.pt_app.app.exit(exception=EOFError)\n file_observer.unschedule_all()\n raise RerunSceneException\n keyboard_thread.join()\n\n kwargs = tup[2]\n if \"from_animation_number\" in kwargs:\n config[\"from_animation_number\"] = kwargs[\n \"from_animation_number\"\n ]\n # # TODO: This option only makes sense if interactive_embed() is run at the\n # # end of a scene by default.\n # if \"upto_animation_number\" in kwargs:\n # config[\"upto_animation_number\"] = kwargs[\n # \"upto_animation_number\"\n # ]\n\n keyboard_thread.join()\n file_observer.unschedule_all()\n raise RerunSceneException\n elif tup[0].startswith(\"exit\"):\n # Intentionally skip calling join() on the file thread to save time.\n if not tup[0].endswith(\"keyboard\") and shell.pt_app:\n shell.pt_app.app.exit(exception=EOFError)\n keyboard_thread.join()\n # Remove exit_keyboard from the queue if necessary.\n while self.queue.qsize() > 0:\n self.queue.get()\n keyboard_thread_needs_join = False\n break\n else:\n method, args, kwargs = tup\n getattr(self, method)(*args, **kwargs)\n else:\n self.renderer.animation_start_time = 0\n dt = time.time() - last_time\n last_time = time.time()\n self.renderer.render(self, dt, self.moving_mobjects)\n self.update_mobjects(dt)\n self.update_meshes(dt)\n self.update_self(dt)\n\n # Join the keyboard thread if necessary.\n if shell is not None and keyboard_thread_needs_join:\n shell.pt_app.app.exit(exception=EOFError)\n keyboard_thread.join()\n # Remove exit_keyboard from the queue if necessary.\n while self.queue.qsize() > 0:\n self.queue.get()\n\n file_observer.stop()\n file_observer.join()\n\n if self.dearpygui_imported and config[\"enable_gui\"]:\n dpg.stop_dearpygui()\n\n if self.renderer.window.is_closing:\n self.renderer.window.destroy()\n\n def embed(self):\n if not config[\"preview\"]:\n logger.warning(\"Called embed() while no preview window is available.\")\n return\n if config[\"write_to_movie\"]:\n logger.warning(\"embed() is skipped while writing to a file.\")\n return\n\n self.renderer.animation_start_time = 0\n self.renderer.render(self, -1, self.moving_mobjects)\n\n # Configure IPython shell.\n from IPython.terminal.embed import InteractiveShellEmbed\n\n shell = InteractiveShellEmbed()\n\n # Have the frame update after each command\n shell.events.register(\n \"post_run_cell\",\n lambda *a, **kw: self.renderer.render(self, -1, self.moving_mobjects),\n )\n\n # Use the locals of the caller as the local namespace\n # once embedded, and add a few custom shortcuts.\n local_ns = inspect.currentframe().f_back.f_locals\n # local_ns[\"touch\"] = self.interact\n for method in (\n \"play\",\n \"wait\",\n \"add\",\n \"remove\",\n \"interact\",\n # \"clear\",\n # \"save_state\",\n # \"restore\",\n ):\n local_ns[method] = getattr(self, method)\n shell(local_ns=local_ns, stack_depth=2)\n\n # End scene when exiting an embed.\n raise Exception(\"Exiting scene.\")\n\n def 
update_to_time(self, t):\n dt = t - self.last_t\n self.last_t = t\n for animation in self.animations:\n animation.update_mobjects(dt)\n alpha = t / animation.run_time\n animation.interpolate(alpha)\n self.update_mobjects(dt)\n self.update_meshes(dt)\n self.update_self(dt)\n\n def add_subcaption(\n self, content: str, duration: float = 1, offset: float = 0\n ) -> None:\n r\"\"\"Adds an entry in the corresponding subcaption file\n at the current time stamp.\n\n The current time stamp is obtained from ``Scene.renderer.time``.\n\n Parameters\n ----------\n\n content\n The subcaption content.\n duration\n The duration (in seconds) for which the subcaption is shown.\n offset\n This offset (in seconds) is added to the starting time stamp\n of the subcaption.\n\n Examples\n --------\n\n This example illustrates both possibilities for adding\n subcaptions to Manimations::\n\n class SubcaptionExample(Scene):\n def construct(self):\n square = Square()\n circle = Circle()\n\n # first option: via the add_subcaption method\n self.add_subcaption(\"Hello square!\", duration=1)\n self.play(Create(square))\n\n # second option: within the call to Scene.play\n self.play(\n Transform(square, circle),\n subcaption=\"The square transforms.\"\n )\n\n \"\"\"\n subtitle = srt.Subtitle(\n index=len(self.renderer.file_writer.subcaptions),\n content=content,\n start=datetime.timedelta(seconds=self.renderer.time + offset),\n end=datetime.timedelta(seconds=self.renderer.time + offset + duration),\n )\n self.renderer.file_writer.subcaptions.append(subtitle)\n\n def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):\n \"\"\"\n This method is used to add a sound to the animation.\n\n Parameters\n ----------\n\n sound_file : str\n The path to the sound file.\n time_offset : int,float, optional\n The offset in the sound file after which\n the sound can be played.\n gain : float\n Amplification of the sound.\n\n Examples\n --------\n .. manim:: SoundExample\n\n class SoundExample(Scene):\n # Source of sound under Creative Commons 0 License. 
https://freesound.org/people/Druminfected/sounds/250551/\n def construct(self):\n dot = Dot().set_color(GREEN)\n self.add_sound(\"click.wav\")\n self.add(dot)\n self.wait()\n self.add_sound(\"click.wav\")\n dot.set_color(BLUE)\n self.wait()\n self.add_sound(\"click.wav\")\n dot.set_color(RED)\n self.wait()\n\n Download the resource for the previous example `here <https://github.com/ManimCommunity/manim/blob/main/docs/source/_static/click.wav>`_ .\n \"\"\"\n if self.renderer.skip_animations:\n return\n time = self.renderer.time + time_offset\n self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs)\n\n def on_mouse_motion(self, point, d_point):\n self.mouse_point.move_to(point)\n if SHIFT_VALUE in self.renderer.pressed_keys:\n shift = -d_point\n shift[0] *= self.camera.get_width() / 2\n shift[1] *= self.camera.get_height() / 2\n transform = self.camera.inverse_rotation_matrix\n shift = np.dot(np.transpose(transform), shift)\n self.camera.shift(shift)\n\n def on_mouse_scroll(self, point, offset):\n if not config.use_projection_stroke_shaders:\n factor = 1 + np.arctan(-2.1 * offset[1])\n self.camera.scale(factor, about_point=self.camera_target)\n self.mouse_scroll_orbit_controls(point, offset)\n\n def on_key_press(self, symbol, modifiers):\n try:\n char = chr(symbol)\n except OverflowError:\n logger.warning(\"The value of the pressed key is too large.\")\n return\n\n if char == \"r\":\n self.camera.to_default_state()\n self.camera_target = np.array([0, 0, 0], dtype=np.float32)\n elif char == \"q\":\n self.quit_interaction = True\n else:\n if char in self.key_to_function_map:\n self.key_to_function_map[char]()\n\n def on_key_release(self, symbol, modifiers):\n pass\n\n def on_mouse_drag(self, point, d_point, buttons, modifiers):\n self.mouse_drag_point.move_to(point)\n if buttons == 1:\n self.camera.increment_theta(-d_point[0])\n self.camera.increment_phi(d_point[1])\n elif buttons == 4:\n camera_x_axis = self.camera.model_matrix[:3, 0]\n horizontal_shift_vector = -d_point[0] * camera_x_axis\n vertical_shift_vector = -d_point[1] * np.cross(OUT, camera_x_axis)\n total_shift_vector = horizontal_shift_vector + vertical_shift_vector\n self.camera.shift(1.1 * total_shift_vector)\n\n self.mouse_drag_orbit_controls(point, d_point, buttons, modifiers)\n\n def mouse_scroll_orbit_controls(self, point, offset):\n camera_to_target = self.camera_target - self.camera.get_position()\n camera_to_target *= np.sign(offset[1])\n shift_vector = 0.01 * camera_to_target\n self.camera.model_matrix = (\n opengl.translation_matrix(*shift_vector) @ self.camera.model_matrix\n )\n\n def mouse_drag_orbit_controls(self, point, d_point, buttons, modifiers):\n # Left click drag.\n if buttons == 1:\n # Translate to target the origin and rotate around the z axis.\n self.camera.model_matrix = (\n opengl.rotation_matrix(z=-d_point[0])\n @ opengl.translation_matrix(*-self.camera_target)\n @ self.camera.model_matrix\n )\n\n # Rotation off of the z axis.\n camera_position = self.camera.get_position()\n camera_y_axis = self.camera.model_matrix[:3, 1]\n axis_of_rotation = space_ops.normalize(\n np.cross(camera_y_axis, camera_position),\n )\n rotation_matrix = space_ops.rotation_matrix(\n d_point[1],\n axis_of_rotation,\n homogeneous=True,\n )\n\n maximum_polar_angle = self.camera.maximum_polar_angle\n minimum_polar_angle = self.camera.minimum_polar_angle\n\n potential_camera_model_matrix = rotation_matrix @ self.camera.model_matrix\n potential_camera_location = potential_camera_model_matrix[:3, 3]\n 
potential_camera_y_axis = potential_camera_model_matrix[:3, 1]\n sign = (\n np.sign(potential_camera_y_axis[2])\n if potential_camera_y_axis[2] != 0\n else 1\n )\n potential_polar_angle = sign * np.arccos(\n potential_camera_location[2]\n / np.linalg.norm(potential_camera_location),\n )\n if minimum_polar_angle <= potential_polar_angle <= maximum_polar_angle:\n self.camera.model_matrix = potential_camera_model_matrix\n else:\n sign = np.sign(camera_y_axis[2]) if camera_y_axis[2] != 0 else 1\n current_polar_angle = sign * np.arccos(\n camera_position[2] / np.linalg.norm(camera_position),\n )\n if potential_polar_angle > maximum_polar_angle:\n polar_angle_delta = maximum_polar_angle - current_polar_angle\n else:\n polar_angle_delta = minimum_polar_angle - current_polar_angle\n rotation_matrix = space_ops.rotation_matrix(\n polar_angle_delta,\n axis_of_rotation,\n homogeneous=True,\n )\n self.camera.model_matrix = rotation_matrix @ self.camera.model_matrix\n\n # Translate to target the original target.\n self.camera.model_matrix = (\n opengl.translation_matrix(*self.camera_target)\n @ self.camera.model_matrix\n )\n # Right click drag.\n elif buttons == 4:\n camera_x_axis = self.camera.model_matrix[:3, 0]\n horizontal_shift_vector = -d_point[0] * camera_x_axis\n vertical_shift_vector = -d_point[1] * np.cross(OUT, camera_x_axis)\n total_shift_vector = horizontal_shift_vector + vertical_shift_vector\n\n self.camera.model_matrix = (\n opengl.translation_matrix(*total_shift_vector)\n @ self.camera.model_matrix\n )\n self.camera_target += total_shift_vector\n\n def set_key_function(self, char, func):\n self.key_to_function_map[char] = func\n\n def on_mouse_press(self, point, button, modifiers):\n for func in self.mouse_press_callbacks:\n func()\n"
] | [
[
"numpy.sign",
"numpy.transpose",
"numpy.arctan",
"numpy.random.seed",
"numpy.cross",
"numpy.arange",
"numpy.max",
"numpy.array",
"numpy.linalg.norm"
]
] |
MichaelAquilina/numpy | [
"6e8b869d52ec5a1242df69bcd9323a4b0947933b"
] | [
"numpy/distutils/exec_command.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nexec_command\n\nImplements exec_command function that is (almost) equivalent to\ncommands.getstatusoutput function but on NT, DOS systems the\nreturned status is actually correct (though, the returned status\nvalues may be different by a factor). In addition, exec_command\ntakes keyword arguments for (re-)defining environment variables.\n\nProvides functions:\n\n exec_command --- execute command in a specified directory and\n in the modified environment.\n find_executable --- locate a command using info from environment\n variable PATH. Equivalent to posix `which`\n command.\n\nAuthor: Pearu Peterson <[email protected]>\nCreated: 11 January 2003\n\nRequires: Python 2.x\n\nSuccesfully tested on:\n\n======== ============ =================================================\nos.name sys.platform comments\n======== ============ =================================================\nposix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3\n PyCrust 0.9.3, Idle 1.0.2\nposix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2\nposix sunos5 SunOS 5.9, Python 2.2, 2.3.2\nposix darwin Darwin 7.2.0, Python 2.3\nnt win32 Windows Me\n Python 2.3(EE), Idle 1.0, PyCrust 0.7.2\n Python 2.1.1 Idle 0.8\nnt win32 Windows 98, Python 2.1.1. Idle 0.8\nnt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests\n fail i.e. redefining environment variables may\n not work. FIXED: don't use cygwin echo!\n Comment: also `cmd /c echo` will not work\n but redefining environment variables do work.\nposix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)\nnt win32 Windows XP, Python 2.3.3\n======== ============ =================================================\n\nKnown bugs:\n\n* Tests, that send messages to stderr, fail when executed from MSYS prompt\n because the messages are lost at some point.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n__all__ = ['exec_command', 'find_executable']\n\nimport os\nimport sys\nimport shlex\n\nfrom numpy.distutils.misc_util import is_sequence, make_temp_file\nfrom numpy.distutils import log\nfrom numpy.distutils.compat import get_exception\n\nfrom numpy.compat import open_latin1\n\ndef temp_file_name():\n fo, name = make_temp_file()\n fo.close()\n return name\n\ndef get_pythonexe():\n pythonexe = sys.executable\n if os.name in ['nt', 'dos']:\n fdir, fn = os.path.split(pythonexe)\n fn = fn.upper().replace('PYTHONW', 'PYTHON')\n pythonexe = os.path.join(fdir, fn)\n assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)\n return pythonexe\n\ndef find_executable(exe, path=None, _cache={}):\n \"\"\"Return full path of a executable or None.\n\n Symbolic links are not followed.\n \"\"\"\n key = exe, path\n try:\n return _cache[key]\n except KeyError:\n pass\n log.debug('find_executable(%r)' % exe)\n orig_exe = exe\n\n if path is None:\n path = os.environ.get('PATH', os.defpath)\n if os.name=='posix':\n realpath = os.path.realpath\n else:\n realpath = lambda a:a\n\n if exe.startswith('\"'):\n exe = exe[1:-1]\n\n suffixes = ['']\n if os.name in ['nt', 'dos', 'os2']:\n fn, ext = os.path.splitext(exe)\n extra_suffixes = ['.exe', '.com', '.bat']\n if ext.lower() not in extra_suffixes:\n suffixes = extra_suffixes\n\n if os.path.isabs(exe):\n paths = ['']\n else:\n paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]\n\n for path in paths:\n fn = os.path.join(path, exe)\n for s in suffixes:\n f_ext = fn+s\n if not os.path.islink(f_ext):\n f_ext = realpath(f_ext)\n if os.path.isfile(f_ext) and os.access(f_ext, 
os.X_OK):\n log.info('Found executable %s' % f_ext)\n _cache[key] = f_ext\n return f_ext\n\n log.warn('Could not locate executable %s' % orig_exe)\n return None\n\n############################################################\n\ndef _preserve_environment( names ):\n log.debug('_preserve_environment(%r)' % (names))\n env = {}\n for name in names:\n env[name] = os.environ.get(name)\n return env\n\ndef _update_environment( **env ):\n log.debug('_update_environment(...)')\n for name, value in env.items():\n os.environ[name] = value or ''\n\ndef _supports_fileno(stream):\n \"\"\"\n Returns True if 'stream' supports the file descriptor and allows fileno().\n \"\"\"\n if hasattr(stream, 'fileno'):\n try:\n r = stream.fileno()\n return True\n except IOError:\n return False\n else:\n return False\n\ndef exec_command(command, execute_in='', use_shell=None, use_tee=None,\n _with_python = 1, **env ):\n \"\"\"\n Return (status,output) of executed command.\n\n Parameters\n ----------\n command : str\n A concatenated string of executable and arguments.\n execute_in : str\n Before running command ``cd execute_in`` and after ``cd -``.\n use_shell : {bool, None}, optional\n If True, execute ``sh -c command``. Default None (True)\n use_tee : {bool, None}, optional\n If True use tee. Default None (True)\n\n\n Returns\n -------\n res : str\n Both stdout and stderr messages.\n\n Notes\n -----\n On NT, DOS systems the returned status is correct for external commands.\n Wild cards will not work for non-posix systems or when use_shell=0.\n\n \"\"\"\n log.debug('exec_command(%r,%s)' % (command,\\\n ','.join(['%s=%r'%kv for kv in env.items()])))\n\n if use_tee is None:\n use_tee = os.name=='posix'\n if use_shell is None:\n use_shell = os.name=='posix'\n execute_in = os.path.abspath(execute_in)\n oldcwd = os.path.abspath(os.getcwd())\n\n if __name__[-12:] == 'exec_command':\n exec_dir = os.path.dirname(os.path.abspath(__file__))\n elif os.path.isfile('exec_command.py'):\n exec_dir = os.path.abspath('.')\n else:\n exec_dir = os.path.abspath(sys.argv[0])\n if os.path.isfile(exec_dir):\n exec_dir = os.path.dirname(exec_dir)\n\n if oldcwd!=execute_in:\n os.chdir(execute_in)\n log.debug('New cwd: %s' % execute_in)\n else:\n log.debug('Retaining cwd: %s' % oldcwd)\n\n oldenv = _preserve_environment( list(env.keys()) )\n _update_environment( **env )\n\n try:\n # _exec_command is robust but slow, it relies on\n # usable sys.std*.fileno() descriptors. 
If they\n # are bad (like in win32 Idle, PyCrust environments)\n # then _exec_command_python (even slower)\n # will be used as a last resort.\n #\n # _exec_command_posix uses os.system and is faster\n # but not on all platforms os.system will return\n # a correct status.\n if (_with_python and _supports_fileno(sys.stdout) and\n sys.stdout.fileno() == -1):\n st = _exec_command_python(command,\n exec_command_dir = exec_dir,\n **env)\n elif os.name=='posix':\n st = _exec_command_posix(command,\n use_shell=use_shell,\n use_tee=use_tee,\n **env)\n else:\n st = _exec_command(command, use_shell=use_shell,\n use_tee=use_tee,**env)\n finally:\n if oldcwd!=execute_in:\n os.chdir(oldcwd)\n log.debug('Restored cwd to %s' % oldcwd)\n _update_environment(**oldenv)\n\n return st\n\ndef _exec_command_posix( command,\n use_shell = None,\n use_tee = None,\n **env ):\n log.debug('_exec_command_posix(...)')\n\n if is_sequence(command):\n command_str = ' '.join(list(command))\n else:\n command_str = command\n\n tmpfile = temp_file_name()\n stsfile = None\n if use_tee:\n stsfile = temp_file_name()\n filter = ''\n if use_tee == 2:\n filter = r'| tr -cd \"\\n\" | tr \"\\n\" \".\"; echo'\n command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\\\n % (command_str, stsfile, tmpfile, filter)\n else:\n stsfile = temp_file_name()\n command_posix = '( %s ; echo $? > %s ) > %s 2>&1'\\\n % (command_str, stsfile, tmpfile)\n #command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile)\n\n log.debug('Running os.system(%r)' % (command_posix))\n status = os.system(command_posix)\n\n if use_tee:\n if status:\n # if command_tee fails then fall back to robust exec_command\n log.warn('_exec_command_posix failed (status=%s)' % status)\n return _exec_command(command, use_shell=use_shell, **env)\n\n if stsfile is not None:\n f = open_latin1(stsfile, 'r')\n status_text = f.read()\n status = int(status_text)\n f.close()\n os.remove(stsfile)\n\n f = open_latin1(tmpfile, 'r')\n text = f.read()\n f.close()\n os.remove(tmpfile)\n\n if text[-1:]=='\\n':\n text = text[:-1]\n\n return status, text\n\n\ndef _exec_command_python(command,\n exec_command_dir='', **env):\n log.debug('_exec_command_python(...)')\n\n python_exe = get_pythonexe()\n cmdfile = temp_file_name()\n stsfile = temp_file_name()\n outfile = temp_file_name()\n\n f = open(cmdfile, 'w')\n f.write('import os\\n')\n f.write('import sys\\n')\n f.write('sys.path.insert(0,%r)\\n' % (exec_command_dir))\n f.write('from exec_command import exec_command\\n')\n f.write('del sys.path[0]\\n')\n f.write('cmd = %r\\n' % command)\n f.write('os.environ = %r\\n' % (os.environ))\n f.write('s,o = exec_command(cmd, _with_python=0, **%r)\\n' % (env))\n f.write('f=open(%r,\"w\")\\nf.write(str(s))\\nf.close()\\n' % (stsfile))\n f.write('f=open(%r,\"w\")\\nf.write(o)\\nf.close()\\n' % (outfile))\n f.close()\n\n cmd = '%s %s' % (python_exe, cmdfile)\n status = os.system(cmd)\n if status:\n raise RuntimeError(\"%r failed\" % (cmd,))\n os.remove(cmdfile)\n\n f = open_latin1(stsfile, 'r')\n status = int(f.read())\n f.close()\n os.remove(stsfile)\n\n f = open_latin1(outfile, 'r')\n text = f.read()\n f.close()\n os.remove(outfile)\n\n return status, text\n\ndef quote_arg(arg):\n if arg[0]!='\"' and ' ' in arg:\n return '\"%s\"' % arg\n return arg\n\ndef _exec_command( command, use_shell=None, use_tee = None, **env ):\n log.debug('_exec_command(...)')\n\n if use_shell is None:\n use_shell = os.name=='posix'\n if use_tee is None:\n use_tee = os.name=='posix'\n using_command = 0\n if use_shell:\n # We use 
shell (unless use_shell==0) so that wildcards can be\n # used.\n sh = os.environ.get('SHELL', '/bin/sh')\n if is_sequence(command):\n argv = [sh, '-c', ' '.join(list(command))]\n else:\n argv = [sh, '-c', command]\n else:\n # On NT, DOS we avoid using command.com as it's exit status is\n # not related to the exit status of a command.\n if is_sequence(command):\n argv = command[:]\n else:\n argv = shlex.split(command)\n\n if hasattr(os, 'spawnvpe'):\n spawn_command = os.spawnvpe\n else:\n spawn_command = os.spawnve\n argv[0] = find_executable(argv[0]) or argv[0]\n if not os.path.isfile(argv[0]):\n log.warn('Executable %s does not exist' % (argv[0]))\n if os.name in ['nt', 'dos']:\n # argv[0] might be internal command\n argv = [os.environ['COMSPEC'], '/C'] + argv\n using_command = 1\n\n _so_has_fileno = _supports_fileno(sys.stdout)\n _se_has_fileno = _supports_fileno(sys.stderr)\n so_flush = sys.stdout.flush\n se_flush = sys.stderr.flush\n if _so_has_fileno:\n so_fileno = sys.stdout.fileno()\n so_dup = os.dup(so_fileno)\n if _se_has_fileno:\n se_fileno = sys.stderr.fileno()\n se_dup = os.dup(se_fileno)\n\n outfile = temp_file_name()\n fout = open(outfile, 'w')\n if using_command:\n errfile = temp_file_name()\n ferr = open(errfile, 'w')\n\n log.debug('Running %s(%s,%r,%r,os.environ)' \\\n % (spawn_command.__name__, os.P_WAIT, argv[0], argv))\n\n if sys.version_info[0] >= 3 and os.name == 'nt':\n # Pre-encode os.environ, discarding un-encodable entries,\n # to avoid it failing during encoding as part of spawn. Failure\n # is possible if the environment contains entries that are not\n # encoded using the system codepage as windows expects.\n #\n # This is not necessary on unix, where os.environ is encoded\n # using the surrogateescape error handler and decoded using\n # it as part of spawn.\n encoded_environ = {}\n for k, v in os.environ.items():\n try:\n encoded_environ[k.encode(sys.getfilesystemencoding())] = v.encode(\n sys.getfilesystemencoding())\n except UnicodeEncodeError:\n log.debug(\"ignoring un-encodable env entry %s\", k)\n else:\n encoded_environ = os.environ\n\n argv0 = argv[0]\n if not using_command:\n argv[0] = quote_arg(argv0)\n\n so_flush()\n se_flush()\n if _so_has_fileno:\n os.dup2(fout.fileno(), so_fileno)\n\n if _se_has_fileno:\n if using_command:\n #XXX: disabled for now as it does not work from cmd under win32.\n # Tests fail on msys\n os.dup2(ferr.fileno(), se_fileno)\n else:\n os.dup2(fout.fileno(), se_fileno)\n try:\n status = spawn_command(os.P_WAIT, argv0, argv, encoded_environ)\n except Exception:\n errmess = str(get_exception())\n status = 999\n sys.stderr.write('%s: %s'%(errmess, argv[0]))\n\n so_flush()\n se_flush()\n if _so_has_fileno:\n os.dup2(so_dup, so_fileno)\n if _se_has_fileno:\n os.dup2(se_dup, se_fileno)\n\n fout.close()\n fout = open_latin1(outfile, 'r')\n text = fout.read()\n fout.close()\n os.remove(outfile)\n\n if using_command:\n ferr.close()\n ferr = open_latin1(errfile, 'r')\n errmess = ferr.read()\n ferr.close()\n os.remove(errfile)\n if errmess and not status:\n # Not sure how to handle the case where errmess\n # contains only warning messages and that should\n # not be treated as errors.\n #status = 998\n if text:\n text = text + '\\n'\n #text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess)\n text = text + errmess\n print (errmess)\n if text[-1:]=='\\n':\n text = text[:-1]\n if status is None:\n status = 0\n\n if use_tee:\n print (text)\n\n return status, text\n\n\ndef test_nt(**kws):\n pythonexe = get_pythonexe()\n echo = 
find_executable('echo')\n using_cygwin_echo = echo != 'echo'\n if using_cygwin_echo:\n log.warn('Using cygwin echo in win32 environment is not supported')\n\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'AAA\\',\\'\\')\"')\n assert s==0 and o=='', (s, o)\n\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'AAA\\')\"',\n AAA='Tere')\n assert s==0 and o=='Tere', (s, o)\n\n os.environ['BBB'] = 'Hi'\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'BBB\\',\\'\\')\"')\n assert s==0 and o=='Hi', (s, o)\n\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'BBB\\',\\'\\')\"',\n BBB='Hey')\n assert s==0 and o=='Hey', (s, o)\n\n s, o=exec_command(pythonexe\\\n +' -c \"import os;print os.environ.get(\\'BBB\\',\\'\\')\"')\n assert s==0 and o=='Hi', (s, o)\n elif 0:\n s, o=exec_command('echo Hello')\n assert s==0 and o=='Hello', (s, o)\n\n s, o=exec_command('echo a%AAA%')\n assert s==0 and o=='a', (s, o)\n\n s, o=exec_command('echo a%AAA%', AAA='Tere')\n assert s==0 and o=='aTere', (s, o)\n\n os.environ['BBB'] = 'Hi'\n s, o=exec_command('echo a%BBB%')\n assert s==0 and o=='aHi', (s, o)\n\n s, o=exec_command('echo a%BBB%', BBB='Hey')\n assert s==0 and o=='aHey', (s, o)\n s, o=exec_command('echo a%BBB%')\n assert s==0 and o=='aHi', (s, o)\n\n s, o=exec_command('this_is_not_a_command')\n assert s and o!='', (s, o)\n\n s, o=exec_command('type not_existing_file')\n assert s and o!='', (s, o)\n\n s, o=exec_command('echo path=%path%')\n assert s==0 and o!='', (s, o)\n\n s, o=exec_command('%s -c \"import sys;sys.stderr.write(sys.platform)\"' \\\n % pythonexe)\n assert s==0 and o=='win32', (s, o)\n\n s, o=exec_command('%s -c \"raise \\'Ignore me.\\'\"' % pythonexe)\n assert s==1 and o, (s, o)\n\n s, o=exec_command('%s -c \"import sys;sys.stderr.write(\\'0\\');sys.stderr.write(\\'1\\');sys.stderr.write(\\'2\\')\"'\\\n % pythonexe)\n assert s==0 and o=='012', (s, o)\n\n s, o=exec_command('%s -c \"import sys;sys.exit(15)\"' % pythonexe)\n assert s==15 and o=='', (s, o)\n\n s, o=exec_command('%s -c \"print \\'Heipa\\'\"' % pythonexe)\n assert s==0 and o=='Heipa', (s, o)\n\n print ('ok')\n\ndef test_posix(**kws):\n s, o=exec_command(\"echo Hello\",**kws)\n assert s==0 and o=='Hello', (s, o)\n\n s, o=exec_command('echo $AAA',**kws)\n assert s==0 and o=='', (s, o)\n\n s, o=exec_command('echo \"$AAA\"',AAA='Tere',**kws)\n assert s==0 and o=='Tere', (s, o)\n\n\n s, o=exec_command('echo \"$AAA\"',**kws)\n assert s==0 and o=='', (s, o)\n\n os.environ['BBB'] = 'Hi'\n s, o=exec_command('echo \"$BBB\"',**kws)\n assert s==0 and o=='Hi', (s, o)\n\n s, o=exec_command('echo \"$BBB\"',BBB='Hey',**kws)\n assert s==0 and o=='Hey', (s, o)\n\n s, o=exec_command('echo \"$BBB\"',**kws)\n assert s==0 and o=='Hi', (s, o)\n\n\n s, o=exec_command('this_is_not_a_command',**kws)\n assert s!=0 and o!='', (s, o)\n\n s, o=exec_command('echo path=$PATH',**kws)\n assert s==0 and o!='', (s, o)\n\n s, o=exec_command('python -c \"import sys,os;sys.stderr.write(os.name)\"',**kws)\n assert s==0 and o=='posix', (s, o)\n\n s, o=exec_command('python -c \"raise \\'Ignore me.\\'\"',**kws)\n assert s==1 and o, (s, o)\n\n s, o=exec_command('python -c \"import sys;sys.stderr.write(\\'0\\');sys.stderr.write(\\'1\\');sys.stderr.write(\\'2\\')\"',**kws)\n assert s==0 and o=='012', (s, o)\n\n s, o=exec_command('python -c \"import sys;sys.exit(15)\"',**kws)\n assert s==15 and o=='', (s, o)\n\n s, o=exec_command('python -c \"print \\'Heipa\\'\"',**kws)\n assert 
s==0 and o=='Heipa', (s, o)\n\n print ('ok')\n\ndef test_execute_in(**kws):\n pythonexe = get_pythonexe()\n tmpfile = temp_file_name()\n fn = os.path.basename(tmpfile)\n tmpdir = os.path.dirname(tmpfile)\n f = open(tmpfile, 'w')\n f.write('Hello')\n f.close()\n\n s, o = exec_command('%s -c \"print \\'Ignore the following IOError:\\','\\\n 'open(%r,\\'r\\')\"' % (pythonexe, fn),**kws)\n assert s and o!='', (s, o)\n s, o = exec_command('%s -c \"print open(%r,\\'r\\').read()\"' % (pythonexe, fn),\n execute_in = tmpdir,**kws)\n assert s==0 and o=='Hello', (s, o)\n os.remove(tmpfile)\n print ('ok')\n\ndef test_svn(**kws):\n s, o = exec_command(['svn', 'status'],**kws)\n assert s, (s, o)\n print ('svn ok')\n\ndef test_cl(**kws):\n if os.name=='nt':\n s, o = exec_command(['cl', '/V'],**kws)\n assert s, (s, o)\n print ('cl ok')\n\nif os.name=='posix':\n test = test_posix\nelif os.name in ['nt', 'dos']:\n test = test_nt\nelse:\n raise NotImplementedError('exec_command tests for ', os.name)\n\n############################################################\n\nif __name__ == \"__main__\":\n\n test(use_tee=0)\n test(use_tee=1)\n test_execute_in(use_tee=0)\n test_execute_in(use_tee=1)\n test_svn(use_tee=1)\n test_cl(use_tee=1)\n"
] | [
[
"numpy.distutils.compat.get_exception",
"numpy.distutils.misc_util.is_sequence",
"numpy.distutils.log.debug",
"numpy.distutils.misc_util.make_temp_file",
"numpy.distutils.log.warn",
"numpy.distutils.log.info",
"numpy.compat.open_latin1"
]
] |
yihui8776/TensorRT-DETR | [
"1f32e9a2f98e26ec5b2376f9a2695193887430fb"
] | [
"trt_int8_quant.py"
] | [
"\n#\n# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ~~~Medcare AI Lab~~~\n\nimport os\nimport glob\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport argparse\n\nimport torchvision.transforms as T\nfrom trt_util.common import build_engine_onnx_v2\nfrom trt_util.calibrator import Calibrator\n\n\ntransform = T.Compose([\n T.Resize((800,800)), # PIL.Image.BILINEAR\n T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\ndef preprocess(img_pil):\n img = transform(img_pil).cpu().numpy()\n return img\n\n# def preprocess(img_pil):\n# img = img_pil.resize((800, 800),Image.BILINEAR)\n# img = np.array(img).astype(np.float32) / 255.0\n# img = img.transpose(2,0,1)\n# # print(img.shape)\n# img = (img - np.array([ [[0.485]], [[0.456]], [[0.406]] ]))/np.array([ [[0.229]], [[0.224]], [[0.225]] ])\n\n# # img = img.transpose(1,2,0)\n# # img = np.expand_dims(img, axis=0)\n# img = np.ascontiguousarray(img)\n# img = np.array(img).astype(np.float32)\n# print(img.shape)\n# return img\n\nclass DataLoader:\n def __init__(self,calib_img_dir=\"./calib_train_image\",batch=1,batch_size=32):\n self.index = 0\n self.length = batch\n self.batch_size = batch_size\n self.calib_img_dir = calib_img_dir\n # self.img_list = [i.strip() for i in open('calib.txt').readlines()]\n self.img_list = glob.glob(os.path.join(self.calib_img_dir, \"*.jpg\"))\n print(f'[INFO] found all {len(self.img_list)} images to calib.')\n assert len(self.img_list) > self.batch_size * self.length, '[Error] {} must contains more than {} images to calib'.format(self.calib_img_dir,self.batch_size * self.length)\n self.calibration_data = np.zeros((self.batch_size,3,800,800), dtype=np.float32)\n\n def reset(self):\n self.index = 0\n\n def next_batch(self):\n if self.index < self.length:\n for i in range(self.batch_size):\n assert os.path.exists(self.img_list[i + self.index * self.batch_size]), '[Error] Batch not found!!'\n # data preprocess\n img = Image.open(self.img_list[i + self.index * self.batch_size])\n # img = cv2.imread(self.img_list[i + self.index * self.batch_size])\n img = preprocess(img)\n # self.calibration_data[i] = np.ones((3,800,800), dtype=np.float32)\n self.calibration_data[i] = img\n\n self.index += 1\n return np.ascontiguousarray(self.calibration_data, dtype=np.float32)\n else:\n return np.array([])\n\n def __len__(self):\n return self.length\n\ndef main(onnx_model_path,engine_model_path,calib_img_dir,calibration_table,fp16,int8,batch,batch_size):\n\n fp16_mode = fp16 \n int8_mode = int8 \n\n # calibration\n calibration_stream = DataLoader(calib_img_dir=calib_img_dir,batch=batch,batch_size=batch_size)\n engine_model_path = engine_model_path\n\n # 校准产生校准表,但是我们并没有生成校准表!\n engine_fixed = build_engine_onnx_v2(onnx_model_path, engine_model_path, fp16_mode=fp16_mode, \n int8_mode=int8_mode,max_batch_size=batch_size, calibration_stream=calibration_stream, \n calibration_table_path=calibration_table, save_engine=True)\n assert 
engine_fixed, '[Error] Broken engine_fixed'\n print('[INFO] ====> onnx to tensorrt completed !\\n')\n \nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='TensorRT INT8 Quant.')\n parser.add_argument('--onnx_model_path', type= str , default='./detr_sim.onnx', help='ONNX Model Path') \n parser.add_argument('--engine_model_path', type= str , default='./detr_int8.plan', help='TensorRT Engine File')\n parser.add_argument('--calib_img_dir', type= str , default='./calib_train_image', help='Calib Image Dir') \n parser.add_argument('--calibration_table', type=str,default=\"./detr_calibration.cache\", help='Calibration Table')\n parser.add_argument('--batch', type=int,default=958, help='Number of Batch: [total_image/batch_size]') # 30660/batch_size\n parser.add_argument('--batch_size', type=int,default=32, help='Batch Size')\n\n parser.add_argument('--fp16', action=\"store_true\", help='Open FP16 Mode')\n parser.add_argument('--int8', action=\"store_true\", help='Open INT8 Mode')\n\n args = parser.parse_args()\n main(args.onnx_model_path,args.engine_model_path,args.calib_img_dir,args.calibration_table,\n args.fp16,args.int8,args.batch,args.batch_size)\n\n # python3 trt_int8_quant.py --onnx_model_path ./detr_sim.onnx --engine_model_path ./detr_int8.plan --calib_img_dir ./calib_train_image --calibration_table ./detr_calibration.cache --batch 1 --int8\n\n"
] | [
[
"numpy.array",
"numpy.ascontiguousarray",
"numpy.zeros"
]
] |
robrkerr/tensorflow-models | [
"3656a07e89be134c2bc333c60a6c709e475024a6"
] | [
"differential_privacy/dp_sgd/dp_optimizer/utils.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utils for building and training NN models.\n\"\"\"\nfrom __future__ import division\n\nimport math\n\nimport numpy\nimport tensorflow as tf\n\n\nclass LayerParameters(object):\n \"\"\"class that defines a non-conv layer.\"\"\"\n def __init__(self):\n self.name = \"\"\n self.num_units = 0\n self._with_bias = False\n self.relu = False\n self.gradient_l2norm_bound = 0.0\n self.bias_gradient_l2norm_bound = 0.0\n self.trainable = True\n self.weight_decay = 0.0\n\n\nclass ConvParameters(object):\n \"\"\"class that defines a conv layer.\"\"\"\n def __init__(self):\n self.patch_size = 5\n self.stride = 1\n self.in_channels = 1\n self.out_channels = 0\n self.with_bias = True\n self.relu = True\n self.max_pool = True\n self.max_pool_size = 2\n self.max_pool_stride = 2\n self.trainable = False\n self.in_size = 28\n self.name = \"\"\n self.num_outputs = 0\n self.bias_stddev = 0.1\n\n\n# Parameters for a layered neural network.\nclass NetworkParameters(object):\n \"\"\"class that define the overall model structure.\"\"\"\n def __init__(self):\n self.input_size = 0\n self.projection_type = 'NONE' # NONE, RANDOM, PCA\n self.projection_dimensions = 0\n self.default_gradient_l2norm_bound = 0.0\n self.layer_parameters = [] # List of LayerParameters\n self.conv_parameters = [] # List of ConvParameters\n\n\ndef GetTensorOpName(x):\n \"\"\"Get the name of the op that created a tensor.\n\n Useful for naming related tensors, as ':' in name field of op is not permitted\n\n Args:\n x: the input tensor.\n Returns:\n the name of the op.\n \"\"\"\n\n t = x.name.rsplit(\":\", 1)\n if len(t) == 1:\n return x.name\n else:\n return t[0]\n\n\ndef BuildNetwork(inputs, network_parameters):\n \"\"\"Build a network using the given parameters.\n\n Args:\n inputs: a Tensor of floats containing the input data.\n network_parameters: NetworkParameters object\n that describes the parameters for the network.\n Returns:\n output, training_parameters: where the outputs (a tensor) is the output\n of the network, and training_parameters (a dictionary that maps the\n name of each variable to a dictionary of parameters) is the parameters\n used during training.\n \"\"\"\n\n training_parameters = {}\n num_inputs = network_parameters.input_size\n outputs = inputs\n projection = None\n\n # First apply convolutions, if needed\n for conv_param in network_parameters.conv_parameters:\n outputs = tf.reshape(\n outputs,\n [-1, conv_param.in_size, conv_param.in_size,\n conv_param.in_channels])\n conv_weights_name = \"%s_conv_weight\" % (conv_param.name)\n conv_bias_name = \"%s_conv_bias\" % (conv_param.name)\n conv_std_dev = 1.0 / (conv_param.patch_size\n * math.sqrt(conv_param.in_channels))\n conv_weights = tf.Variable(\n tf.truncated_normal([conv_param.patch_size,\n conv_param.patch_size,\n conv_param.in_channels,\n conv_param.out_channels],\n 
stddev=conv_std_dev),\n trainable=conv_param.trainable,\n name=conv_weights_name)\n conv_bias = tf.Variable(\n tf.truncated_normal([conv_param.out_channels],\n stddev=conv_param.bias_stddev),\n trainable=conv_param.trainable,\n name=conv_bias_name)\n training_parameters[conv_weights_name] = {}\n training_parameters[conv_bias_name] = {}\n conv = tf.nn.conv2d(outputs, conv_weights,\n strides=[1, conv_param.stride,\n conv_param.stride, 1],\n padding=\"SAME\")\n relud = tf.nn.relu(conv + conv_bias)\n mpd = tf.nn.max_pool(relud, ksize=[1,\n conv_param.max_pool_size,\n conv_param.max_pool_size, 1],\n strides=[1, conv_param.max_pool_stride,\n conv_param.max_pool_stride, 1],\n padding=\"SAME\")\n outputs = mpd\n num_inputs = conv_param.num_outputs\n # this should equal\n # in_size * in_size * out_channels / (stride * max_pool_stride)\n\n # once all the convs are done, reshape to make it flat\n outputs = tf.reshape(outputs, [-1, num_inputs])\n\n # Now project, if needed\n if network_parameters.projection_type is not \"NONE\":\n projection = tf.Variable(tf.truncated_normal(\n [num_inputs, network_parameters.projection_dimensions],\n stddev=1.0 / math.sqrt(num_inputs)), trainable=False, name=\"projection\")\n num_inputs = network_parameters.projection_dimensions\n outputs = tf.matmul(outputs, projection)\n\n # Now apply any other layers\n\n for layer_parameters in network_parameters.layer_parameters:\n num_units = layer_parameters.num_units\n hidden_weights_name = \"%s_weight\" % (layer_parameters.name)\n hidden_weights = tf.Variable(\n tf.truncated_normal([num_inputs, num_units],\n stddev=1.0 / math.sqrt(num_inputs)),\n name=hidden_weights_name, trainable=layer_parameters.trainable)\n training_parameters[hidden_weights_name] = {}\n if layer_parameters.gradient_l2norm_bound:\n training_parameters[hidden_weights_name][\"gradient_l2norm_bound\"] = (\n layer_parameters.gradient_l2norm_bound)\n if layer_parameters.weight_decay:\n training_parameters[hidden_weights_name][\"weight_decay\"] = (\n layer_parameters.weight_decay)\n\n outputs = tf.matmul(outputs, hidden_weights)\n if layer_parameters.with_bias:\n hidden_biases_name = \"%s_bias\" % (layer_parameters.name)\n hidden_biases = tf.Variable(tf.zeros([num_units]),\n name=hidden_biases_name)\n training_parameters[hidden_biases_name] = {}\n if layer_parameters.bias_gradient_l2norm_bound:\n training_parameters[hidden_biases_name][\n \"bias_gradient_l2norm_bound\"] = (\n layer_parameters.bias_gradient_l2norm_bound)\n\n outputs += hidden_biases\n if layer_parameters.relu:\n outputs = tf.nn.relu(outputs)\n # num_inputs for the next layer is num_units in the current layer.\n num_inputs = num_units\n\n return outputs, projection, training_parameters\n\n\ndef VaryRate(start, end, saturate_epochs, epoch):\n \"\"\"Compute a linearly varying number.\n\n Decrease linearly from start to end until epoch saturate_epochs.\n\n Args:\n start: the initial number.\n end: the end number.\n saturate_epochs: after this we do not reduce the number; if less than\n or equal to zero, just return start.\n epoch: the current learning epoch.\n Returns:\n the caculated number.\n \"\"\"\n if saturate_epochs <= 0:\n return start\n\n step = (start - end) / (saturate_epochs - 1)\n if epoch < saturate_epochs:\n return start - step * epoch\n else:\n return end\n\n\ndef BatchClipByL2norm(t, upper_bound, name=None):\n \"\"\"Clip an array of tensors by L2 norm.\n\n Shrink each dimension-0 slice of tensor (for matrix it is each row) such\n that the l2 norm is at most upper_bound. 
Here we clip each row as it\n corresponds to each example in the batch.\n\n Args:\n t: the input tensor.\n upper_bound: the upperbound of the L2 norm.\n name: optional name.\n Returns:\n the clipped tensor.\n \"\"\"\n\n assert upper_bound > 0\n with tf.op_scope([t, upper_bound], name, \"batch_clip_by_l2norm\") as name:\n saved_shape = tf.shape(t)\n batch_size = tf.slice(saved_shape, [0], [1])\n t2 = tf.reshape(t, tf.concat(0, [batch_size, [-1]]))\n upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),\n tf.constant(1.0/upper_bound))\n # Add a small number to avoid divide by 0\n l2norm_inv = tf.rsqrt(tf.reduce_sum(t2 * t2, [1]) + 0.000001)\n scale = tf.minimum(l2norm_inv, upper_bound_inv) * upper_bound\n clipped_t = tf.matmul(tf.diag(scale), t2)\n clipped_t = tf.reshape(clipped_t, saved_shape, name=name)\n return clipped_t\n\n\ndef SoftThreshold(t, threshold_ratio, name=None):\n \"\"\"Soft-threshold a tensor by the mean value.\n\n Softthreshold each dimension-0 vector (for matrix it is each column) by\n the mean of absolute value multiplied by the threshold_ratio factor. Here\n we soft threshold each column as it corresponds to each unit in a layer.\n\n Args:\n t: the input tensor.\n threshold_ratio: the threshold ratio.\n name: the optional name for the returned tensor.\n Returns:\n the thresholded tensor, where each entry is soft-thresholded by\n threshold_ratio times the mean of the aboslute value of each column.\n \"\"\"\n\n assert threshold_ratio >= 0\n with tf.op_scope([t, threshold_ratio], name, \"soft_thresholding\") as name:\n saved_shape = tf.shape(t)\n t2 = tf.reshape(t, tf.concat(0, [tf.slice(saved_shape, [0], [1]), -1]))\n t_abs = tf.abs(t2)\n t_x = tf.sign(t2) * tf.nn.relu(t_abs -\n (tf.reduce_mean(t_abs, [0],\n keep_dims=True) *\n threshold_ratio))\n return tf.reshape(t_x, saved_shape, name=name)\n\n\ndef AddGaussianNoise(t, sigma, name=None):\n \"\"\"Add i.i.d. Gaussian noise (0, sigma^2) to every entry of t.\n\n Args:\n t: the input tensor.\n sigma: the stddev of the Gaussian noise.\n name: optional name.\n Returns:\n the noisy tensor.\n \"\"\"\n\n with tf.op_scope([t, sigma], name, \"add_gaussian_noise\") as name:\n noisy_t = t + tf.random_normal(tf.shape(t), stddev=sigma)\n return noisy_t\n\n\ndef GenerateBinomialTable(m):\n \"\"\"Generate binomial table.\n\n Args:\n m: the size of the table.\n Returns:\n A two dimensional array T where T[i][j] = (i choose j),\n for 0<= i, j <=m.\n \"\"\"\n\n table = numpy.zeros((m + 1, m + 1), dtype=numpy.float64)\n for i in range(m + 1):\n table[i, 0] = 1\n for i in range(1, m + 1):\n for j in range(1, m + 1):\n v = table[i - 1, j] + table[i - 1, j -1]\n assert not math.isnan(v) and not math.isinf(v)\n table[i, j] = v\n return tf.convert_to_tensor(table)\n"
] | [
[
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.abs",
"tensorflow.diag",
"tensorflow.convert_to_tensor",
"tensorflow.slice",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.minimum",
"tensorflow.nn.max_pool",
"tensorflow.sign",
"tensorflow.constant",
"tensorflow.op_scope",
"numpy.zeros",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.truncated_normal",
"tensorflow.reduce_mean",
"tensorflow.nn.conv2d",
"tensorflow.nn.relu"
]
] |
jtpils/compression | [
"b758903c6df5eeafb5d444e8cec85d1b2bc132d3"
] | [
"tensorflow_compression/__init__.py"
] | [
"# Copyright 2018 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Data compression tools.\"\"\"\n\nfrom __future__ import absolute_import as _absolute_import\nfrom __future__ import division as _division\nfrom __future__ import print_function as _print_function\n\ntry:\n import tensorflow as _tensorflow\n _tf_version = [int(v) for v in _tensorflow.version.VERSION.split(\".\")]\n assert _tf_version[0] == 1 and _tf_version[1] >= 14\nexcept (ImportError, AssertionError):\n raise RuntimeError(\"For tensorflow_compression, please install TensorFlow \"\n \"1.14 or above. TensorFlow 2 is not yet supported.\")\n\n\n# pylint: disable=wildcard-import\nfrom tensorflow_compression.python.layers.entropy_models import *\nfrom tensorflow_compression.python.layers.gdn import *\nfrom tensorflow_compression.python.layers.initializers import *\nfrom tensorflow_compression.python.layers.parameterizers import *\nfrom tensorflow_compression.python.layers.signal_conv import *\nfrom tensorflow_compression.python.ops.math_ops import *\nfrom tensorflow_compression.python.ops.padding_ops import *\nfrom tensorflow_compression.python.ops.range_coding_ops import *\nfrom tensorflow_compression.python.ops.spectral_ops import *\n# pylint: enable=wildcard-import\n"
] | [
[
"tensorflow.version.VERSION.split"
]
] |
Kali-Hac/SPC-MGR | [
"3eccceeba97e0dca62132187c6645b98620f3bd1"
] | [
"SPC-MGR.py"
] | [
"\"\"\"\r\nThe SPC-MGR is built based in part on graph attention mechanism (https://arxiv.org/abs/1710.10903),\r\npart on MG-SCR (https://www.ijcai.org/proceedings/2021/0135),\r\nand includes open-source codes provided by\r\nthe project of Graph Attention Network (GAT) at https://github.com/PetarV-/GAT,\r\nand the project of MG-SCR at https://github.com/Kali-Hac/MG-SCR.\r\n\"\"\"\r\n\r\nimport time\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport os, sys\r\nfrom models import GAT as MSRL # (Veličković et al.)\r\nfrom utils import process_L3 as process\r\nfrom utils.faiss_rerank import compute_jaccard_distance\r\nfrom tensorflow.python.layers.core import Dense\r\nfrom sklearn.preprocessing import label_binarize\r\nfrom sklearn.cluster import DBSCAN\r\nimport torch\r\nimport collections\r\nfrom sklearn.metrics import average_precision_score\r\n\r\ndataset = ''\r\nprobe = ''\r\npre_dir = 'ReID_Models/'\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\n\r\nnb_nodes = 20 # number of nodes in joint-scale graph\r\nnhood = 1 # structural relation learning (nhood=1 for neighbor nodes)\r\nfusion_lambda = 1 # collaboration fusion coefficient\r\nft_size = 3 # originial node feature dimension (D)\r\ntime_step = 6 # sequence length (f)\r\n\r\n\r\n# training params\r\nbatch_size = 256\r\nnb_epochs = 100000\r\npatience = 250 # patience for early stopping\r\nhid_units = [8] # numbers of hidden units per each attention head in each layer\r\nMs = [8, 1] # additional entry for the output layer\r\nk1, k2 = 20, 6 # parameters to compute feature distance matrix\r\nresidual = False\r\nnonlinearity = tf.nn.elu\r\n\r\n\r\ntf.app.flags.DEFINE_string('dataset', 'KS20', \"Dataset: IAS, KS20, BIWI, CASIA-B or KGBD\")\r\ntf.app.flags.DEFINE_string('length', '6', \"4, 6, 8, 10 or 12\")\r\ntf.app.flags.DEFINE_string('t', '0.07', \"temperature for contrastive learning\")\r\ntf.app.flags.DEFINE_string('lr', '0.00035', \"learning rate\")\r\ntf.app.flags.DEFINE_string('eps', '0.6', \"distance parameter in DBSCAN\")\r\ntf.app.flags.DEFINE_string('min_samples', '2', \"minimum sample number in DBSCAN\")\r\ntf.app.flags.DEFINE_string('probe', 'probe', \"for testing probe\")\r\ntf.app.flags.DEFINE_string('gpu', '0', \"GPU number\")\r\ntf.app.flags.DEFINE_string('probe_view', '', \"test different views on CASIA B or KS20\")\r\ntf.app.flags.DEFINE_string('gallery_view', '', \"test different views on CASIA B or KS20\")\r\ntf.app.flags.DEFINE_string('struct_only', '0', \"struct_only\")\r\ntf.app.flags.DEFINE_string('m', '8', \"structural relation heads\")\r\ntf.app.flags.DEFINE_string('probe_type', '', \"probe.gallery\")\r\ntf.app.flags.DEFINE_string('patience', '200', \"epochs for early stopping\")\r\ntf.app.flags.DEFINE_string('fusion_lambda', '1', \"collaboration fusion coefficient\")\r\ntf.app.flags.DEFINE_string('S_dataset', '', \"Source Dataset\")\r\ntf.app.flags.DEFINE_string('S_probe', '', \"Source Dataset probe\")\r\ntf.app.flags.DEFINE_string('mode', 'UF', \"Unsupervised Fine-tuning (UF) or Direct Generalization (DG)\")\r\ntf.app.flags.DEFINE_string('evaluate', '0', \"evaluate on the best model\")\r\nFLAGS = tf.app.flags.FLAGS\r\n\r\n\r\n# check parameters\r\nif FLAGS.dataset not in ['IAS', 'KGBD', 'KS20', 'BIWI', 'CASIA_B']:\r\n\traise Exception('Dataset must be IAS, KGBD, KS20, BIWI or CASIA B.')\r\nif not FLAGS.gpu.isdigit() or int(FLAGS.gpu) < 0:\r\n\traise Exception('GPU number must be a positive 
integer.')\r\nif FLAGS.dataset == 'CASIA_B':\r\n\tpass\r\nelse:\r\n\tif FLAGS.length not in ['4', '6', '8', '10', '12']:\r\n\t\traise Exception('Length number must be 4, 6, 8, 10 or 12.')\r\nif FLAGS.probe not in ['probe', 'Walking', 'Still', 'A', 'B']:\r\n\traise Exception('Dataset probe must be \"A\" (for IAS-A), \"B\" (for IAS-B), \"probe\" (for KS20, KGBD).')\r\nif float(FLAGS.fusion_lambda) < 0 or float(FLAGS.fusion_lambda) > 1:\r\n\traise Exception('Multi-Level Graph Fusion coefficient must be not less than 0 or not larger than 1.')\r\nif FLAGS.mode not in ['UF', 'DG']:\r\n\traise Exception('Mode must be UF or DG.')\r\nif FLAGS.mode == 'DG' and FLAGS.S_dataset == '':\r\n\traise Exception('DG mode must set a source dataset.')\r\nif FLAGS.mode == 'UF' and FLAGS.S_dataset != '':\r\n\traise Exception('UF mode does not use a source dataset.')\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = FLAGS.gpu\r\ndataset = FLAGS.dataset\r\n\r\n# optimal paramters\r\nif dataset == 'KGBD':\r\n\tbatch_size = 256\r\n\tFLAGS.lr = '0.00035'\r\n\tFLAGS.min_samples = '4'\r\n\tFLAGS.t = '0.06'\r\nelif dataset == 'CASIA_B':\r\n\tbatch_size = 128\r\n\tFLAGS.lr = '0.00035'\r\n\tFLAGS.min_samples = '2'\r\n\tFLAGS.eps = '0.75'\r\n\tFLAGS.t = '0.075'\r\nelse:\r\n\tbatch_size = 128\r\n\tFLAGS.lr = '0.00035'\r\nif dataset == 'KS20' or dataset == 'IAS':\r\n\tFLAGS.t = '0.08'\r\n\tFLAGS.eps = '0.8'\r\nelif dataset == 'BIWI':\r\n\tFLAGS.t = '0.07'\r\n\r\n\r\neps = float(FLAGS.eps)\r\nmin_samples = int(FLAGS.min_samples)\r\n\r\ntime_step = int(FLAGS.length)\r\nfusion_lambda = float(FLAGS.fusion_lambda)\r\nprobe = FLAGS.probe\r\npatience = int(FLAGS.patience)\r\n\r\n\r\nglobal_att = False\r\nstruct_only = False\r\nP = '8'\r\n\r\n\r\nchange = ''\r\n\r\n\r\n\r\nif FLAGS.probe_type != '':\r\n\tchange += '_CME'\r\nif FLAGS.fusion_lambda != '1':\r\n\tchange = '_lambda_' + FLAGS.fusion_lambda\r\n\r\nif FLAGS.struct_only == '1':\r\n\tstruct_only = True\r\n\r\n\r\nif FLAGS.dataset == 'KGBD':\r\n\tFLAGS.m = '16'\r\nif FLAGS.m != '8':\r\n\tm = FLAGS.m\r\n\tMs = [int(m), 1]\r\n\r\ntry:\r\n\tos.mkdir(pre_dir)\r\nexcept:\r\n\tpass\r\n\r\nif struct_only:\r\n\tpre_dir += '_struct_only'\r\nif P != '8':\r\n\tpre_dir += '_P_' + P\r\n\r\n\r\nif dataset == 'KS20':\r\n\tnb_nodes = 25\r\n\r\nif dataset == 'CASIA_B':\r\n\tnb_nodes = 14\r\n\r\n\r\n\r\nprint('----- Model hyperparams -----')\r\n# print('skeleton_nodes: ' + str(nb_nodes))\r\nprint('seqence_length: ' + str(time_step))\r\nprint('fusion_lambda: ' + str(fusion_lambda))\r\nprint('batch_size: ' + str(batch_size))\r\nprint('lr: ' + str(FLAGS.lr))\r\nprint('temperature: ' + FLAGS.t)\r\nprint('eps: ' + FLAGS.eps)\r\nprint('min_samples: ' + FLAGS.min_samples)\r\nprint('m: ' + FLAGS.m)\r\nprint('fusion_lambda: ' + FLAGS.fusion_lambda)\r\n# print('patience: ' + FLAGS.patience)\r\n\r\nprint('Mode: ' + FLAGS.mode)\r\nprint('Evaluate: ' + FLAGS.evaluate)\r\n\r\nif FLAGS.mode == 'DG':\r\n\tprint('----- Mode Information -----')\r\n\tprint('Source Dataset: ' + FLAGS.S_dataset)\r\n\tprint('Target Dataset: ' + FLAGS.dataset)\r\n\tprint('Target Probe: ' + FLAGS.probe)\r\nelif FLAGS.mode == 'UF':\r\n\tprint('----- Dataset Information -----')\r\n\tprint('Dataset: ' + dataset)\r\n\tif dataset == 'CASIA_B':\r\n\t\tprint('Probe.Gallery: ', FLAGS.probe_type.split('.')[0], FLAGS.probe_type.split('.')[1])\r\n\telse:\r\n\t\tprint('Probe: ' + FLAGS.probe)\r\n\r\n\"\"\"\r\n Obtain training and testing data in part-level, body-scale, and hyper-body-scale.\r\n Generate corresponding adjacent matrix and 
bias.\r\n\"\"\"\r\nif FLAGS.probe_type == '':\r\n\tif FLAGS.probe_view == '' and FLAGS.gallery_view == '':\r\n\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_test_P, X_test_B, X_test_H_B, _, y_test, \\\r\n\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\tprocess.gen_train_data(dataset=dataset, split=probe, time_step=time_step,\r\n\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size, )\r\n\telse:\r\n\t\tif dataset == 'KS20':\r\n\t\t\t_, _, _, _, _, _, _, X_test_P, X_test_B, X_test_H_B, _, y_test, \\\r\n\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\tprocess.gen_train_data(dataset=dataset, split='view_'+FLAGS.probe_view, time_step=time_step,\r\n\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t )\r\n\t\t\tX_train_P_all = []\r\n\t\t\tX_train_B_all = []\r\n\t\t\tX_train_H_B_all = []\r\n\t\t\ty_train_all = []\r\n\t\t\tfor i in range(5):\r\n\t\t\t\tif str(i) not in [FLAGS.probe_view, FLAGS.gallery_view]:\r\n\t\t\t\t\t_, _, _, _, _, _, _, X_train_P, X_train_B, X_train_H_B, _, y_train, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='view_' + str(i), time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\t\t\tX_train_H_B_all.extend(X_train_H_B)\r\n\t\t\t\t\tX_train_P_all.extend(X_train_P)\r\n\t\t\t\t\tX_train_B_all.extend(X_train_B)\r\n\t\t\t\t\ty_train_all.extend(y_train_all)\r\n\t\t\tX_train_P = np.array(X_train_P_all)\r\n\t\t\tX_train_B = np.array(X_train_B_all)\r\n\t\t\tX_train_H_B = np.array(X_train_H_B_all)\r\n\t\t\ty_train = np.array(y_train)\r\n\r\nelse:\r\n\tfrom utils import process_cme_L3 as process\r\n\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_test_P, X_test_B, X_test_H_B, _, y_test, \\\r\n\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\tprocess.gen_train_data(dataset=dataset, split=probe, time_step=time_step,\r\n\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size, PG_type=FLAGS.probe_type.split('.')[0])\r\n\tprint('## [Probe].[Gallery]', FLAGS.probe_type)\r\n\r\n\r\nall_ftr_size = hid_units[0] * (15 + 3)\r\nloaded_graph = tf.Graph()\r\n\r\ncluster_epochs = 15000\r\ndisplay = 20\r\n\r\nif FLAGS.evaluate == '1':\r\n\tFLAGS.S_dataset = FLAGS.dataset\r\n\tFLAGS.S_probe = FLAGS.probe\r\n\tFLAGS.mode = 'DG'\r\n\r\nif FLAGS.mode == 'UF':\r\n\twith tf.Graph().as_default():\r\n\t\twith tf.name_scope('Input'):\r\n\t\t\tP_in = tf.placeholder(dtype=tf.float32, shape=(batch_size * time_step, 10, ft_size))\r\n\t\t\tB_in = tf.placeholder(dtype=tf.float32, shape=(batch_size * time_step, 5, ft_size))\r\n\t\t\tH_B_in = tf.placeholder(dtype=tf.float32, shape=(batch_size * time_step, 3, ft_size))\r\n\t\t\tP_bias_in = tf.placeholder(dtype=tf.float32, shape=(1, 10, 10))\r\n\t\t\tB_bias_in = tf.placeholder(dtype=tf.float32, shape=(1, 5, 5))\r\n\t\t\tH_B_bias_in = tf.placeholder(dtype=tf.float32, shape=(1, 3, 3))\r\n\t\t\tattn_drop = tf.placeholder(dtype=tf.float32, shape=())\r\n\t\t\tffd_drop = tf.placeholder(dtype=tf.float32, shape=())\r\n\t\t\tis_train = tf.placeholder(dtype=tf.bool, shape=())\r\n\t\t\tpseudo_lab = tf.placeholder(dtype=tf.int32, shape=(batch_size,))\r\n\t\t\tcluster_ftr = tf.placeholder(dtype=tf.float32, 
shape=(None, all_ftr_size))\r\n\r\n\t\twith tf.name_scope(\"MG\"), tf.variable_scope(\"MG\", reuse=tf.AUTO_REUSE):\r\n\t\t\tdef SRL(J_in, J_bias_in, nb_nodes):\r\n\t\t\t\tW_h = tf.Variable(tf.random_normal([3, hid_units[-1]]))\r\n\t\t\t\tb_h = tf.Variable(tf.zeros(shape=[hid_units[-1], ]))\r\n\t\t\t\tJ_h = tf.reshape(J_in, [-1, ft_size])\r\n\r\n\t\t\t\tJ_h = tf.matmul(J_h, W_h) + b_h\r\n\t\t\t\tJ_h = tf.reshape(J_h, [batch_size*time_step, nb_nodes, hid_units[-1]])\r\n\t\t\t\tJ_seq_ftr = MSRL.inference(J_h, 0, nb_nodes, is_train,\r\n\t\t\t\t attn_drop, ffd_drop,\r\n\t\t\t\t bias_mat=J_bias_in,\r\n\t\t\t\t hid_units=hid_units, n_heads=Ms,\r\n\t\t\t\t residual=residual, activation=nonlinearity, r_pool=True)\r\n\t\t\t\treturn J_seq_ftr\r\n\r\n\r\n\t\t\tdef FCRL(s1, s2, s1_num, s2_num, hid_in):\r\n\t\t\t\tr_unorm = tf.matmul(s2, tf.transpose(s1, [0, 2, 1]))\r\n\t\t\t\tatt_w = tf.nn.softmax(r_unorm)\r\n\t\t\t\tatt_w = tf.expand_dims(att_w, axis=-1)\r\n\t\t\t\ts1 = tf.reshape(s1, [s1.shape[0], 1, s1.shape[1], hid_in])\r\n\t\t\t\tc_ftr = tf.reduce_sum(att_w * s1, axis=2)\r\n\t\t\t\tc_ftr = tf.reshape(c_ftr, [-1, hid_in])\r\n\t\t\t\tatt_w = tf.reshape(att_w, [-1, s1_num * s2_num])\r\n\t\t\t\treturn r_unorm, c_ftr\r\n\r\n\r\n\t\t\tdef MSC(P_in, B_in, H_B_in, P_bias_in, B_bias_in, H_B_bias_in, hid_in, hid_out):\r\n\t\t\t\th_P_seq_ftr = SRL(J_in=P_in, J_bias_in=P_bias_in, nb_nodes=10)\r\n\t\t\t\th_B_seq_ftr = SRL(J_in=B_in, J_bias_in=B_bias_in, nb_nodes=5)\r\n\t\t\t\th_H_B_seq_ftr = SRL(J_in=H_B_in, J_bias_in=H_B_bias_in, nb_nodes=3)\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, 10, hid_in])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, 5, hid_in])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, 3, hid_in])\r\n\r\n\r\n\t\t\t\tW_cs_23 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\t\t\t\tW_cs_24 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\t\t\t\tW_cs_34 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\r\n\r\n\t\t\t\tW_self_2 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\t\t\t\tW_self_3 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\t\t\t\tW_self_4 = tf.Variable(tf.random_normal([hid_in, hid_out]))\r\n\r\n\t\t\t\tself_a_2, self_r_2 = FCRL(h_P_seq_ftr, h_P_seq_ftr, 10, 10, hid_in)\r\n\t\t\t\tself_a_3, self_r_3 = FCRL(h_B_seq_ftr, h_B_seq_ftr, 5, 5, hid_in)\r\n\t\t\t\tself_a_4, self_r_4 = FCRL(h_H_B_seq_ftr, h_H_B_seq_ftr, 3, 3, hid_in)\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, hid_in])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, hid_in])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, hid_in])\r\n\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, 10, hid_in])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, 5, hid_in])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, 3, hid_in])\r\n\r\n\r\n\t\t\t\ta_23, r_23 = FCRL(h_B_seq_ftr, h_P_seq_ftr, 5, 10, hid_in)\r\n\t\t\t\ta_24, r_24 = FCRL(h_H_B_seq_ftr, h_P_seq_ftr, 3, 10, hid_in)\r\n\t\t\t\ta_34, r_34 = FCRL(h_H_B_seq_ftr, h_B_seq_ftr, 3, 5, hid_in)\r\n\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, hid_in])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, hid_in])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, hid_in])\r\n\r\n\t\t\t\tif not struct_only:\r\n\t\t\t\t\th_P_seq_ftr = h_P_seq_ftr + float(FLAGS.fusion_lambda) * (\r\n\t\t\t\t\t\t\t\ttf.matmul(self_r_2, W_self_2) + tf.matmul(r_23, W_cs_23) + tf.matmul(r_24, W_cs_24))\r\n\t\t\t\t\th_B_seq_ftr = h_B_seq_ftr + float(FLAGS.fusion_lambda) * 
(tf.matmul(self_r_3, W_self_3) + tf.matmul(r_34, W_cs_34))\r\n\t\t\t\t\th_H_B_seq_ftr = h_H_B_seq_ftr + float(FLAGS.fusion_lambda) * (tf.matmul(self_r_4, W_self_4))\r\n\r\n\t\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, 10, hid_out])\r\n\t\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, 5, hid_out])\r\n\t\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, 3, hid_out])\r\n\r\n\t\t\t\treturn h_H_B_seq_ftr, h_B_seq_ftr, h_P_seq_ftr\r\n\r\n\t\t\th_H_B_seq_ftr, h_B_seq_ftr, h_P_seq_ftr = MSC(P_in, B_in, H_B_in, P_bias_in, B_bias_in, H_B_bias_in,\r\n\t\t\t hid_units[-1], hid_units[-1])\r\n\r\n\t\t\th_P_seq_ftr = tf.reshape(h_P_seq_ftr, [-1, hid_units[-1]])\r\n\t\t\th_B_seq_ftr = tf.reshape(h_B_seq_ftr, [-1, hid_units[-1]])\r\n\t\t\th_H_B_seq_ftr = tf.reshape(h_H_B_seq_ftr, [-1, hid_units[-1]])\r\n\r\n\t\t\toptimizer = tf.train.AdamOptimizer(learning_rate=float(FLAGS.lr))\r\n\t\t\tP_encode = tf.reduce_mean(tf.reshape(h_P_seq_ftr, [batch_size, time_step, -1]), axis=1)\r\n\t\t\tB_encode = tf.reduce_mean(tf.reshape(h_B_seq_ftr, [batch_size, time_step, -1]), axis=1)\r\n\t\t\tH_B_encode = tf.reduce_mean(tf.reshape(h_H_B_seq_ftr, [batch_size, time_step, -1]), axis=1)\r\n\r\n\t\t\tP_encode = tf.reshape(P_encode, [batch_size, -1])\r\n\t\t\tB_encode = tf.reshape(B_encode, [batch_size, -1])\r\n\t\t\tH_B_encode = tf.reshape(H_B_encode, [batch_size, -1])\r\n\r\n\t\t\tall_ftr = tf.concat([P_encode, B_encode, H_B_encode], axis=-1)\r\n\t\t\tall_ftr = tf.reshape(all_ftr, [batch_size, -1])\r\n\r\n\t\t\toutput = tf.matmul(all_ftr, tf.transpose(cluster_ftr))\r\n\r\n\t\t\tdef cluster_loss(pseudo_lab, all_ftr, cluster_ftr):\r\n\t\t\t\tall_ftr = tf.nn.l2_normalize(all_ftr, axis=-1)\r\n\t\t\t\tcluster_ftr = tf.nn.l2_normalize(cluster_ftr, axis=-1)\r\n\t\t\t\toutput = tf.matmul(all_ftr, tf.transpose(cluster_ftr))\r\n\t\t\t\toutput /= float(FLAGS.t)\r\n\t\t\t\tloss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=pseudo_lab, logits=output))\r\n\t\t\t\treturn loss\r\n\r\n\r\n\t\t\tdef empty_loss(b):\r\n\t\t\t\treturn tf.zeros([1])\r\n\r\n\r\n\t\t\tcontrastive_loss = tf.cond(tf.reduce_sum(pseudo_lab) > 0,\r\n\t\t\t lambda: cluster_loss(pseudo_lab, all_ftr, cluster_ftr),\r\n\t\t\t lambda: empty_loss(pseudo_lab))\r\n\t\t\tcluster_train_op = optimizer.minimize(contrastive_loss)\r\n\r\n\r\n\r\n\t\tsaver = tf.train.Saver()\r\n\t\tinit_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\r\n\r\n\t\twith tf.Session(config=config) as sess:\r\n\t\t\tsess.run(init_op)\r\n\t\t\tdef train_loader(X_train_P, X_train_B, X_train_H_B, y_train):\r\n\t\t\t\ttr_step = 0\r\n\t\t\t\ttr_size = X_train_P.shape[0]\r\n\t\t\t\ttrain_logits_all = []\r\n\t\t\t\ttrain_labels_all = []\r\n\t\t\t\ttrain_features_all = []\r\n\t\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tX_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\t\tX_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\t\tX_input_H_B = X_train_H_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\t\tlabels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t\t feed_dict={\r\n\t\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t\t B_in: 
X_input_B,\r\n\t\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\t is_train: True,\r\n\t\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\t\ttrain_features_all.extend(all_features.tolist())\r\n\t\t\t\t\ttrain_labels_all.extend(labels.tolist())\r\n\t\t\t\t\ttr_step += 1\r\n\r\n\t\t\t\ttrain_features_all = np.array(train_features_all).astype(np.float32)\r\n\t\t\t\ttrain_features_all = torch.from_numpy(train_features_all)\r\n\t\t\t\treturn train_features_all, train_labels_all\r\n\r\n\r\n\t\t\tdef gal_loader(X_train_P, X_train_B, X_train_H_B, y_train):\r\n\t\t\t\ttr_step = 0\r\n\t\t\t\ttr_size = X_train_P.shape[0]\r\n\t\t\t\tgal_logits_all = []\r\n\t\t\t\tgal_labels_all = []\r\n\t\t\t\tgal_features_all = []\r\n\r\n\t\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tX_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\t\tX_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\t\tX_input_H_B = X_train_H_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\t\tlabels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\r\n\t\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t\t feed_dict={\r\n\t\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\t is_train: True,\r\n\t\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\t\tgal_features_all.extend(all_features.tolist())\r\n\t\t\t\t\tgal_labels_all.extend(labels.tolist())\r\n\t\t\t\t\ttr_step += 1\r\n\r\n\t\t\t\treturn gal_features_all, gal_labels_all\r\n\r\n\r\n\t\t\tdef evaluation():\r\n\t\t\t\tvl_step = 0\r\n\t\t\t\tvl_size = X_test_P.shape[0]\r\n\t\t\t\tpro_labels_all = []\r\n\t\t\t\tpro_features_all = []\r\n\t\t\t\tloaded_graph = tf.get_default_graph()\r\n\t\t\t\twhile vl_step * batch_size < vl_size:\r\n\t\t\t\t\tif (vl_step + 1) * batch_size > vl_size:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tX_input_P = X_test_P[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\t\tX_input_B = X_test_B[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\t\tX_input_H_B = X_test_H_B[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\t\tlabels = y_test[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t\t feed_dict={\r\n\t\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\t is_train: False,\r\n\t\t\t\t\t\t 
attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\t\tpro_labels_all.extend(labels.tolist())\r\n\t\t\t\t\tpro_features_all.extend(all_features.tolist())\r\n\t\t\t\t\tvl_step += 1\r\n\t\t\t\tX = np.array(gal_features_all)\r\n\t\t\t\ty = np.array(gal_labels_all)\r\n\t\t\t\tt_X = np.array(pro_features_all)\r\n\t\t\t\tt_y = np.array(pro_labels_all)\r\n\t\t\t\t# print(X.shape, t_X.shape)\r\n\t\t\t\tt_y = np.argmax(t_y, axis=-1)\r\n\t\t\t\ty = np.argmax(y, axis=-1)\r\n\r\n\t\t\t\tdef mean_ap(distmat, query_ids=None, gallery_ids=None,\r\n\t\t\t\t query_cams=None, gallery_cams=None):\r\n\t\t\t\t\t# distmat = to_numpy(distmat)\r\n\t\t\t\t\tm, n = distmat.shape\r\n\t\t\t\t\t# Fill up default values\r\n\t\t\t\t\tif query_ids is None:\r\n\t\t\t\t\t\tquery_ids = np.arange(m)\r\n\t\t\t\t\tif gallery_ids is None:\r\n\t\t\t\t\t\tgallery_ids = np.arange(n)\r\n\t\t\t\t\tif query_cams is None:\r\n\t\t\t\t\t\tquery_cams = np.zeros(m).astype(np.int32)\r\n\t\t\t\t\tif gallery_cams is None:\r\n\t\t\t\t\t\tgallery_cams = np.ones(n).astype(np.int32)\r\n\t\t\t\t\t# Ensure numpy array\r\n\t\t\t\t\tquery_ids = np.asarray(query_ids)\r\n\t\t\t\t\tgallery_ids = np.asarray(gallery_ids)\r\n\t\t\t\t\tquery_cams = np.asarray(query_cams)\r\n\t\t\t\t\tgallery_cams = np.asarray(gallery_cams)\r\n\t\t\t\t\t# Sort and find correct matches\r\n\t\t\t\t\tindices = np.argsort(distmat, axis=1)\r\n\t\t\t\t\tmatches = (gallery_ids[indices] == query_ids[:, np.newaxis])\r\n\t\t\t\t\t# Compute AP for each query\r\n\t\t\t\t\taps = []\r\n\t\t\t\t\tif (FLAGS.probe_view != '' and (FLAGS.probe_view == FLAGS.gallery_view or FLAGS.probe_type == 'nm.nm')) or (FLAGS.probe_type == 'cl.cl' or FLAGS.probe_type == 'bg.bg'):\r\n\t\t\t\t\t\tfor i in range(1, m):\r\n\t\t\t\t\t\t\tvalid = ((gallery_ids[indices[i]] != query_ids[i]) |\r\n\t\t\t\t\t\t\t (gallery_cams[indices[i]] != query_cams[i]))\r\n\t\t\t\t\t\t\ty_true = matches[i, valid]\r\n\t\t\t\t\t\t\ty_score = -distmat[i][indices[i]][valid]\r\n\t\t\t\t\t\t\tif not np.any(y_true): continue\r\n\t\t\t\t\t\t\taps.append(average_precision_score(y_true, y_score))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tfor i in range(m):\r\n\t\t\t\t\t\t\tvalid = ((gallery_ids[indices[i]] != query_ids[i]) |\r\n\t\t\t\t\t\t\t (gallery_cams[indices[i]] != query_cams[i]))\r\n\t\t\t\t\t\t\ty_true = matches[i, valid]\r\n\t\t\t\t\t\t\ty_score = -distmat[i][indices[i]][valid]\r\n\t\t\t\t\t\t\tif not np.any(y_true): continue\r\n\t\t\t\t\t\t\taps.append(average_precision_score(y_true, y_score))\r\n\t\t\t\t\tif len(aps) == 0:\r\n\t\t\t\t\t\traise RuntimeError(\"No valid query\")\r\n\t\t\t\t\treturn np.mean(aps)\r\n\r\n\r\n\t\t\t\tdef metrics(X, y, t_X, t_y):\r\n\t\t\t\t\t# compute Euclidean distance\r\n\t\t\t\t\tif dataset != 'CASIA_B':\r\n\t\t\t\t\t\ta, b = torch.from_numpy(t_X), torch.from_numpy(X)\r\n\t\t\t\t\t\tm, n = a.size(0), b.size(0)\r\n\t\t\t\t\t\ta = a.view(m, -1)\r\n\t\t\t\t\t\tb = b.view(n, -1)\r\n\t\t\t\t\t\tdist_m = torch.pow(a, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\r\n\t\t\t\t\t\t torch.pow(b, 2).sum(dim=1, keepdim=True).expand(n, m).t()\r\n\t\t\t\t\t\tdist_m.addmm_(1, -2, a, b.t())\r\n\t\t\t\t\t\tdist_m = dist_m.sqrt()\r\n\t\t\t\t\t\tmAP = mean_ap(distmat=dist_m.numpy(), query_ids=t_y, gallery_ids=y)\r\n\t\t\t\t\t\t_, dist_sort = dist_m.sort(1)\r\n\t\t\t\t\t\tdist_sort = dist_sort.numpy()\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tX = np.array(X)\r\n\t\t\t\t\t\tt_X = np.array(t_X)\r\n\t\t\t\t\t\t# 
pred = [cp.argmin(cp.linalg.norm(X - i, axis=1)).tolist() for i in t_X]\r\n\t\t\t\t\t\tdist_m = [(np.linalg.norm(X - i, axis=1)).tolist() for i in t_X]\r\n\t\t\t\t\t\tdist_m = np.array(dist_m)\r\n\t\t\t\t\t\tmAP = mean_ap(distmat=dist_m, query_ids=t_y, gallery_ids=y)\r\n\t\t\t\t\t\tdist_sort = [np.argsort(np.linalg.norm(X - i, axis=1)).tolist() for i in t_X]\r\n\t\t\t\t\t\tdist_sort = np.array(dist_sort)\r\n\r\n\t\t\t\t\ttop_1 = top_5 = top_10 = 0\r\n\t\t\t\t\tprobe_num = dist_sort.shape[0]\r\n\t\t\t\t\tif (FLAGS.probe_view != '' and (FLAGS.probe_view == FLAGS.gallery_view or FLAGS.probe_type == 'nm.nm')) or (FLAGS.probe_type == 'cl.cl' or FLAGS.probe_type == 'bg.bg'):\r\n\t\t\t\t\t\tfor i in range(probe_num):\r\n\t\t\t\t\t\t\t# print(dist_sort[i, :10])\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:2]]:\r\n\t\t\t\t\t\t\t\ttop_1 += 1\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:6]]:\r\n\t\t\t\t\t\t\t\ttop_5 += 1\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:11]]:\r\n\t\t\t\t\t\t\t\ttop_10 += 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tfor i in range(probe_num):\r\n\t\t\t\t\t\t\t# print(dist_sort[i, :10])\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :1]]:\r\n\t\t\t\t\t\t\t\ttop_1 += 1\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :5]]:\r\n\t\t\t\t\t\t\t\ttop_5 += 1\r\n\t\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :10]]:\r\n\t\t\t\t\t\t\t\ttop_10 += 1\r\n\t\t\t\t\treturn mAP, top_1 / probe_num, top_5 / probe_num, top_10 / probe_num\r\n\r\n\t\t\t\tmAP, top_1, top_5, top_10 = metrics(X, y, t_X, t_y)\r\n\t\t\t\treturn mAP, top_1, top_5, top_10\r\n\r\n\t\t\tmax_acc_1 = 0\r\n\t\t\tmax_acc_2 = 0\r\n\t\t\tbest_cluster_info_1 = [0, 0]\r\n\t\t\tbest_cluster_info_2 = [0, 0]\r\n\t\t\tcur_patience = 0\r\n\t\t\tif dataset == 'KGBD' or dataset == 'KS20':\r\n\t\t\t\tif FLAGS.gallery_view == '' and FLAGS.probe_view == '':\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='gallery', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\t\telse:\r\n\t\t\t\t\t_, _, _, _, _, _, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, b_, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='view_'+FLAGS.gallery_view, time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\telif dataset == 'BIWI':\r\n\t\t\t\tif probe == 'Walking':\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='Still', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\t\telse:\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='Walking', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t\t 
batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\telif dataset == 'IAS':\r\n\t\t\t\tif probe == 'A':\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='B', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\t\telse:\r\n\t\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='A', time_step=time_step,\r\n\t\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t\t )\r\n\t\t\telif dataset == 'CASIA_B':\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split=probe, time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t PG_type=FLAGS.probe_type.split('.')[1])\r\n\t\t\tfor epoch in range(cluster_epochs):\r\n\t\t\t\ttrain_features_all, train_labels_all = train_loader(X_train_P, X_train_B, X_train_H_B, y_train)\r\n\t\t\t\tgal_features_all, gal_labels_all = gal_loader(X_gal_P, X_gal_B, X_gal_H_B, y_gal)\r\n\t\t\t\tmAP, top_1, top_5, top_10 = evaluation()\r\n\t\t\t\tcur_patience += 1\r\n\t\t\t\tif epoch > 0 and top_1 > max_acc_2:\r\n\t\t\t\t\tmax_acc_1 = mAP\r\n\t\t\t\t\tbest_cluster_info_1[0] = num_cluster\r\n\t\t\t\t\tbest_cluster_info_1[1] = outlier_num\r\n\t\t\t\t\tcur_patience = 0\r\n\t\t\t\t\tif FLAGS.mode == 'UF' and FLAGS.S_dataset == '':\r\n\t\t\t\t\t\tif FLAGS.probe_view == '' and FLAGS.gallery_view == '' and FLAGS.dataset != 'CASIA_B':\r\n\t\t\t\t\t\t\t# checkpt_file = pre_dir + dataset + '/' + probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t\t\t\t\t\t\t# \tnhood) + '_' + str(\r\n\t\t\t\t\t\t\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t\t\t\t\t\t\t# FLAGS.t + '_' + change + '_best.ckpt'\r\n\t\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + change + '_best.ckpt'\r\n\t\t\t\t\t\telif FLAGS.dataset == 'CASIA_B':\r\n\t\t\t\t\t\t\t# checkpt_file = pre_dir + dataset + '/' + probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t\t\t\t\t\t\t# \tnhood) + '_' + str(\r\n\t\t\t\t\t\t\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t\t\t\t\t\t\t# FLAGS.t + '_' + change + '_' + FLAGS.probe_type + '_best.ckpt'\r\n\t\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + change + '_' + FLAGS.probe_type + '_best.ckpt'\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t# checkpt_file = pre_dir + dataset + '/' + probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t\t\t\t\t\t\t# \tnhood) + '_' + str(\r\n\t\t\t\t\t\t\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t\t\t\t\t\t\t# FLAGS.t + '_' + FLAGS.probe_view + 'v' + FLAGS.gallery_view + change + '_best.ckpt'\r\n\t\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + '_' + FLAGS.probe_view + 'v' + FLAGS.gallery_view + change + 
'_best.ckpt'\r\n\t\t\t\t\t\tprint(checkpt_file)\r\n\t\t\t\t\t\tsaver.save(sess, checkpt_file)\r\n\t\t\t\tif epoch > 0 and top_1 > max_acc_2:\r\n\t\t\t\t\tmax_acc_2 = top_1\r\n\t\t\t\t\tbest_cluster_info_2[0] = num_cluster\r\n\t\t\t\t\tbest_cluster_info_2[1] = outlier_num\r\n\t\t\t\t\tcur_patience = 0\r\n\t\t\t\tif epoch > 0:\r\n\t\t\t\t\tif FLAGS.probe_view != '' and FLAGS.gallery_view != '':\r\n\t\t\t\t\t\tprint('[UF] View: %s v %s | mAP: %.4f (%.4f) | Top-1: %.4f (%.4f) | Top-5: %.4f | Top-10: %.4f | % d + o: %d |' % (\r\n\t\t\t\t\t\t\tFLAGS.probe_view, FLAGS.gallery_view, mAP, max_acc_1,\r\n\t\t\t\t\t\t\ttop_1, max_acc_2, top_5, top_10,\r\n\t\t\t\t\t\t\tbest_cluster_info_2[0], best_cluster_info_2[1]))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tprint(\r\n\t\t\t\t\t\t\t'[UF] %s - %s | mAP: %.4f (%.4f) | Top-1: %.4f (%.4f) | Top-5: %.4f | Top-10: %.4f | % d + o: %d |' % (\r\n\t\t\t\t\t\t\tFLAGS.dataset, FLAGS.probe, mAP, max_acc_1,\r\n\t\t\t\t\t\t\ttop_1, max_acc_2, top_5, top_10,\r\n\t\t\t\t\t\t\tbest_cluster_info_2[0], best_cluster_info_2[1]))\r\n\t\t\t\tif cur_patience == patience:\r\n\t\t\t\t\tbreak\r\n\t\t\t\trerank_dist = compute_jaccard_distance(train_features_all, k1=k1, k2=k2)\r\n\r\n\t\t\t\tif dataset == 'IAS' or dataset == 'KS20':\r\n\t\t\t\t\tcluster = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1)\r\n\t\t\t\telse:\r\n\t\t\t\t\tcluster = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1)\r\n\t\t\t\tpseudo_labels = cluster.fit_predict(rerank_dist)\r\n\t\t\t\t# discard outliers\r\n\t\t\t\ttrain_features_all = train_features_all[np.where(pseudo_labels != -1)]\r\n\t\t\t\tX_train_P_new = X_train_P[np.where(pseudo_labels != -1)]\r\n\t\t\t\tX_train_B_new = X_train_B[np.where(pseudo_labels != -1)]\r\n\t\t\t\tX_train_H_B_new = X_train_H_B[np.where(pseudo_labels != -1)]\r\n\t\t\t\toutlier_num = np.sum(pseudo_labels == -1)\r\n\t\t\t\tpseudo_labels = pseudo_labels[np.where(pseudo_labels != -1)]\r\n\t\t\t\tnum_cluster = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)\r\n\r\n\r\n\t\t\t\tdef generate_cluster_features(labels, features):\r\n\t\t\t\t\tcenters = collections.defaultdict(list)\r\n\t\t\t\t\tfor i, label in enumerate(labels):\r\n\t\t\t\t\t\tif label == -1:\r\n\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\tcenters[labels[i]].append(features[i])\r\n\r\n\t\t\t\t\tcenters = [\r\n\t\t\t\t\t\ttorch.stack(centers[idx], dim=0).mean(0) for idx in sorted(centers.keys())\r\n\t\t\t\t\t]\r\n\t\t\t\t\tcenters = torch.stack(centers, dim=0)\r\n\t\t\t\t\treturn centers\r\n\r\n\r\n\t\t\t\tcluster_features = generate_cluster_features(pseudo_labels, train_features_all)\r\n\t\t\t\tcluster_features = cluster_features.numpy()\r\n\t\t\t\tcluster_features = cluster_features.astype(np.float64)\r\n\r\n\t\t\t\ttr_step = 0\r\n\t\t\t\ttr_size = X_train_P_new.shape[0]\r\n\t\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\tX_input_P = X_train_P_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\t\tX_input_B = X_train_B_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\t\tX_input_H_B = X_train_H_B_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\t\tlabels = pseudo_labels[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t\t_, loss, P_en, B_en, all_features = 
sess.run(\r\n\t\t\t\t\t\t[cluster_train_op, contrastive_loss, P_encode, B_encode, all_ftr],\r\n\t\t\t\t\t\tfeed_dict={\r\n\t\t\t\t\t\t\tP_in: X_input_P,\r\n\t\t\t\t\t\t\tB_in: X_input_B,\r\n\t\t\t\t\t\t\tH_B_in: X_input_H_B,\r\n\t\t\t\t\t\t\tP_bias_in: biases_P,\r\n\t\t\t\t\t\t\tB_bias_in: biases_B,\r\n\t\t\t\t\t\t\tH_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\t\tis_train: True,\r\n\t\t\t\t\t\t\tattn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\t\tpseudo_lab: labels,\r\n\t\t\t\t\t\t\tcluster_ftr: cluster_features})\r\n\t\t\t\t\tif tr_step % display == 0:\r\n\t\t\t\t\t\tprint('[%s] Batch num: %d | Cluser num: %d | Outlier: %d | Loss: %.5f |' %\r\n\t\t\t\t\t\t (str(epoch), tr_step, num_cluster, outlier_num, loss))\r\n\t\t\t\t\ttr_step += 1\r\n\t\t\tsess.close()\r\n\r\nelif FLAGS.mode == 'DG' and FLAGS.S_dataset != '':\r\n\tif FLAGS.S_dataset == 'KGBD':\r\n\t\tbatch_size = 256\r\n\t\tFLAGS.lr = '0.00035'\r\n\t\tFLAGS.min_samples = '4'\r\n\t\tFLAGS.eps = '0.6'\r\n\telif FLAGS.S_dataset == 'CASIA_B':\r\n\t\tbatch_size = 128\r\n\t\tFLAGS.lr = '0.00035'\r\n\t\tFLAGS.min_samples = '2'\r\n\t\tFLAGS.eps = '0.75'\r\n\telse:\r\n\t\tbatch_size = 128\r\n\t\tFLAGS.lr = '0.00035'\r\n\tif FLAGS.S_dataset == 'IAS' or FLAGS.S_dataset == 'KS20':\r\n\t\t# if FLAGS.mode != 'DG':\r\n\t\tFLAGS.eps = '0.8'\r\n\t\tFLAGS.min_samples = '2'\r\n\t\tif FLAGS.S_dataset == 'KS20':\r\n\t\t\tFLAGS.min_samples = '2'\r\n\r\n\tif FLAGS.S_dataset == 'BIWI':\r\n\t\tFLAGS.min_samples = '2'\r\n\t\tif FLAGS.S_probe == 'Walking':\r\n\t\t\tFLAGS.eps = '0.6'\r\n\t\telse:\r\n\t\t\tFLAGS.eps = '0.7'\r\n\t# checkpt_file = pre_dir + FLAGS.S_dataset + '/' + FLAGS.S_probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t# \tnhood) + '_' + str(\r\n\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t# \t\t\t\t\t\t FLAGS.t + '_' + change + '_best.ckpt'\r\n\tcheckpt_file = pre_dir + dataset + '/' + probe + change + '_best.ckpt'\r\n\tchange = '_DG'\r\n\twith tf.Session(graph=loaded_graph, config=config) as sess:\r\n\t\tloader = tf.train.import_meta_graph(checkpt_file + '.meta')\r\n\t\tP_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder:0\")\r\n\t\tB_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_1:0\")\r\n\t\tH_B_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_2:0\")\r\n\t\tP_bias_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_3:0\")\r\n\t\tB_bias_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_4:0\")\r\n\t\tH_B_bias_in = loaded_graph.get_tensor_by_name(\"Input/Placeholder_5:0\")\r\n\t\tattn_drop = loaded_graph.get_tensor_by_name(\"Input/Placeholder_6:0\")\r\n\t\tffd_drop = loaded_graph.get_tensor_by_name(\"Input/Placeholder_7:0\")\r\n\t\tis_train = loaded_graph.get_tensor_by_name(\"Input/Placeholder_8:0\")\r\n\t\tpseudo_lab = loaded_graph.get_tensor_by_name(\"Input/Placeholder_9:0\")\r\n\t\tcluster_ftr = loaded_graph.get_tensor_by_name(\"Input/Placeholder_10:0\")\r\n\r\n\t\tP_encode = loaded_graph.get_tensor_by_name(\"MG/MG/Reshape_45:0\")\r\n\t\tB_encode = loaded_graph.get_tensor_by_name(\"MG/MG/Reshape_46:0\")\r\n\t\tH_B_encode = loaded_graph.get_tensor_by_name(\"MG/MG/Reshape_47:0\")\r\n\t\tall_ftr = loaded_graph.get_tensor_by_name(\"MG/MG/Reshape_48:0\")\r\n\r\n\t\tcontrastive_loss = loaded_graph.get_tensor_by_name(\"MG/MG/cond/Merge:0\")\r\n\t\tcluster_train_op = loaded_graph.get_operation_by_name(\"MG/MG/Adam\")\r\n\r\n\t\tinit_op = tf.global_variables_initializer()\r\n\t\tsess.run(init_op)\r\n\t\tloader.restore(sess, checkpt_file)\r\n\t\tsaver = 
tf.train.Saver()\r\n\r\n\r\n\t\tdef train_loader(X_train_P, X_train_B, X_train_H_B, y_train):\r\n\t\t\ttr_step = 0\r\n\t\t\ttr_size = X_train_P.shape[0]\r\n\t\t\ttrain_logits_all = []\r\n\t\t\ttrain_labels_all = []\r\n\t\t\ttrain_features_all = []\r\n\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\tX_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\tX_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\tX_input_H_B = X_train_H_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\tlabels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t feed_dict={\r\n\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t is_train: True,\r\n\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\ttrain_features_all.extend(all_features.tolist())\r\n\t\t\t\ttrain_labels_all.extend(labels.tolist())\r\n\t\t\t\ttr_step += 1\r\n\r\n\t\t\ttrain_features_all = np.array(train_features_all).astype(np.float32)\r\n\t\t\ttrain_features_all = torch.from_numpy(train_features_all)\r\n\t\t\treturn train_features_all, train_labels_all\r\n\r\n\r\n\t\tdef gal_loader(X_train_P, X_train_B, X_train_H_B, y_train):\r\n\t\t\ttr_step = 0\r\n\t\t\ttr_size = X_train_P.shape[0]\r\n\t\t\tgal_logits_all = []\r\n\t\t\tgal_labels_all = []\r\n\t\t\tgal_features_all = []\r\n\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\tX_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\tX_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\tX_input_H_B = X_train_H_B[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\tlabels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t feed_dict={\r\n\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t is_train: True,\r\n\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\tgal_features_all.extend(all_features.tolist())\r\n\t\t\t\tgal_labels_all.extend(labels.tolist())\r\n\t\t\t\ttr_step += 1\r\n\r\n\t\t\treturn gal_features_all, gal_labels_all\r\n\r\n\r\n\t\tdef evaluation():\r\n\t\t\tvl_step = 0\r\n\t\t\tvl_size = X_test_P.shape[0]\r\n\t\t\tpro_labels_all = []\r\n\t\t\tpro_features_all = []\r\n\t\t\tloaded_graph = tf.get_default_graph()\r\n\t\t\twhile vl_step * batch_size < vl_size:\r\n\t\t\t\tif (vl_step + 1) * batch_size > 
vl_size:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\tX_input_P = X_test_P[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\tX_input_B = X_test_B[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\tX_input_H_B = X_test_H_B[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\tlabels = y_test[vl_step * batch_size:(vl_step + 1) * batch_size]\r\n\t\t\t\tP_en, B_en, all_features = sess.run([P_encode, B_encode, all_ftr],\r\n\t\t\t\t feed_dict={\r\n\t\t\t\t\t P_in: X_input_P,\r\n\t\t\t\t\t B_in: X_input_B,\r\n\t\t\t\t\t H_B_in: X_input_H_B,\r\n\t\t\t\t\t P_bias_in: biases_P,\r\n\t\t\t\t\t B_bias_in: biases_B,\r\n\t\t\t\t\t H_B_bias_in: biases_H_B,\r\n\t\t\t\t\t is_train: False,\r\n\t\t\t\t\t attn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t pseudo_lab: np.zeros([batch_size, ]),\r\n\t\t\t\t\t cluster_ftr: np.zeros(\r\n\t\t\t\t\t\t [batch_size, all_ftr_size])})\r\n\t\t\t\tpro_labels_all.extend(labels.tolist())\r\n\t\t\t\tpro_features_all.extend(all_features.tolist())\r\n\t\t\t\tvl_step += 1\r\n\t\t\tX = np.array(gal_features_all)\r\n\t\t\ty = np.array(gal_labels_all)\r\n\t\t\tt_X = np.array(pro_features_all)\r\n\t\t\tt_y = np.array(pro_labels_all)\r\n\t\t\t# print(X.shape, t_X.shape)\r\n\t\t\tt_y = np.argmax(t_y, axis=-1)\r\n\t\t\ty = np.argmax(y, axis=-1)\r\n\r\n\t\t\tdef mean_ap(distmat, query_ids=None, gallery_ids=None,\r\n\t\t\t query_cams=None, gallery_cams=None):\r\n\t\t\t\t# distmat = to_numpy(distmat)\r\n\t\t\t\tm, n = distmat.shape\r\n\t\t\t\t# Fill up default values\r\n\t\t\t\tif query_ids is None:\r\n\t\t\t\t\tquery_ids = np.arange(m)\r\n\t\t\t\tif gallery_ids is None:\r\n\t\t\t\t\tgallery_ids = np.arange(n)\r\n\t\t\t\tif query_cams is None:\r\n\t\t\t\t\tquery_cams = np.zeros(m).astype(np.int32)\r\n\t\t\t\tif gallery_cams is None:\r\n\t\t\t\t\tgallery_cams = np.ones(n).astype(np.int32)\r\n\t\t\t\t# Ensure numpy array\r\n\t\t\t\tquery_ids = np.asarray(query_ids)\r\n\t\t\t\tgallery_ids = np.asarray(gallery_ids)\r\n\t\t\t\tquery_cams = np.asarray(query_cams)\r\n\t\t\t\tgallery_cams = np.asarray(gallery_cams)\r\n\t\t\t\t# Sort and find correct matches\r\n\t\t\t\tindices = np.argsort(distmat, axis=1)\r\n\t\t\t\tmatches = (gallery_ids[indices] == query_ids[:, np.newaxis])\r\n\t\t\t\t# Compute AP for each query\r\n\t\t\t\taps = []\r\n\t\t\t\tif (FLAGS.probe_view != '' and FLAGS.probe_view == FLAGS.gallery_view) or (FLAGS.probe_type == 'nm.nm' or FLAGS.probe_type == 'cl.cl' or FLAGS.probe_type == 'bg.bg'):\r\n\t\t\t\t\tfor i in range(1, m):\r\n\t\t\t\t\t\tvalid = ((gallery_ids[indices[i]] != query_ids[i]) |\r\n\t\t\t\t\t\t (gallery_cams[indices[i]] != query_cams[i]))\r\n\t\t\t\t\t\ty_true = matches[i, valid]\r\n\t\t\t\t\t\ty_score = -distmat[i][indices[i]][valid]\r\n\t\t\t\t\t\tif not np.any(y_true): continue\r\n\t\t\t\t\t\taps.append(average_precision_score(y_true, y_score))\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor i in range(m):\r\n\t\t\t\t\t\tvalid = ((gallery_ids[indices[i]] != query_ids[i]) |\r\n\t\t\t\t\t\t (gallery_cams[indices[i]] != query_cams[i]))\r\n\t\t\t\t\t\ty_true = matches[i, valid]\r\n\t\t\t\t\t\ty_score = -distmat[i][indices[i]][valid]\r\n\t\t\t\t\t\tif not np.any(y_true): continue\r\n\t\t\t\t\t\taps.append(average_precision_score(y_true, y_score))\r\n\t\t\t\tif len(aps) == 0:\r\n\t\t\t\t\traise RuntimeError(\"No valid query\")\r\n\t\t\t\treturn np.mean(aps)\r\n\r\n\t\t\tdef metrics(X, y, t_X, t_y):\r\n\t\t\t\ta, b 
= torch.from_numpy(t_X), torch.from_numpy(X)\r\n\t\t\t\t# compute Euclidean distance\r\n\t\t\t\tm, n = a.size(0), b.size(0)\r\n\t\t\t\ta = a.view(m, -1)\r\n\t\t\t\tb = b.view(n, -1)\r\n\t\t\t\tdist_m = torch.pow(a, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\r\n\t\t\t\t torch.pow(b, 2).sum(dim=1, keepdim=True).expand(n, m).t()\r\n\t\t\t\tdist_m.addmm_(1, -2, a, b.t())\r\n\t\t\t\tdist_m = dist_m.sqrt()\r\n\r\n\t\t\t\tmAP = mean_ap(distmat=dist_m.numpy(), query_ids=t_y, gallery_ids=y)\r\n\t\t\t\t_, dist_sort = dist_m.sort(1)\r\n\t\t\t\tdist_sort = dist_sort.numpy()\r\n\r\n\t\t\t\ttop_1 = top_5 = top_10 = 0\r\n\t\t\t\tprobe_num = dist_sort.shape[0]\r\n\t\t\t\tif (FLAGS.probe_view != '' and FLAGS.probe_view == FLAGS.gallery_view) or (FLAGS.probe_type == 'nm.nm' or FLAGS.probe_type == 'cl.cl' or FLAGS.probe_type == 'bg.bg'):\r\n\t\t\t\t\tfor i in range(probe_num):\r\n\t\t\t\t\t\t# print(dist_sort[i, :10])\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:2]]:\r\n\t\t\t\t\t\t\ttop_1 += 1\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:6]]:\r\n\t\t\t\t\t\t\ttop_5 += 1\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, 1:11]]:\r\n\t\t\t\t\t\t\ttop_10 += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor i in range(probe_num):\r\n\t\t\t\t\t\t# print(dist_sort[i, :10])\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :1]]:\r\n\t\t\t\t\t\t\ttop_1 += 1\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :5]]:\r\n\t\t\t\t\t\t\ttop_5 += 1\r\n\t\t\t\t\t\tif t_y[i] in y[dist_sort[i, :10]]:\r\n\t\t\t\t\t\t\ttop_10 += 1\r\n\t\t\t\treturn mAP, top_1 / probe_num, top_5 / probe_num, top_10 / probe_num\r\n\r\n\t\t\tmAP, top_1, top_5, top_10 = metrics(X, y, t_X, t_y)\r\n\t\t\treturn mAP, top_1, top_5, top_10\r\n\r\n\r\n\t\tmax_acc_1 = 0\r\n\t\tmax_acc_2 = 0\r\n\t\tbest_cluster_info_1 = [0, 0]\r\n\t\tbest_cluster_info_2 = [0, 0]\r\n\t\tcur_patience = 0\r\n\t\tif dataset == 'KGBD' or dataset == 'KS20':\r\n\t\t\tif FLAGS.gallery_view == '' and FLAGS.probe_view == '':\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='gallery', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\t\telse:\r\n\t\t\t\t_, _, _, _, _, _, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='view_' + FLAGS.gallery_view, time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\telif dataset == 'BIWI':\r\n\t\t\tif probe == 'Walking':\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='Still', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\t\telse:\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='Walking', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, 
nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\telif dataset == 'IAS':\r\n\t\t\tif probe == 'A':\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='B', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\t\telse:\r\n\t\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\t\tprocess.gen_train_data(dataset=dataset, split='A', time_step=time_step,\r\n\t\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,\r\n\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t )\r\n\t\telif dataset == 'CASIA_B':\r\n\t\t\t_, X_train_P, X_train_B, X_train_H_B, _, y_train, _, X_gal_P, X_gal_B, X_gal_H_B, _, y_gal, \\\r\n\t\t\t_, _, adj_P, biases_P, adj_B, biases_B, adj_H_B, biases_H_B, _, _, nb_classes = \\\r\n\t\t\t\tprocess.gen_train_data(dataset=dataset, split=probe, time_step=time_step,\r\n\t\t\t\t nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,\r\n\t\t\t\t PG_type=FLAGS.probe_type.split('.')[1])\r\n\t\tfor epoch in range(cluster_epochs):\r\n\t\t\ttrain_features_all, train_labels_all = train_loader(X_train_P, X_train_B, X_train_H_B, y_train)\r\n\t\t\t# train_features_all = train_features_all.numpy()\r\n\t\t\tgal_features_all, gal_labels_all = gal_loader(X_gal_P, X_gal_B, X_gal_H_B, y_gal)\r\n\t\t\tmAP, top_1, top_5, top_10 = evaluation()\r\n\t\t\tcur_patience += 1\r\n\t\t\tif epoch > 0 and top_1 > max_acc_2:\r\n\t\t\t\tmax_acc_1 = mAP\r\n\t\t\t\tbest_cluster_info_1[0] = num_cluster\r\n\t\t\t\tbest_cluster_info_1[1] = outlier_num\r\n\t\t\t\tcur_patience = 0\r\n\t\t\t\tif FLAGS.mode == 'DG' and FLAGS.S_dataset != '':\r\n\t\t\t\t\tif FLAGS.probe_view == '' and FLAGS.gallery_view == '':\r\n\t\t\t\t\t\t# checkpt_file = pre_dir + dataset + '/' + probe + '_' + str(fusion_lambda) + '_' + str(\r\n\t\t\t\t\t\t# \tnhood) + '_' + str(\r\n\t\t\t\t\t\t# \ttime_step) + '_' + FLAGS.min_samples + '_' + FLAGS.lr + '_' + FLAGS.eps + '_' + \\\r\n\t\t\t\t\t\t# FLAGS.density_lambda + '_' + change + '_best.ckpt'\r\n\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + change + '_best.ckpt'\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcheckpt_file = pre_dir + dataset + '/' + probe + '_' + FLAGS.probe_view + 'v' + \\\r\n\t\t\t\t\t\t FLAGS.gallery_view + change + '_best.ckpt'\r\n\t\t\t\t\tprint(checkpt_file)\r\n\t\t\t\t\tsaver.save(sess, checkpt_file)\r\n\t\t\tif epoch > 0 and top_1 > max_acc_2:\r\n\t\t\t\tmax_acc_2 = top_1\r\n\t\t\t\tbest_cluster_info_2[0] = num_cluster\r\n\t\t\t\tbest_cluster_info_2[1] = outlier_num\r\n\t\t\t\tcur_patience = 0\r\n\t\t\tif FLAGS.evaluate == '1':\r\n\t\t\t\tprint(\r\n\t\t\t\t\t\t'[Evaluate on %s - %s] | mAP: %.4f | Top-1: %.4f | Top-5: %.4f | Top-10: %.4f' % (\r\n\t\t\t\t\tFLAGS.dataset, FLAGS.probe, mAP,\r\n\t\t\t\t\ttop_1, top_5, top_10))\r\n\t\t\t\texit()\r\n\t\t\telse:\r\n\t\t\t\tif FLAGS.probe_view != '' and FLAGS.gallery_view != '':\r\n\t\t\t\t\tprint(\r\n\t\t\t\t\t'[DG] View: %s v %s | mAP: %.4f (%.4f) | Top-1: %.4f (%.4f) | Top-5: %.4f | Top-10: %.4f | % d + o: %d |' % (\r\n\t\t\t\t\t\tFLAGS.probe_view, FLAGS.gallery_view, mAP, max_acc_1,\r\n\t\t\t\t\t\ttop_1, max_acc_2, top_5, 
top_10,\r\n\t\t\t\t\t\tbest_cluster_info_2[0], best_cluster_info_2[1]))\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\r\n\t\t\t\t\t\t\t'[DG] %s - %s | mAP: %.4f (%.4f) | Top-1: %.4f (%.4f) | Top-5: %.4f | Top-10: %.4f | % d + o: %d |' % (\r\n\t\t\t\t\t\tFLAGS.dataset, FLAGS.probe, mAP, max_acc_1,\r\n\t\t\t\t\t\ttop_1, max_acc_2, top_5, top_10,\r\n\t\t\t\t\t\tbest_cluster_info_2[0], best_cluster_info_2[1]))\r\n\t\t\tif cur_patience == patience:\r\n\t\t\t\tbreak\r\n\t\t\trerank_dist = compute_jaccard_distance(train_features_all, k1=k1, k2=k2)\r\n\t\t\tif dataset == 'IAS' or dataset == 'KS20':\r\n\t\t\t\tcluster = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1)\r\n\t\t\telse:\r\n\t\t\t\tcluster = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1)\r\n\t\t\tpseudo_labels = cluster.fit_predict(rerank_dist)\r\n\t\t\t# discard outliers\r\n\t\t\ttrain_features_all = train_features_all[np.where(pseudo_labels != -1)]\r\n\r\n\t\t\tX_train_P_new = X_train_P[np.where(pseudo_labels != -1)]\r\n\t\t\tX_train_B_new = X_train_B[np.where(pseudo_labels != -1)]\r\n\t\t\tX_train_H_B_new = X_train_H_B[np.where(pseudo_labels != -1)]\r\n\t\t\toutlier_num = np.sum(pseudo_labels == -1)\r\n\t\t\tpseudo_labels = pseudo_labels[np.where(pseudo_labels != -1)]\r\n\t\t\t# print(pseudo_labels)\r\n\t\t\tnum_cluster = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)\r\n\r\n\r\n\t\t\tdef generate_cluster_features(labels, features):\r\n\t\t\t\tcenters = collections.defaultdict(list)\r\n\t\t\t\tfor i, label in enumerate(labels):\r\n\t\t\t\t\tif label == -1:\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\tcenters[labels[i]].append(features[i])\r\n\r\n\t\t\t\tcenters = [\r\n\t\t\t\t\ttorch.stack(centers[idx], dim=0).mean(0) for idx in sorted(centers.keys())\r\n\t\t\t\t]\r\n\t\t\t\t# print(centers)\r\n\r\n\t\t\t\tcenters = torch.stack(centers, dim=0)\r\n\t\t\t\treturn centers\r\n\r\n\r\n\t\t\tcluster_features = generate_cluster_features(pseudo_labels, train_features_all)\r\n\t\t\tcluster_features = cluster_features.numpy()\r\n\t\t\tcluster_features = cluster_features.astype(np.float64)\r\n\r\n\t\t\ttr_step = 0\r\n\t\t\ttr_size = X_train_P_new.shape[0]\r\n\t\t\t# pro_en_P = []\r\n\t\t\t# pro_en_B = []\r\n\t\t\twhile tr_step * batch_size < tr_size:\r\n\t\t\t\tif (tr_step + 1) * batch_size > tr_size:\r\n\t\t\t\t\tbreak\r\n\t\t\t\tX_input_P = X_train_P_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_P = X_input_P.reshape([-1, 10, 3])\r\n\t\t\t\tX_input_B = X_train_B_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_B = X_input_B.reshape([-1, 5, 3])\r\n\t\t\t\tX_input_H_B = X_train_H_B_new[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\tX_input_H_B = X_input_H_B.reshape([-1, 3, 3])\r\n\t\t\t\tlabels = pseudo_labels[tr_step * batch_size:(tr_step + 1) * batch_size]\r\n\t\t\t\t_, loss, P_en, B_en, all_features = sess.run(\r\n\t\t\t\t\t[cluster_train_op, contrastive_loss, P_encode, B_encode, all_ftr],\r\n\t\t\t\t\tfeed_dict={\r\n\t\t\t\t\t\tP_in: X_input_P,\r\n\t\t\t\t\t\tB_in: X_input_B,\r\n\t\t\t\t\t\tH_B_in: X_input_H_B,\r\n\t\t\t\t\t\tP_bias_in: biases_P,\r\n\t\t\t\t\t\tB_bias_in: biases_B,\r\n\t\t\t\t\t\tH_B_bias_in: biases_H_B,\r\n\t\t\t\t\t\tis_train: True,\r\n\t\t\t\t\t\tattn_drop: 0.0, ffd_drop: 0.0,\r\n\t\t\t\t\t\tpseudo_lab: labels,\r\n\t\t\t\t\t\tcluster_ftr: cluster_features})\r\n\t\t\t\tif tr_step % display == 0:\r\n\t\t\t\t\tprint('[%s] Batch num: %d | Cluser num: %d | Outlier: %d | Loss: %.5f |' %\r\n\t\t\t\t\t (str(epoch), 
tr_step, num_cluster, outlier_num, loss))\r\n\t\t\t\ttr_step += 1\r\n\t\tsess.close()\r\n\r\n\r\nprint('----- Model hyperparams -----')\r\n# print('skeleton_nodes: ' + str(nb_nodes))\r\nprint('seqence_length: ' + str(time_step))\r\nprint('fusion_lambda: ' + str(fusion_lambda))\r\nprint('batch_size: ' + str(batch_size))\r\nprint('lr: ' + str(FLAGS.lr))\r\nprint('temperature: ' + FLAGS.t)\r\nprint('eps: ' + FLAGS.eps)\r\nprint('min_samples: ' + FLAGS.min_samples)\r\nprint('m: ' + FLAGS.m)\r\nprint('fusion_lambda: ' + FLAGS.fusion_lambda)\r\nprint('patience: ' + FLAGS.patience)\r\n\r\nprint('Mode: ' + FLAGS.mode)\r\n\r\nif FLAGS.mode == 'DG':\r\n\tprint('----- Mode Information -----')\r\n\tprint('Source Dataset: ' + FLAGS.S_dataset)\r\n\tprint('Target Dataset: ' + FLAGS.dataset)\r\n\tprint('Target Probe: ' + FLAGS.probe)\r\nelif FLAGS.mode == 'UF':\r\n\tprint('----- Dataset Information -----')\r\n\tprint('Dataset: ' + dataset)\r\n\tprint('Probe: ' + FLAGS.probe)\r\n\r\n\r\n"
] | [
[
"numpy.sum",
"numpy.ones",
"torch.stack",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.reshape",
"numpy.any",
"numpy.argsort",
"tensorflow.variable_scope",
"numpy.asarray",
"tensorflow.matmul",
"tensorflow.name_scope",
"tensorflow.concat",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"tensorflow.random_normal",
"tensorflow.global_variables_initializer",
"sklearn.cluster.DBSCAN",
"tensorflow.Graph",
"torch.from_numpy",
"numpy.where",
"tensorflow.transpose",
"tensorflow.train.import_meta_graph",
"numpy.mean",
"numpy.zeros",
"tensorflow.expand_dims",
"numpy.argmax",
"numpy.arange",
"tensorflow.train.Saver",
"tensorflow.Session",
"sklearn.metrics.average_precision_score",
"numpy.linalg.norm",
"tensorflow.local_variables_initializer",
"tensorflow.nn.l2_normalize",
"tensorflow.ConfigProto",
"torch.pow",
"tensorflow.placeholder",
"tensorflow.zeros",
"tensorflow.get_default_graph",
"numpy.array"
]
] |
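The unsupervised re-identification script in the row above manufactures its own training targets: it clusters sequence features with DBSCAN over a Jaccard re-ranked distance matrix, drops the points DBSCAN labels -1 as outliers, averages the surviving features into per-cluster centroids, and minimises a temperature-scaled softmax loss against those centroids. The sketch below restates that pseudo-labelling step and loss in plain NumPy/scikit-learn under simplifying assumptions: ordinary Euclidean distances stand in for compute_jaccard_distance, and the names features, eps, min_samples and t are illustrative rather than taken from the repository.

import numpy as np
from sklearn.cluster import DBSCAN


def pseudo_label_and_centroids(features, eps=0.75, min_samples=2):
    # Plain Euclidean pairwise distances stand in for the Jaccard
    # re-ranked distances the script feeds to DBSCAN.
    dist = np.linalg.norm(features[:, None, :] - features[None, :, :], axis=-1)
    labels = DBSCAN(eps=eps, min_samples=min_samples,
                    metric='precomputed', n_jobs=-1).fit_predict(dist)
    keep = labels != -1                     # discard DBSCAN outliers (label -1)
    kept, kept_labels = features[keep], labels[keep]
    if kept.size == 0:
        return kept, kept_labels, np.empty((0, features.shape[1]))
    centroids = np.stack([kept[kept_labels == c].mean(axis=0)
                          for c in sorted(set(kept_labels.tolist()))])
    return kept, kept_labels, centroids


def cluster_contrastive_loss(features, labels, centroids, t=0.06):
    # Temperature-scaled softmax over cosine similarity to the centroids,
    # mirroring the l2_normalize + matmul + sparse cross-entropy graph ops.
    f = features / np.linalg.norm(features, axis=1, keepdims=True)
    c = centroids / np.linalg.norm(centroids, axis=1, keepdims=True)
    logits = f @ c.T / t
    log_prob = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
    return -log_prob[np.arange(len(labels)), labels].mean()


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    # 4 well-separated synthetic "identities", 16 sequences each
    feats = rng.normal(size=(64, 8)) + np.repeat(np.eye(4, 8) * 10, 16, axis=0)
    kept, labs, cents = pseudo_label_and_centroids(feats, eps=6.0, min_samples=4)
    if len(cents):
        print(len(cents), cluster_contrastive_loss(kept, labs, cents))

Each round in the script rebuilds both the pseudo-labels and the centroid bank before the next pass of contrastive updates, which is the same ordering as the sketch: cluster first, then compute the loss against fresh centroids.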
EnjoyLifeFund/py36pkgs | [
"0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2"
] | [
"chainer/functions/pooling/pooling_2d.py"
] | [
"import collections\n\nimport numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import conv\nfrom chainer.utils import type_check\n\n\nif cuda.cudnn_enabled:\n cudnn = cuda.cudnn\n libcudnn = cudnn.cudnn\n _cudnn_version = libcudnn.getVersion()\n\n\ndef _check_cudnn_acceptable_type(x_dtype):\n return _cudnn_version >= 3000 or x_dtype != numpy.float16\n\n\ndef _pair(x):\n if isinstance(x, collections.Iterable):\n return x\n return x, x\n\n\nclass Pooling2D(function.Function):\n\n \"\"\"Base class of pooling function over a set of 2d planes.\"\"\"\n\n def __init__(self, ksize, stride=None, pad=0, cover_all=True,\n use_cudnn=True):\n if stride is None:\n stride = ksize\n\n self.kh, self.kw = _pair(ksize)\n self.sy, self.sx = _pair(stride)\n self.ph, self.pw = _pair(pad)\n\n self.cover_all = cover_all\n self.use_cudnn = use_cudnn\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n in_types[0].dtype.kind == 'f',\n in_types[0].ndim == 4\n )\n\n def forward_gpu(self, x):\n # Implementation using cudnn\n x = cuda.cupy.ascontiguousarray(x[0])\n n, c, h, w = x.shape\n y_h = conv.get_conv_outsize(\n h, self.kh, self.sy, self.ph, self.cover_all)\n assert y_h > 0, 'Height in the output should be positive.'\n y_w = conv.get_conv_outsize(\n w, self.kw, self.sx, self.pw, self.cover_all)\n assert y_w > 0, 'Width in the output should be positive.'\n y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x.dtype)\n\n handle = cudnn.get_handle()\n pool_desc = self.create_pool_desc()\n x_desc = cudnn.create_tensor_descriptor(x)\n y_desc = cudnn.create_tensor_descriptor(y)\n\n oz_dtype = 'd' if x.dtype == 'd' else 'f'\n one = numpy.array(1, dtype=oz_dtype).ctypes\n zero = numpy.array(0, dtype=oz_dtype).ctypes\n libcudnn.poolingForward(\n handle, pool_desc.value, one.data, x_desc.value,\n x.data.ptr, zero.data, y_desc.value, y.data.ptr)\n self.y = y\n\n return y,\n\n def backward_gpu(self, x, gy):\n # Implementation using cudnn\n x = cuda.cupy.ascontiguousarray(x[0])\n handle = cudnn.get_handle()\n pool_desc = self.create_pool_desc()\n\n gy = cuda.cupy.ascontiguousarray(gy[0])\n\n x_desc = cudnn.create_tensor_descriptor(x)\n y_desc = cudnn.create_tensor_descriptor(gy)\n\n oz_dtype = 'd' if x.dtype == 'd' else 'f'\n one = numpy.array(1, dtype=oz_dtype).ctypes\n zero = numpy.array(0, dtype=oz_dtype).ctypes\n gx = cuda.cupy.empty_like(x)\n libcudnn.poolingBackward(\n handle, pool_desc.value, one.data, y_desc.value,\n self.y.data.ptr, y_desc.value, gy.data.ptr, x_desc.value,\n x.data.ptr, zero.data, x_desc.value, gx.data.ptr)\n return gx,\n\n def create_pool_desc(self):\n raise NotImplementedError()\n"
] | [
[
"numpy.array"
]
] |
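Pooling2D in the row above sizes its output planes with conv.get_conv_outsize (note cover_all=True by default) before delegating the actual pooling to cuDNN. The helper below is a stand-alone restatement of that output-size arithmetic for one spatial dimension, not Chainer's own utility; pool_outsize and its argument names are illustrative.

def pool_outsize(size, k, s, p, cover_all=False):
    """Output length of one pooled dimension.

    size: input length, k: kernel size, s: stride, p: padding per side.
    cover_all=True rounds up so the last partial window is still emitted,
    matching the default of the class above.
    """
    if cover_all:
        return (size + 2 * p - k + s - 1) // s + 1
    return (size + 2 * p - k) // s + 1


# A 7-pixel row with a 2-wide kernel, stride 2, no padding:
assert pool_outsize(7, 2, 2, 0) == 3                   # floor mode
assert pool_outsize(7, 2, 2, 0, cover_all=True) == 4   # ceil mode

forward_gpu then asserts both computed sizes are positive before allocating the cuDNN output tensor, so a kernel larger than the padded input fails early rather than inside the library call.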
dhb2128/scanpy | [
"78649a991197af4685a8fe2f7a0d24064e3056bd"
] | [
"scanpy/preprocessing/_normalization.py"
] | [
"import numpy as np\nfrom scipy.sparse import issparse\nfrom sklearn.utils import sparsefuncs\nfrom .. import logging as logg\nfrom ..utils import doc_params\nfrom ._docs import doc_norm_descr, doc_quant_descr, doc_params_bulk, doc_norm_quant, doc_norm_return, doc_ex_quant, doc_ex_total\n\ndef _normalize_data(X, counts, after=None, copy=False):\n X = X.copy() if copy else X\n after = np.median(counts[counts>0]) if after is None else after\n counts += (counts == 0)\n counts /= after\n if issparse(X):\n X = sparsefuncs.inplace_row_scale(X, 1/counts)\n else:\n X /= counts[:, None]\n return X if copy else None\n\n@doc_params(quant_descr=doc_quant_descr, params_bulk=doc_params_bulk, norm_quant=doc_norm_quant,\n norm_return=doc_norm_return, ex_quant=doc_ex_quant)\ndef normalize_quantile(adata, target_sum=None, quantile=1, key_added=None,\n layers=None, layer_norm=None, inplace=True):\n \"\"\"\\\n {quant_descr}\n\n {params_bulk}\n {norm_quant}\n\n {norm_return}\n\n {ex_quant}\n \"\"\"\n if quantile < 0 or quantile > 1:\n raise ValueError('Choose quantile between 0 and 1.')\n\n X = adata.X\n gene_subset = None\n if not inplace:\n # not recarray because need to support sparse\n dat = {}\n\n if quantile < 1:\n logg.msg('normalizing by count per cell for \\\n genes that make up less than quantile * total count per cell', r=True)\n X = adata.X\n\n counts_per_cell = X.sum(1)\n counts_per_cell = np.ravel(counts_per_cell)\n\n gene_subset = (X>counts_per_cell[:, None]*quantile).sum(0)\n gene_subset = (np.ravel(gene_subset) == 0)\n else:\n logg.msg('normalizing by total count per cell', r=True)\n\n X = X if gene_subset is None else adata[:, gene_subset].X\n counts_per_cell = X.sum(1)\n # get rid of adata view\n counts_per_cell = np.ravel(counts_per_cell).copy()\n del X\n del gene_subset\n\n if key_added is not None:\n adata.obs[key_added] = counts_per_cell\n\n cell_subset = counts_per_cell>0\n if not np.all(cell_subset):\n logg.warn('Some cells have total count of genes equal to zero')\n\n if layer_norm == 'after':\n after = target_sum\n elif layer_norm == 'X':\n after = np.median(counts_per_cell[cell_subset])\n elif layer_norm is None:\n after = None\n else:\n raise ValueError('layer_norm should be \"after\", \"X\" or None')\n del cell_subset\n\n if inplace:\n if hasattr(adata.X, '__itruediv__'):\n _normalize_data(adata.X, counts_per_cell, target_sum)\n else:\n adata.X = _normalize_data(adata.X, counts_per_cell, target_sum, copy=True)\n else:\n dat['X'] = _normalize_data(adata.X, counts_per_cell, target_sum, copy=True)\n\n layers = adata.layers.keys() if layers == 'all' else layers\n if layers is not None:\n for layer in layers:\n L = adata.layers[layer]\n counts = np.ravel(L.sum(1))\n if inplace:\n if hasattr(L, '__itruediv__'):\n _normalize_data(L, counts, after)\n else:\n adata.layers[layer] = _normalize_data(L, counts, after, copy=True)\n else:\n dat[layer] = _normalize_data(L, counts, after, copy=True)\n\n logg.msg(' finished', t=True, end=': ')\n logg.msg('normalized adata.X')\n if key_added is not None:\n logg.msg('and added \\'{}\\', counts per cell before normalization (adata.obs)'\n .format(key_added))\n\n return dat if not inplace else None\n\n@doc_params(norm_descr=doc_norm_descr, params_bulk=doc_params_bulk, norm_return=doc_norm_return, ex_total=doc_ex_total)\ndef normalize_total(adata, target_sum=None, key_added=None, layers=None, layer_norm=None, inplace=True):\n \"\"\"\\\n {norm_descr}\n\n {params_bulk}\n\n {norm_return}\n\n {ex_total}\n \"\"\"\n return normalize_quantile(adata=adata, 
target_sum=target_sum,\n key_added=key_added, layers=layers,\n layer_norm=layer_norm, quantile=1, inplace=inplace)\n"
] | [
[
"sklearn.utils.sparsefuncs.inplace_row_scale",
"scipy.sparse.issparse",
"numpy.median",
"numpy.ravel",
"numpy.all"
]
] |
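normalize_quantile in the row above rescales each cell (row) of the count matrix so its total matches target_sum, falling back to the median of the non-zero per-cell totals, and, when quantile < 1, leaves high-fraction genes out of the per-cell totals used for scaling. The snippet below sketches only the dense core of that scaling in NumPy; the sparse in-place path, the quantile gene mask and the per-layer handling are left out, and normalize_per_cell is an illustrative name.

import numpy as np


def normalize_per_cell(X, target_sum=None):
    """Scale each row of a dense cells x genes count matrix to target_sum.

    target_sum=None mimics the default above: the median of the non-zero
    per-cell totals.
    """
    X = np.asarray(X, dtype=float)
    counts = X.sum(axis=1)
    if target_sum is None:
        target_sum = np.median(counts[counts > 0])
    counts[counts == 0] = 1.0        # keep empty cells from dividing by zero
    return X * (target_sum / counts)[:, None]


X = np.array([[1, 3, 0],
              [2, 2, 2],
              [0, 0, 0]])
print(normalize_per_cell(X))         # each non-empty row now sums to 5.0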
vahndi/probability | [
"6ddf88e6f3d947c96b879e426030f60eb5cb2d59"
] | [
"tests/test_calculations/base_test.py"
] | [
"from unittest.case import TestCase\n\nfrom pandas import Series, DataFrame\n\nfrom probability.distributions import Beta, Dirichlet\n\n\nclass BaseTest(TestCase):\n\n def setUp(self) -> None:\n\n self.b1 = Beta(700, 300)\n self.b2 = Beta(600, 400)\n self.b3 = Beta(500, 500)\n self.d1 = Dirichlet([500, 300, 200])\n self.d2 = Dirichlet({'x': 100, 'y': 200, 'z': 300})\n self.b1__mul__b2 = self.b1 * self.b2\n self.b3__mul__b1__mul__b2 = self.b3 * self.b1__mul__b2\n self.b1__mul__comp__b1 = self.b1 * (1 - self.b1)\n self.b_series = Series({\n 'b1': self.b1, 'b2': self.b2, 'b3': self.b3\n })\n self.b_frame = DataFrame({\n 'c1': {'r1': self.b1, 'r2': self.b2},\n 'c2': {'r1': self.b2, 'r2': self.b3}\n })\n self.float_series = Series({'$100': 0.8, '$200': 0.6})\n"
] | [
[
"pandas.Series",
"pandas.DataFrame"
]
] |
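The fixture in the row above parameterises Beta distributions such as Beta(700, 300) and products like b1 * b2 for the library's calculation tests. As a quick sanity check on what those parameters imply, the lines below use scipy.stats (an assumption; the fixture itself never imports scipy) to show that Beta(700, 300) is tightly concentrated around 0.7.

from scipy import stats

b1 = stats.beta(700, 300)                # same parameters as the b1 fixture
print(round(b1.mean(), 3))               # 0.7 == 700 / (700 + 300)
print(tuple(round(q, 2) for q in b1.interval(0.95)))   # about (0.67, 0.73)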
trainorpj/probability | [
"944272707d352b12b91c419082fb3ec34b83b494"
] | [
"tensorflow_probability/python/bijectors/affine_scalar_test.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Affine Scalar Tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow_probability.python.internal import test_case\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass _AffineScalarBijectorTest(object):\n \"\"\"Tests correctness of the Y = scale @ x + shift transformation.\"\"\"\n\n def testProperties(self):\n # scale corresponds to 1.\n bijector = tfb.AffineScalar(shift=-1.)\n self.assertStartsWith(bijector.name, \"affine_scalar\")\n\n def testTinyScale(self):\n log_scale = tf.cast(-2000., self.dtype)\n x = tf.cast(1., self.dtype)\n scale = tf.exp(log_scale)\n fldj_linear = tfb.AffineScalar(scale=scale).forward_log_det_jacobian(\n x, event_ndims=0)\n fldj_log = tfb.AffineScalar(log_scale=log_scale).forward_log_det_jacobian(\n x, event_ndims=0)\n fldj_linear_, fldj_log_ = self.evaluate([fldj_linear, fldj_log])\n # Using the linear scale will saturate to 0, and produce bad log-det\n # Jacobians.\n self.assertNotEqual(fldj_linear_, fldj_log_)\n self.assertAllClose(-2000., fldj_log_)\n\n def testNoBatchScalar(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value, dtype=self.dtype)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n bijector = tfb.AffineScalar(shift=self.dtype(-1.), scale=self.dtype(2.))\n x = self.dtype([1., 2, 3]) # Three scalar samples (no batches).\n self.assertAllClose([1., 3, 5], run(bijector.forward, x))\n self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))\n self.assertAllClose(\n -np.log(2.),\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def testOneBatchScalarViaIdentityUserProvidesShiftOnly(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value, dtype=self.dtype)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n # Batched shift\n bijector = tfb.AffineScalar(shift=self.dtype([1.]))\n x = self.dtype([1.]) # One sample from one batches.\n self.assertAllClose([2.], run(bijector.forward, x))\n self.assertAllClose([0.], run(bijector.inverse, x))\n self.assertAllClose(\n 0.,\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def 
testOneBatchScalarViaIdentityUserProvidesScaleOnly(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n # Batched scale\n bijector = tfb.AffineScalar(scale=self.dtype([2.]))\n x = self.dtype([1.]) # One sample from one batches.\n self.assertAllClose([2.], run(bijector.forward, x))\n self.assertAllClose([0.5], run(bijector.inverse, x))\n self.assertAllClose(\n [np.log(0.5)],\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def testTwoBatchScalarIdentityViaIdentity(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value, dtype=self.dtype)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n # Batch of 2 shifts\n bijector = tfb.AffineScalar(shift=self.dtype([1., -1]))\n x = self.dtype([1., 1]) # One sample from each of two batches.\n self.assertAllClose([2., 0], run(bijector.forward, x))\n self.assertAllClose([0., 2], run(bijector.inverse, x))\n self.assertAllClose(\n 0.,\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def testTwoBatchScalarIdentityViaScale(self):\n def static_run(fun, x, **kwargs):\n return self.evaluate(fun(x, **kwargs))\n\n def dynamic_run(fun, x_value, **kwargs):\n x_value = np.array(x_value, dtype=self.dtype)\n x = tf1.placeholder_with_default(x_value, shape=None)\n return self.evaluate(fun(x, **kwargs))\n\n for run in (static_run, dynamic_run):\n # Batch of 2 scales and 2 shifts\n bijector = tfb.AffineScalar(\n shift=self.dtype([1., -1]),\n scale=self.dtype([2., 1]))\n x = self.dtype([1., 1]) # One sample from each of two batches.\n self.assertAllClose([3., 0], run(bijector.forward, x))\n self.assertAllClose([0., 2], run(bijector.inverse, x))\n self.assertAllClose(\n [-np.log(2), 0.],\n run(bijector.inverse_log_det_jacobian, x, event_ndims=0))\n\n def testScalarCongruency(self):\n bijector = tfb.AffineScalar(shift=self.dtype(3.6), scale=self.dtype(0.42))\n bijector_test_util.assert_scalar_congruency(\n bijector,\n lower_x=self.dtype(-2.),\n upper_x=self.dtype(2.),\n eval_func=self.evaluate)\n\n def testScalarCongruencyLogScale(self):\n bijector = tfb.AffineScalar(\n shift=self.dtype(3.6), log_scale=self.dtype(np.log(0.42)))\n bijector_test_util.assert_scalar_congruency(\n bijector,\n lower_x=self.dtype(-2.),\n upper_x=self.dtype(2.),\n eval_func=self.evaluate)\n\n def testVariableGradients(self):\n b = tfb.AffineScalar(\n shift=tf.Variable(1.),\n scale=tf.Variable(2.))\n\n with tf.GradientTape() as tape:\n y = b.forward(.1)\n self.assertAllNotNone(tape.gradient(y, [b.shift, b.scale]))\n\n def testImmutableScaleAssertion(self):\n with self.assertRaisesOpError(\"Argument `scale` must be non-zero\"):\n b = tfb.AffineScalar(scale=0., validate_args=True)\n _ = self.evaluate(b.forward(1.))\n\n def testVariableScaleAssertion(self):\n v = tf.Variable(0.)\n self.evaluate(v.initializer)\n with self.assertRaisesOpError(\"Argument `scale` must be non-zero\"):\n b = tfb.AffineScalar(scale=v, validate_args=True)\n _ = self.evaluate(b.forward(1.))\n\n def testModifiedVariableScaleAssertion(self):\n v = tf.Variable(1.)\n self.evaluate(v.initializer)\n b = tfb.AffineScalar(scale=v, validate_args=True)\n with 
self.assertRaisesOpError(\"Argument `scale` must be non-zero\"):\n with tf.control_dependencies([v.assign(0.)]):\n _ = self.evaluate(b.forward(1.))\n\n\nclass AffineScalarBijectorTestFloat32(test_case.TestCase,\n _AffineScalarBijectorTest):\n dtype = np.float32\n\n\nclass AffineScalarBijectorTestFloat64(test_case.TestCase,\n _AffineScalarBijectorTest):\n dtype = np.float64\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.compat.v2.exp",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.GradientTape",
"numpy.log",
"numpy.array",
"tensorflow.compat.v1.placeholder_with_default",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.Variable"
]
] |
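A minimal usage sketch of the bijector exercised by the test row above (illustrative only, not part of the dataset row; it assumes a TensorFlow Probability release that still ships `tfb.AffineScalar`, which newer versions replace with `tfb.Shift`/`tfb.Scale`). The expected values mirror the assertions in `testNoBatchScalar`:

```python
import tensorflow_probability as tfp

tfb = tfp.bijectors

# Y = scale * X + shift with shift=-1, scale=2, as in testNoBatchScalar.
bijector = tfb.AffineScalar(shift=-1., scale=2.)
print(bijector.forward([1., 2., 3.]))    # expected: [1., 3., 5.]
print(bijector.inverse([1., 2., 3.]))    # expected: [1., 1.5, 2.]
print(bijector.inverse_log_det_jacobian(1., event_ndims=0))  # expected: -log(2)
```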
yuanqingsunny/recommenders-addons | [
"7fe0e213ff59fe3528e7c1877a3885cc7ca355d4"
] | [
"tensorflow_recommenders_addons/dynamic_embedding/python/ops/dynamic_feature_filter.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Filter of variables\"\"\"\n\nfrom tensorflow_recommenders_addons import dynamic_embedding\nfrom tensorflow_recommenders_addons.dynamic_embedding.python.ops import dynamic_embedding_variable\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_logging_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"dynamic_embedding.FilterPolicy\")\nclass FilterPolicy(object):\n \"\"\"\n FilterPolicy records the status of variable, while provides\n interfaces for continuously updating with the status with training\n progress, and filtering the eligible sparse tensor keys for training.\n The FilterPolicy is an abstract class, which can be inherited\n for customization.\n\n FilterPolicy holds `create_status`, `update`, `filter` and 'restrict' methods:\n create_status: creating the representation of the status.\n update: updating the status in iteration. The update operation usually\n runs before the training operation.\n filter: filtering sparse tensor keys for training according to\n the status. It's used to prohibit some feature keys from training.\n restrict: restricting the status table size to prevent the over growth of\n memory usage.\n \"\"\"\n\n def __init__(self, var):\n \"\"\"\n Construct the FilterPolicy from variable.\n\n Args:\n var: dynamic_ebmedding.Variable.\n \"\"\"\n if not isinstance(var, dynamic_embedding.Variable):\n raise TypeError(\"parameter var type should be\" \\\n \"dynamic_embedding.Variable.\")\n\n self.var = var\n self.threshold = 0\n self.create_status()\n\n def create_status(self, **kwargs):\n \"\"\"\n Create status for recording the variable.\n \"\"\"\n raise NotImplementedError\n\n def update(self, **kwargs):\n \"\"\"\n Update the status. 
The returned update operation is\n usually used with training operation, to keep the status\n following change of variables.\n\n Returns:\n An operation to update the status.\n \"\"\"\n raise NotImplementedError\n\n def filter(self, **kwargs):\n \"\"\"\n filter the feature keys following the direction by records\n of status.\n\n Returns:\n A list of feature keys that filter for training.\n \"\"\"\n raise NotImplementedError\n\n def restrict(self, **kwargs):\n \"\"\"\n Restrict the status table size to prevent out-of-memory\n when status table size is growing.\n\n Returns:\n An operation to restrict the status.\n \"\"\"\n raise NotImplementedError\n\n\n@tf_export(\"dynamic_embedding.FrequencyFilterPolicy\")\nclass FrequencyFilterPolicy(FilterPolicy):\n \"\"\"\n A status inherts from FilterPolicy, providing\n updating and filtering for variable by frequency rule.\n\n When call filter method, the class will filter values on\n ids by following eliminated-below-threshold rule for every ids\n in record. And when call update method, the record of every\n ids will be increased by 1.\n \"\"\"\n\n def __init__(self, var, **kwargs):\n default_count = kwargs.get('default_value', 0)\n self.default_count = constant_op.constant([default_count, 0], dtypes.int32)\n super(FrequencyFilterPolicy, self).__init__(var)\n\n def create_status(self, **kwargs):\n \"\"\"\n Create relative frequency status variables.\n \"\"\"\n scope = variable_scope.get_variable_scope()\n if scope.name:\n scope_name = scope.name + '/frequency_status_for_filter'\n else:\n scope_name = 'frequency_status_for_filter'\n\n with ops.name_scope(scope_name, \"frequency_status_for_filter\",\n []) as unique_scope:\n full_name = unique_scope + '/' + self.var.name\n self.freq_var = dynamic_embedding.get_variable(\n key_dtype=self.var.key_dtype,\n value_dtype=dtypes.int32,\n dim=2,\n name=full_name,\n devices=self.var.devices,\n partitioner=self.var.partition_fn,\n initializer=self.default_count,\n trainable=False,\n init_size=self.var.init_size,\n checkpoint=self.var.checkpoint,\n # checkpoint_path=self.var.checkpoint_path\n )\n\n def update(self, input_tensor=None, **kwargs):\n \"\"\"\n Update the frequency status. 
The corresponding frequency\n records will be increased by 1.\n\n Args:\n **kwargs: keyword arguments, including\n input_tensor: SparseTensor or dense tensor.\n Feature keys need to count.\n\n Returns:\n An operation for updating the frequency status.\n \"\"\"\n maxint32 = 2147483647\n update_ops = []\n\n if input_tensor is None:\n raise KeyError(\"update method expects parameter `input_tensor`.\")\n elif isinstance(input_tensor, ops.Tensor):\n values = input_tensor\n elif isinstance(input_tensor, sparse_tensor.SparseTensor):\n values = input_tensor.values\n else:\n raise TypeError(\"input_tensor must be \" \\\n \"either a SparseTensor or dense Tensor.\")\n\n values, _ = array_ops.unique(values)\n status_values = array_ops.reshape(values, (-1,))\n partition_index = \\\n self.var.partition_fn(status_values, self.var.shard_num)\n partitioned_values_list, partitioned_indices_list = \\\n dynamic_embedding_variable.make_partition(status_values,\n partition_index,\n self.var.shard_num)\n\n for idx, dev in enumerate(self.freq_var.devices):\n with ops.device(dev):\n feature_status = \\\n self.freq_var.tables[idx].lookup(\n partitioned_values_list[idx],\n dynamic_default_values=self.default_count,\n )\n feature_counts = array_ops.slice(feature_status, [0, 0], [-1, 1])\n feature_tstps = array_ops.slice(feature_status, [0, 1], [-1, 1])\n feature_tstps = array_ops.tile(\n array_ops.reshape(gen_logging_ops.timestamp(), [1]),\n array_ops.reshape(array_ops.size(feature_counts), (-1,)),\n )\n feature_tstps = math_ops.cast(feature_tstps, dtypes.int32)\n feature_tstps = array_ops.reshape(feature_tstps, (-1, 1))\n\n condition = math_ops.less(feature_counts, maxint32)\n feature_counts = array_ops.where(condition, feature_counts + 1,\n feature_counts)\n\n feature_status = array_ops.concat([feature_counts, feature_tstps], 1)\n\n mht_update = \\\n self.freq_var.tables[idx].insert(\n partitioned_values_list[idx],\n feature_status,\n )\n update_ops.append(mht_update)\n\n return control_flow_ops.group(update_ops)\n\n def filter(self, input_tensor=None, **kwargs):\n \"\"\"\n Filter feature keys below the threshold before training.\n Prevent unpopular feature keys from affecting training.\n\n Args:\n **kwargs: keyword arguments, including\n input_tensor: SparseTensor or DenseTensor.\n Feature keys need to filter.\n frequency_threshold: int. 
Filtering feature keys whose frequency values\n are less than the threshold.\n\n Returns:\n Tensor that are filtered for training.\n \"\"\"\n\n if input_tensor is None:\n raise KeyError(\"filter method expects parameter `input_tensor`.\")\n elif isinstance(input_tensor, ops.Tensor):\n input_type = \"DenseTensor\"\n values = input_tensor\n elif isinstance(input_tensor, sparse_tensor.SparseTensor):\n input_type = \"SparseTensor\"\n values = input_tensor.values\n indices = input_tensor.indices\n else:\n raise TypeError(\"input_tensor must be \" \\\n \"either a SparseTensor or dense Tensor.\")\n\n if 'frequency_threshold' in kwargs:\n frequency_threshold = kwargs['frequency_threshold']\n else:\n raise KeyError(\"filter method expects parameter `frequency_threshold`.\")\n if not isinstance(frequency_threshold, int):\n raise TypeError(\"frequency_threshold must be an integer.\")\n if frequency_threshold < 0:\n raise ValueError(\"frequency_threshold must be greater or equal to zero.\")\n\n unique_values, value_idx = array_ops.unique(values)\n status_values = array_ops.reshape(unique_values, (-1,))\n partition_index = \\\n self.var.partition_fn(status_values, self.var.shard_num)\n partitioned_values_list, partitioned_indices_list = \\\n dynamic_embedding_variable.make_partition(status_values,\n partition_index,\n self.var.shard_num)\n\n mask = []\n for idx, dev in enumerate(self.freq_var.devices):\n with ops.device(dev):\n feature_status = \\\n self.freq_var.tables[idx].lookup(\n partitioned_values_list[idx],\n dynamic_default_values=self.default_count,\n )\n\n feature_counts = array_ops.slice(feature_status, [0, 0], [-1, 1])\n sub_fv = array_ops.reshape(feature_counts, (-1,))\n partitioned_mask = math_ops.greater_equal(sub_fv, frequency_threshold)\n mask.append(partitioned_mask)\n\n total_mask = dynamic_embedding_variable._stitch(mask,\n partitioned_indices_list)\n total_mask = array_ops.gather(total_mask, value_idx)\n total_mask.set_shape([None])\n filter_values = array_ops.boolean_mask(values, total_mask)\n if input_type == \"DenseTensor\":\n filter_tensor = filter_values\n elif input_type == \"SparseTensor\":\n filter_indices = array_ops.boolean_mask(indices, total_mask)\n filter_tensor = sparse_tensor.SparseTensor(\n indices=filter_indices,\n values=filter_values,\n dense_shape=input_tensor.dense_shape)\n\n return filter_tensor\n\n def restrict(self, **kwargs):\n \"\"\"\n Restrict the status table size, eliminate the oldest\n feature keys, if the size of variable grow too large for\n threshold.\n\n Args:\n **kwargs: Keyword arguments, including\n threshold: int. The threshold for feature number\n in variable. 
When restrict method is called, the table\n size will be reduced to 'threshold'.\n factor: int,float,tf.int32,tf.int64,tf.float32.\n If the table size is greater than threshold * factor,\n restricting wiil be triggered.\n\n Returns:\n An operation to restrict the size of variable.\n \"\"\"\n try:\n self.threshold = kwargs['threshold']\n except:\n raise KeyError(\"restrict method expects parameter `threshold`.\")\n if not isinstance(self.threshold, int):\n raise TypeError(\"threshold must be an integer.\")\n if self.threshold < 0:\n raise ValueError(\"threshold must be greater or equal to zero.\")\n\n factor = kwargs.get('factor', 1.0)\n if isinstance(factor, ops.Tensor):\n if factor.dtype not in (dtypes.int32, dtypes.int64, dtypes.float32):\n raise TypeError(\n 'factor expects int, float, tf.int32, tf.int64, or tf.float32')\n factor = math_ops.cast(factor, dtype=dtypes.float32)\n if not isinstance(factor, (int, float)):\n raise TypeError(\n 'factor expects int, float, tf.int32, tf.int64, or tf.float32')\n\n cond_size = math_ops.cast(self.threshold, dtype=dtypes.float32) * factor\n cond_size = math_ops.cast(cond_size, dtype=dtypes.int64)\n condition = math_ops.greater(self.freq_var.size(), cond_size)\n restrict_status_ops = list()\n no_ops = list()\n\n for idx, dev in enumerate(self.freq_var.devices):\n with ops.device(dev):\n sub_tk, sub_tv = self.freq_var.tables[idx].export()\n sharded_threshold = int(self.threshold / self.freq_var.shard_num)\n\n sub_tv = array_ops.slice(sub_tv, [0, 1], [-1, 1])\n sub_tv = array_ops.reshape(sub_tv, (-1,))\n first_dim = array_ops.shape(sub_tv)[0]\n\n k_on_top = math_ops.cast(first_dim - sharded_threshold,\n dtype=dtypes.int32)\n k_on_top = math_ops.maximum(k_on_top, 0)\n _, removed_keys_ids = nn_ops.top_k(-sub_tv, k_on_top, sorted=False)\n removed_keys = array_ops.gather(sub_tk, removed_keys_ids)\n restrict_status_ops.append(\n self.freq_var.tables[idx].remove(removed_keys))\n no_ops.append(control_flow_ops.no_op())\n restrict_op = control_flow_ops.cond(condition, lambda: restrict_status_ops,\n lambda: no_ops)\n\n return restrict_op\n\n\n@tf_export(\"dynamic_embedding.ProbabilityFilterPolicy\")\nclass ProbabilityFilterPolicy(FilterPolicy):\n \"\"\"\n A status inherts from FilterPolicy, providing\n updating and filtering for variable by probability rule.\n\n When call filter method, the class will filter values on\n ids by following probability rule for new ids (no recorded\n in the table). 
And when call update method, new ids will\n be stored in the table.\n \"\"\"\n\n def __init__(self, var, **kwargs):\n self.default_tstp = constant_op.constant(0, dtypes.int32)\n super(ProbabilityFilterPolicy, self).__init__(var)\n\n def create_status(self, **kwargs):\n \"\"\"\n Create relative probability status variables.\n \"\"\"\n scope = variable_scope.get_variable_scope()\n if scope.name:\n scope_name = scope.name + '/probability_status_for_filter'\n else:\n scope_name = 'probability_status_for_filter'\n\n with ops.name_scope(scope_name, \"probability_status_for_filter\",\n []) as unique_scope:\n full_name = unique_scope + '/' + self.var.name\n self.tstp_var = dynamic_embedding.get_variable(\n key_dtype=self.var.key_dtype,\n value_dtype=dtypes.int32,\n dim=1,\n name=full_name,\n devices=self.var.devices,\n partitioner=self.var.partition_fn,\n initializer=self.default_tstp,\n trainable=False,\n init_size=self.var.init_size,\n checkpoint=self.var.checkpoint,\n # checkpoint_path=self.var.checkpoint_path\n )\n\n def update(self, input_tensor=None, **kwargs):\n \"\"\"\n Update the probability status table. The filter ids will be\n stored in the table and record timestamp.\n\n Args:\n **kwargs: keyword arguments, including\n input_tensor: SparseTensor or dense tensor.\n Feature keys need to count.\n\n Returns:\n An operation for updating the frequency status.\n \"\"\"\n update_ops = []\n\n if input_tensor is None:\n raise KeyError(\"update method expects parameter `input_tensor`.\")\n elif isinstance(input_tensor, ops.Tensor):\n values = input_tensor\n elif isinstance(input_tensor, sparse_tensor.SparseTensor):\n values = input_tensor.values\n else:\n raise TypeError(\"input_tensor must be \" \\\n \"either a SparseTensor or dense Tensor.\")\n\n values, _ = array_ops.unique(values)\n status_values = array_ops.reshape(values, (-1,))\n partition_index = \\\n self.var.partition_fn(status_values, self.var.shard_num)\n partitioned_values_list, partitioned_indices_list = \\\n dynamic_embedding_variable.make_partition(status_values,\n partition_index,\n self.var.shard_num)\n\n for idx, dev in enumerate(self.tstp_var.devices):\n with ops.device(dev):\n value_size = array_ops.size(partitioned_values_list[idx])\n feature_tstps = array_ops.tile(\n array_ops.reshape(gen_logging_ops.timestamp(), [1]),\n array_ops.reshape(value_size, (-1,)),\n )\n feature_tstps = math_ops.cast(feature_tstps, dtypes.int32)\n feature_status = array_ops.reshape(feature_tstps, (-1, 1))\n\n mht_update = \\\n self.tstp_var.tables[idx].insert(\n partitioned_values_list[idx],\n feature_status,\n )\n update_ops.append(mht_update)\n\n return control_flow_ops.group(update_ops)\n\n def filter(self, input_tensor=None, **kwargs):\n \"\"\"\n Filter new feature keys by probability before training.\n Prevent unpopular features from affecting training.\n\n Args:\n **kwargs: keyword arguments, including\n input_tensor: SparseTensor or DenseTensor.\n Feature keys need to filter.\n probability: float. 
Filtering new feature keys by\n probability, and permitting old keys.\n\n Returns:\n Tensor that are filtered for training.\n \"\"\"\n\n if input_tensor is None:\n raise KeyError(\"filter method expects parameter `input_tensor`.\")\n elif isinstance(input_tensor, ops.Tensor):\n input_type = \"DenseTensor\"\n values = input_tensor\n elif isinstance(input_tensor, sparse_tensor.SparseTensor):\n input_type = \"SparseTensor\"\n values = input_tensor.values\n indices = input_tensor.indices\n else:\n raise TypeError(\"input_tensor must be \" \\\n \"either a SparseTensor or dense Tensor.\")\n\n if 'probability' in kwargs:\n probability = kwargs['probability']\n else:\n raise KeyError(\"filter method expects parameter `probability`.\")\n if not isinstance(probability, float):\n raise TypeError(\"probability must be a float.\")\n if probability < 0.0 or probability > 1.0:\n raise ValueError(\"probability value must be in [0.0, 1.0].\")\n\n unique_values, value_idx = array_ops.unique(values)\n status_values = array_ops.reshape(unique_values, (-1,))\n partition_index = \\\n self.var.partition_fn(status_values, self.var.shard_num)\n partitioned_values_list, partitioned_indices_list = \\\n dynamic_embedding_variable.make_partition(status_values,\n partition_index,\n self.var.shard_num)\n\n fv_list = []\n for idx, dev in enumerate(self.tstp_var.devices):\n with ops.device(dev):\n feature_status = \\\n self.tstp_var.tables[idx].lookup(\n partitioned_values_list[idx],\n dynamic_default_values=self.default_tstp,\n )\n\n sub_fv = array_ops.reshape(feature_status, (-1,))\n fv_list.append(sub_fv)\n\n total_fv = dynamic_embedding_variable._stitch(fv_list,\n partitioned_indices_list)\n total_fv = array_ops.gather(total_fv, value_idx)\n\n value_size = array_ops.size(values)\n old_prob = array_ops.ones(value_size)\n new_prob = array_ops.fill([value_size], probability)\n random_prob = random_ops.random_uniform([value_size], maxval=1.0)\n\n condition = math_ops.greater(total_fv, self.default_tstp)\n total_prob = array_ops.where(condition, old_prob, new_prob)\n\n total_mask = math_ops.greater_equal(total_prob, random_prob)\n filter_values = array_ops.boolean_mask(values, total_mask)\n\n if input_type == \"DenseTensor\":\n filter_tensor = filter_values\n elif input_type == \"SparseTensor\":\n filter_indices = array_ops.boolean_mask(indices, total_mask)\n filter_tensor = sparse_tensor.SparseTensor(\n indices=filter_indices,\n values=filter_values,\n dense_shape=input_tensor.dense_shape)\n\n return filter_tensor\n\n def restrict(self, **kwargs):\n \"\"\"\n Restrict the status table size, eliminate the oldest\n feature keys, if the size of variable grow too large for\n threshold.\n\n Args:\n **kwargs: Keyword arguments, including\n threshold: int. The threshold for feature number\n in variable. 
When restrict method is called, the table\n size will be reduced to 'threshold'.\n factor: int,float,tf.int32,tf.int64,tf.float32.\n If the table size is greater than threshold * factor,\n restricting wiil be triggered.\n\n Returns:\n An operation to restrict the size of variable.\n \"\"\"\n try:\n self.threshold = kwargs['threshold']\n except:\n raise KeyError(\"restrict method expects parameter `threshold`.\")\n if not isinstance(self.threshold, int):\n raise TypeError(\"threshold must be an integer.\")\n if self.threshold < 0:\n raise ValueError(\"threshold must be greater or equal to zero.\")\n\n factor = kwargs.get('factor', 1.0)\n if isinstance(factor, ops.Tensor):\n if factor.dtype not in (dtypes.int32, dtypes.int64, dtypes.float32):\n raise TypeError(\n 'factor expects int, float, tf.int32, tf.int64, or tf.float32')\n factor = math_ops.cast(factor, dtype=dtypes.float32)\n if not isinstance(factor, (int, float)):\n raise TypeError(\n 'factor expects int, float, tf.int32, tf.int64, or tf.float32')\n\n cond_size = math_ops.cast(self.threshold, dtype=dtypes.float32) * factor\n cond_size = math_ops.cast(cond_size, dtype=dtypes.int64)\n condition = math_ops.greater(self.tstp_var.size(), cond_size)\n restrict_status_ops = list()\n no_ops = list()\n\n for idx, dev in enumerate(self.tstp_var.devices):\n with ops.device(dev):\n sub_tk, sub_tv = self.tstp_var.tables[idx].export()\n sharded_threshold = int(self.threshold / self.tstp_var.shard_num)\n\n sub_tv = array_ops.reshape(sub_tv, (-1,))\n first_dim = array_ops.shape(sub_tv)[0]\n\n k_on_top = math_ops.cast(first_dim - sharded_threshold,\n dtype=dtypes.int32)\n k_on_top = math_ops.maximum(k_on_top, 0)\n _, removed_keys_ids = nn_ops.top_k(-sub_tv, k_on_top, sorted=False)\n removed_keys = array_ops.gather(sub_tk, removed_keys_ids)\n restrict_status_ops.append(\n self.tstp_var.tables[idx].remove(removed_keys))\n no_ops.append(control_flow_ops.no_op())\n restrict_op = control_flow_ops.cond(condition, lambda: restrict_status_ops,\n lambda: no_ops)\n\n return restrict_op\n\n\n@tf_export(\"dynamic_embedding.FeatureFilter\")\nclass FeatureFilter(object):\n \"\"\"\n A feature_filter for constraining the variables sparse feature number,\n with keeping recording and eliminating the obsolete feature keys.\n Notice: FrequencyFilterPolicy running order: update->filter->train\n 1.update feature keys frequency\n 2.filter feature keys by frequency\n 3.train with filtering feature keys\n ProbabilityFilterPolicy running order: filter->update->train\n 1.filter feature keys by probability\n 2.update with filtering feature keys\n 3.trian with filtering feature keys\n # Example:\n\n ```python\n # Get a FeatureFilter.\n feature_filter = tf.dynamic_embedding.FeatureFilter(\n var_list=var_list,\n policy=FrequencyFilterPolicy,\n )\n\n # Call update to get an operation to update policy status,\n # record feature keys status.\n # There is no need to call update in inference.\n update_op = feature_filter.update(input_tensor_list=tensor_list)\n\n # Call filter to get qualified feature keys for training.\n # There is no need to call filter in inference.\n threshold = 10\n filter_tensor_list = feature_filter.filter(frequency_threshold=threshold,\n input_tensor_list=tensor_list)\n use_filter = mode != PREDICT and\n math_ops.equal(math_ops.mod(global_step, 100), 0)\n cur_tensor_list = tf.cond(use_filter,\n lambda:filter_tensor_list,\n lambda:tensor_list)\n \n # Call restrict to get an operation to restrict policy status,\n # limit the status table size.\n # There is no need 
to call restrict in inference.\n restrict_op = feature_filter.restrict(threshold=size)\n \n # Training with filtering keys\n # Call the minimize to the loss with optimizer.\n test_var, _ = tf.dynamic_embedding.embedding_lookup_sparse(\n embeddings,\n cur_tensor_list[idx],\n sp_weights=None,\n combiner=\"sum\",\n return_trainable=True)\n pred = math_ops.matmul(test_var, x)\n loss = pred * pred\n\n with tf.control_dependencies(update_op):\n train_op = opt.minimize(loss)\n\n with tf.Session() as sess:\n ...\n\n for step in range(num_iter):\n ...\n #Traning with filter keys\n #Need close 'update', 'filter' and 'restrict' in inference\n sess.run(train_op)\n if step % 1000 == 0:\n sess.run(restrict_op)\n ...\n\n ...\n ```\n\n \"\"\"\n\n def __init__(self,\n var_list=None,\n default_value_list=None,\n policy=FrequencyFilterPolicy):\n \"\"\"\n Creates a `FeatureFilter` object. Each variable in var_list\n of the same FeatureFilter instance share the same policy.\n\n Args:\n var_list: A list of `tf.dynamic_embedding.Variable` objects.\n default_value_list: A list of 'int' for default_value initializing.\n Some policies may use this for initializing status.\n policy: A FilterPolicy class to specify the rules for\n recoding, updating, and filtering the variable status in var_list.\n \"\"\"\n if not issubclass(policy, FilterPolicy):\n raise TypeError(\"policy must be subclass of\" \\\n \"FilterPolicy object.\")\n\n if var_list in [None, []]:\n raise ValueError(\"var_list must have a variable at least.\")\n if default_value_list is not None:\n if len(default_value_list) != len(var_list):\n raise ValueError(\"default_value_list length\" \\\n \"must be equal to var_list.\")\n else:\n default_value_list = len(var_list) * [0]\n\n self.var_list = var_list\n self.policy_list = []\n\n for idx, var in enumerate(self.var_list):\n self.policy_list.append(policy(var,\n default_value=default_value_list[idx]))\n\n def update(self, input_tensor_list=None, **kwargs):\n \"\"\"\n Update the status for every variable in var_list.\n Each variable processes different sparse tensor keys.\n\n Args:\n input_tensor_list: A list of `Tensor` objects.\n For each variable, a sparse tensor should be passed to\n the FilterPolicy to update method according to the index.\n **kwargs: Optional keyword arguments to be passed to\n the FilterPolicy update method.\n\n Returns:\n A list of operations to update the status for every variable.\n \"\"\"\n update_ops = []\n\n if input_tensor_list is None:\n raise KeyError(\"update method expects parameter\" \\\n \"`input_tensor_list`.\")\n elif not isinstance(input_tensor_list, list):\n raise TypeError(\"input_tensor_list must be a list.\")\n elif len(input_tensor_list) != len(self.var_list):\n raise ValueError(\"input_tensor_list length\" \\\n \"must be equal to var_list length.\")\n\n for idx, policy in enumerate(self.policy_list):\n update_ops.append(\n policy.update(input_tensor=input_tensor_list[idx], **kwargs))\n\n return update_ops\n\n def filter(self, input_tensor_list=None, **kwargs):\n \"\"\"\n Filter keys for every variable in var_list.\n Each variable processes different sparse tensor keys.\n\n Args:\n input_tensor_list: A list of `Tensor` objects.\n For each variable, a sparse tensor should be passed\n the FilterPolicy to update method according to the index.\n **kwargs: Optional keyword arguments to be passed to\n the FilterPolicy filter method.\n\n Returns:\n Tensor list that filter for training\n \"\"\"\n filter_list = []\n\n if input_tensor_list is None:\n raise KeyError(\"update 
method expects parameter\" \\\n \"`input_tensor_list`.\")\n elif not isinstance(input_tensor_list, list):\n raise TypeError(\"input_tensor_list must be a list.\")\n elif len(input_tensor_list) != len(self.var_list):\n raise ValueError(\"input_tensor_list length\" \\\n \"must be equal to var_list length.\")\n\n for idx, policy in enumerate(self.policy_list):\n filter_list.append(\n policy.filter(input_tensor=input_tensor_list[idx], **kwargs))\n\n return filter_list\n\n def restrict(self, **kwargs):\n \"\"\"\n Restrict the variables for every variable in var_list.\n\n Args:\n **kwargs: Optional keyword arguments passed to the\n method policy.restrict(**kwargs). For example,\n in the `restrict` method of `FilterFrequencyPolicy`\n has parameters `threshold` and `factor`.\n\n Returns:\n A list of operation to restrict variables.\n \"\"\"\n restrict_op = []\n for policy in self.policy_list:\n restrict_op.append(policy.restrict(**kwargs))\n return restrict_op\n"
] | [
[
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.gen_logging_ops.timestamp",
"tensorflow.python.ops.array_ops.unique",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.fill",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.nn_ops.top_k",
"tensorflow.python.ops.array_ops.boolean_mask",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.greater_equal",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.control_flow_ops.no_op"
]
] |
sstsai-adl/d2go | [
"6cff773797b14698043589afe57ea67cd76286f9"
] | [
"tests/modeling/test_optimizer.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\nimport random\nimport unittest\n\nimport d2go.runner.default_runner as default_runner\nimport torch\nfrom d2go.optimizer import (\n build_optimizer_mapper,\n)\nfrom d2go.utils.testing import helper\n\n\nclass TestArch(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 4, kernel_size=5, stride=1, padding=1)\n self.bn = torch.nn.BatchNorm2d(4)\n self.relu = torch.nn.ReLU(inplace=True)\n self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))\n self.linear = torch.nn.Linear(4, 1)\n\n def forward(self, x):\n ret = self.conv(x)\n ret = self.bn(ret)\n ret = self.relu(ret)\n ret = self.avgpool(ret)\n ret = torch.transpose(ret, 1, 3)\n ret = self.linear(ret)\n return ret\n\n\ndef _test_each_optimizer(cfg):\n print(\"Solver: \" + str(cfg.SOLVER.OPTIMIZER))\n\n model = TestArch()\n criterion = torch.nn.BCEWithLogitsLoss()\n optimizer = build_optimizer_mapper(cfg, model)\n optimizer.zero_grad()\n\n random.seed(20210912)\n for _ in range(2500):\n target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1))\n x = torch.add(torch.rand(1, 3, 16, 16), 2 * target)\n y_pred = model(x)\n loss = criterion(y_pred, target)\n loss.backward()\n optimizer.step()\n\n n_correct = 0\n for _ in range(200):\n target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1))\n x = torch.add(torch.rand(1, 3, 16, 16), 2 * target)\n y_pred = torch.round(torch.sigmoid(model(x)))\n if y_pred == target:\n n_correct += 1\n\n print(\"Correct prediction rate {0}.\".format(n_correct / 200))\n\n\ndef _check_param_group(self, group, num_params=None, **kwargs):\n if num_params is not None:\n self.assertEqual(len(group[\"params\"]), num_params)\n for key, val in kwargs.items():\n self.assertEqual(group[key], val)\n\n\ndef get_optimizer_cfg(\n lr,\n weight_decay=None,\n weight_decay_norm=None,\n weight_decay_bias=None,\n lr_mult=None,\n):\n runner = default_runner.Detectron2GoRunner()\n cfg = runner.get_default_cfg()\n if lr is not None:\n cfg.SOLVER.BASE_LR = lr\n if weight_decay is not None:\n cfg.SOLVER.WEIGHT_DECAY = weight_decay\n if weight_decay_norm is not None:\n cfg.SOLVER.WEIGHT_DECAY_NORM = weight_decay_norm\n if weight_decay_bias is not None:\n cfg.SOLVER.WEIGHT_DECAY_BIAS = weight_decay_bias\n if lr_mult is not None:\n cfg.SOLVER.LR_MULTIPLIER_OVERWRITE = [lr_mult]\n return cfg\n\n\nclass TestOptimizer(unittest.TestCase):\n def test_create_optimizer_default(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv(x))\n\n model = Model()\n cfg = get_optimizer_cfg(\n lr=1.0, weight_decay=1.0, weight_decay_norm=1.0, weight_decay_bias=1.0\n )\n optimizer = build_optimizer_mapper(cfg, model)\n self.assertEqual(len(optimizer.param_groups), 1)\n _check_param_group(\n self, optimizer.param_groups[0], num_params=4, weight_decay=1.0, lr=1.0\n )\n\n def test_create_optimizer_lr(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = torch.nn.Conv2d(3, 3, 1)\n self.conv2 = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv2(self.conv1(x)))\n\n model = Model()\n cfg = get_optimizer_cfg(\n lr=1.0,\n lr_mult={\"conv1\": 3.0, \"conv2\": 3.0},\n weight_decay=2.0,\n weight_decay_norm=2.0,\n )\n optimizer = build_optimizer_mapper(cfg, model)\n\n 
self.assertEqual(len(optimizer.param_groups), 2)\n\n _check_param_group(self, optimizer.param_groups[0], num_params=4, lr=3.0)\n _check_param_group(self, optimizer.param_groups[1], num_params=2, lr=1.0)\n\n def test_create_optimizer_weight_decay_norm(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv(x))\n\n model = Model()\n cfg = get_optimizer_cfg(\n lr=1.0, weight_decay=1.0, weight_decay_norm=2.0, weight_decay_bias=1.0\n )\n optimizer = build_optimizer_mapper(cfg, model)\n\n self.assertEqual(len(optimizer.param_groups), 2)\n\n _check_param_group(\n self, optimizer.param_groups[0], num_params=2, lr=1.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[1], num_params=2, lr=1.0, weight_decay=2.0\n )\n\n def test_all_optimizers(self):\n runner = default_runner.Detectron2GoRunner()\n cfg = runner.get_default_cfg()\n multipliers = [None, [{\"conv\": 0.1}]]\n\n for optimizer_name in [\"SGD\", \"AdamW\", \"SGD_MT\", \"AdamW_MT\"]:\n for mult in multipliers:\n cfg.SOLVER.BASE_LR = 0.01\n cfg.SOLVER.OPTIMIZER = optimizer_name\n cfg.SOLVER.MULTIPLIERS = mult\n _test_each_optimizer(cfg)\n\n def test_full_model_grad_clipping(self):\n runner = default_runner.Detectron2GoRunner()\n cfg = runner.get_default_cfg()\n\n for optimizer_name in [\"SGD\", \"AdamW\", \"SGD_MT\", \"AdamW_MT\"]:\n cfg.SOLVER.BASE_LR = 0.02\n cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 0.2\n cfg.SOLVER.CLIP_GRADIENTS.ENABLED = True\n cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = \"full_model\"\n cfg.SOLVER.OPTIMIZER = optimizer_name\n _test_each_optimizer(cfg)\n\n def test_create_optimizer_custom(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv(x))\n\n def get_optimizer_param_groups(self, _opts):\n ret = [\n {\n \"params\": [self.conv.weight],\n \"lr\": 10.0,\n }\n ]\n return ret\n\n model = Model()\n cfg = get_optimizer_cfg(lr=1.0, weight_decay=1.0, weight_decay_norm=0.0)\n optimizer = build_optimizer_mapper(cfg, model)\n\n self.assertEqual(len(optimizer.param_groups), 3)\n\n _check_param_group(\n self, optimizer.param_groups[0], num_params=1, lr=10.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[1], num_params=1, lr=1.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[2], num_params=2, lr=1.0, weight_decay=0.0\n )\n\n @helper.enable_ddp_env\n def test_create_optimizer_custom_ddp(self):\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 1)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(self.conv(x))\n\n def get_optimizer_param_groups(self, _opts):\n ret = [\n {\n \"params\": [self.conv.weight],\n \"lr\": 10.0,\n }\n ]\n return ret\n\n model = Model()\n model = torch.nn.parallel.DistributedDataParallel(model)\n cfg = get_optimizer_cfg(lr=1.0, weight_decay=1.0, weight_decay_norm=0.0)\n optimizer = build_optimizer_mapper(cfg, model)\n\n self.assertEqual(len(optimizer.param_groups), 3)\n\n _check_param_group(\n self, optimizer.param_groups[0], num_params=1, lr=10.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[1], num_params=1, lr=1.0, weight_decay=1.0\n )\n _check_param_group(\n self, optimizer.param_groups[2], num_params=2, lr=1.0, 
weight_decay=0.0\n )\n"
] | [
[
"torch.empty",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.rand",
"torch.nn.parallel.DistributedDataParallel",
"torch.nn.Conv2d",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.ReLU",
"torch.transpose"
]
] |
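For reference, a small sketch of the call pattern the optimizer tests above exercise, assuming `d2go` (and its detectron2 dependency) is installed; the config keys and the `build_optimizer_mapper` signature are taken directly from the test file:

```python
import torch
import d2go.runner.default_runner as default_runner
from d2go.optimizer import build_optimizer_mapper

cfg = default_runner.Detectron2GoRunner().get_default_cfg()
cfg.SOLVER.OPTIMIZER = "AdamW"  # one of the optimizers covered by test_all_optimizers
cfg.SOLVER.BASE_LR = 0.01

model = torch.nn.Conv2d(3, 4, kernel_size=5)  # any nn.Module; the tests use TestArch
optimizer = build_optimizer_mapper(cfg, model)
optimizer.zero_grad()
```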
oliviernocent/AEROLAB | [
"4fd1077c5799b6c6a6b885e7baccf16925d3a36e"
] | [
"scripts/python/exposure.py"
] | [
 "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nThis script computes the max mean mass concentration of several pollutants\nfrom a CSV file containing the following columns:\n - 'DateTime' : ISO 8601 date and time \n - 'Timestamp': seconds elapsed since 01/01/1970\n - 'PM10 (µg/m3)' (optional)\n - 'PM2.5 (µg/m3)' (optional)\n - 'PM1 (µg/m3)' (optional)\n - 'NO2 (µg/m3)' (optional)\n - 'CO (mg/m3)' (optional)\n - 'O3 (µg/m3)' (optional)\n\nUSAGE:\n\n./exposure.py [csv_file]\n\nIf no csv_file is provided, the script opens a file dialog box.\n'''\n\n__author__ = \"Olivier Nocent and Quentin Martinet\"\n__copyright__ = \"Copyright 2021, Université de Reims Champagne Ardenne\"\n__license__ = \"MIT\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Olivier Nocent\"\n__email__ = \"[email protected]\"\n__status__ = \"Experimental\"\n\nfrom os import path\nimport sys\nimport easygui\nimport glob\nimport pandas as pd\nfrom aerolab_utils import *\n\nif len(sys.argv) == 1:\n filename = easygui.fileopenbox(\n title='Exposure estimation', msg='Choose a CSV file', filetypes=[['*.csv', 'CSV files']])\nelse:\n filename = sys.argv[1]\n if not path.exists(filename):\n print('\\nERROR:', filename, 'does not exist!\\n\\n')\n exit(0)\n\ndf = pd.read_csv(filename)\n\npollutants = ['PM10 (µg/m3)', 'PM2.5 (µg/m3)', 'PM1 (µg/m3)', 'NO2 (µg/m3)',\n 'CO (mg/m3)', 'O3 (µg/m3)']\n\nthreshold = {\n 'PM10 (µg/m3)': 45,\n 'PM2.5 (µg/m3)': 15,\n 'PM1 (µg/m3)': 15,\n 'NO2 (µg/m3)': 25,\n 'CO (mg/m3)': 4,\n 'O3 (µg/m3)': 100\n}\n\nmax_value = {}\nmax_index = {}\nfor pollutant in pollutants:\n max_value[pollutant] = 0\n max_index[pollutant] = 0\n\ni, end = 0, df['Timestamp'].iloc[-1] - 24 * 3600\nwhile df.loc[i, 'Timestamp'] < end:\n start = df.loc[i, 'Timestamp']\n df_24h = df[(df['Timestamp'] >= start) & (\n df['Timestamp'] < start + 24 * 3600)]\n\n for pollutant in pollutants:\n if pollutant in df.columns:\n mean_value = df_24h[pollutant].median()\n if mean_value > max_value[pollutant]:\n max_value[pollutant] = mean_value\n max_index[pollutant] = i\n\n i += 1\n\nif 'O3 (µg/m3)' in df.columns:\n i, end = 0, df['Timestamp'].iloc[-1] - 8 * 3600\n while df.loc[i, 'Timestamp'] < end:\n start = df.loc[i, 'Timestamp']\n df_8h = df[(df['Timestamp'] >= start) & (\n df['Timestamp'] < start + 8 * 3600)]\n\n mean_value = df_8h['O3 (µg/m3)'].median()\n if mean_value > max_value['O3 (µg/m3)']:\n max_value['O3 (µg/m3)'] = mean_value\n max_index['O3 (µg/m3)'] = i\n\n i += 1\n\nprint('\\nMaximum mean mass concentration during 24h:\\n')\nif 'PM10 (µg/m3)' in df.columns:\n print(f\"PM10 : {max_value['PM10 (µg/m3)']: >6.2f} µg/m3\\t\\t(45 µg/m3) at {df['DateTime'][max_index['PM10 (µg/m3)']]}\")\nif 'PM2.5 (µg/m3)' in df.columns:\n print(f\"PM2.5 : {max_value['PM2.5 (µg/m3)']: >6.2f} µg/m3\\t\\t(15 µg/m3) at {df['DateTime'][max_index['PM2.5 (µg/m3)']]}\")\nif 'PM1 (µg/m3)' in df.columns:\n print(f\"PM1 : {max_value['PM1 (µg/m3)']: >6.2f} µg/m3\\t\\t( ? µg/m3) at {df['DateTime'][max_index['PM1 (µg/m3)']]}\")\nif 'NO2 (µg/m3)' in df.columns:\n print(f\"NO2 : {max_value['NO2 (µg/m3)']: >6.2f} µg/m3\\t\\t(25 µg/m3) at {df['DateTime'][max_index['NO2 (µg/m3)']]}\")\nif 'CO (mg/m3)' in df.columns:\n print(f\"CO : {max_value['CO (mg/m3)']: >6.2f} mg/m3\\t\\t( 4 mg/m3) at {df['DateTime'][max_index['CO (mg/m3)']]}\")\nif 'O3 (µg/m3)' in df.columns:\n print('\\nMaximum mean mass concentration during 8h:\\n')\n print(f\"O3 : {max_value['O3 (µg/m3)']: >6.2f} µg/m3\\t\\t(100 µg/m3) at {df['DateTime'][max_index['O3 (µg/m3)']]}\")\n\nperiod = {\n 'PM10 (µg/m3)': 0,\n 'PM2.5 (µg/m3)': 0,\n 'PM1 (µg/m3)': 0,\n 'NO2 (µg/m3)': 0,\n 'CO (mg/m3)': 0,\n 'O3 (µg/m3)': 0\n}\n\nfor i in range(1,len(df.index)):\n for pollutant in pollutants:\n if pollutant in df.columns and df[pollutant][i] > threshold[pollutant]:\n period[pollutant] += df['Timestamp'][i] - df['Timestamp'][i-1]\n\ntotal = df['Timestamp'][len(df.index)-1] - df['Timestamp'][0]\n\nprint(f'\\nTotal time above thresholds during {format_duration(total)}:\\n')\nfor pollutant in pollutants:\n if pollutant in df.columns:\n print(f'{pollutant} : {format_duration(period[pollutant])}')"
] | [
[
"pandas.read_csv"
]
] |
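A hypothetical pandas sketch (not taken from the AEROLAB repository) of the same "maximum mean concentration over a sliding 24 h window" computation that exposure.py implements with an explicit loop; it assumes a measurements.csv file with the DateTime and pollutant columns listed in the script's docstring:

```python
import pandas as pd

# Time-based rolling windows need a DatetimeIndex.
df = pd.read_csv("measurements.csv", parse_dates=["DateTime"]).set_index("DateTime")

# Trailing 24 h windows; exposure.py slides a forward-looking window instead,
# so the two approaches agree up to window alignment.
rolling = df["PM10 (µg/m3)"].rolling("24H").mean()
print(rolling.max(), "µg/m3 at", rolling.idxmax())
```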
Vinicius-Tanigawa/Undergraduate-Research-Project | [
"e92372f07882484b127d7affe305eeec2238b8a9"
] | [
"SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Aerodynamics/Common/Fidelity_Zero/Lift/generate_wing_wake_grid.py"
] | [
"## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Lift\n# generate_wing_wake_grid.py\n# \n# Created: April 2021, R. Erhard\n# Modified: \n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\nimport numpy as np\nimport pylab as plt\nfrom SUAVE.Core import Data\n\n\ndef generate_wing_wake_grid(geometry, H, L, hf, x_plane, Nzo=20, Nzf=35, Nyo=20, plot_grid=False):\n \"\"\" Generates the grid points for evaluating the viscous wing wake in a downstream plane.\n Uses smaller grid near the wing to better capture boundary layer.\n \n Inputs: \n geometry SUAVE vehicle data object\n H Height of full grid, normalized by wing span\n L Length of grid, normalized by wing span\n hf Height of finer grid portion\n x_plane Spanwise location of grid plane\n \n Nzo Number of vertical grid points outside finer region\n Nzf Number of vertical grid points inside finer region\n Nyo Number of horizontal grid points outside of wing span\n \"\"\"\n # unpack\n span = geometry.wings.main_wing.spans.projected\n half_span = span/2\n VD = geometry.vortex_distribution\n breaks = VD.chordwise_breaks\n \n # grid bounds\n z_bot = -H*half_span\n z_top = H*half_span\n Nzo_half = int(Nzo/2)\n Nyo_half = int(Nyo/2)\n \n # generate vertical grid point locations\n z_outer_bot = np.linspace(z_bot, -hf, Nzo_half)\n z_outer_top = np.linspace(hf, z_top, Nzo_half)\n \n # use finer concentration of grid points near the wing\n z_inner_bot = -hf*(np.flipud((1-np.cos(np.linspace(1e-6,1,Nzf)*np.pi/2))))\n z_inner_top = hf*(1-np.cos(np.linspace(0,1,Nzf)*np.pi/2))\n zlocs = np.concatenate([z_outer_bot, z_inner_bot, z_inner_top, z_outer_top])\n\n # generate spanwise grid point locations: placed between vortex lines to avoid discontinuities\n ypts = VD.YC[breaks]\n y_semispan = ypts[0:int(len(ypts)/2)]\n \n if L>=1.:\n # add grid points outside wingtip\n y_outerspan = np.linspace(1.01,L,Nyo_half)*half_span\n y_semispan = np.append(y_semispan, y_outerspan)\n else:\n # trim spanwise points to region of interest\n y_in = y_semispan<(L*half_span)\n y_semispan = y_semispan[y_in]\n \n ylocs = np.concatenate([np.flipud(-y_semispan),y_semispan])\n \n # declare new control points\n cp_YC = np.repeat(ylocs,len(zlocs)) \n cp_ZC = np.tile(zlocs,len(ylocs))\n cp_XC = np.ones_like(cp_YC)*x_plane \n \n grid_points = Data()\n grid_points.XC = cp_XC\n grid_points.YC = cp_YC\n grid_points.ZC = cp_ZC\n grid_points.yline = ylocs\n grid_points.zline = zlocs\n \n if plot_grid:\n yL = -span/2\n yR = span/2\n \n wing_y = np.array([yL, yR])\n wing_z = np.array([0,0])\n \n # plot the grid points\n fig = plt.figure()\n axes = fig.add_subplot(1,1,1)\n axes.plot(cp_YC,cp_ZC,'k.')\n \n # plot the wing projection\n axes.plot(wing_y,wing_z, 'r')\n \n axes.set_xlabel('y [m]')\n axes.set_ylabel(\"z [m]\")\n axes.set_title(\"New Grid Points\")\n \n plot_prop=True\n if plot_prop:\n for net in list(geometry.networks.keys()):\n for prop in list(geometry.networks[net].propellers.keys()):\n R = geometry.networks[net].propellers[prop].tip_radius\n origin = geometry.networks[net].propellers[prop].origin\n Na = geometry.networks[net].propellers[prop].number_azimuthal_stations\n \n psi = np.linspace(0,2*np.pi,Na+1)[:-1]\n ycoords = origin[0][1] + R*np.cos(psi)\n zcoords = origin[0][2] + R*np.sin(psi)\n axes.plot(ycoords,zcoords,'r')\n \n return grid_points"
] | [
[
"numpy.append",
"numpy.flipud",
"numpy.ones_like",
"numpy.cos",
"numpy.array",
"numpy.sin",
"numpy.concatenate",
"numpy.linspace"
]
] |
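A self-contained numpy sketch (illustrative parameter values, no SUAVE objects) of just the vertical grid spacing used by generate_wing_wake_grid above: coarse points away from the wing and cosine-clustered points inside |z| <= hf:

```python
import numpy as np

# Hypothetical values for half span, grid height, fine-region height and point counts.
half_span, H, hf, Nzo_half, Nzf = 5.0, 1.0, 0.5, 10, 35

z_outer_bot = np.linspace(-H * half_span, -hf, Nzo_half)
z_outer_top = np.linspace(hf, H * half_span, Nzo_half)
z_inner_bot = -hf * np.flipud(1 - np.cos(np.linspace(1e-6, 1, Nzf) * np.pi / 2))
z_inner_top = hf * (1 - np.cos(np.linspace(0, 1, Nzf) * np.pi / 2))
zlocs = np.concatenate([z_outer_bot, z_inner_bot, z_inner_top, z_outer_top])
print(zlocs.shape)  # (2 * Nzo_half + 2 * Nzf,) points, densest near z = 0
```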
pitchdarkdata/InfluxDays2021_Demo | [
"d5625566cefd983203983e158f3325cfb2c16029"
] | [
"API/gerrit_api.py"
] | [
"\"\"\"\nThis Module interacts with Gerrit and retrieves Data from Gerrit\n\"\"\"\n\nimport os\nimport json\nimport logging\nimport argparse\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom json.decoder import JSONDecodeError\nfrom urllib.parse import urlunsplit, urlencode\nfrom typing import Tuple, Union\ntry:\n from requests import __version__, Session, adapters, exceptions, urllib3, status_codes\n logging.debug(f'Available request module of version {__version__}')\nexcept ImportError:\n logging.error('Please install requests module. Use pip install requests.')\n\nclass GerritApi:\n \"\"\"\n *Class name :* GerritHandler\n\n *Description :* Class to retrieve data from Gerrit\n \"\"\"\n GET_ALL_REPO_URI = \"/projects/?d\"\n GET_ALL_CHANGES_URI = \"/changes/?q=repo:{repo_name}\"\n GET_ALL_ACTIVE_USERS_URI = \"/accounts/?q=is:active\"\n GET_COMMITS_BY_AGE = \"/changes/?q=-age:\"\n GET_COMMITS_USING_AFTER = \"/changes/?q=after:\"\n\n def __init__(self, gerrit_server: str, username: str=None, password: str=None):\n \"\"\"\n *Method description :* Initializing values for Gerrit operations from OVF\n\n :param username: Username to login to Gerrit\n :type username: String\n :param password: Password to login to Gerrit\n :type password: String\n :param url: Gerrit URL to get commit details\n :type url: String\n \"\"\"\n self.gerrit_username = username\n self.gerrit_password = password\n self.gerrit_url = f\"https://{gerrit_server}\"\n logging.debug(f\"GerritDetails:: {self.gerrit_url}, {self.gerrit_username}, {self.gerrit_password}\")\n if username and password:\n self.rest_engine = RestEngine(auth=(self.gerrit_username, self.gerrit_password))\n else:\n self.rest_engine = RestEngine()\n\n def get_all_projects(self) -> dict:\n \"\"\"\n Method to get all repositories\n\n :returns: :class:`repo_details`: All repo details\n :rtype: :class:`repo_details`: Dict\n \"\"\"\n all_repo_details = {}\n get_all_repo_url = f\"{self.gerrit_url}{GerritApi.GET_ALL_REPO_URI}\"\n all_repo_resp = self.decode_response(self.rest_engine.rest_request(get_all_repo_url))\n for key, value in all_repo_resp.items():\n all_repo_details[key] = {\"id\": value.get(\"id\"), \"description\": value.get(\"description\"),\n \"state\": value.get(\"state\")}\n logging.info(f\"List of All repositories : {all_repo_details} {len(all_repo_details)}\")\n return all_repo_details\n\n def get_all_active_projects(self) -> list:\n \"\"\"\n Method to get all active repositories\n\n :returns: :class:`active_repo_list`: List of active repositories\n :rtype: :class:`active_repo_list`: List\n \"\"\"\n active_repo_list = []\n all_repo_details = self.get_all_projects()\n for key, value in all_repo_details.items():\n if value[\"state\"] == \"ACTIVE\":\n active_repo_list.append(key)\n logging.info(f\"List of active repositories : {active_repo_list} {len(active_repo_list)}\")\n return active_repo_list\n\n def get_active_user_accounts(self) -> list:\n \"\"\"\n *Method description :* Method to get active user accounts in server\n\n :returns: :class:`all_users_details`: List of commit changes as dict\n :rtype: :class:`all_users_details`: list\n \"\"\"\n all_users_details = []\n all_users_list, mocker_response = [], []\n all_users_url = f\"{self.gerrit_url}{GerritApi.GET_ALL_ACTIVE_USERS_URI}&S=0\"\n response = self.decode_response(self.rest_engine.rest_request(all_users_url))\n all_users_list.extend(response)\n mocker_response = self.no_limit_mocker(response, mocker_response,\n 
url_to_be_used=f\"{self.gerrit_url}{GerritApi.GET_ALL_ACTIVE_USERS_URI}\")\n if all_users_list:\n all_users_list.extend(mocker_response)\n logging.info(f\"Number Of Active User Accounts in Gerrit: {len(all_users_list)}\")\n for each_user in all_users_list:\n user_id = each_user.get(\"_account_id\")\n user_details_url = f\"{self.gerrit_url}/accounts/{user_id}/detail\"\n detailed_response = self.decode_response(self.rest_engine.rest_request(user_details_url))\n all_users_details.append(detailed_response)\n logging.info(f\"Active User Account Details in Gerrit: {all_users_details}\")\n return all_users_details\n\n def get_commit_details_in_given_period(self, start=None, duration=\"24Hours\", stop=datetime.utcnow()):\n all_commits_list, mocker_response = [], []\n if not start:\n start = self.get_start_time(duration, stop)\n commits_url = f\"{self.gerrit_url}{GerritApi.GET_COMMITS_USING_AFTER}\\\"{start}\\\"&S=0\"\n print(commits_url)\n response = self.decode_response(self.rest_engine.rest_request(commits_url))\n all_commits_list.extend(response)\n mocker_response = self.no_limit_mocker(response, mocker_response,\n url_to_be_used=f\"{self.gerrit_url}{GerritApi.GET_COMMITS_USING_AFTER}\\\"{start}\\\"\")\n if mocker_response:\n all_commits_list.extend(mocker_response)\n for each_commit in all_commits_list:\n owner_account_url = f\"{self.gerrit_url}/accounts/{each_commit.get('owner').get('_account_id')}/detail\"\n each_commit[\"owner\"] = self.decode_response(self.rest_engine.rest_request(owner_account_url)).get(\"name\")\n if each_commit.get(\"submitter\"):\n submitter_id = each_commit.get('submitter').get('_account_id')\n submit_account_url = f\"{self.gerrit_url}/accounts/{submitter_id}/detail\"\n each_commit[\"submitter\"] = self.decode_response(self.rest_engine.rest_request(\n submit_account_url)).get(\"name\")\n print(f\"Total commits from {start} is: {len(all_commits_list)}\")\n return all_commits_list\n\n @staticmethod\n def get_start_time(duration, stop):\n if \"minutes\" in str(duration).lower():\n min_delta = int(str(duration).lower().strip(\"minutes\"))\n start = stop - timedelta(minutes=min_delta)\n if \"hours\" in str(duration).lower():\n hour_delta = int(str(duration).lower().strip(\"hours\"))\n start = stop - timedelta(hours=hour_delta)\n elif \"days\" in str(duration).lower():\n day_delta = int(str(duration).lower().strip(\"days\"))\n start = stop - timedelta(days=day_delta)\n elif \"months\" in str(duration).lower():\n month_delta = int(str(duration).lower().strip(\"months\"))\n start = stop - timedelta(months=month_delta)\n return start\n\n @staticmethod\n def decode_response(response: str) -> dict:\n \"\"\"\n *Method description :* Method to decode rest response with Gerrit Magic Prefix\n\n :param response: Raw REST Response Content\n :type response: String\n :raises: :class:`ValueError`: Invaid Response Json Content\n :returns: :class:`resp_dict`: Dictionary of the given Response content\n :rtype: :class:`resp_dict`: Dictionary\n \"\"\"\n output = response[1]\n # prefix that comes with the json responses.\n gerrit_magic_json_prefix = \")]}'\\n\"\n if str(response[0]) == '200' and isinstance(response[1], str):\n if response[1].startswith(gerrit_magic_json_prefix):\n output = response[1][len(gerrit_magic_json_prefix):]\n try:\n output = json.loads(output)\n except ValueError:\n logging.error(f\"Invalid Json in response {output}\")\n else:\n logging.error(f'Rest Call Failed with the status code {response[0]} and response {response[1]}')\n return output\n\n def 
no_limit_mocker(self, response: str, mocker_response: list, url_to_be_used: str,\n def_limit: int =0) -> list:\n \"\"\"\n *Method description :* Method to mock no_limit option in Gerrit Server\n\n :param response: Previous GET Call Response\n :type response: String\n :param mocker_response: Mocker response list on which no_limit responses are accumulated\n :type mocker_response: list\n :param url_to_be_used: URL to be used for REST Call in no_limits mocker block\n :type url_to_be_used: String\n :param def_limit: Number Of Commits Limit for GET call\n :type def_limit: Integer\n :returns: :class:`mocker_response`: Get REST Response in List\n :rtype: :class:`mocker_response`: List\n \"\"\"\n if \"_more_\" in str(response):\n def_limit = def_limit + 500\n start_limit = def_limit - 500 + 1\n logging.info(f\"Fetching {start_limit} - {def_limit} Records. Please Wait...\")\n new_url = f\"{url_to_be_used}&S={str(def_limit)}&n=500\"\n int_response = self.decode_response(self.rest_engine.rest_request(new_url))\n mocker_response.extend(int_response)\n self.no_limit_mocker(int_response, mocker_response, url_to_be_used, def_limit)\n else:\n def_limit = def_limit + 500\n new_url = f\"{url_to_be_used}&S={str(def_limit)}&n=500\"\n int_response = self.decode_response(self.rest_engine.rest_request(new_url))\n mocker_response.extend(int_response)\n return mocker_response\n\nclass RestEngine:\n \"\"\"\n Class to perform rest operations like PUT, PATCH, POST, GET\n DELETE, HEAD, OPTIONS.\n \"\"\"\n def __init__(self, **session_args: str):\n \"\"\"\n *Method description :* Initialization method.\n\n 1. Initialize a http session with the session parameters passed by user\n 2. Default authentication is set to (username, password) as (admin, admin).\n And a header with json content type is added.\n 3. 
These session level parameters are overwritten when the same are provided\n at the method level.\n\n :param session_args: Rest arguments that can be set at the session level.\n Supported: 'headers', 'cookies', 'auth', 'proxies', 'hooks',\n 'params', 'verify', 'cert', 'stream', 'trust_env', 'max_redirects'\n :type session_args: dict\n \"\"\"\n self.http_session = Session()\n self.http_session.auth = session_args.get('auth')\n self.http_session.headers.update(session_args.get('headers', {}))\n #as verify is set to False,requests in this session will accept any TLS certificate\n #will ignore SSL certificate verification\n self.http_session.verify = session_args.get('verify', False)\n #Retries to establish a http secure connection.\n https_adapter = adapters.HTTPAdapter(max_retries=3)\n self.http_session.mount('https://', https_adapter)\n #To set other session parameters supported by requests\n self.http_session.params = session_args.get('params')\n self.http_session.proxies = session_args.get('proxies')\n self.http_session.cert = session_args.get('cert')\n self.http_session.hooks = session_args.get('hooks')\n self.http_session.stream = session_args.get('stream')\n self.http_session.max_redirects = session_args.get('max_redirects')\n self.http_session.cookies.update(session_args.get('cookies', {}))\n self.http_session.trust_env = session_args.get('trust_env')\n\n @staticmethod\n def build_api_url(netloc: str, scheme: str =\"https\", path: str =\"\", query: Union[str, dict]=\"\",\n fragments: str =\"\") -> str:\n \"\"\"Generates complete url from the inputs provided by the user.\n URL format : scheme://netloc/path?query#fragments\n\n #query str: page=12\n eg : https://docs.python.com/tutorial/index.html?page=12#datatypes\n\n #query dict: {page:12, type:tuple)\n eg : https://docs.python.com/tutorial/index.html?page=12&type=tuple#datatypes\n\n :param netloc: Network location part. Domain name should be given as input.\n (eg): example.com, 168.0.0.1:8080, jenkins.com:8443\n :type netloc: str\n :param scheme: URL scheme specifier. Can be either http or https, defaults to \"https\"\n :type scheme: str, optional\n :param path: Hierarchical path. Additional path to be added to the netloc, defaults to \"\"\n :type path: str, optional\n :param query: query string needed to be added. It will be added after the \"?\" symbol.\n Can be given directly as string or dict with multiple key value pairs. if multiple key\n value pairs are given then query string will be concatenated with \"&\" symbol, defaults to \"\"\n :type query: str or dict, optional\n :param fragments: Additional piece of information to be added to the url. 
This will be added\n after the \"#\" symbol, defaults to \"\"\n :type fragments: str, optional\n :return: complete api url\n :rtype: str\n \"\"\"\n query_str = urlencode(query) if isinstance(query, dict) else query\n api_url = urlunsplit((scheme, netloc, path, query_str, fragments))\n logging.debug(f\"Api url formed --> {api_url}\")\n return api_url\n\n def rest_request(self, uri: str, operation: str ='GET', **func_args: str) -> Tuple[int, str, dict]:\n \"\"\"\n *Method description :* Common rest request method be called for performing the rest operations.\n\n :param uri: rest uri\n :type uri: str\n :param operation: rest operation, could be GET, POST, PATCH, DELETE, PUT, HEAD, OPTIONS.\n :type operation: str\n :param func_args: Rest arguments such as 'auth', 'cookies', 'data', 'files',\n 'headers', 'hooks', 'json', 'params', 'timeout', 'allow_redirects', 'proxies',\n 'hooks', 'stream', 'verify', 'cert' that can be set at the method request level.\n Overrides the session arguments.\n :type func_args: dict\n :returns: :class:`response_code`: Response code of the rest request call performed\n :class:`response`: Response received from the rest request call\n :class:'response_headers`: Headers in response\n :rtype: :class:`response_code`: int\n :class:`response`: dict/str\n :class:`response_headers`: dict\n \"\"\"\n response_code, response, response_headers = None, None, None\n #suppress Insecure certificate warning\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n try:\n rest_response = self.http_session.request(operation.upper(), uri, **func_args)\n logging.debug(f'Request uri : {rest_response.request.url}')\n logging.debug(f'Request method : {rest_response.request.method}')\n logging.debug(f'Request headers : {rest_response.request.headers}')\n logging.debug(f'Request data : {rest_response.request.body}')\n response_code, response, response_headers = rest_response.status_code, rest_response.content, rest_response.headers\n #Uncomment the below line if status code has to raise an exception/error\n #rest_response.raise_for_status()\n if response:\n try:\n response = rest_response.json()\n except JSONDecodeError:\n #default utf-8 encoding is done.\n response = rest_response.text\n except exceptions.InvalidURL:\n logging.error(f'The uri {uri} passed for this {operation.upper()} method is invalid')\n except exceptions.HTTPError:\n logging.error(f'The {operation.upper()} method failed with the status code {response_code}' \\\n f' and status message would be any of {status_codes._codes[response_code]}.')\n except exceptions.SSLError:\n logging.error('SSL Certificate verification failed.')\n except exceptions.ConnectionError:\n logging.error(f'Failed to establish a connection with {uri}')\n except exceptions.InvalidHeader:\n logging.error(f'Invalid header exception. Request headers added : {rest_response.request.headers}')\n except exceptions.TooManyRedirects:\n logging.error('The URL redirects has crossed the maximum limit of 30.')\n except exceptions.Timeout:\n logging.error(f'{operation.upper()} request timed out. Can be either Connection or Read timeout.')\n except exceptions.RequestException:\n logging.error('Exception occurred while handling request. 
Please check if the input passed are correct.')\n except TypeError:\n logging.error('Please re-check if the input arguments passed are valid.')\n logging.debug(f'Rest Response : {response}')\n logging.debug(f'Rest Response status code : {response_code}')\n logging.debug(f'Rest Response headers : {response_headers}')\n if response_code:\n logging.debug(f'Possible status message for {response_code} : {status_codes._codes[response_code]}')\n return response_code, response, response_headers\n\nclass Common:\n \"\"\"\n Class to perform rest operations like PUT, PATCH, POST, GET\n DELETE, HEAD, OPTIONS.\n \"\"\"\n\n @staticmethod\n def convert_json_to_dict(json_file: str) -> Union[dict, None]:\n \"\"\"Converts the input json file into dictionary\n\n :param json_file: Name of the json file to be converted\n :type json_file: str\n :return: Converted dictionary\n :rtype: dict or None\n \"\"\"\n try:\n assert os.path.exists(json_file)\n with open(json_file, 'r') as file_obj:\n data_dict = json.load(file_obj)\n return data_dict\n except AssertionError:\n logging.error(f'Json file {json_file} doesnot exists')\n except json.decoder.JSONDecodeError as decode_err:\n logging.error(f'unable to parse {json_file}. Kindly validate the json file. Error occured: {decode_err}')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--servername\", type=str, help=\"Gerrit Server Name/IP\")\n parser.add_argument(\"-u\", \"--user\", type=str, help=\"Gerrit Login Username\", default=None)\n parser.add_argument(\"-p\", \"--password\", type=str, help=\"Gerrit Login Password\", default=None)\n parser.add_argument(\"-d\", \"--duration\", type=str, help=\"Duration for which gerrit changes to be fetched\\n\\\n Supported are Minutes, Hours, Days, Months. Examples: 120Minutes, 48Hours, 2Days, 1Month \\n\\\n Default : 24Hours\", default=\"24Hours\")\n args = parser.parse_args()\n if args.servername and args.duration:\n obj = GerritApi(f\"{args.servername}\")\n commits_list = obj.get_commit_details_in_given_period(duration=args.duration)\n print(f\"Gerrit commits for given {args.duration} is: {len(commits_list)}\\n\")\n print(\"Gerrit Commits Details are saved in new_commits.csv file\")\n cl_df = pd.DataFrame(commits_list)\n cl_df.to_csv('new_commits.csv')\n else:\n print(\"Please pass Gerrit server name with -s and duration with -d argument !!!\")\n"
] | [
[
"pandas.DataFrame"
]
] |
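The Gerrit client row above builds its REST URLs with urlencode/urlunsplit (scheme://netloc/path?query#fragment) and drives them through a requests Session that mounts an HTTPAdapter with retries for https. A minimal sketch of that pattern, assuming only the requests library; the host gerrit.example.com and the /changes/ path are placeholders, not the repository's own values:

import requests
from urllib.parse import urlencode, urlunsplit


def build_url(netloc, scheme="https", path="", query="", fragment=""):
    """Assemble scheme://netloc/path?query#fragment; dict queries are urlencoded."""
    query_str = urlencode(query) if isinstance(query, dict) else query
    return urlunsplit((scheme, netloc, path, query_str, fragment))


session = requests.Session()
session.verify = False  # accept any TLS certificate, as the session above does
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=3))

url = build_url("gerrit.example.com", path="/changes/", query={"n": 500})
print(url)  # https://gerrit.example.com/changes/?n=500
# response = session.get(url)  # the actual request is omitted in this sketch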
cenyk1230/cogdl | [
"fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce"
] | [
"cogdl/models/nn/lightgcn.py"
] | [
"\"\"\"\nfrom from https://github.com/huangtinglin/MixGCF\n\nCreated on October 1, 2020\n\n@author: Tinglin Huang ([email protected])\n\"\"\"\nimport torch\nimport torch.nn as nn\n\nfrom cogdl.models import BaseModel, register_model\n\n\nclass GraphConv(nn.Module):\n \"\"\"\n Graph Convolutional Network\n \"\"\"\n\n def __init__(self, n_hops, n_users, interact_mat, edge_dropout_rate=0.5, mess_dropout_rate=0.1):\n super(GraphConv, self).__init__()\n\n self.interact_mat = interact_mat\n self.n_users = n_users\n self.n_hops = n_hops\n self.edge_dropout_rate = edge_dropout_rate\n self.mess_dropout_rate = mess_dropout_rate\n\n self.dropout = nn.Dropout(p=mess_dropout_rate) # mess dropout\n\n def to(self, *args, **kwargs):\n self = super().to(*args, **kwargs)\n self.interact_mat = self.interact_mat.to(*args, **kwargs)\n return self\n\n def _sparse_dropout(self, x, rate=0.5):\n noise_shape = x._nnz()\n\n random_tensor = rate\n random_tensor += torch.rand(noise_shape).to(x.device)\n dropout_mask = torch.floor(random_tensor).type(torch.bool)\n i = x._indices()\n v = x._values()\n\n i = i[:, dropout_mask]\n v = v[dropout_mask]\n\n out = torch.sparse.FloatTensor(i, v, x.shape).to(x.device)\n return out * (1.0 / (1 - rate))\n\n def forward(self, user_embed, item_embed, mess_dropout=True, edge_dropout=True):\n # user_embed: [n_users, channel]\n # item_embed: [n_items, channel]\n\n # all_embed: [n_users+n_items, channel]\n all_embed = torch.cat([user_embed, item_embed], dim=0)\n agg_embed = all_embed\n embs = [all_embed]\n\n for hop in range(self.n_hops):\n interact_mat = (\n self._sparse_dropout(self.interact_mat, self.edge_dropout_rate) if edge_dropout else self.interact_mat\n )\n\n agg_embed = torch.sparse.mm(interact_mat, agg_embed)\n if mess_dropout:\n agg_embed = self.dropout(agg_embed)\n # agg_embed = F.normalize(agg_embed)\n embs.append(agg_embed)\n embs = torch.stack(embs, dim=1) # [n_entity, n_hops+1, emb_size]\n return embs[: self.n_users, :], embs[self.n_users :, :]\n\n\n@register_model(\"lightgcn\")\nclass LightGCN(BaseModel):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--dim', type=int, default=64, help='embedding size')\n parser.add_argument('--l2', type=float, default=1e-4, help='l2 regularization weight, 1e-5 for NGCF')\n parser.add_argument(\"--mess_dropout\", type=bool, default=False, help=\"consider mess dropout or not\")\n parser.add_argument(\"--mess_dropout_rate\", type=float, default=0.1, help=\"ratio of mess dropout\")\n parser.add_argument(\"--edge_dropout\", type=bool, default=False, help=\"consider edge dropout or not\")\n parser.add_argument(\"--edge_dropout_rate\", type=float, default=0.1, help=\"ratio of edge sampling\")\n parser.add_argument(\"--ns\", type=str, default='mixgcf', help=\"rns,mixgcf\")\n parser.add_argument(\"--K\", type=int, default=1, help=\"number of negative in K-pair loss\")\n parser.add_argument(\"--n_negs\", type=int, default=64, help=\"number of candidate negative\")\n parser.add_argument(\"--pool\", type=str, default='mean', help=\"[concat, mean, sum, final]\")\n parser.add_argument(\"--context_hops\", type=int, default=3, help=\"hop\")\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.n_users,\n args.n_items,\n args.l2,\n args.dim,\n args.context_hops,\n args.mess_dropout,\n args.mess_dropout_rate,\n args.edge_dropout,\n args.edge_dropout_rate,\n args.pool,\n args.n_negs,\n args.ns,\n args.K,\n args.adj_mat,\n )\n\n def 
__init__(\n self,\n n_users,\n n_items,\n l2,\n dim,\n context_hops,\n mess_dropout,\n mess_dropout_rate,\n edge_dropout,\n edge_dropout_rate,\n pool,\n n_negs,\n ns,\n K,\n adj_mat,\n ):\n super(LightGCN, self).__init__()\n\n self.n_users = n_users\n self.n_items = n_items\n self.adj_mat = adj_mat\n\n self.decay = l2\n self.emb_size = dim\n self.context_hops = context_hops\n self.mess_dropout = mess_dropout\n self.mess_dropout_rate = mess_dropout_rate\n self.edge_dropout = edge_dropout\n self.edge_dropout_rate = edge_dropout_rate\n self.pool = pool\n self.n_negs = n_negs\n self.ns = ns\n self.K = K\n\n self._init_weight()\n self.user_embed = nn.Parameter(self.user_embed)\n self.item_embed = nn.Parameter(self.item_embed)\n\n self.gcn = self._init_model()\n\n def _init_weight(self):\n initializer = nn.init.xavier_uniform_\n self.user_embed = initializer(torch.empty(self.n_users, self.emb_size))\n self.item_embed = initializer(torch.empty(self.n_items, self.emb_size))\n\n # [n_users+n_items, n_users+n_items]\n self.sparse_norm_adj = self._convert_sp_mat_to_sp_tensor(self.adj_mat)\n\n def _init_model(self):\n return GraphConv(\n n_hops=self.context_hops,\n n_users=self.n_users,\n interact_mat=self.sparse_norm_adj,\n edge_dropout_rate=self.edge_dropout_rate,\n mess_dropout_rate=self.mess_dropout_rate,\n )\n\n def _convert_sp_mat_to_sp_tensor(self, X):\n coo = X.tocoo()\n i = torch.LongTensor([coo.row, coo.col])\n v = torch.from_numpy(coo.data).float()\n return torch.sparse.FloatTensor(i, v, coo.shape)\n\n def to(self, *args, **kwargs):\n self = super().to(*args, **kwargs)\n # self.sparse_norm_adj = self.sparse_norm_adj.to(*args, **kwargs)\n self.gcn.to(*args, **kwargs)\n return self\n\n def forward(self, batch=None):\n user = batch[\"users\"]\n pos_item = batch[\"pos_items\"]\n neg_item = batch[\"neg_items\"] # [batch_size, n_negs * K]\n\n # user_gcn_emb: [n_users, channel]\n # item_gcn_emb: [n_users, channel]\n user_gcn_emb, item_gcn_emb = self.gcn(\n self.user_embed, self.item_embed, edge_dropout=self.edge_dropout, mess_dropout=self.mess_dropout\n )\n\n if self.ns == \"rns\": # n_negs = 1\n neg_gcn_embs = item_gcn_emb[neg_item[:, : self.K]]\n else:\n neg_gcn_embs = []\n for k in range(self.K):\n neg_gcn_embs.append(\n self.negative_sampling(\n user_gcn_emb, item_gcn_emb, user, neg_item[:, k * self.n_negs : (k + 1) * self.n_negs], pos_item\n )\n )\n neg_gcn_embs = torch.stack(neg_gcn_embs, dim=1)\n\n return self.create_bpr_loss(user_gcn_emb[user], item_gcn_emb[pos_item], neg_gcn_embs)\n\n def negative_sampling(self, user_gcn_emb, item_gcn_emb, user, neg_candidates, pos_item):\n batch_size = user.shape[0]\n s_e, p_e = user_gcn_emb[user], item_gcn_emb[pos_item] # [batch_size, n_hops+1, channel]\n if self.pool != \"concat\":\n s_e = self.pooling(s_e).unsqueeze(dim=1)\n\n \"\"\"positive mixing\"\"\"\n seed = torch.rand(batch_size, 1, p_e.shape[1], 1).to(p_e.device) # (0, 1)\n n_e = item_gcn_emb[neg_candidates] # [batch_size, n_negs, n_hops, channel]\n n_e_ = seed * p_e.unsqueeze(dim=1) + (1 - seed) * n_e # mixing\n\n \"\"\"hop mixing\"\"\"\n scores = (s_e.unsqueeze(dim=1) * n_e_).sum(dim=-1) # [batch_size, n_negs, n_hops+1]\n indices = torch.max(scores, dim=1)[1].detach()\n neg_items_emb_ = n_e_.permute([0, 2, 1, 3]) # [batch_size, n_hops+1, n_negs, channel]\n # [batch_size, n_hops+1, channel]\n return neg_items_emb_[[[i] for i in range(batch_size)], range(neg_items_emb_.shape[1]), indices, :]\n\n def pooling(self, embeddings):\n # [-1, n_hops, channel]\n if self.pool == \"mean\":\n return 
embeddings.mean(dim=1)\n elif self.pool == \"sum\":\n return embeddings.sum(dim=1)\n elif self.pool == \"concat\":\n return embeddings.view(embeddings.shape[0], -1)\n else: # final\n return embeddings[:, -1, :]\n\n def generate(self, split=True):\n user_gcn_emb, item_gcn_emb = self.gcn(self.user_embed, self.item_embed, edge_dropout=False, mess_dropout=False)\n user_gcn_emb, item_gcn_emb = self.pooling(user_gcn_emb), self.pooling(item_gcn_emb)\n if split:\n return user_gcn_emb, item_gcn_emb\n else:\n return torch.cat([user_gcn_emb, item_gcn_emb], dim=0)\n\n def rating(self, u_g_embeddings=None, i_g_embeddings=None):\n return torch.matmul(u_g_embeddings, i_g_embeddings.t())\n\n def create_bpr_loss(self, user_gcn_emb, pos_gcn_embs, neg_gcn_embs):\n # user_gcn_emb: [batch_size, n_hops+1, channel]\n # pos_gcn_embs: [batch_size, n_hops+1, channel]\n # neg_gcn_embs: [batch_size, K, n_hops+1, channel]\n\n batch_size = user_gcn_emb.shape[0]\n\n u_e = self.pooling(user_gcn_emb)\n pos_e = self.pooling(pos_gcn_embs)\n neg_e = self.pooling(neg_gcn_embs.view(-1, neg_gcn_embs.shape[2], neg_gcn_embs.shape[3])).view(\n batch_size, self.K, -1\n )\n\n pos_scores = torch.sum(torch.mul(u_e, pos_e), axis=1)\n neg_scores = torch.sum(torch.mul(u_e.unsqueeze(dim=1), neg_e), axis=-1) # [batch_size, K]\n\n mf_loss = torch.mean(torch.log(1 + torch.exp(neg_scores - pos_scores.unsqueeze(dim=1)).sum(dim=1)))\n\n # cul regularizer\n regularize = (\n torch.norm(user_gcn_emb[:, 0, :]) ** 2\n + torch.norm(pos_gcn_embs[:, 0, :]) ** 2\n + torch.norm(neg_gcn_embs[:, :, 0, :]) ** 2\n ) / 2 # take hop=0\n emb_loss = self.decay * regularize / batch_size\n\n return mf_loss + emb_loss, mf_loss, emb_loss\n"
] | [
[
"torch.empty",
"torch.stack",
"torch.rand",
"torch.nn.Parameter",
"torch.norm",
"torch.mul",
"torch.sparse.FloatTensor",
"torch.from_numpy",
"torch.max",
"torch.sparse.mm",
"torch.LongTensor",
"torch.cat",
"torch.nn.Dropout",
"torch.floor"
]
] |
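The LightGCN row above pools multi-hop embeddings and trains them with a BPR-style ranking loss, mean(log(1 + sum_k exp(neg_k - pos))), over K mixed negatives per user. A minimal sketch of that loss on random tensors with assumed shapes (batch_size, K, channel), independent of the model and sampler classes:

import torch

torch.manual_seed(0)
batch_size, K, channel = 4, 2, 8

u_e = torch.randn(batch_size, channel)       # pooled user embeddings
pos_e = torch.randn(batch_size, channel)     # pooled positive-item embeddings
neg_e = torch.randn(batch_size, K, channel)  # K pooled negative items per user

pos_scores = torch.sum(u_e * pos_e, dim=1)                # [batch_size]
neg_scores = torch.sum(u_e.unsqueeze(1) * neg_e, dim=-1)  # [batch_size, K]

# log(1 + sum over negatives of exp(neg - pos)), averaged over the batch
mf_loss = torch.mean(
    torch.log(1 + torch.exp(neg_scores - pos_scores.unsqueeze(1)).sum(dim=1)))
print(mf_loss.item())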
raven-computing/pydf | [
"70b14ad11aa629da6d1abb993a2a4c567db73ca9"
] | [
"raven/struct/dataframe/_dataframeutils.py"
] | [
"# Copyright (C) 2021 Raven Computing\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nProvides internal utility functions for DataFrame operations.\n\"\"\"\n\nimport numpy as np\n\nimport raven.struct.dataframe.core as dataframe\nimport raven.struct.dataframe.column as column\nimport raven.struct.dataframe.bytecolumn as bytecolumn\nimport raven.struct.dataframe.shortcolumn as shortcolumn\nimport raven.struct.dataframe.intcolumn as intcolumn\nimport raven.struct.dataframe.longcolumn as longcolumn\nimport raven.struct.dataframe.floatcolumn as floatcolumn\nimport raven.struct.dataframe.doublecolumn as doublecolumn\nimport raven.struct.dataframe.stringcolumn as stringcolumn\nimport raven.struct.dataframe.charcolumn as charcolumn\nimport raven.struct.dataframe.booleancolumn as booleancolumn\nimport raven.struct.dataframe.binarycolumn as binarycolumn\nimport raven.struct.dataframe._columnutils as columnutils\n\n# pylint: disable=C0103, R1702, R1705, R0911, R0912, R0914, R0915, W0212\n\ndef copy_of(df):\n \"\"\"Creates and returns a copy of the given DataFrame\n\n Args:\n df: The DataFrame instance to copy\n\n Returns:\n A copy of the specified DataFrame or None if the argument is None\n \"\"\"\n if df is None:\n return None\n\n df.flush()\n columns = [col.clone() for col in df._internal_columns()]\n copy = None\n if df.is_nullable():\n copy = dataframe.NullableDataFrame(columns)\n else:\n copy = dataframe.DefaultDataFrame(columns)\n\n return copy\n\ndef like(df):\n \"\"\"Creates and returns a DataFrame which has the same column structure\n and Column names as the specified DataFrame instance but is otherwise empty\n\n Args:\n df: The DataFrame from which to copy the Column structure\n\n Returns:\n A DataFrame with the same Column structure and names as the\n specified DataFrame, or None if the specified DataFrame is None\n \"\"\"\n if df is None:\n return None\n\n col = df.columns()\n if col == 0:\n return (dataframe.NullableDataFrame()\n if df.is_nullable()\n else dataframe.DefaultDataFrame())\n\n cols = [None] * col\n for i in range(col):\n cols[i] = column.Column.of_type(df.get_column(i).type_code())\n\n result = (dataframe.NullableDataFrame(cols)\n if df.is_nullable()\n else dataframe.DefaultDataFrame(cols))\n\n if df.has_column_names():\n result.set_column_names(df.get_column_names())\n\n return result\n\ndef is_numeric_fp(col):\n \"\"\"Indicates whether the specified Column has a type name\n of float or double.\n\n Args:\n col: The Column to check\n\n Returns:\n A bool which indicates whether the specified Column is\n a FloatColumn, NullableFloatColumn, DoubleColumn,\n NullableDoubleColumn\n \"\"\"\n return columnutils.is_numeric_fp(col)\n\ndef merge(*dataframes):\n \"\"\"Merges all given DataFrame instances into one DataFrame.\n\n All DataFames are merged by columns. All DataFrames must have an\n equal number of rows but may be of any type. All columns are added to\n the returned DataFrame in the order of the arguments passed to this\n method. 
Only passing one DataFrame to this method will simply\n return that instance.\n\n Columns with duplicate names are included in the returned DataFrame\n and a postfix is added to each duplicate column name.\n All columns of the returned DataFrame are backed by their origin,\n which means that changes to the original DataFrame are reflected in\n the merged DataFrame and vice versa. This does not apply, however,\n if columns need to be converted to a nullable type. For example, if\n one DataFrame argument is nullable, then all columns from non-nullable\n DataFrame arguments are converted to their corresponding\n nullable equivalent.\n\n If columns should be independent from their origin, then simply pass\n a clone (copy) of each DataFrame argument to this method.\n\n Example:\n merged = DataFrame.merge(DataFrame.copy(df1), DataFrame.copy(df2))\n\n Args:\n dataframes: The DataFrames to be merged\n\n Returns:\n A DataFrame composed of all columns of the given DataFrames\n \"\"\"\n if dataframes is None or len(dataframes) == 0:\n raise dataframe.DataFrameException(\"Arg must not be None or empty\")\n\n if len(dataframes) == 1:\n return dataframes[0]\n\n rows = dataframes[0].rows()\n cols = 0\n has_nullable = False\n has_names = False\n for i, df in enumerate(dataframes):\n if df is None:\n raise dataframe.DataFrameException(\n \"DataFrame argument must not be None\")\n\n cols += df.columns()\n if df.rows() != rows:\n raise dataframe.DataFrameException(\n (\"Size missmatch for DataFrame argument at index {}. \"\n \"Expected {} rows but found {}\")\n .format(i, rows, df.rows()))\n\n if df.is_nullable():\n has_nullable = True\n\n if df.has_column_names():\n has_names = True\n\n for _, df in enumerate(dataframes):\n df.flush()\n\n names = None\n if has_names:\n names = [None] * cols\n for i in range(cols):\n names[i] = str(i)\n\n k = 0\n for i, df in enumerate(dataframes):\n for j in range(df.columns()):\n c = df.get_column(j)\n if c.get_name():\n names[k] = c.get_name()\n k += 1\n\n for i in range(cols):\n k = 0\n already_set = False\n n = names[i]\n for j in range(cols):\n if i != j:\n if n == names[j]:\n if not already_set:\n names[i] = names[i] + \"_\" + str(k)\n k += 1\n already_set = True\n\n names[j] = names[j] + \"_\" + str(k)\n k += 1\n\n columns = [None] * cols\n k = 0\n for i, df in enumerate(dataframes):\n for j in range(df.columns()):\n if has_nullable:\n columns[k] = df.get_column(j).as_nullable()\n k += 1\n else:\n columns[k] = df.get_column(j)\n k += 1\n\n merged = None\n if has_nullable:\n merged = dataframe.NullableDataFrame(columns)\n else:\n merged = dataframe.DefaultDataFrame(columns)\n\n if has_names:\n merged.set_column_names(names)\n\n return merged\n\ndef convert(df, target_type):\n \"\"\"Converts the given DataFrame from a DefaultDataFrame to a NullableDataFrame\n or vice versa.\n\n Converting a DefaultDataFrame to a NullableDataFrame will not change\n any internal values, except that now you can add/insert null values to it.\n Converting a NullableDataFrame to a DefaultDataFrame will convert all None\n occurrences to the primitive defaults according to the Column they are located.\n\n Args:\n df: The DataFrame instance to convert. 
Must not be None\n target_type: The type to convert the given DataFrame to.\n May be 'default' or 'nullable'\n\n Returns:\n A DataFrame converted from the type of the argument passed to this method\n to the type specified\n \"\"\"\n if df is None or target_type is None:\n raise ValueError(\"Arg must not be null\")\n\n if not isinstance(target_type, str):\n raise ValueError(\"Target type argument must be specified as a string\")\n\n target_type = target_type.lower()\n if target_type not in (\"defaultdataframe\", \"default\", \"nullabledataframe\", \"nullable\"):\n raise ValueError(\"Unable to convert to '\" + str(target_type)\n + \"'. Must be either 'default' or 'nullable'\")\n\n if target_type == \"defaultdataframe\":\n target_type = \"default\"\n elif target_type == \"nullabledataframe\":\n target_type = \"nullable\"\n\n source_type = \"nullable\" if df.is_nullable() else \"default\"\n if target_type == source_type:\n return copy_of(df)\n\n rows = df.rows()\n converted = None\n # convert from Nullable to Default\n if target_type == \"default\":\n converted = dataframe.DefaultDataFrame()\n for col in df:\n tc = col.type_code()\n if tc == bytecolumn.NullableByteColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.int8)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(bytecolumn.ByteColumn(col.get_name(), vals))\n elif tc == shortcolumn.NullableShortColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.int16)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(shortcolumn.ShortColumn(col.get_name(), vals))\n elif tc == intcolumn.NullableIntColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.int32)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(intcolumn.IntColumn(col.get_name(), vals))\n elif tc == longcolumn.NullableLongColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.int64)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(longcolumn.LongColumn(col.get_name(), vals))\n elif tc == stringcolumn.NullableStringColumn.TYPE_CODE:\n vals = np.array([None] * rows, dtype=np.object)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = (stringcolumn.StringColumn.DEFAULT_VALUE\n if val is None or val == \"\"\n else val)\n\n converted.add_column(stringcolumn.StringColumn(col.get_name(), vals))\n elif tc == floatcolumn.NullableFloatColumn.TYPE_CODE:\n vals = np.array([0.0] * rows, dtype=np.float32)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0.0 if val is None else val\n\n converted.add_column(floatcolumn.FloatColumn(col.get_name(), vals))\n elif tc == doublecolumn.NullableDoubleColumn.TYPE_CODE:\n vals = np.array([0.0] * rows, dtype=np.float64)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = 0 if val is None else val\n\n converted.add_column(doublecolumn.DoubleColumn(col.get_name(), vals))\n elif tc == charcolumn.NullableCharColumn.TYPE_CODE:\n vals = np.array([0] * rows, dtype=np.uint8)\n default_val = ord(charcolumn.CharColumn.DEFAULT_VALUE)\n for i in range(rows):\n val = col._values[i]\n vals[i] = default_val if val is None else val\n\n converted.add_column(charcolumn.CharColumn(col.get_name(), vals))\n elif tc == booleancolumn.NullableBooleanColumn.TYPE_CODE:\n vals = np.array([False] * rows, dtype=np.bool)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = False if val is None else 
val\n\n converted.add_column(booleancolumn.BooleanColumn(col.get_name(), vals))\n elif tc == binarycolumn.NullableBinaryColumn.TYPE_CODE:\n vals = np.array([None] * rows, dtype=np.object)\n for i in range(rows):\n val = col.get_value(i)\n vals[i] = bytearray.fromhex(\"00\") if val is None else val\n\n converted.add_column(binarycolumn.BinaryColumn(col.get_name(), vals))\n else: # undefined type\n raise dataframe.DataFrameException(\n (\"Unable to convert dataframe. Unrecognized \"\n \"column type {}\".format(type(col))))\n\n else: # convert from Default to Nullable\n converted = dataframe.NullableDataFrame()\n for col in df:\n tc = col.type_code()\n vals = np.array([None] * rows, dtype=np.object)\n for i in range(rows):\n vals[i] = col.get_value(i)\n\n if tc == bytecolumn.ByteColumn.TYPE_CODE:\n converted.add_column(bytecolumn.NullableByteColumn(col.get_name(), vals))\n elif tc == shortcolumn.ShortColumn.TYPE_CODE:\n converted.add_column(shortcolumn.NullableShortColumn(col.get_name(), vals))\n elif tc == intcolumn.IntColumn.TYPE_CODE:\n converted.add_column(intcolumn.NullableIntColumn(col.get_name(), vals))\n elif tc == longcolumn.LongColumn.TYPE_CODE:\n converted.add_column(longcolumn.NullableLongColumn(col.get_name(), vals))\n elif tc == stringcolumn.StringColumn.TYPE_CODE:\n converted.add_column(stringcolumn.NullableStringColumn(col.get_name(), vals))\n elif tc == floatcolumn.FloatColumn.TYPE_CODE:\n converted.add_column(floatcolumn.NullableFloatColumn(col.get_name(), vals))\n elif tc == doublecolumn.DoubleColumn.TYPE_CODE:\n converted.add_column(doublecolumn.NullableDoubleColumn(col.get_name(), vals))\n elif tc == charcolumn.CharColumn.TYPE_CODE:\n converted.add_column(charcolumn.NullableCharColumn(col.get_name(), vals))\n elif tc == booleancolumn.BooleanColumn.TYPE_CODE:\n converted.add_column(booleancolumn.NullableBooleanColumn(col.get_name(), vals))\n elif tc == binarycolumn.BinaryColumn.TYPE_CODE:\n converted.add_column(binarycolumn.NullableBinaryColumn(col.get_name(), vals))\n else: # undefined type\n raise dataframe.DataFrameException(\n (\"Unable to convert dataframe. Unrecognized \"\n \"column type {}\".format(type(col))))\n\n return converted\n\ndef column_from_typename(typename):\n \"\"\"Constructs and returns a Column from the specified typename.\n\n The returned Column instance is a default (non-nullable) Column.\n\n Args:\n typename: The type name of the Column to return, as a str\n\n Returns:\n A Column instance from the specified type name,\n or None if the argument is not a valid type name\n \"\"\"\n return columnutils.column_from_typename(typename)\n\ndef join(df1, col1, df2, col2):\n \"\"\"Combines all rows from the specified DataFrames which have matching\n values in their columns with the corresponding specified name.\n\n Both DataFrames must have a column with the corresponding specified name\n and an identical element type. All columns in both DataFrame instances must\n be labeled by the time this method is called. The specified DataFrames may be\n of any types.\n\n All Columns in the second DataFrame argument that are also existent in\n the first DataFrame argument are excluded in the result DataFrame returned\n by this method. Therefore, in the case of duplicate Columns, the returned\n DataFrame only contains the corresponding Column from the first DataFrame.\n\n Args:\n df1: The first DataFrame to join. Must not be None\n col1: The name of the Column in the first DataFrame argument\n to match values for. Must be a str\n df2: The second DataFrame to join. 
Must not be None\n col2: The name of the Column in the second DataFrame argument\n to match values for. Must be a str\n\n Returns:\n A DataFrame with joined rows from both specified DataFrames\n that have matching values in the Columns with the specified names\n \"\"\"\n if df1 is None or df2 is None:\n raise dataframe.DataFrameException(\"DataFrame argument must not be None\")\n\n if df1 is df2:\n raise dataframe.DataFrameException(\"Join operation is self-referential\")\n\n if not col1:\n raise dataframe.DataFrameException(\n \"First column name argument must not be None or empty\")\n\n if not col2:\n raise dataframe.DataFrameException(\n \"Second column name argument must not be None or empty\")\n\n if not df1.has_column_names():\n raise dataframe.DataFrameException(\"DataFrame must has column labels\")\n\n if not df2.has_column_names():\n raise dataframe.DataFrameException(\"DataFrame argument must have column labels\")\n\n if not df2.has_column(col2):\n raise dataframe.DataFrameException(\n \"Invalid column name for DataFrame argument: '{}'\".format(col2))\n\n if df1.get_column(col1).type_name() != df2.get_column(col2).type_name():\n raise dataframe.DataFrameException(\n (\"Column '{}' in DataFrame argument has \"\n \"a different type. \"\n \"Expected {} but found {}\").format(\n df2.get_column(col2).get_name(),\n df1.get_column(col1).type_name(),\n df2.get_column(col2).type_name()))\n\n # create a set holding the names of all columns from df2\n # that should be bypassed in the result because they already exist in df1\n duplicates = set()\n names = df2.get_column_names()\n for _, n in enumerate(names):\n if df1.has_column(n):\n duplicates.add(n)\n\n # add the specified column name to make sure\n # it is not included in the below computations\n duplicates.add(col2)\n df1.flush()\n df2.flush()\n # find the elements common to both DataFrames\n intersec = df1.get_columns(col1).intersection_rows(df2.get_columns(col2))\n use_nullable = df1.is_nullable() or df2.is_nullable()\n result = (dataframe.NullableDataFrame() if use_nullable\n else dataframe.DefaultDataFrame())\n\n # add all columns from df1\n for i in range(df1.columns()):\n c = column.Column.of_type(df1.get_column(i).type_code())\n result.add_column(col=c.as_nullable() if use_nullable else c,\n name=df1.get_column(i).get_name())\n\n # add all columns from df2 as long as they are not already in df1\n for i in range(df2.columns()):\n col = df2.get_column(i)\n # if the column is in the collection, then it\n # is either 'col2' or another duplicate, so it is skipped\n if not col.get_name() in duplicates:\n c = column.Column.of_type(col.type_code())\n result.add_column(col=c.as_nullable() if use_nullable else c,\n name=col.get_name())\n\n # iterate over all common elements and add all rows to\n # the result from both DataFrames that match the common\n # element in their respective key column\n for i in range(intersec.rows()):\n filter_key = str(intersec.get_column(0).get_value(i))\n filter1 = df1.filter(col1, filter_key)\n filter2 = df2.filter(col2, filter_key)\n # remove 'col2' and any column already existent in df1\n for name in duplicates:\n filter2.remove_column(name)\n\n length_col1 = df1.columns()\n length_col2 = df2.columns() - len(duplicates)\n # reuse the row list\n length_row = length_col1 + length_col2\n row = [None] * length_row\n for j in range(filter1.rows()):\n for k in range(filter2.rows()):\n for l in range(length_col1):\n row[l] = filter1.get_column(l).get_value(j)\n\n for l in range(length_col2):\n row[length_col1 + l] 
= filter2.get_column(l).get_value(k)\n\n result.add_row(row)\n\n result.flush()\n return result\n\ndef _group_operation(df, col, operation):\n \"\"\"Performs a group_by operation for the specified DataFrame and Column.\n\n Operation codes:\n * 1 = Minimum\n * 2 = Maximum\n * 3 = Average\n * 4 = Sum\n\n Args:\n df: The DataFrame to use for the group operation\n col: The Column to use for the group operation\n operation: The operation code to use\n\n Returns:\n A DataFrame representing the result of the group operation\n \"\"\"\n if df._internal_next() == -1 or col < 0 or col >= df.columns():\n raise dataframe.DataFrameException(\"Invalid column index: {}\".format(col))\n\n c = df.get_column(col)\n n_numeric = 0\n for i in range(df.columns()):\n c_i = df.get_column(i)\n if not c_i._name:\n raise dataframe.DataFrameException(\n \"All columns must be labeled for group operations\")\n\n if c_i is not c and c_i.is_numeric():\n n_numeric += 1\n\n uniques = df.unique(col)\n n_uniques = len(uniques)\n contains_null = df.contains(col, \"None\") if df.is_nullable() else False\n col_length = n_uniques + 1 if contains_null else n_uniques\n cols = [None] * (n_numeric + 1)\n col_names = [None] * (n_numeric + 1)\n cols[0] = column.Column.of_type(c.type_code(), col_length)\n col_names[0] = c._name\n n_numeric = 1\n for i in range(df.columns()):\n c_i = df.get_column(i)\n if c_i is not c and c_i.is_numeric():\n if operation in (3, 4): # average or sum op\n cols[n_numeric] = (doublecolumn.NullableDoubleColumn(values=col_length)\n if df.is_nullable()\n else doublecolumn.DoubleColumn(values=col_length))\n\n else:\n cols[n_numeric] = column.Column.of_type(c_i.type_code(), col_length)\n\n col_names[n_numeric] = c_i._name\n n_numeric += 1\n\n result = (dataframe.NullableDataFrame(cols)\n if df.is_nullable()\n else dataframe.DefaultDataFrame(cols))\n\n result.set_column_names(col_names)\n\n length = len(cols)\n index = 0\n for elem in uniques:\n row = [None] * length\n row[0] = elem\n filtered = df.filter(c._name, str(elem))\n for i in range(1, length, 1):\n value = 0.0\n if operation == 1:\n value = filtered.minimum(col_names[i])\n elif operation == 2:\n value = filtered.maximum(col_names[i])\n elif operation == 3:\n value = filtered.average(col_names[i])\n elif operation == 4:\n value = filtered.sum(col_names[i])\n else:\n raise dataframe.DataFrameException(\n \"Unknown group operation: {}\".format(operation))\n\n row[i] = _cast_to_numeric_type(cols[i], value)\n\n result.set_row(index, row)\n index += 1\n\n if contains_null:\n row = [None] * length\n row[0] = None\n filtered = df.filter(c._name, \"None\")\n for i in range(1, length, 1):\n value = 0.0\n if operation == 1:\n value = filtered.minimum(col_names[i])\n elif operation == 2:\n value = filtered.maximum(col_names[i])\n elif operation == 3:\n value = filtered.average(col_names[i])\n elif operation == 4:\n value = filtered.sum(col_names[i])\n else:\n raise dataframe.DataFrameException(\n \"Unknown group operation: {}\".format(operation))\n\n row[i] = _cast_to_numeric_type(cols[i], value)\n\n result.set_row(index, row)\n index += 1\n\n return result\n\ndef _cast_to_numeric_type(col, value):\n \"\"\"Casts the specified double to the corresponding Number\n type of the specified Column.\n\n Args:\n col: The Column which specifies the numeric type\n value: The float value to cast\n\n Returns:\n A number which has the concrete type used\n by the specified Column\n \"\"\"\n c = col.type_code()\n if col.is_nullable():\n if c == 
doublecolumn.NullableDoubleColumn.TYPE_CODE:\n return float(value)\n elif c == floatcolumn.NullableFloatColumn.TYPE_CODE:\n return float(value)\n elif c == bytecolumn.NullableByteColumn.TYPE_CODE:\n return int(value) if not np.isnan(value) else None\n elif c == shortcolumn.NullableShortColumn.TYPE_CODE:\n return int(value) if not np.isnan(value) else None\n elif c == intcolumn.NullableIntColumn.TYPE_CODE:\n return int(value) if not np.isnan(value) else None\n elif c == longcolumn.NullableLongColumn.TYPE_CODE:\n return int(value) if not np.isnan(value) else None\n else:\n raise dataframe.DataFrameException(\"Unrecognized column type\")\n else:\n if c == doublecolumn.DoubleColumn.TYPE_CODE:\n return float(value)\n elif c == floatcolumn.FloatColumn.TYPE_CODE:\n return float(value)\n elif c == bytecolumn.ByteColumn.TYPE_CODE:\n return int(value)\n elif c == shortcolumn.ShortColumn.TYPE_CODE:\n return int(value)\n elif c == intcolumn.IntColumn.TYPE_CODE:\n return int(value)\n elif c == longcolumn.LongColumn.TYPE_CODE:\n return int(value)\n else:\n raise dataframe.DataFrameException(\"Unrecognized column type\")\n\ndef getitem_impl(arg, position):\n \"\"\"Implementation of the __getitem__() function\n\n Args:\n arg: The DataFrame instance on which the function was called upon\n position: The position argument passed to the function\n\n Returns:\n The value at the specified position\n \"\"\"\n if isinstance(position, tuple):\n if len(position) > 2:\n raise dataframe.DataFrameException(\n (\"Invalid position argument. Too many \"\n \"positions specified: {}\").format(len(position)))\n\n cols = position[0]\n rows = position[1]\n if isinstance(cols, (int, str)):\n # check for negative column indices\n if isinstance(cols, int) and cols < 0:\n if abs(cols) > arg.columns():\n raise dataframe.DataFrameException(\n \"Invalid column index: {}\".format(cols))\n\n cols = cols % arg.columns()\n\n if rows is None:\n # implements df[x, :] and df[\"x\", :]\n return arg.get_columns(cols=cols)\n elif isinstance(rows, int):\n # implements df[x, y] and df[\"x\", y]\n if rows < 0:\n if abs(rows) > arg.rows():\n raise dataframe.DataFrameException(\n \"Invalid row index: {}\".format(rows))\n\n rows = rows % arg.rows()\n\n return arg.get_column(cols).get_value(rows)\n elif isinstance(rows, str):\n # implements df[x, \"y_regex\"] and df[\"x\", \"y_regex\"]\n return arg.filter(cols, rows)\n elif isinstance(rows, tuple):\n # implements df[x, (y0, y1, ..., yn)]\n # and df[\"x\", (y0, y1, ..., yn)]\n col_selected = arg.get_column(cols)\n col = column.Column.like(col_selected, length=len(rows))\n df = (dataframe.NullableDataFrame(col)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(col))\n\n for i, row_index in enumerate(rows):\n col[i] = col_selected[row_index]\n\n return df\n\n elif isinstance(rows, slice):\n # implements df[x, y0:y1:y2]\n # and df[\"x\", y0:y1:y2]\n start = rows.start\n stop = rows.stop\n step = rows.step\n col_selected = arg.get_column(cols)\n # numpy returns an array view when slicing\n # so we have to copy the array explicitly\n # to get an independent instance\n col_values = col_selected._values[start:stop:step].copy()\n col = column.Column.like(col_selected, length=0)\n col._values = col_values\n return (dataframe.NullableDataFrame(col)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(col))\n\n elif isinstance(cols, (tuple, slice)):\n # prefetch the selected columns as a DataFrame\n if isinstance(cols, tuple):\n cols_selected = arg.get_columns(cols=cols)\n else: # is slice\n 
cols_selected = arg._internal_columns()[cols]\n cols_selected = (dataframe.NullableDataFrame(cols_selected)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(cols_selected))\n\n if rows is None:\n # implements df[(x0, x1, ..., xn), ]\n # and df[x0:x1:x2, ]\n return cols_selected\n elif isinstance(rows, int):\n # implements df[(x0, x1, ..., xn), y]\n # and df[x0:x1:x2, y]\n if rows < 0:\n if abs(rows) > arg.rows():\n raise dataframe.DataFrameException(\n \"Invalid row index: {}\".format(rows))\n\n rows = rows % arg.rows()\n\n return cols_selected.get_row(rows)\n\n elif isinstance(rows, tuple):\n # implements df[(x0, x1, ..., xn), (y0, y1, ..., ym)]\n # and df[x0:x1:x2, (y0, y1, ..., ym)]\n cols = [column.Column.like(col, length=len(rows))\n for col in cols_selected._internal_columns()]\n\n df = (dataframe.NullableDataFrame(cols)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(cols))\n\n for i, row_index in enumerate(rows):\n df.set_row(i, cols_selected.get_row(rows[i]))\n\n return df\n\n elif isinstance(rows, slice):\n # implements df[(x0, x1, ..., xn), y0:y1:y2]\n # and df[x0:x1:x2, y0:y1:y2]\n start = rows.start\n stop = rows.stop\n step = rows.step\n cols = [None] * cols_selected.columns()\n for i, col in enumerate(cols_selected._internal_columns()):\n col_values = col._values[start:stop:step].copy()\n col_sliced = column.Column.like(col, length=col_values.shape[0])\n col_sliced._values = col_values\n cols[i] = col_sliced\n\n return (dataframe.NullableDataFrame(cols)\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(cols))\n\n elif isinstance(rows, str):\n raise dataframe.DataFrameException(\n (\"Invalid column position type. A filter operation \"\n \"must only specify a single column \"\n \"but found {}\").format(type(cols)))\n\n else:\n # invalid type for column position arg\n raise dataframe.DataFrameException(\n (\"Invalid column position type. \"\n \"Expected int or str but found {}\").format(type(cols)))\n\n elif isinstance(position, int):\n # implements df[x]\n if position < 0:\n if abs(position) > arg.columns():\n raise dataframe.DataFrameException(\n \"Invalid column index: {}\".format(position))\n\n position = position % arg.columns()\n\n return arg.get_column(position)\n elif isinstance(position, str):\n # implements df[\"x\"]\n return arg.get_column(position)\n else:\n # invalid type for entire position arg\n raise dataframe.DataFrameException(\n (\"Invalid position type. \"\n \"Expected int or str but \"\n \"found {}\").format(type(position)))\n\n # make pylint happy about missing return statement\n raise dataframe.DataFrameException(\"Implementation error\")\n\ndef setitem_impl(arg, position, value):\n \"\"\"Implementation of the __setitem__() function.\n\n Args:\n arg: The DataFrame instance on which the function was called upon\n position: The position argument passed to the function\n value: The value argument passed to the function\n \"\"\"\n if isinstance(position, tuple):\n if len(position) > 2:\n raise dataframe.DataFrameException(\n (\"Invalid position argument. 
Too many \"\n \"positions specified: {}\").format(len(position)))\n\n cols = position[0]\n rows = position[1]\n if isinstance(cols, (int, str)):\n # check for negative column indices\n if isinstance(cols, int) and cols < 0:\n if abs(cols) > arg.columns():\n raise dataframe.DataFrameException(\n \"Invalid column index: {}\".format(cols))\n\n cols = cols % arg.columns()\n\n if rows is None:\n # implements df[x, :] = Column\n # and df[\"x\", :] = Column\n arg.set_column(cols, value)\n elif isinstance(rows, int):\n # implements df[x, y] = v\n # and df[\"x\", y] = v\n if rows < 0:\n if abs(rows) > arg.rows():\n raise dataframe.DataFrameException(\n \"Invalid row index: {}\".format(rows))\n\n rows = rows % arg.rows()\n\n arg.get_column(cols).set_value(rows, value)\n elif isinstance(rows, str):\n # implements df[x, \"y_regex\"] = v | func | lamda\n # and df[\"x\", \"y_regex\"] = v | func | lamda\n arg.replace(cols, rows, replacement=value)\n elif isinstance(rows, tuple):\n # implements df[x, (y0, y1, ..., yn)] = (v0, v1, ..., vn)\n # and df[\"x\", (y0, y1, ..., yn)] = (v0, v1, ..., vn)\n col = arg.get_column(cols)\n if isinstance(value, (list, tuple)):\n if len(rows) != len(value):\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified \"\n \"list/tuple has a size of {} but the row position \"\n \"argument has a size of {}\")\n .format(len(value), len(rows)))\n\n for i, index in enumerate(rows):\n col.set_value(index, value[i])\n\n else:\n # implements df[x, (y0, y1, ..., yn)] = v\n # and df[\"x\", (y0, y1, ..., yn)] = v\n for index in rows:\n col.set_value(index, value)\n\n elif isinstance(rows, slice):\n rows = rows.indices(arg.rows())\n start = rows[0]\n stop = rows[1]\n step = rows[2]\n col = arg.get_column(cols)\n if isinstance(value, (list, tuple)):\n # implements df[x, y0:y1:y2] = (v0, v1, ..., vn)\n # and df[\"x\", y0:y1:y2] = (v0, v1, ..., vn)\n if ((stop - start) // step) != len(value):\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified \"\n \"list/tuple has a size of {} but the row position \"\n \"argument has a size of {}\")\n .format(len(value), (stop - start) // step))\n\n i = 0\n for index in range(start, stop, step):\n col.set_value(index, value[i])\n i += 1\n\n else:\n # implements df[x, y0:y1:y2] = v\n # and df[\"x\", y0:y1:y2] = v\n for index in range(start, stop, step):\n col.set_value(index, value)\n\n else:\n # invalid type for row position arg\n raise dataframe.DataFrameException(\n (\"Invalid row position type. 
\"\n \"Expected int or str but found {}\").format(type(rows)))\n\n elif isinstance(cols, (tuple, slice)):\n # prefetch the selected columns as a DataFrame\n if isinstance(cols, tuple):\n cols_selected = arg.get_columns(cols=cols)\n else: # is slice\n cols_selected = (dataframe.NullableDataFrame(arg._internal_columns()[cols])\n if arg.is_nullable()\n else dataframe.DefaultDataFrame(arg._internal_columns()[cols]))\n\n if isinstance(rows, int):\n if rows < 0:\n if abs(rows) > arg.rows():\n raise dataframe.DataFrameException(\n \"Invalid row index: {}\".format(rows))\n\n rows = rows % arg.rows()\n\n if isinstance(value, (tuple, list)):\n # implements df[(x0, x1, ..., xn), y] = [v0, v1, ..., vn]\n # and df[x0:x1:x2, y] = [v0, v1, ..., vn]\n cols_selected.set_row(rows, value)\n else:\n # implements df[(x0, x1, ..., xn), y] = v\n # and df[x0:x1:x2, y] = v\n cols_selected.set_row(rows, [value] * cols_selected.columns())\n\n elif isinstance(rows, tuple):\n if isinstance(value, (list, tuple)):\n # implements df[(x0, x1, ..., xn), (y0, y1, ..., ym)] = [[ ], [ ], ..., [ ]]\n # and df[x0:x1:x2, (y0, y1, ..., ym)] = [[ ], [ ], ..., [ ]]\n if len(value) == 0:\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified list/tuple \"\n \"of row values is empty\"))\n\n if isinstance(value[0], (list, tuple)):\n if len(rows) != len(value):\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified list/tuple \"\n \"has a size of {} but the row position argument \"\n \"has a size of {}\").format(len(value), len(rows)))\n\n for i, index in enumerate(rows):\n cols_selected.set_row(index, value[i])\n else:\n for index in rows:\n cols_selected.set_row(index, value)\n\n elif isinstance(value, dataframe.DataFrame):\n # implements df[(x0, x1, ..., xn), (y0, y1, ..., ym)] = vDataFrame\n # and df[x0:x1:x2, (y0, y1, ..., ym)] = vDataFrame\n if len(rows) != value.rows():\n rmsg1 = \"rows\" if value.rows() != 1 else \"row\"\n rmsg2 = \"rows\" if len(rows) != 1 else \"row\"\n raise dataframe.DataFrameException(\n (\"Invalid value argument. The specified \"\n \"DataFrame has {} {} but the row position \"\n \"argument specified {} {}\")\n .format(value.rows(), rmsg1, len(rows), rmsg2))\n\n for i, index in enumerate(rows):\n cols_selected.set_row(index, value.get_row(i))\n\n else:\n # implements df[(x0, x1, ..., xn), (y0, y1, ..., ym)] = v\n # and df[x0:x1:x2, (y0, y1, ..., ym)] = v\n value = [value] * cols_selected.columns()\n for index in rows:\n cols_selected.set_row(index, value)\n\n elif isinstance(rows, slice):\n rows = rows.indices(cols_selected.rows())\n start = rows[0]\n stop = rows[1]\n step = rows[2]\n if isinstance(value, (list, tuple)):\n # implements df[(x0, x1, ..., xn), y0:y1:y2] = [ .. ]\n # and df[x0:x1:x2, y0:y1:y2] = [ .. ]\n for index in range(start, stop, step):\n cols_selected.set_row(index, value)\n\n elif isinstance(value, dataframe.DataFrame):\n # implements df[(x0, x1, ..., xn), y0:y1:y2] = vDataFrame\n # and df[x0:x1:x2, y0:y1:y2] = vDataFrame\n i = 0\n for index in range(start, stop, step):\n cols_selected.set_row(index, value.get_row(i))\n i += 1\n\n else:\n # implements df[(x0, x1, ..., xn), y0:y1:y2] = v\n # and df[x0:x1:x2, y0:y1:y2] = v\n value = [value] * cols_selected.columns()\n for index in range(start, stop, step):\n cols_selected.set_row(index, value)\n\n elif isinstance(rows, str):\n raise dataframe.DataFrameException(\n (\"Invalid column position type. 
A replacement operation \"\n \"must only specify a single column \"\n \"but found {}\").format(type(cols)))\n\n else:\n # invalid type for row position arg\n raise dataframe.DataFrameException(\n (\"Invalid row position type. \"\n \"Expected int or str but found {}\").format(type(rows)))\n\n else:\n # invalid type for column position arg\n raise dataframe.DataFrameException(\n (\"Invalid column position type. \"\n \"Expected int or str but found {}\").format(type(cols)))\n\n elif isinstance(position, int):\n # check for negative column indices\n if position < 0:\n if abs(position) > arg.columns():\n raise dataframe.DataFrameException(\n \"Invalid column index: {}\".format(position))\n\n position = position % arg.columns()\n\n # implements df[x] = Column\n if position == arg.columns():\n arg.add_column(value)\n else:\n arg.set_column(position, value)\n elif isinstance(position, str):\n # and df[\"x\"] = Column\n arg.set_column(position, value)\n else:\n # invalid type for entire position arg\n raise dataframe.DataFrameException(\n (\"Invalid position type. \"\n \"Expected int or str but \"\n \"found {}\").format(type(position)))\n"
] | [
[
"numpy.array",
"numpy.isnan"
]
] |
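The conversion path in the pydf row above turns a nullable column into a default one by substituting each column type's primitive default for None, and _cast_to_numeric_type treats NaN aggregates as missing when the target is an integer column. A minimal numpy-only sketch of those two substitutions; the values are made up and this is not pydf's API:

import numpy as np

nullable_values = [4, None, 7, None, 1]

# None becomes the primitive default (0 for integer columns)
defaults = np.array([0 if v is None else v for v in nullable_values], dtype=np.int32)
print(defaults)  # [4 0 7 0 1]

# casting an aggregate back to an integer column: NaN means "no value" -> None
aggregate = float("nan")
as_int = int(aggregate) if not np.isnan(aggregate) else None
print(as_int)  # None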
amitsou/cloud_services | [
"22a2381227ecab8d1626e3dfa961821954188327"
] | [
"Functions/sensing_utils.py"
] | [
"import os\nimport cv2\nimport sys\nimport json\nimport time\nimport codecs\nimport argparse\nimport logging\nimport numpy as np\nimport warnings\n\ntry:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\",category=FutureWarning)\n from keras.models import load_model\n from keras.preprocessing import image\n from keras import layers\n from keras import models\n from keras import regularizers\n from keras import layers\n from keras.preprocessing.image import ImageDataGenerator\nexcept Exception as ex:\n sys.exit('Error import Keras library')\n\ntry:\n import paho.mqtt.client as mqtt\nexcept Exception as ex:\n sys.exit('Paho library is not present')\n\n\n\ndef upload_image(out_topic):\n \"\"\"Upload an image into the Synaisthisi Platform\n \"\"\"\n try:\n while True:\n\n img_dir = get_img_dir()\n data_out = parse_img(img_dir)\n client.publish(out_topic, data_out)\n\n except KeyboardInterrupt:\n client.loop_stop()\n\n\ndef get_img_dir():\n \"\"\"Get the image path\n\n Returns:\n [str]: [The absolute path to the image file]\n \"\"\"\n img_dir = '' #enter your image path here\n img_name = ''#enter image name here\n img_dir = os.path.join(img_dir,img_name)\n return img_dir\n\n\ndef parse_img(img_dir):\n \"\"\"Open, preprocess and convert an image into json format\n\n Args:\n img_dir (str): The image absolute path\n\n Returns:\n [str]: The json object to be published to the platform\n \"\"\"\n img = cv2.imread(img_dir)\n height, width, channels = img.shape\n img = image.load_img(img_dir, target_size=(height,width))\n img = np.array(image)\n img = img.ravel()\n\n compressed_obj = [img.tolist(), height, width, channels]\n json_obj = json.dumps(compressed_obj)\n return json_obj\n\n\ndef s_type_service_args():\n \"\"\"\n Provide the S-type service args\n \"\"\"\n parser = argparse.ArgumentParser(description='Collect arguments')\n parser.add_argument(\"--username\", metavar='username(text)', help=\"Please provide username\")\n parser.add_argument(\"--p\", metavar='password(text)', help=\"Please provide password\")\n parser.add_argument(\"--output_topics\", nargs='*', metavar='Output_topic',help='MQTT Broker Output Topics')\n\n #Developer should take care to parse as many input/output topics created in web app\n args = parser.parse_args()\n username = args.username\n user_pass = args.p\n out_topics = args.output_topics\n\n print(\"Output Topics: {0}\".format(out_topics))\n out_topic = out_topics[0]\n return username, user_pass, out_topic"
] | [
[
"numpy.array"
]
] |
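parse_img in the row above flattens an image and publishes it as a JSON payload of [pixels, height, width, channels]; note that it appears to call np.array on the imported keras image module rather than on the loaded image, which looks unintended. A minimal sketch of the payload round-trip using random pixels instead of a file on disk, with the MQTT publish left as a comment; the shapes are arbitrary:

import json

import numpy as np

height, width, channels = 4, 4, 3
img = np.random.randint(0, 256, size=(height, width, channels), dtype=np.uint8)

# flatten the array itself and pack it with its dimensions, as parse_img intends
payload = json.dumps([img.ravel().tolist(), height, width, channels])
# client.publish(out_topic, payload)  # MQTT publish omitted in this sketch

restored = np.array(json.loads(payload)[0], dtype=np.uint8).reshape(height, width, channels)
assert (restored == img).all()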
svaiter/sparse-ho | [
"8c04ca533e44ecd128dc26b6830a556babf8416f"
] | [
"examples/plot_held_out_lasso.py"
] | [
"\"\"\"\n============================\nLasso with held-out test set\n============================\n\nThis example shows how to perform hyperparameter optimization\nfor a Lasso using a held-out validation set.\n\n\"\"\"\n\n# Authors: Quentin Bertrand <[email protected]>\n# Quentin Klopfenstein <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import linear_model\n\nfrom sparse_ho.models import Lasso\nfrom sparse_ho.criterion import HeldOutMSE\nfrom sparse_ho.forward import Forward\nfrom sparse_ho.implicit_forward import ImplicitForward\nfrom sparse_ho.utils import Monitor\nfrom sparse_ho.ho import grad_search\nfrom sparse_ho.grid_search import grid_search\nfrom sklearn.datasets import make_regression\n\nfrom sklearn.model_selection import train_test_split\n\n\nfrom sparse_ho.datasets import get_data\n\nprint(__doc__)\n\ndataset = 'rcv1'\n# dataset = 'simu'\n\nif dataset == 'rcv1':\n X_train, X_val, X_test, y_train, y_val, y_test = get_data('rcv1_train')\nelse:\n X, y = make_regression(n_samples=1000, n_features=1000, noise=40)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.5)\n\nn_samples, n_features = X_train.shape\n\nprint(\"Starting path computation...\")\nn_samples = len(y_train)\nalpha_max = np.max(np.abs(X_train.T.dot(y_train))) / X_train.shape[0]\nlog_alpha0 = np.log(alpha_max / 10)\n\nn_alphas = 10\np_alphas = np.geomspace(1, 0.0001, n_alphas)\nalphas = alpha_max * p_alphas\nlog_alphas = np.log(alphas)\n\ntol = 1e-7\nmax_iter = 1e5\n\n##############################################################################\n# Grid-search with scikit-learn\n# -----------------------------\n\nestimator = linear_model.Lasso(\n fit_intercept=False, max_iter=1000, warm_start=True)\n\nprint('scikit-learn started')\n\nt0 = time.time()\nmodel = Lasso(X_train, y_train, estimator=estimator)\ncriterion = HeldOutMSE(X_val, y_val, model, X_test=X_test, y_test=y_test)\nalgo = Forward(criterion)\nmonitor_grid_sk = Monitor()\ngrid_search(\n algo, criterion, None, None, monitor_grid_sk, log_alphas=log_alphas,\n tol=tol)\nobjs = np.array(monitor_grid_sk.objs)\nt_sk = time.time() - t0\n\nprint('scikit-learn finished')\n\n\n##############################################################################\n# Grad-search with sparse-ho\n# --------------------------\n\nprint('sparse-ho started')\n\nt0 = time.time()\nmodel = Lasso(X_train, y_train, estimator=estimator)\ncriterion = HeldOutMSE(X_val, y_val, model, X_test=X_test, y_test=y_test)\nalgo = ImplicitForward(criterion)\nmonitor_grad = Monitor()\ngrad_search(\n algo, criterion, np.log(alpha_max / 10), monitor_grad, n_outer=10, tol=tol)\n\nt_grad_search = time.time() - t0\n\nprint('sparse-ho finished')\n\n##############################################################################\n# Plot results\n# ------------\n\np_alphas_grad = np.exp(np.array(monitor_grad.log_alphas)) / alpha_max\n\nobjs_grad = np.array(monitor_grad.objs)\n\nprint('sparse-ho finished')\nprint(\"Time to compute CV for scikit-learn: %.2f\" % t_sk)\nprint(\"Time to compute CV for sparse-ho: %.2f\" % t_grad_search)\n\nprint('Minimum objective grid-search %.5f' % objs.min())\nprint('Minimum objective grad-search %.5f' % objs_grad.min())\n\n\ncurrent_palette = sns.color_palette(\"colorblind\")\n\nfig = plt.figure(figsize=(5, 3))\nplt.semilogx(\n p_alphas, objs, 
color=current_palette[0])\nplt.semilogx(\n p_alphas, objs, 'bo', label='0-order method (grid-search)',\n color=current_palette[1])\nplt.semilogx(\n p_alphas_grad, objs_grad, 'bX', label='1-st order method',\n color=current_palette[2])\nplt.xlabel(r\"$\\lambda / \\lambda_{\\max}$\")\nplt.ylabel(\n r\"$\\|y^{\\rm{val}} - X^{\\rm{val}} \\hat \\beta^{(\\lambda)} \\|^2$\")\nplt.tick_params(width=5)\nplt.legend()\nplt.tight_layout()\nplt.show(block=False)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.semilogx",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.geomspace",
"sklearn.datasets.make_regression",
"numpy.log",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.Lasso",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
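The sparse-ho row above sweeps a geometric grid of lambda / lambda_max values and compares the held-out error of that grid against a first-order search. A minimal scikit-learn-only sketch of the grid half of that comparison on synthetic data, without sparse-ho; sizes and seeds are arbitrary:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=300, n_features=100, noise=40, random_state=0)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.5, random_state=0)

# largest useful penalty: above alpha_max the Lasso solution is identically zero
alpha_max = np.max(np.abs(X_train.T.dot(y_train))) / X_train.shape[0]
alphas = alpha_max * np.geomspace(1, 1e-4, 10)

estimator = Lasso(fit_intercept=False, max_iter=10000, warm_start=True)
val_mse = []
for alpha in alphas:
    estimator.set_params(alpha=alpha)
    estimator.fit(X_train, y_train)
    val_mse.append(np.mean((y_val - X_val @ estimator.coef_) ** 2))

best = alphas[int(np.argmin(val_mse))]
print(f"best alpha / alpha_max = {best / alpha_max:.4f}")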
kindsenior/nngen | [
"cba265b1a140f2aef7208926703782b6dac9e8be"
] | [
"tests/matrix_conv2d/matrix_conv2d.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport functools\nimport math\nimport numpy as np\n\n# the next line can be removed after installation\nsys.path.insert(0, os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__)))))\n\nimport nngen as ng\n\nfrom veriloggen import *\nimport veriloggen.thread as vthread\nimport veriloggen.types.axi as axi\n\n\ndef run(act_shape=(1, 7, 7, 7), weight_shape=(3, 3, 3, 7),\n bias_shape=None, scale_shape=None,\n act_dtype=ng.int32, weight_dtype=ng.int32,\n bias_dtype=ng.int32, scale_dtype=ng.int32,\n out_dtype=ng.int32,\n stride=(1, 1, 1, 1),\n rshift_mul=None, rshift_sum=None, rshift_out=0,\n act_func=None,\n par_ich=1, par_och=1, par_col=1, par_row=1,\n concur_och=None, stationary='filter',\n input_ram_size=None, filter_ram_size=None,\n bias_ram_size=None, scale_ram_size=None,\n out_ram_size=None,\n axi_datawidth=32, silent=False,\n filename=None, simtype='iverilog', outputfile=None):\n\n # create target hardware\n act = ng.placeholder(act_dtype, shape=act_shape, name='act')\n weight = ng.variable(weight_dtype, shape=weight_shape, name='weight')\n\n if bias_shape is not None:\n bias = ng.variable(bias_dtype, bias_shape, name='bias')\n else:\n bias = None\n\n if scale_shape is not None:\n scale = ng.variable(scale_dtype, scale_shape, name='scale')\n else:\n scale = None\n\n out = ng.conv2d(act, weight, stride,\n bias, scale,\n rshift_mul, rshift_sum, rshift_out,\n act_func, 'SAME',\n out_dtype, ng.int32, ng.int32,\n 'conv2d',\n par_ich, par_och, par_col, par_row,\n concur_och,\n stationary,\n input_ram_size, filter_ram_size,\n bias_ram_size, scale_ram_size,\n None, None, None,\n out_ram_size)\n\n targ = ng.to_veriloggen([out], 'matrix_conv2d', silent=silent,\n config={'maxi_datawidth': axi_datawidth})\n\n # verification data\n if act_dtype.width > 4:\n vact = np.arange(act.length, dtype=np.int64).reshape(act.shape) % [11]\n else:\n vact = np.arange(act.length, dtype=np.int64).reshape(act.shape) % [5]\n\n vweight = np.arange(weight.length,\n dtype=np.int64).reshape(weight.shape) % [7] - [3]\n\n if bias is not None:\n vbias = np.arange(bias.length,\n dtype=np.int64).reshape(bias.shape) % [4]\n else:\n vbias = None\n\n if scale is not None:\n vscale = np.arange(scale.length,\n dtype=np.int64).reshape(scale.shape) % [6]\n else:\n vscale = None\n\n eval_outs = ng.eval([out], act=vact, weight=vweight, bias=vbias, scale=vscale)\n vout = eval_outs[0]\n\n # to memory image\n size_max = int(math.ceil(max(act.memory_size, weight.memory_size,\n bias.memory_size if bias is not None else 0,\n scale.memory_size if scale is not None else 0,\n out.memory_size) / 4096)) * 4096\n check_addr = max(act.addr, weight.addr,\n bias.addr if bias is not None else -1,\n scale.addr if scale is not None else -1,\n out.addr) + size_max\n size_check = size_max\n tmp_addr = check_addr + size_check\n\n memimg_datawidth = 32\n mem = np.zeros([1024 * 1024 * 8 // (memimg_datawidth // 8)], dtype=np.int64)\n mem = mem + [100]\n\n axi.set_memory(mem, vact, memimg_datawidth,\n act_dtype.width, act.addr,\n max(int(math.ceil(axi_datawidth / act_dtype.width)), par_ich))\n\n axi.set_memory(mem, vweight, memimg_datawidth,\n weight_dtype.width, weight.addr,\n max(int(math.ceil(axi_datawidth / weight_dtype.width)), par_ich))\n\n if bias is not None:\n axi.set_memory(mem, vbias, memimg_datawidth,\n bias_dtype.width, bias.addr,\n max(int(math.ceil(axi_datawidth / bias_dtype.width)), par_och))\n\n if scale is not None:\n 
axi.set_memory(mem, vscale, memimg_datawidth,\n scale_dtype.width, scale.addr,\n max(int(math.ceil(axi_datawidth / scale_dtype.width)), par_och))\n\n axi.set_memory(mem, vout, memimg_datawidth,\n out_dtype.width, check_addr,\n max(int(math.ceil(axi_datawidth / out_dtype.width)), par_och))\n\n # test controller\n m = Module('test')\n params = m.copy_params(targ)\n ports = m.copy_sim_ports(targ)\n clk = ports['CLK']\n resetn = ports['RESETN']\n rst = m.Wire('RST')\n rst.assign(Not(resetn))\n\n # AXI memory model\n if outputfile is None:\n outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'\n\n memimg_name = 'memimg_' + outputfile\n\n memory = axi.AxiMemoryModel(m, 'memory', clk, rst,\n datawidth=axi_datawidth,\n memimg=mem, memimg_name=memimg_name,\n memimg_datawidth=memimg_datawidth)\n memory.connect(ports, 'maxi')\n\n # AXI-Slave controller\n _saxi = vthread.AXIMLite(m, '_saxi', clk, rst, noio=True)\n _saxi.connect(ports, 'saxi')\n\n # timer\n time_counter = m.Reg('time_counter', 32, initval=0)\n seq = Seq(m, 'seq', clk, rst)\n seq(\n time_counter.inc()\n )\n\n def ctrl():\n for i in range(100):\n pass\n\n ng.sim.set_global_addrs(_saxi, tmp_addr)\n\n start_time = time_counter.value\n ng.sim.start(_saxi)\n\n print('# start')\n\n ng.sim.wait(_saxi)\n end_time = time_counter.value\n\n print('# end')\n print('# execution cycles: %d' % (end_time - start_time))\n\n # verify\n ok = True\n for bat in range(out.shape[0]):\n for y in range(out.shape[1]):\n for x in range(out.shape[2]):\n for ch in range(out.shape[3]):\n orig = memory.read_word(\n bat * out.aligned_shape[1] * out.aligned_shape[2] * out.aligned_shape[3] +\n y * out.aligned_shape[2] * out.aligned_shape[3] +\n x * out.aligned_shape[3] + ch,\n out.addr, out_dtype.width)\n check = memory.read_word(\n bat * out.aligned_shape[1] * out.aligned_shape[2] * out.aligned_shape[3] +\n y * out.aligned_shape[2] * out.aligned_shape[3] +\n x * out.aligned_shape[3] + ch,\n check_addr, out_dtype.width)\n\n if vthread.verilog.NotEql(orig, check):\n print('NG (', bat, y, x, ch,\n ') orig: ', orig, ' check: ', check)\n ok = False\n # else:\n # print('OK (', bat, y, x, ch,\n # ') orig: ', orig, ' check: ', check)\n\n if ok:\n print('# verify: PASSED')\n else:\n print('# verify: FAILED')\n\n vthread.finish()\n\n th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)\n fsm = th.start()\n\n uut = m.Instance(targ, 'uut',\n params=m.connect_params(targ),\n ports=m.connect_ports(targ))\n\n # simulation.setup_waveform(m, uut)\n simulation.setup_clock(m, clk, hperiod=5)\n init = simulation.setup_reset(m, resetn, m.make_reset(), period=100, polarity='low')\n\n init.add(\n Delay(10000000),\n Systask('finish'),\n )\n\n # output source code\n if filename is not None:\n m.to_verilog(filename)\n\n # run simulation\n sim = simulation.Simulator(m, sim=simtype)\n rslt = sim.run(outputfile=outputfile)\n lines = rslt.splitlines()\n if simtype == 'verilator' and lines[-1].startswith('-'):\n rslt = '\\n'.join(lines[:-1])\n return rslt\n\n\nif __name__ == '__main__':\n rslt = run(silent=False, filename='tmp.v')\n print(rslt)\n"
] | [
[
"numpy.arange",
"numpy.zeros"
]
] |
grantseiter/Tax-Benefits-Of-Parenthood | [
"5350e832e8b877b46c2a3cab070fc8262b914a52"
] | [
"Tax-Calculator-3.0.0/taxcalc/tests/test_data.py"
] | [
"# CODING-STYLE CHECKS:\r\n# pycodestyle test_data.py\r\n\r\nimport os\r\nimport tempfile\r\nimport pytest\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom taxcalc import Data, GrowFactors\r\n\r\n\r\n# Test specification and use of simple Data-derived class.\r\n# This derived class is called Recs and it contains aged data.\r\n#\r\n# The following pytest fixture specifies the VARINFO file for the\r\n# Recs class, which is defined in the test_recs_class function.\r\n\r\n\r\nVARINFO_JSON = \"\"\"\r\n{\r\n \"read\": {\r\n \"RECID\": {\r\n \"required\": true,\r\n \"type\": \"int\",\r\n \"desc\": \"Unique numeric identifier for record\"\r\n },\r\n \"MARS\": {\r\n \"required\": true,\r\n \"type\": \"int\",\r\n \"desc\": \"Filing (marital) status [1..5]\"\r\n },\r\n \"e00300\": {\r\n \"type\": \"float\",\r\n \"desc\": \"Taxable interest income\"\r\n },\r\n \"s006\": {\r\n \"type\": \"float\",\r\n \"desc\": \"Record sampling weight\"\r\n }\r\n },\r\n \"calc\": {\r\n \"expanded_income\": {\r\n \"type\": \"float\"\r\n }\r\n }\r\n}\r\n\"\"\"\r\n\r\n\r\[email protected](scope='module', name='recs_varinfo_file')\r\ndef fixture_recs_varinfo_json_file():\r\n \"\"\"\r\n Define JSON VARINFO file for Data-derived Recs class.\r\n \"\"\"\r\n with tempfile.NamedTemporaryFile(mode='a', delete=False) as pfile:\r\n pfile.write(VARINFO_JSON + '\\n')\r\n pfile.close()\r\n yield pfile\r\n os.remove(pfile.name)\r\n\r\n\r\ndef test_recs_class(recs_varinfo_file, cps_subsample):\r\n \"\"\"\r\n Specify Data-derived Recs class and test it.\r\n \"\"\"\r\n\r\n class Recs(Data):\r\n \"\"\"\r\n The Recs class is derived from the abstract base Data class.\r\n \"\"\"\r\n VARINFO_FILE_NAME = recs_varinfo_file.name\r\n VARINFO_FILE_PATH = ''\r\n\r\n def __init__(self, data, start_year, gfactors, weights):\r\n super().__init__(data, start_year, gfactors, weights)\r\n\r\n def _extrapolate(self, year):\r\n self.e00300 *= self.gfactors.factor_value('AINTS', year)\r\n\r\n # test Recs class for incorrect instantiation\r\n with pytest.raises(ValueError):\r\n Recs(data=list(), start_year=2000,\r\n gfactors=None, weights=None)\r\n with pytest.raises(ValueError):\r\n Recs(data=cps_subsample, start_year=list(),\r\n gfactors=None, weights=None)\r\n with pytest.raises(ValueError):\r\n Recs(data=cps_subsample, start_year=2000,\r\n gfactors=None, weights='')\r\n with pytest.raises(ValueError):\r\n Recs(data=cps_subsample, start_year=2000,\r\n gfactors=GrowFactors(), weights=None)\r\n with pytest.raises(ValueError):\r\n Recs(data=cps_subsample, start_year=2000,\r\n gfactors='', weights='')\r\n # test Recs class for correct instantiation with no aging of data\r\n syr = 2014\r\n rec = Recs(data=cps_subsample, start_year=syr,\r\n gfactors=None, weights=None)\r\n assert isinstance(rec, Recs)\r\n assert np.all(rec.MARS != 0)\r\n assert rec.data_year == syr\r\n assert rec.current_year == syr\r\n sum_e00300_in_syr = rec.e00300.sum()\r\n rec.increment_year()\r\n assert rec.data_year == syr\r\n assert rec.current_year == syr + 1\r\n sum_e00300_in_syr_plus_one = rec.e00300.sum()\r\n assert np.allclose([sum_e00300_in_syr], [sum_e00300_in_syr_plus_one])\r\n del rec\r\n # test Recs class for correct instantiation with aging of data\r\n wghts_path = os.path.join(GrowFactors.FILE_PATH, 'cps_weights.csv.gz')\r\n wghts_df = pd.read_csv(wghts_path)\r\n rec = Recs(data=cps_subsample, start_year=syr,\r\n gfactors=GrowFactors(), weights=wghts_df)\r\n assert isinstance(rec, Recs)\r\n assert np.all(rec.MARS != 0)\r\n assert rec.data_year == syr\r\n assert 
rec.current_year == syr\r\n sum_s006_in_syr = rec.s006.sum()\r\n sum_e00300_in_syr = rec.e00300.sum()\r\n rec.increment_year()\r\n assert rec.data_year == syr\r\n assert rec.current_year == syr + 1\r\n sum_s006_in_syr_plus_one = rec.s006.sum()\r\n assert sum_s006_in_syr_plus_one > sum_s006_in_syr\r\n sum_e00300_in_syr_plus_one = rec.e00300.sum()\r\n # because growfactor for e00300 was less than one in 2015, assert < below:\r\n assert sum_e00300_in_syr_plus_one < sum_e00300_in_syr\r\n # test private methods\r\n rec._read_data(data=None)\r\n rec._read_weights(weights=None)\r\n with pytest.raises(ValueError):\r\n rec._read_weights(weights=list())\r\n"
] | [
[
"pandas.read_csv",
"numpy.allclose",
"numpy.all"
]
] |
michi7x7/pm-mos-model | [
"394d752b1165f5afd96520f1b6e2dbecc27fdc4b"
] | [
"CryMOS/QV.py"
] | [
"import numpy as np\n\nfrom .constants import *\nfrom .Bulk import BulkModel, BulkModelFD, BulkModelTails\nfrom .base import MosModelBase, writeable_property\n\nfrom math import sqrt\nfrom scipy.integrate import quad\n\n__all__ = ['DefaultQV', 'BeckersQVpy',\n 'DiracQVpy', 'TailsQVpy',\n 'GildenblatQVpy', 'DefaultQV']\n\n\nclass BeckersQVpy(MosModelBase, BulkModel):\n \"\"\" modelled after CRYOGENIC MOS TRANSISTOR MODEL \"\"\"\n\n new_params = ('cox', 'N_t', 'psi_t', 'g_t', 'Q_0', '_phi_m')\n params = MosModelBase.params + BulkModel.params + new_params\n pandas_default = ('temp',) # TODO\n\n def __init__(self, **kwargs):\n self.eps_si = eps_si # DO NOT CHANGE! many parts of the model refer to the global eps_si\n self.cox = 0.005755\n\n self._phi_m = None\n\n self.N_t = None\n self.psi_t = []\n self.g_t = 4.\n self.Q_0 = 0. # fixed oxide charge\n\n BulkModel.__init__(self)\n MosModelBase.__init__(self, **kwargs)\n\n self.update_params(**kwargs)\n\n @writeable_property\n def phi_m(self):\n \"\"\" gate work function / electron affinity. Default: degenerately doped silicon E_f = E_c \"\"\"\n return self.chi/e\n\n @property\n def phi_ms(self):\n \"\"\" work function difference between gate/bulk ~ flatband voltage\n\n for a poly-gate, both add chi, thus chi cancels \"\"\"\n return self.phi_m - self.phi_s\n\n @phi_ms.setter\n def phi_ms(self, phi_ms):\n \"\"\" just another way to set phi_m, does not keep phi_ms constant \"\"\"\n self._phi_m = phi_ms + self.phi_s\n\n def fs_ea(self, psi_s, V_ch):\n \"\"\" eq (8)\"\"\"\n return 1. / (1. + self.g_A * self.exp_phi_t(self.psi_a - psi_s + V_ch))\n\n def fb_ea(self):\n \"\"\" eq (9) \"\"\"\n\n assert self.N_A > self.N_D, \"NMOS only\"\n return 1. / (1. + self.g_A * self.exp_phi_t(self.psi_a - self.psi_b))\n\n @property\n def gamma(self):\n return sqrt(2 * e * self.N_A * eps_si) / self.cox\n\n def Es_square(self, psi_s, v_ch, psi_b=None, fb_ea=None):\n \"\"\" eq (7) \"\"\"\n\n # these are kinda hard to calculate, precalculate and use just once\n phi_t = self.phi_t\n psi_b = psi_b or self.psi_b\n fb_ea = fb_ea or self.fb_ea()\n # exp_phi_t = self.exp_phi_t\n exp_phi_t = lambda a: np.exp(a / phi_t)\n\n fs_ea = self.fs_ea(psi_s, v_ch)\n\n fac1 = 2. 
* e / eps_si\n fac2 = exp_phi_t(psi_s - v_ch) + exp_phi_t(-psi_s) - exp_phi_t(psi_b - v_ch) - exp_phi_t(-psi_b)\n fac3 = psi_s - psi_b - phi_t * np.log(fs_ea / fb_ea)\n return fac1 * (self.n_i * phi_t * fac2 + self.N_A * fac3)\n\n def Es(self, psi_s, v_ch, psi_b=None, **kwargs):\n \"\"\" sqrt of eq (7)\"\"\"\n psi_b = psi_b or self.psi_b\n return np.sign(psi_s-psi_b) * np.sqrt(self.Es_square(psi_s, v_ch, psi_b=psi_b, **kwargs))\n\n def v_fb(self, psi_s, v_ch):\n return self.phi_ms + (self.Q_0 - self.Q_it(psi_s, v_ch)) / self.cox\n\n @property\n def v_th0(self):\n \"\"\" approximated threshold voltage \"\"\"\n phi0 = self.psi_th - self.psi_b # + 5.*self.phi_t\n dphi = self.phi_t * np.log(self.fb_ea()) # E_f > E_i, fs_ea == 1\n\n return self.v_fb(self.psi_th, 0.0) + phi0 + \\\n self.gamma * sqrt(phi0 + dphi)\n\n @property\n def v_th(self):\n \"\"\" threshold voltage from full v_gb expression (psi_s = psi_th) \"\"\"\n return self.v_gb(self.psi_th, 0.0)\n\n @property\n def v_th1(self):\n phi_f0 = self.E_g/(2*e) + self.phi_t * np.log(self.N_A/np.sqrt((self.N_c * self.N_v)))\n\n # this includes incomplete ionization if the instance has ionization = incomplete\n phi_f1 = -self.psi_b\n return phi_f0 + self.phi_m - self.chi/e - (self.E_c-self.E_i)/e + self.gamma * np.sqrt(phi_f0 + phi_f1)\n\n def v_gb(self, psi_s, v_ch):\n return self.v_fb(psi_s, v_ch) + eps_si * self.Es(psi_s, v_ch) / self.cox + psi_s - self.psi_b\n\n def psi_s(self, v_ch, v_gb):\n \"\"\"solves the implicit equation (pot_loop) to get the surface potential as a function of v_ch and v_gb\"\"\"\n from scipy.optimize import root_scalar\n v_gb = np.atleast_1d(v_gb)\n psi_s = 0. * v_gb\n bracket = [-2., 2.]\n # bracket = [(self.E_v-self.E_i)/e-v_ch, (self.E_c-self.E_i)/e-v_ch]\n\n psi_b = self.psi_b\n fb_ea = self.fb_ea()\n\n Es = self.Es\n v_fb = self.v_fb\n\n # surface boundary condition:\n # going around the loop, all appearing voltage must cancel each other out, statet a bit before eq. (13)\n def pot_loop(psi_s, v_ch, v_gb):\n return v_fb(psi_s, v_ch) + \\\n eps_si * Es(psi_s, v_ch, psi_b=psi_b, fb_ea=fb_ea) / self.cox + \\\n psi_s - self.psi_b - v_gb\n\n for i, v in enumerate(v_gb):\n res = root_scalar(pot_loop, args=(v_ch, v), bracket=bracket, xtol=1e-30)\n if not res.converged:\n psi_s[i] = np.nan\n raise RuntimeError(\"root did not converge!\")\n else:\n psi_s[i] = res.root\n return psi_s\n\n def Q_m_1(self, psi_s, v_ch):\n \"\"\" Q_m exploiting the charge neutrality, here mobile = holes+electrons \"\"\"\n return self.Q_sc(psi_s, v_ch) - self.Q_f(psi_s, v_ch)\n\n def Q_m(self, psi_s, v_ch):\n \"\"\" Q_m only including electron terms \"\"\"\n log = np.log(self.fs_ea(psi_s, v_ch) / self.fb_ea())\n sqrt1 = - np.sqrt(2. * e * self.n_i * self.phi_t * eps_si * (\n self.exp_phi_t(psi_s - v_ch) - self.exp_phi_t(self.psi_b - v_ch)) + 2. * e * self.N_A * eps_si * (\n psi_s - self.psi_b - self.phi_t * log))\n\n sqrt2 = np.sqrt(2. * e * self.N_A * eps_si * (psi_s - self.psi_b - self.phi_t * log))\n return sqrt1 + sqrt2\n\n def fs_Et(self, g_t, psi_t, psi_s, v_ch):\n return 1. / (1. + g_t * self.exp_phi_t((+psi_t - psi_s + v_ch)))\n\n def Q_sc(self, psi_s, v_ch):\n \"\"\" total semiconductor charge per unit area, text after eq (10)\"\"\"\n return -eps_si * self.Es(psi_s, v_ch)\n\n def Q_f(self, psi_s, v_ch):\n \"\"\" fixed charge density per unit area, eq (11)\"\"\"\n log = np.log(self.fs_ea(psi_s, v_ch) / self.fb_ea())\n return -np.sqrt(\n 2. * e * self.N_A * eps_si * (psi_s - self.psi_b) - 2. 
* e * self.N_A * self.phi_t * eps_si * log)\n\n def Q_it(self, psi_s, v_ch):\n \"\"\" interface charge per unit area, eq (13) and eq (14) and text above\"\"\"\n ret = 0. * psi_s\n if self.N_t is not None and self.psi_t is not None:\n for psi_t, N_t in zip(np.atleast_1d(self.psi_t), np.atleast_1d(self.N_t)):\n # catch the case very complete ionization is assumed in order to avoid computational errors in fs_Et in this case\n if self.g_t != 0.:\n ret = ret + (-e * N_t * self.fs_Et(self.g_t, psi_t, psi_s, v_ch))\n else:\n ret = ret + (-e) * N_t\n return ret\n\n def set_arnout_traps(self, psi_t_c=0.58, N_t=1.2e15, fac=None):\n \"\"\" sets the interface traps similar to what Arnout did in his paper\"\"\"\n fac = fac or np.linspace(-2, 2, 5)\n self.psi_t = psi_t_c + self.phi_t * fac\n self.N_t = np.full_like(self.psi_t, N_t)\n\n def y_psi(self, v_gb, v_ch=0, linlog=0.5, logend=1e-3) -> (np.ndarray, np.ndarray):\n \"\"\" calculate the band structure in vertical direction\n\n returns: y, psi\n \"\"\"\n\n from math import log10, fabs\n\n psi_s = self.psi_s(v_ch, v_gb)\n psi_b = self.psi_b\n\n integr = lambda psi: 1/self.Es(psi, v_ch, psi_b=psi_b)\n\n if np.isclose(psi_s, psi_b):\n return [0, 1e-6], [psi_s, psi_b]\n\n del_psi = psi_s - psi_b\n\n # linear close to the interface, log further away\n # as per suggestion in https://h-gens.github.io/automated-drawing-of-the-mos-band-diagram.html\n psis = psi_b + del_psi*np.hstack((\n np.linspace(1, linlog, 51),\n np.logspace(log10(linlog), log10(logend), 101)[1:]\n ))\n\n @np.vectorize\n def get_y(psi):\n return quad(integr, psi, psi_s)[0]\n\n y = get_y(psis)\n return y, psis\n\n\nclass DiracQVpy(BulkModelFD, BeckersQVpy):\n \"\"\" QV model that uses FD-Integrals for E**2\n\n TODO: check whether psi_b and psi_s fit together in flatband condition!!!\n \"\"\"\n\n def Es_square(self, psi_s, v_ch, psi_b=None, fb_ea=None, E_i=None):\n # calculate Es_square via the fermi dirac integrals\n psi_b = psi_b or self.psi_b\n fac = 2. * e / eps_si\n\n def int_fun(psi):\n return self.n_psi(psi - v_ch) - self.p_psi(psi) + self.N_Am_psi(psi - v_ch) - self.N_Dp_psi(psi)\n\n intfun = lambda psi: fac * quad(int_fun, psi_b, psi)[0]\n return np.vectorize(intfun)(psi_s)\n\n def Q_f(self, psi_s, v_ch, psi_b=None):\n psi_b = psi_b or self.psi_b\n fac = 2. * e / eps_si\n\n def int_fun(psi):\n return fac * quad(\n lambda psi: self.N_Am_psi(psi) - self.N_Dp_psi(psi),\n psi_b - v_ch, psi - v_ch)[0]\n\n Es2 = np.vectorize(int_fun)(psi_s)\n\n # TODO: this is hideous... and probably wrong, is there no better way?\n return -eps_si * np.sign(psi_s - psi_b) * np.sqrt(np.abs(Es2))\n\n def Q_m(self, psi_s, v_ch, psi_b=None):\n fac = 2. 
* e / eps_si\n psi_b = psi_b or self.psi_b\n\n def int_fun_Qsc(psi):\n return self.n_psi(psi - v_ch) + self.N_Am_psi(psi - v_ch)\n\n Es_electrons = np.vectorize(\n lambda psi: fac * quad(int_fun_Qsc, psi_b, psi)[0]\n )(psi_s)\n\n return -eps_si * np.sqrt(Es_electrons) - self.Q_f(psi_s, v_ch, psi_b=psi_b)\n\n\nclass TailsQVpy(BulkModelTails, DiracQVpy):\n \"\"\" QV-model that includes bandtail-states \"\"\"\n pass\n\n\nclass GildenblatQVpy(BeckersQVpy):\n \"\"\" QV model that uses the H(u) description for Es_square\n\n The relevant paper is \"Surface potential equation for bulk MOSFET\" (Gildenblat 2009)\n \"\"\"\n\n @property\n def lam_bulk(self):\n return self.fb_ea()\n\n def Es_square(self, psi_s, v_ch, psi_b=None, fb_ea=None):\n from math import log, exp # this is substantially faster than np\n from warnings import warn\n warn(\"bulk_n, bulk_p and psi_b do not fit togeter: ERROR between psi_s and psi_b!\")\n\n psi_b = psi_b or self.psi_b\n phi_s = psi_s - psi_b\n phi_t = self.phi_t\n\n lam = self.lam_bulk\n\n n_b = self.bulk_n\n p_b = self.bulk_p\n\n k_0 = exp(-v_ch / phi_t)\n\n u = np.array(phi_s / phi_t, dtype=np.longdouble)\n g_fun = 1. / lam * np.log(1. + lam * (np.exp(u) - 1))\n # g_fun = 1. / lam * np.logaddexp(log(1. - lam), log(lam)+u) # only a single call to numpy: faster\n h2 = np.exp(-u) - 1 + g_fun + n_b / p_b * k_0 * (np.exp(u) - 1. - g_fun)\n\n return 2 * e * p_b * phi_t / eps_si * h2\n\n\nclass Dirac2DQV(BeckersQVpy):\n \"\"\" QV model that uses FD-Integrals for E**2 \"\"\"\n def Es_square(self, psi_s, v_ch, psi_b=None, fb_ea=None, E_i=None):\n # calculate es_square via the fermi dirac integrals\n phi_t = self.phi_t\n E_i = E_i or self.E_i\n psi_a = self.psi_a\n psi_b = psi_b or self.psi_b\n exp_phi_t = lambda a: np.exp(a / phi_t)\n fac = 2. * e / eps_si\n\n def fermi_dirac_integral(E, T):\n from fdint import fdk\n return fdk(k=0.5, phi=E / (k * T))\n\n def int_fun(psi):\n n_fd = self.N_c * 2 / np.sqrt(pi) * fermi_dirac_integral(e * (psi - v_ch) + E_i - self.E_c, self.temp)\n p_fd = self.N_v * 2 / np.sqrt(pi) * fermi_dirac_integral(self.E_v - e * psi - E_i, self.temp)\n na_min = self.N_A / (1. + self.g_A * exp_phi_t(psi_a - psi + v_ch))\n return n_fd - p_fd + na_min\n\n intfun = lambda psi: fac * quad(int_fun, psi_b, psi)[0]\n return np.vectorize(intfun)(psi_s)\n\n Q_m = BeckersQVpy.Q_m_1\n\n\n# default implementation\nDefaultQV = BeckersQVpy\n"
] | [
[
"numpy.full_like",
"scipy.integrate.quad",
"numpy.sign",
"numpy.vectorize",
"numpy.isclose",
"numpy.abs",
"numpy.exp",
"numpy.atleast_1d",
"numpy.log",
"numpy.sqrt",
"numpy.linspace",
"numpy.array",
"scipy.optimize.root_scalar"
]
] |
K2OKOH/da-faster-RCNN-ChineseComment | [
"b88d1821779b9edc3f0f4a595e1f41c3bfdd7cab"
] | [
"lib/model/utils/blob.py"
] | [
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Blob helper functions.\"\"\"\n\nimport numpy as np\n# from scipy.misc import imread, imresize\nimport cv2\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef im_list_to_blob(ims):\n \"\"\"Convert a list of images into a network input.\n 把图片列表变化为适合网络的输入格式\n Assumes images are already prepared (means subtracted, BGR order, ...).\n \"\"\"\n # 取出每张图片的最大的长宽和深度\n max_shape = np.array([im.shape for im in ims]).max(axis=0)\n # 求出图片的个数\n num_images = len(ims)\n # 创建一个np数组4维,(图片序号,长,宽,深度)(最大的),用for循环填入图片数据\n blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),\n dtype=np.float32)\n for i in xrange(num_images):\n im = ims[i]\n blob[i, 0:im.shape[0], 0:im.shape[1], :] = im\n # 返回图片的np数组\n return blob\n\ndef prep_im_for_blob(im, pixel_means, target_size, max_size):\n \"\"\"Mean subtract and scale an image for use in a blob.\"\"\"\n im = im.astype(np.float32, copy=False)\n # 减去中值\n im -= pixel_means\n # im = im[:, :, ::-1]\n # 记录维度(三个维度的值)\n im_shape = im.shape\n # 取前两个维度的最大值和最小值\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n # target是短边像素\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n # if np.round(im_scale * im_size_max) > max_size:\n # im_scale = float(max_size) / float(im_size_max)\n # im = imresize(im, im_scale)\n # 沿x,y轴缩放的系数都是im_scale\n im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n\n # 返回缩放后的图形 和 缩放比\n return im, im_scale\n"
] | [
[
"numpy.array",
"numpy.max",
"numpy.min",
"numpy.zeros"
]
] |
johnnyapol/RPICovidScraper | [
"84a97847c80c320e2eed3fd161e9f175f83d14a5"
] | [
"main.py"
] | [
"#!/usr/bin/env python3\n# Usage: ./main.py\n\"\"\"\nCopyright (C) 2020-2021 John C. Allwein 'johnnyapol' ([email protected])\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport os\nimport pickle\nimport requests\nfrom random import choice\nfrom subprocess import run\nimport sys\nimport traceback\nfrom datetime import date, timedelta, datetime\nfrom copy import deepcopy\nfrom itertools import chain\nfrom io import BytesIO\n\nfrom bs4 import BeautifulSoup\nfrom discord_webhook import DiscordEmbed, DiscordWebhook\nimport matplotlib.pyplot as plot\nimport savepagenow\n\n# Import configuration (if available)\ntry:\n import config\n\n WEBHOOKS = config.webhooks\n PSA = config.PSA\n QUIET = config.QUIET\nexcept:\n print(\"No discord webhooks supplied - data will just be stored locally\")\n traceback.print_exc()\n WEBHOOKS = None\n PSA = None\n QUIET = False\n\nDASHBOARD = \"https://covid19.rpi.edu/dashboard\"\n\n\nclass CovidData:\n def __init__(self):\n self.rpi_array = [0] * 5\n self.last_updated = date.today() - timedelta(days=1)\n self.historicalData = {}\n\n def update(self, case_data):\n today = date.today()\n\n if today != self.last_updated:\n self.last_updated = today\n self.historicalData[today] = case_data\n self.rpi_array = case_data\n\n def get_rolling(self):\n return sum(self.get_rolling_iterator(self.last_updated))\n\n def get_case_data(self):\n return self.rpi_array\n\n def get_rolling_iterator(self, day=date.today()):\n dates = [day - timedelta(days=x) for x in range(13, -1, -1)]\n return [\n self.historicalData[date][0] if date in self.historicalData else 0\n for date in dates\n ]\n\n\ndef check_for_updates():\n global DASHBOARD\n request = requests.get(\n DASHBOARD,\n headers={\n \"User-Agent\": \"RPICovidScraper https://github.com/johnnyapol/RPICovidScraper\"\n },\n )\n soup = BeautifulSoup(request.text, features=\"lxml\")\n header = \"field field--name-field-stats field--type-entity-reference-revisions field--label-hidden field__items\"\n header2 = \"field field--name-field-stat field--type-string field--label-hidden field__item\"\n date_header = \"field field--name-field-stats-caption field--type-string field--label-hidden field__item\"\n\n \"\"\"\n Current data format:\n\n case_data[0] = positive tests (last 24 hours)\n case_data[1] = positive test results (last 7 days)\n case_data[2] = positive test results (since august 17th)\n case_data[3] = total tests (last 7 days)\n case_data[4] = total tests (since august 17th)\n \"\"\"\n return (\n [\n int(\"\".join((\"\".join(x.text.strip().split(\" 
\"))).split(\",\")))\n for x in soup.find(\"div\", {\"class\": header}).findAll(\n \"div\", {\"class\": header2}\n )\n ],\n soup.find(\"div\", {\"class\": date_header}).text,\n )\n\n\ndef case_value_to_string(case_data, previous_case_data, index):\n diff = case_data[index] - previous_case_data[index]\n diff_string = f\"({diff:+,})\" if diff != 0 else \"\"\n return f\"{case_data[index]:,} {diff_string}\"\n\n\ndef get_source_url():\n start = \"https://github.com/johnnyapol/RPICovidScraper/\"\n try:\n return f'{start}commit/{run([\"git\", \"log\", \"--pretty=format:%H\", \"-n\", \"1\"], capture_output=True).stdout.decode(\"ascii\")}'\n except:\n return start\n\n\ndef post_discord(\n rolling, old_rolling, case_data, previous_case_data, date, dashboard_url, graph\n):\n global WEBHOOKS\n global PSA\n global QUIET\n if WEBHOOKS is None:\n return print(\"Skipping posting to discord as no webhooks supplied\")\n\n positive_thumbnails = [\n \"https://www.continentalmessage.com/wp-content/uploads/2015/09/123rf-alert2.jpg\",\n \"https://i.kym-cdn.com/photos/images/newsfeed/000/675/645/2c7.gif\",\n \"https://media.discordapp.net/attachments/783375197604413445/790625854202839100/image0.png\",\n \"https://media.tenor.com/images/6603c0a47ff16ad8d3682e481e727f76/tenor.gif\",\n ]\n\n neutral_thumbnails = [\n \"https://steamcdn-a.akamaihd.net/steamcommunity/public/images/clans/5671259/7923c9b8e0a5799d4d422208b31f5ca0f4f49067.png\",\n \"https://static01.nyt.com/images/2020/01/28/science/28VIRUS-BATS1/28VIRUS-BATS1-videoSixteenByNineJumbo1600.jpg\",\n \"https://ih1.redbubble.net/image.1877589148.0162/ur,mask_flatlay_front,wide_portrait,750x1000.jpg\",\n \"https://media.giphy.com/media/KHEgvyrgYnL9RW08h6/giphy.gif\",\n ]\n\n negative_thumbnails = [\n \"https://media.giphy.com/media/WS0MDT0DITCTLwcNNx/giphy.gif\",\n \"https://cdn.vox-cdn.com/thumbor/iuL4QWaANcy5lyeCDXxIrBq7_uQ=/0x0:3000x2000/1400x1050/filters:focal(1436x422:1916x902):no_upscale()/cdn.vox-cdn.com/uploads/chorus_image/image/68718659/AP_20331457642255.0.jpg\",\n ]\n\n emojis = [\"❤️\", \"✨\", \"🥓\", \"🍺\", \"🧻\", \"🐍\", \"☃️\", \"😷\"]\n\n if QUIET and case_data[0] == 0:\n return\n\n embed = DiscordEmbed()\n\n if case_data[0] > 4:\n embed.set_color(15158332)\n embed.set_thumbnail(url=choice(positive_thumbnails))\n elif case_data[0] > 0:\n embed.set_color(0xFFFF00)\n embed.set_thumbnail(url=choice(neutral_thumbnails))\n else:\n embed.set_color(3066993)\n embed.set_thumbnail(url=choice(negative_thumbnails))\n\n if PSA is not None:\n embed.add_embed_field(name=\"ANNOUNCEMENT\", value=PSA, inline=False)\n embed.color = 15844367\n\n embed.add_embed_field(\n name=\"New Positive Tests\",\n value=f\"{case_data[0]}\",\n inline=False,\n )\n embed.add_embed_field(\n name=\"Positive Tests (7 days)\",\n value=case_value_to_string(case_data, previous_case_data, 1),\n inline=False,\n )\n\n embed.add_embed_field(\n name=\"Positive Tests (14 days)\",\n value=case_value_to_string([rolling], [old_rolling], 0),\n inline=False,\n )\n\n embed.add_embed_field(\n name=\"Weekly Test Count\",\n value=case_value_to_string(case_data, previous_case_data, 3),\n inline=False,\n )\n if case_data[1] != 0:\n # Calculate weekly positivity rate\n pcr = (case_data[1] / case_data[3]) * 100\n embed.add_embed_field(name=\"Weekly Positivity Rate\", value=f\"{round(pcr, 4)}%\")\n embed.add_embed_field(\n name=\"Total Positive Tests\",\n value=case_value_to_string(case_data, previous_case_data, 2),\n )\n\n # Since discord footers don't support \"rich\" content, hack on a footer to the last 
field\n date = \"\".join(date.split(\"\\n\"))\n embed.add_embed_field(\n name=\"Total Tests\",\n value=f\"{case_value_to_string(case_data, previous_case_data, 4)}\\n{date} Made with {choice(emojis)} - [source]({get_source_url()})\",\n inline=False,\n )\n embed.set_author(\n name=\"Click for dashboard\",\n url=dashboard_url,\n icon_url=\"https://i.redd.it/14nqzc0hswy31.png\",\n )\n\n hook = DiscordWebhook(\n url=WEBHOOKS,\n content=choice(\n [\n \"The RPI Covid Dashboard has been updated!\",\n \"I got yer COVID data right here!\",\n \"Special delivery!\",\n \"Beep beep boop\",\n \"I found some data!\",\n ]\n ),\n username=\"RPI Covid Dashboard\",\n avatar_url=\"https://www.minnpost.com/wp-content/uploads/2020/03/coronavirusCDC640.png\",\n )\n\n if graph != None:\n hook.add_file(file=graph.read(), filename=\"graph.png\")\n embed.set_image(url=\"attachment://graph.png\")\n hook.add_embed(embed)\n hook.execute()\n\n\ndef load_previous():\n try:\n with open(\".cache\", \"rb\") as file:\n return pickle.load(file)\n except:\n print(\"Cache read failed\")\n return CovidData()\n\n\ndef save(case_data):\n with open(\".cache\", \"wb\") as file:\n pickle.dump(case_data, file)\n\n\ndef create_graph(data):\n x = [int(z) for z in data.get_rolling_iterator()]\n cum = [x[0]]\n for i in range(1, len(x)):\n cum.append(cum[-1] + x[i])\n # thanks to https://www.tutorialspoint.com/matplotlib/matplotlib_bar_plot.htm for help\n today = date.today()\n monthday = lambda d: f\"{d.month}-{d.day}\"\n dates = [today - timedelta(days=x) for x in range(13, -1, -1)]\n plot.title(f\"Previous 14 days\")\n plot.bar(dates, x, color=\"red\", label=\"daily positive tests\")\n plot.plot(dates, cum, color=\"orange\", label=f\"Positives since {monthday(dates[0])}\")\n # Add individual day labels\n for i, v in zip(dates, x):\n if v == 0:\n continue\n plot.text(i, v, str(v), color=\"blue\", fontweight=\"bold\", ha=\"center\")\n plot.plot(\n dates,\n [sum(data.get_rolling_iterator(date)) for date in dates],\n color=\"green\",\n label=\"Rolling 2 week sum\",\n )\n plot.xticks(dates, [monthday(date) for date in dates], rotation=45)\n plot.legend()\n\n data = BytesIO()\n plot.subplots_adjust(bottom=0.17)\n plot.ylabel(\"Number of positive tests\")\n plot.xlabel(\"Day reported\")\n now = datetime.now()\n plot.figtext(\n 0.5,\n 0.01,\n f\"Generated on {now.strftime('%m/%d/%y %H:%M')} {datetime.now().astimezone().tzinfo.tzname(None)}\",\n ha=\"center\",\n fontsize=8,\n )\n plot.savefig(data, format=\"png\")\n data.seek(0)\n return data\n\n\ndef main():\n global DASHBOARD\n covid_data = load_previous()\n previous_case_data = deepcopy(covid_data.get_case_data())\n current_case_data, date = check_for_updates()\n\n ci = any(x.lower() == \"--ci\" for x in sys.argv)\n force = any(x.lower() == \"--force\" for x in sys.argv)\n\n # Only post under the following conditions:\n # 1. There is new data from RPI\n # - AND -\n # 2. 
there are new positive tests OR new weekly/total numbers reported\n # This avoids the bs updates where all RPI does is reset the daily/weekly numbers\n if (\n force\n or current_case_data != previous_case_data\n and (\n current_case_data[0] != 0\n or any(\n current_case_data[x] != previous_case_data[x]\n for x in range(2, len(current_case_data))\n )\n )\n ):\n dashboard_url = DASHBOARD\n try:\n # We don't want to abuse the Wayback Machine in actions\n if not ci:\n dashboard_url = savepagenow.capture(DASHBOARD, accept_cache=True)\n else:\n print(\"Skipping page archive as we are running in CI mode\")\n except:\n print(f\"Page archived failed\")\n traceback.print_exc()\n\n old_rolling = covid_data.get_rolling()\n covid_data.update(current_case_data)\n\n post_discord(\n covid_data.get_rolling(),\n old_rolling,\n current_case_data,\n previous_case_data,\n date,\n dashboard_url,\n create_graph(covid_data),\n )\n\n save(covid_data)\n print(\n f\"Done. Old: {previous_case_data} New: {current_case_data}\\n Rolling: {covid_data.get_rolling()}\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.bar"
]
] |
nerminsamet/HoughNet-VID | [
"670405e002e1c4d60596434db4790783eaf62846"
] | [
"src/lib/datasets/dataset/vid_eval.py"
] | [
"import numpy as np\nimport scipy.io as sio\nfrom collections import defaultdict\n\nBIG_NUM = 1000000\n\ndef area(box, mode=\"xyxy\"):\n\n if mode == \"xyxy\":\n TO_REMOVE = 1\n area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)\n elif mode == \"xywh\":\n area = box[:, 2] * box[:, 3]\n else:\n raise RuntimeError(\"Should not be here\")\n\n return area\n\n\ndef boxlist_iou(boxlist1, boxlist2):\n\n area1 = area(boxlist1, mode = \"xyxy\")\n area2 = area(boxlist2, mode = \"xyxy\")\n\n lt = np.maximum(np.expand_dims(boxlist1[:, :2], axis=1), boxlist2[:, :2]) # [N,M,2]\n rb = np.minimum(np.expand_dims(boxlist1[:, 2:], axis=1), boxlist2[:, 2:]) # [N,M,2]\n\n TO_REMOVE = 1\n\n wh = np.clip((rb - lt + TO_REMOVE), a_min = 0, a_max=BIG_NUM) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n iou = inter / (area1[:, None] + area2 - inter)\n return iou\n\n\ndef eval_proposals_vid(pred_boxlists, gt_boxlists, iou_thresh=0.5, limit=300):\n assert len(gt_boxlists) == len(\n pred_boxlists\n ), \"Length of gt and pred lists need to be same.\"\n\n gt_overlaps = []\n num_pos = 0\n for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):\n inds = np.argsort(pred_boxlist[\"scores\"])[::-1]\n pred_boxlist['labels'] = pred_boxlist['labels'][inds]\n pred_boxlist['bbox'] = pred_boxlist['bbox'][inds]\n pred_boxlist['scores'] = pred_boxlist['scores'][inds]\n\n if len(pred_boxlist['labels']) > limit:\n pred_boxlist['labels'] = pred_boxlist['labels'][:limit]\n pred_boxlist['bbox'] = pred_boxlist['bbox'][:limit]\n pred_boxlist['scores'] = pred_boxlist['scores'][:limit]\n\n num_pos += len(gt_boxlist['labels'])\n\n if len(gt_boxlist['labels']) == 0:\n continue\n\n if len(pred_boxlist['labels']) == 0:\n continue\n\n overlaps = boxlist_iou(pred_boxlist['bbox'], gt_boxlist['bbox'])\n\n _gt_overlaps = np.zeros(gt_boxlist['bbox'].shape[0])\n for j in range(min(len(pred_boxlist['bbox']), len(gt_boxlist['bbox']))):\n max_overlaps, argmax_overlaps = np.max(overlaps, axis=0), np.argmax(overlaps, axis=0)\n\n gt_ovr, gt_ind = np.max(max_overlaps, axis=0), np.argmax(max_overlaps, axis=0)\n assert gt_ovr >= 0\n\n box_ind = argmax_overlaps[gt_ind]\n\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert _gt_overlaps[j] == gt_ovr\n\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n\n gt_overlaps.append(_gt_overlaps)\n\n gt_overlaps = np.concatenate(gt_overlaps, axis=0)\n gt_overlaps = np.sort(gt_overlaps)\n\n recall = (gt_overlaps >= iou_thresh).astype(float).sum() / float(num_pos)\n\n return {\n \"recall\": recall\n }\n\ndef eval_detection_vid(pred_boxlists,\n gt_boxlists,\n iou_thresh=0.5,\n motion_ranges=[[0.0, 0.7], [0.7, 0.9], [0.9, 1.0]],\n motion_specific=False,\n use_07_metric=False):\n assert len(gt_boxlists) == len(\n pred_boxlists\n ), \"Length of gt and pred lists need to be same.\"\n\n if motion_specific:\n motion_iou_file = \"./evaluation/vid_groundtruth_motion_iou.mat\"\n motion_ious = sio.loadmat(motion_iou_file)\n motion_ious = np.array(\n [[motion_ious['motion_iou'][i][0][j][0] if len(motion_ious['motion_iou'][i][0][j]) != 0 else 0 \\\n for j in range(len(motion_ious['motion_iou'][i][0]))] \\\n for i in range(len(motion_ious['motion_iou']))])\n else:\n motion_ious = None\n\n motion_ap = defaultdict(dict)\n for motion_index, motion_range in enumerate(motion_ranges):\n print(\"Evaluating motion iou range {} - {}\".format(motion_range[0], motion_range[1]))\n prec, rec = calc_detection_vid_prec_rec(\n pred_boxlists=pred_boxlists,\n gt_boxlists=gt_boxlists,\n 
motion_ious=motion_ious,\n iou_thresh=iou_thresh,\n motion_range=motion_range,\n )\n ap = calc_detection_vid_ap(prec, rec, use_07_metric=use_07_metric)\n motion_ap[motion_index] = {\"ap\": ap, \"map\": np.nanmean(ap)}\n return motion_ap\n\n\ndef calc_detection_vid_prec_rec(gt_boxlists, pred_boxlists, motion_ious,\n iou_thresh=0.5, motion_range=[0., 1.]):\n n_pos = defaultdict(int)\n score = defaultdict(list)\n match = defaultdict(list)\n pred_ignore = defaultdict(list)\n if motion_ious is None:\n motion_ious = [None] * len(gt_boxlists)\n empty_weight = 0\n else:\n all_motion_iou = np.concatenate(motion_ious, axis=0)\n empty_weight = sum([(all_motion_iou[i] >= motion_range[0]) & (all_motion_iou[i] <= motion_range[1]) for i in\n range(len(all_motion_iou))]) / float(len(all_motion_iou))\n if empty_weight == 1:\n empty_weight = 0\n for gt_boxlist, pred_boxlist, motion_iou in zip(gt_boxlists, pred_boxlists, motion_ious):\n pred_bbox = pred_boxlist['bbox']\n pred_label = pred_boxlist['labels']\n pred_score = pred_boxlist['scores']\n gt_bbox = gt_boxlist['bbox']\n gt_label = gt_boxlist['labels']\n gt_ignore = np.zeros(len(gt_bbox))\n\n for gt_index, gt in enumerate(gt_bbox):\n if motion_iou:\n if motion_iou[gt_index] < motion_range[0] or motion_iou[gt_index] > motion_range[1]:\n gt_ignore[gt_index] = 1\n else:\n gt_ignore[gt_index] = 0\n\n for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):\n pred_mask_l = pred_label == l\n pred_bbox_l = pred_bbox[pred_mask_l]\n pred_score_l = pred_score[pred_mask_l]\n\n # sort by score\n order = pred_score_l.argsort()[::-1]\n pred_bbox_l = pred_bbox_l[order]\n pred_score_l = pred_score_l[order]\n\n gt_mask_l = gt_label == l\n gt_bbox_l = gt_bbox[gt_mask_l]\n gt_ignore_l = gt_ignore[gt_mask_l]\n\n n_pos[l] += gt_bbox_l.shape[0] - sum(gt_ignore_l)\n score[l].extend(pred_score_l)\n\n if len(pred_bbox_l) == 0:\n continue\n if len(gt_bbox_l) == 0:\n match[l].extend((0,) * pred_bbox_l.shape[0])\n pred_ignore[l].extend((empty_weight,) * pred_bbox_l.shape[0])\n continue\n\n # VID evaluation follows integer typed bounding boxes.\n pred_bbox_l = pred_bbox_l.copy()\n pred_bbox_l[:, 2:] += 1\n gt_bbox_l = gt_bbox_l.copy()\n gt_bbox_l[:, 2:] += 1\n iou = boxlist_iou(pred_bbox_l, gt_bbox_l)\n\n num_obj, num_gt_obj = iou.shape\n\n selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)\n for j in range(0, num_obj):\n iou_match = iou_thresh\n iou_match_ig = -1\n iou_match_nig = -1\n arg_match = -1\n for k in range(0, num_gt_obj):\n if (gt_ignore_l[k] == 1) & (iou[j, k] > iou_match_ig):\n iou_match_ig = iou[j, k]\n if (gt_ignore_l[k] == 0) & (iou[j, k] > iou_match_nig):\n iou_match_nig = iou[j, k]\n if selec[k] or iou[j, k] < iou_match:\n continue\n if iou[j, k] == iou_match:\n if arg_match < 0 or gt_ignore_l[arg_match]:\n arg_match = k\n else:\n arg_match = k\n iou_match = iou[j, k]\n\n if arg_match >= 0:\n match[l].append(1)\n pred_ignore[l].append(gt_ignore_l[arg_match])\n selec[arg_match] = True\n else:\n if iou_match_nig > iou_match_ig:\n pred_ignore[l].append(0)\n elif iou_match_ig > iou_match_nig:\n pred_ignore[l].append(1)\n else:\n pred_ignore[l].append(sum(gt_ignore_l) / float(num_gt_obj))\n match[l].append(0)\n # pred_ignore[l].append(0)\n\n n_fg_class = max(n_pos.keys()) + 1\n print(n_pos)\n prec = [None] * n_fg_class\n rec = [None] * n_fg_class\n\n for l in n_pos.keys():\n score_l = np.array(score[l])\n match_l = np.array(match[l], dtype=np.int8)\n pred_ignore_l = np.array(pred_ignore[l])\n\n order = score_l.argsort()[::-1]\n match_l = match_l[order]\n 
pred_ignore_l = pred_ignore_l[order]\n\n tps = np.logical_and(match_l == 1, np.logical_not(pred_ignore_l == 1))\n fps = np.logical_and(match_l == 0, np.logical_not(pred_ignore_l == 1))\n pred_ignore_l[pred_ignore_l == 0] = 1\n fps = fps * pred_ignore_l\n\n tp = np.cumsum(tps)\n fp = np.cumsum(fps)\n\n # If an element of fp + tp is 0,\n # the corresponding element of prec[l] is nan.\n prec[l] = tp / (fp + tp + np.spacing(1))\n # If n_pos[l] is 0, rec[l] is None.\n if n_pos[l] > 0:\n rec[l] = tp / n_pos[l]\n\n return prec, rec\n\n\ndef calc_detection_vid_ap(prec, rec, use_07_metric=False):\n \"\"\"Calculate average precisions based on evaluation code of VID.\n This function calculates average precisions\n from given precisions and recalls.\n The code is based on the evaluation code used in VID.\n Args:\n prec (list of numpy.array): A list of arrays.\n :obj:`prec[l]` indicates precision for class :math:`l`.\n If :obj:`prec[l]` is :obj:`None`, this function returns\n :obj:`numpy.nan` for class :math:`l`.\n rec (list of numpy.array): A list of arrays.\n :obj:`rec[l]` indicates recall for class :math:`l`.\n If :obj:`rec[l]` is :obj:`None`, this function returns\n :obj:`numpy.nan` for class :math:`l`.\n use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric\n for calculating average precision. The default value is\n :obj:`False`.\n Returns:\n ~numpy.ndarray:\n This function returns an array of average precisions.\n The :math:`l`-th value corresponds to the average precision\n for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is\n :obj:`None`, the corresponding value is set to :obj:`numpy.nan`.\n \"\"\"\n\n n_fg_class = len(prec)\n ap = np.empty(n_fg_class)\n for l in range(n_fg_class):\n if prec[l] is None or rec[l] is None:\n ap[l] = np.nan\n continue\n\n if use_07_metric:\n # 11 point metric\n ap[l] = 0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(rec[l] >= t) == 0:\n p = 0\n else:\n p = np.max(np.nan_to_num(prec[l])[rec[l] >= t])\n ap[l] += p / 11\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))\n mrec = np.concatenate(([0], rec[l], [1]))\n\n mpre = np.maximum.accumulate(mpre[::-1])[::-1]\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n\n return ap\n"
] | [
[
"numpy.sum",
"numpy.spacing",
"numpy.maximum.accumulate",
"numpy.argsort",
"numpy.nan_to_num",
"numpy.nanmean",
"numpy.logical_not",
"numpy.expand_dims",
"numpy.where",
"scipy.io.loadmat",
"numpy.zeros",
"numpy.argmax",
"numpy.arange",
"numpy.max",
"numpy.sort",
"numpy.cumsum",
"numpy.empty",
"numpy.clip",
"numpy.array",
"numpy.concatenate"
]
] |
ebolyen/q2-treetime | [
"162ebfca9a120391840ed30ddef18fb2b780165e"
] | [
"q2_treetime/methods.py"
] | [
"import hashlib\n\nimport skbio\nimport numpy as np\n\n\ndef add_node_names(tree: skbio.TreeNode, scheme: str = 'md5-xor') \\\n -> skbio.TreeNode:\n HASH_SIZE = 128 // 8\n\n for node in tree.postorder(include_self=True):\n if not node.children or node.name is not None:\n continue\n\n xor = np.zeros(HASH_SIZE, dtype=np.uint8)\n for child in node.children:\n # child.name will never be None because of the postorder traversal\n digest = hashlib.md5(child.name.encode('utf8')).digest()\n xor ^= np.frombuffer(digest, dtype=np.uint8)\n\n node.name = xor.tobytes().hex()\n\n return tree\n\n\ndef convert():\n pass\n\n\ndef infer_gtr():\n pass\n\n\ndef skyline():\n pass\n\n\ndef ancestral_seqs():\n pass\n\n\ndef ancestral_traits():\n pass\n\n\ndef add_trait_coords():\n pass\n"
] | [
[
"numpy.frombuffer",
"numpy.zeros"
]
] |
SJTMusicTeam/MusicGeneration | [
"2918e151a1b9448e5452179bab70bf565d1eaaf7"
] | [
"mg/utils/midi2note.py"
] | [
"from music21 import converter, instrument, note, chord, stream, midi\nimport numpy as np\nimport pandas as pd\n\n\n# Melody-RNN Format is a sequence of 8-bit integers indicating the following:\n# MELODY_NOTE_ON = [0, 127] # (note on at that MIDI pitch)\nMELODY_NOTE_OFF = 128 # (stop playing all previous notes)\nMELODY_NO_EVENT = 129 # (no change from previous event)\n# Each element in the sequence lasts for one sixteenth note.\n# This can encode monophonic music only.\n\ndef streamToNoteArray(stream):\n \"\"\"\n Convert a Music21 sequence to a numpy array of int8s into Melody-RNN format:\n 0-127 - note on at specified pitch\n 128 - note off\n 129 - no event\n \"\"\"\n # Part one, extract from stream\n total_length = np.int(np.round(stream.flat.highestTime / 0.25)) # in semiquavers\n stream_list = []\n for element in stream.flat:\n if isinstance(element, note.Note):\n stream_list.append([np.round(element.offset / 0.25), np.round(element.quarterLength / 0.25), element.pitch.midi])\n elif isinstance(element, chord.Chord):\n stream_list.append([np.round(element.offset / 0.25), np.round(element.quarterLength / 0.25), element.sortAscending().pitches[-1].midi])\n np_stream_list = np.array(stream_list, dtype=np.int)\n df = pd.DataFrame({'pos': np_stream_list.T[0], 'dur': np_stream_list.T[1], 'pitch': np_stream_list.T[2]})\n df = df.sort_values(['pos','pitch'], ascending=[True, False]) # sort the dataframe properly\n df = df.drop_duplicates(subset=['pos']) # drop duplicate values\n # part 2, convert into a sequence of note events\n #output = np.zeros(df.off.max() + 1, dtype=np.int16) + np.int16(MELODY_NO_EVENT)\n output = np.zeros(total_length+2, dtype=np.int16) + np.int16(MELODY_NO_EVENT) # set array full of no events by default.\n # Fill in the output list\n for i in range(total_length):\n if not df[df.pos==i].empty:\n n = df[df.pos==i].iloc[0] # pick the highest pitch at each semiquaver\n output[i] = n.pitch # set note on\n output[i+n.dur] = MELODY_NOTE_OFF\n\n return output\n\ndef noteArrayToDataFrame(note_array):\n \"\"\"\n Convert a numpy array containing a Melody-RNN sequence into a dataframe.\n \"\"\"\n df = pd.DataFrame({\"code\": note_array})\n df['offset'] = df.index\n df['duration'] = df.index\n df = df[df.code != MELODY_NO_EVENT]\n df.duration = df.duration.diff(-1) * -1 * 0.25 # calculate durations and change to quarter note fractions\n df = df.fillna(0.25)\n return df[['code','duration']]\n\ndef noteArrayToStream(note_array):\n \"\"\"\n Convert a numpy array containing a Melody-RNN sequence into a music21 stream.\n \"\"\"\n df = noteArrayToDataFrame(note_array)\n melody_stream = stream.Stream()\n for index, row in df.iterrows():\n if row.code == MELODY_NO_EVENT:\n new_note = note.Rest() # bit of an oversimplification, doesn't produce long notes.\n elif row.code == MELODY_NOTE_OFF:\n new_note = note.Rest()\n else:\n new_note = note.Note(row.code)\n new_note.quarterLength = row.duration\n melody_stream.append(new_note)\n return melody_stream\n"
] | [
[
"numpy.zeros",
"pandas.DataFrame",
"numpy.array",
"numpy.int16",
"numpy.round"
]
] |
Ben3940/scikit-learn | [
"adb47e7c142ce6d699cc5927925d448cb2c1ab91"
] | [
"examples/inspection/plot_partial_dependence.py"
] | [
"\"\"\"\n===============================================================\nPartial Dependence and Individual Conditional Expectation Plots\n===============================================================\n\nPartial dependence plots show the dependence between the target function [2]_\nand a set of features of interest, marginalizing over the values of all other\nfeatures (the complement features). Due to the limits of human perception, the\nsize of the set of features of interest must be small (usually, one or two)\nthus they are usually chosen among the most important features.\n\nSimilarly, an individual conditional expectation (ICE) plot [3]_\nshows the dependence between the target function and a feature of interest.\nHowever, unlike partial dependence plots, which show the average effect of the\nfeatures of interest, ICE plots visualize the dependence of the prediction on a\nfeature for each :term:`sample` separately, with one line per sample.\nOnly one feature of interest is supported for ICE plots.\n\nThis example shows how to obtain partial dependence and ICE plots from a\n:class:`~sklearn.neural_network.MLPRegressor` and a\n:class:`~sklearn.ensemble.HistGradientBoostingRegressor` trained on the\nCalifornia housing dataset. The example is taken from [1]_.\n\n.. [1] T. Hastie, R. Tibshirani and J. Friedman, \"Elements of Statistical\n Learning Ed. 2\", Springer, 2009.\n\n.. [2] For classification you can think of it as the regression score before\n the link function.\n\n.. [3] :arxiv:`Goldstein, A., Kapelner, A., Bleich, J., and Pitkin, E. (2015).\n \"Peeking Inside the Black Box: Visualizing Statistical Learning With Plots of\n Individual Conditional Expectation\". Journal of Computational and\n Graphical Statistics, 24(1): 44-65 <1309.6392>`\n\n\"\"\"\n\n# %%\n# California Housing data preprocessing\n# -------------------------------------\n#\n# Center target to avoid gradient boosting init bias: gradient boosting\n# with the 'recursion' method does not account for the initial estimator\n# (here the average target, by default).\n\nimport pandas as pd\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.model_selection import train_test_split\n\ncal_housing = fetch_california_housing()\nX = pd.DataFrame(cal_housing.data, columns=cal_housing.feature_names)\ny = cal_housing.target\n\ny -= y.mean()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)\n\n# %%\n# 1-way partial dependence with different models\n# ----------------------------------------------\n#\n# In this section, we will compute 1-way partial dependence with two different\n# machine-learning models: (i) a multi-layer perceptron and (ii) a\n# gradient-boosting. 
With these two models, we illustrate how to compute and\n# interpret both partial dependence plot (PDP) and individual conditional\n# expectation (ICE).\n#\n# Multi-layer perceptron\n# ......................\n#\n# Let's fit a :class:`~sklearn.neural_network.MLPRegressor` and compute\n# single-variable partial dependence plots.\n\nfrom time import time\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.neural_network import MLPRegressor\n\nprint(\"Training MLPRegressor...\")\ntic = time()\nest = make_pipeline(\n QuantileTransformer(),\n MLPRegressor(\n hidden_layer_sizes=(30, 15),\n learning_rate_init=0.01,\n early_stopping=True,\n random_state=0,\n ),\n)\nest.fit(X_train, y_train)\nprint(f\"done in {time() - tic:.3f}s\")\nprint(f\"Test R2 score: {est.score(X_test, y_test):.2f}\")\n\n# %%\n# We configured a pipeline to scale the numerical input features and tuned the\n# neural network size and learning rate to get a reasonable compromise between\n# training time and predictive performance on a test set.\n#\n# Importantly, this tabular dataset has very different dynamic ranges for its\n# features. Neural networks tend to be very sensitive to features with varying\n# scales and forgetting to preprocess the numeric feature would lead to a very\n# poor model.\n#\n# It would be possible to get even higher predictive performance with a larger\n# neural network but the training would also be significantly more expensive.\n#\n# Note that it is important to check that the model is accurate enough on a\n# test set before plotting the partial dependence since there would be little\n# use in explaining the impact of a given feature on the prediction function of\n# a poor model.\n#\n# We will plot the partial dependence, both individual (ICE) and averaged one\n# (PDP). We limit to only 50 ICE curves to not overcrowd the plot.\n\nfrom sklearn.inspection import PartialDependenceDisplay\n\ncommon_params = {\n \"subsample\": 50,\n \"n_jobs\": 2,\n \"grid_resolution\": 20,\n \"centered\": True,\n \"random_state\": 0,\n}\n\nprint(\"Computing partial dependence plots...\")\ntic = time()\ndisplay = PartialDependenceDisplay.from_estimator(\n est,\n X_train,\n features=[\"MedInc\", \"AveOccup\", \"HouseAge\", \"AveRooms\"],\n kind=\"both\",\n **common_params,\n)\nprint(f\"done in {time() - tic:.3f}s\")\ndisplay.figure_.suptitle(\n \"Partial dependence of house value on non-location features\\n\"\n \"for the California housing dataset, with MLPRegressor\"\n)\ndisplay.figure_.subplots_adjust(hspace=0.3)\n\n# %%\n# Gradient boosting\n# .................\n#\n# Let's now fit a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` and\n# compute the partial dependence on the same features.\n\nfrom sklearn.ensemble import HistGradientBoostingRegressor\n\nprint(\"Training HistGradientBoostingRegressor...\")\ntic = time()\nest = HistGradientBoostingRegressor(random_state=0)\nest.fit(X_train, y_train)\nprint(f\"done in {time() - tic:.3f}s\")\nprint(f\"Test R2 score: {est.score(X_test, y_test):.2f}\")\n\n# %%\n# Here, we used the default hyperparameters for the gradient boosting model\n# without any preprocessing as tree-based models are naturally robust to\n# monotonic transformations of numerical features.\n#\n# Note that on this tabular dataset, Gradient Boosting Machines are both\n# significantly faster to train and more accurate than neural networks. 
It is\n# also significantly cheaper to tune their hyperparameters (the defaults tend\n# to work well while this is not often the case for neural networks).\n#\n# We will plot the partial dependence, both individual (ICE) and averaged one\n# (PDP). We limit to only 50 ICE curves to not overcrowd the plot.\n\nprint(\"Computing partial dependence plots...\")\ntic = time()\ndisplay = PartialDependenceDisplay.from_estimator(\n est,\n X_train,\n features=[\"MedInc\", \"AveOccup\", \"HouseAge\", \"AveRooms\"],\n kind=\"both\",\n **common_params,\n)\nprint(f\"done in {time() - tic:.3f}s\")\ndisplay.figure_.suptitle(\n \"Partial dependence of house value on non-location features\\n\"\n \"for the California housing dataset, with Gradient Boosting\"\n)\ndisplay.figure_.subplots_adjust(wspace=0.4, hspace=0.3)\n\n# %%\n# Analysis of the plots\n# .....................\n#\n# We can clearly see on the PDPs (dashed orange line) that the median house price\n# shows a linear relationship with the median income (top left) and that the\n# house price drops when the average occupants per household increases (top\n# middle). The top right plot shows that the house age in a district does not\n# have a strong influence on the (median) house price; so does the average\n# rooms per household.\n#\n# The ICE curves (light blue lines) complement the analysis: we can see that\n# there are some exceptions (which are better highlighted with the option\n# `centered=True`), where the house price remains constant with respect to\n# median income and average occupants variations.\n# On the other hand, while the house age (top right) does not have a strong\n# influence on the median house price on average, there seems to be a number\n# of exceptions where the house price increases when\n# between the ages 15-25. Similar exceptions can be observed for the average\n# number of rooms (bottom left). Therefore, ICE plots show some individual\n# effect which are attenuated by taking the averages.\n#\n# In all plots, the tick marks on the x-axis represent the deciles of the\n# feature values in the training data.\n#\n# We also observe that :class:`~sklearn.neural_network.MLPRegressor` has much\n# smoother predictions than\n# :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.\n#\n# However, it is worth noting that we are creating potential meaningless\n# synthetic samples if features are correlated.\n\n# %%\n# 2D interaction plots\n# --------------------\n#\n# PDPs with two features of interest enable us to visualize interactions among\n# them. However, ICEs cannot be plotted in an easy manner and thus interpreted.\n# Another consideration is linked to the performance to compute the PDPs. 
With\n# the tree-based algorithm, when only PDPs are requested, they can be computed\n# on an efficient way using the `'recursion'` method.\nimport matplotlib.pyplot as plt\n\nprint(\"Computing partial dependence plots...\")\ntic = time()\n_, ax = plt.subplots(ncols=3, figsize=(9, 4))\n\n# Note that we could have called the method `from_estimator` three times and\n# provide one feature, one kind of plot, and one axis for each call.\ndisplay = PartialDependenceDisplay.from_estimator(\n est,\n X_train,\n features=[\"AveOccup\", \"HouseAge\", (\"AveOccup\", \"HouseAge\")],\n kind=[\"both\", \"both\", \"average\"],\n ax=ax,\n **common_params,\n)\n\nprint(f\"done in {time() - tic:.3f}s\")\ndisplay.figure_.suptitle(\n \"Partial dependence of house value on non-location features\\n\"\n \"for the California housing dataset, with Gradient Boosting\"\n)\ndisplay.figure_.subplots_adjust(wspace=0.4, hspace=0.3)\n\n# %%\n# The two-way partial dependence plot shows the dependence of median house\n# price on joint values of house age and average occupants per household. We\n# can clearly see an interaction between the two features: for an average\n# occupancy greater than two, the house price is nearly independent of the\n# house age, whereas for values less than two there is a strong dependence on\n# age.\n#\n# 3D interaction plots\n# --------------------\n#\n# Let's make the same partial dependence plot for the 2 features interaction,\n# this time in 3 dimensions.\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.inspection import partial_dependence\n\nfig = plt.figure()\n\nfeatures = (\"AveOccup\", \"HouseAge\")\npdp = partial_dependence(\n est, X_train, features=features, kind=\"average\", grid_resolution=10\n)\nXX, YY = np.meshgrid(pdp[\"values\"][0], pdp[\"values\"][1])\nZ = pdp.average[0].T\nax = Axes3D(fig)\nfig.add_axes(ax)\n\nsurf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu, edgecolor=\"k\")\nax.set_xlabel(features[0])\nax.set_ylabel(features[1])\nax.set_zlabel(\"Partial dependence\")\n# pretty init view\nax.view_init(elev=22, azim=122)\nplt.colorbar(surf)\nplt.suptitle(\n \"Partial dependence of house value on median\\n\"\n \"age and average occupancy, with Gradient Boosting\"\n)\nplt.subplots_adjust(top=0.9)\nplt.show()\n"
] | [
[
"sklearn.inspection.PartialDependenceDisplay.from_estimator",
"sklearn.preprocessing.QuantileTransformer",
"matplotlib.pyplot.figure",
"sklearn.inspection.partial_dependence",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.pyplot.suptitle",
"sklearn.neural_network.MLPRegressor",
"matplotlib.pyplot.colorbar",
"numpy.meshgrid",
"sklearn.ensemble.HistGradientBoostingRegressor",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.fetch_california_housing"
]
] |
i2mint/examples | [
"1dd52935f6f6175a17af9b8e5ee9e2b10b08cb7f"
] | [
"synthetic_sounds/classification_problems.py"
] | [
"from typing import Iterable, Callable\nimport numpy as np\nfrom examples.synthetic_sounds.util import (\n seeds_to_wfs,\n seed_to_wf_chk,\n DFLT_SEEDS,\n DFLT_CHUNKER,\n chk_tag_gen,\n frame_annots_to_chk_annots,\n)\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\n\n\ndef make_frequency_groups(seeds: Iterable, chk_size: int, class_sep: float):\n freq_dict = {}\n for seed in sorted(list(set(seeds))):\n freq_dict[seed] = 100 + class_sep * 1000 * len(freq_dict)\n\n wfs = seeds_to_wfs(seeds, chk_size, freq_dict, seed_to_wf_chk)\n\n annots = []\n for idx, wf in enumerate(wfs):\n annots.append((np.array([chk_size * idx, chk_size * (idx + 1)]), seeds[idx]))\n\n return wfs, annots\n\n\ndef test_classification_model(\n seeds: Iterable = None,\n n_classes: int = None,\n chk_size: int = 2048 * 5,\n class_sep: float = 1.0,\n chunker=DFLT_CHUNKER,\n chunker_chk_size: int = 1024,\n featurizer: Callable = PCA,\n model: Callable = SVC,\n):\n if n_classes is None and seeds is None:\n raise AttributeError(\"Either seeds or n_classes needs to be specified!\")\n elif seeds is None:\n seeds = list(DFLT_SEEDS[:n_classes])\n elif n_classes is None:\n pass\n else:\n assert len(set(seeds)) == n_classes\n\n wfs, annots = make_frequency_groups(seeds, chk_size, class_sep)\n\n chks, tags = zip(*chk_tag_gen(wfs, seeds, chunker=chunker(chunker_chk_size)))\n\n featurizer = featurizer().fit(chks, tags)\n fvs = featurizer(chks)\n\n model = model().fit(fvs, tags)\n scores = model(fvs)\n\n chk_annots = frame_annots_to_chk_annots(annots, chunker_chk_size)\n classification_wf = np.hstack(wfs)\n\n return scores, chk_annots, classification_wf\n"
] | [
[
"numpy.array",
"numpy.hstack"
]
] |
not-a-hot-dog/parallelized-disease-modeling | [
"e82d052b4841f1c545f4f5c65ce509f1a3418754"
] | [
"generate_data.py"
] | [
"import numpy as np\n\n##Read in data matrix\ndata = np.load(\"data/matrix.npy\")\n\nprint('Creating data...')\n##Generate individual CSVs\nS_init_data = data[:, :, 2]\nI_init_data = data[:, :, 3]\nR_init_data = data[:, :, 4]\nisUS_data = data[:, :, 5]\n#Use fixed beta and gamma for stable simulation\nbeta_data = 0.8 * np.ones(S_init_data.shape)\n# beta_data = data[:, :, 0]*200\ngamma_data = 0.01 * np.ones(S_init_data.shape)\n# gamma_data = data[:, :, 1]/10\ndS_data = 0.01 * np.ones(S_init_data.shape)\ndI_data = 0.3 * np.ones(S_init_data.shape)\ndR_data = 0.01 * np.ones(S_init_data.shape)\n#Prevent diffusion at regions with no people\ndS_data[isUS_data == 0] = 0\ndI_data[isUS_data == 0] = 0\ndR_data[isUS_data == 0] = 0\n\nprint('Saving data...')\nnp.savetxt('data/beta.csv', beta_data, delimiter=',')\nnp.savetxt('data/gamma.csv', gamma_data, delimiter=',')\nnp.savetxt('data/S_init.csv', S_init_data, delimiter=',')\nnp.savetxt('data/I_init.csv', I_init_data, delimiter=',')\nnp.savetxt('data/R_init.csv', R_init_data, delimiter=',')\nnp.savetxt('data/dS.csv', dS_data, delimiter=',')\nnp.savetxt('data/dI.csv', dI_data, delimiter=',')\nnp.savetxt('data/dR.csv', dR_data, delimiter=',')\nnp.savetxt('data/isUS.csv', isUS_data, delimiter=',')"
] | [
[
"numpy.load",
"numpy.ones",
"numpy.savetxt"
]
] |
awardblvr/pv_mppt_test | [
"366015fd418448791b8470baf82e6ecb3686b17d"
] | [
"Graph_MPPT.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\n Import MPPT CSV data and plot it.\n\n CSV format:\n Volts,volts,amps,watts,state,mode_str,panelSN,resistance,timestamp\n 29.646,29.646,0.0,0.0,0,CR,B41J00052893,100000,20210913_120014.79\n 14.267,14.267,0.354,5.05,1,CR,B41J00052893,40.0,20210913_120016.16\n\n\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport argparse\nfrom datetime import datetime as dt\nimport time\nimport pandas as pd\nfrom numpy import *\nimport numpy as np\nfrom mpl_toolkits.axes_grid1 import host_subplot\nfrom mpl_toolkits import axisartist\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\nfrom csv import reader\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4, depth=4).pprint\npp_str = pprint.PrettyPrinter(indent=4, depth=4).pformat\n\ndef plot_df(df):\n '''\n 0 1 2 3 4 5 6 7 8\n 0 Volts, volts, amps, watts, state, mode_str, panelSN, resistance, timestamp\n 1 29.646, 29.646, 0.0, 0.0, 0, CR, B41J00052893, 100000, 20210913_120014.79 <--Voc\n 2 14.267, 14.267, 0.354, 5.05, 1, CR, B41J00052893, 40.0, 20210913_120016.16\n '''\n print(df)\n title_sn = df['panelSN'][1]\n\n volt_series = df['Volts'][1:]\n\n std_voltage_series = np.arange(50, 0, 0-(50.0 /volt_series.size ))\n\n print(f\"{volt_series.size=}\")\n print(f\"std_voltage_series-> size {len(std_voltage_series)}, {std_voltage_series})\")\n\n amps_series = df['amps'][1:]\n watts_series = df['watts'][1:]\n ohms_series = df['resistance'][1:]\n\n # print(volt_series)\n\n fig, ax1 = plt.subplots()\n\n\n color = 'tab:red'\n ax1.set_xlabel('Voltage')\n # ax1.set_ylabel('Current', color=color)\n ax1.set_ylim(1, 6)\n # ax1.plot(volt_series, amps_series, color=color)\n ax1.plot(std_voltage_series, amps_series, color=color, label='Current')\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:blue'\n # ax2.set_ylabel('Watts', color=color) # we already handled the x-label with ax1\n # ax2.plot(volt_series, watts_series, color=color)\n ax2.plot(std_voltage_series, watts_series, color=color, label='Watts')\n ax2.tick_params(axis='y', labelcolor=color)\n\n plt.title(f\"Panel S/N {title_sn}\")\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.legend()\n plt.show()\n\ndef get_and_plot_mppt(df):\n\n # IL = array(ItemList)\n \n host = host_subplot(111, axes_class=axisartist.Axes) # (1 row, 1 column, plot number 1)\n plt.subplots_adjust(right=0.75)\n\n par1 = host.twinx()\n # par2 = host.twinx()\n\n # par2.axis[\"right\"] = par2.new_fixed_axis(loc=\"right\", offset=(60, 0))\n\n par1.axis[\"right\"].toggle(all=True)\n # OUT_FOR_SINGLE par2.axis[\"right\"].toggle() #all=True)\n\n '''\n 0 1 2 3 4 5 6 7 8\n 0 Volts, volts, amps, watts, state, mode_str, panelSN, resistance, timestamp\n 1 29.646, 29.646, 0.0, 0.0, 0, CR, B41J00052893, 100000, 20210913_120014.79 <--Voc\n 2 14.267, 14.267, 0.354, 5.05, 1, CR, B41J00052893, 40.0, 20210913_120016.16\n '''\n\n # print\n # '\\n'.join(['%i: %s' % (n, l[n]) for n in xrange(len(l))])\n\n # print(f\"Current: {['%.2f'.format(x[0]) for x in IL[2:]]}, \\n {[x[2] for x in `IL`[2:]]}\")\n print(\"Voltage: %s\"%(\", \".join([\"%.1f\"%float(x[0]) for x in IL[2:]]))) # , \\n {[x[2] for x in IL[2:]]}\")\n print(\"Current: %s\"%(\", \".join([\"%.1f\"%float(x[2]) for x in IL[2:]]))) # , \\n {[x[2] for x in IL[2:]]}\")\n # OUT_FOR_SINGLE print(f\"Watts: {[x[3] for x in IL[2:]]}, \\n {[x[3] for x in IL[2:]]}\")\n # OUT_FOR_SINGLE print(f\"Resistance: {[x[7] 
for x in IL[2:]]}, \\n {[x[7] for x in IL[2:]]}\")\n\n\n p1, = host.plot([float(x[0]) for x in IL[2:]], [float(x[2]) for x in IL[2:]], label=\"Current\")\n p2, = par1.plot([float(x[0]) for x in IL[2:]], [float(x[3]) for x in IL[2:]], label=\"Watts\")\n # OUT_FOR_SINGLE p3, = host.plot([x[7] for x in IL[2:]], [x[7] for x in IL[2:]], label=\"Resistance\")\n\n xlim_min = 0 # min([x[0] for x in IL[2:]])\n xlim_max = 50 # max([x[0] for x in IL[2:]])\n print(f\"X-Axis {xlim_min=}, {xlim_max=}\")\n\n ylim_min = min([x[2] for x in IL[2:]])\n ylim_max = max([x[2] for x in IL[2:]])\n print(f\"Y-Axis {ylim_min=}, {ylim_max=}\")\n\n host.set_xlim( xlim_min, xlim_max) # X Axis (Voltage)\n host.set_ylim( ylim_min, ylim_max) # # Left Y Axis (Current)\n par1.set_ylim( 0, 200) # Right Y Axis 1 (Wattage)\n # OUT_FOR_SINGLE par2.set_ylim( IL[2][7], IL[-1][7]) # Right Y Axis 2 (Resistance)\n\n host.set_xlabel(\"Voltage\")\n host.set_ylabel(\"Current (Amps)\")\n par1.set_ylabel(\"Watts\")\n # OUT_FOR_SINGLE par2.set_ylabel(\"Load Resistance\")\n\n host.legend()\n\n host.axis[\"left\"].label.set_color(p1.get_color())\n par1.axis[\"right\"].label.set_color(p2.get_color())\n # OUT_FOR_SINGLE par2.axis[\"right\"].label.set_color(p3.get_color())\n\n # from MAYBE related examples axes.yaxis.set_major_locator(MaxNLocator(5))\n host.yaxis.set_major_locator(MaxNLocator(10))\n host.xaxis.set_major_locator(MaxNLocator(8))\n # par1.yaxis.set_major_locator(MaxNLocator(8))\n\n plt.show()\n\ndef main(arguments=None):\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('infile', help=\"Input file\") # type=argparse.FileType('r'))\n # parser.add_argument('-o', '--outfile', help=\"Output file\",\n # default=sys.stdout, type=argparse.FileType('w'))\n\n args = parser.parse_args(arguments)\n\n # print(pp_str(args))\n\n # read csv file as a list of lists\n\n # with open(args.infile, 'r') as read_obj:\n # # pass the file object to reader() to get the reader object\n # csv_reader = reader(read_obj)\n # # Pass reader object to list() to get a list of lists\n # list_of_rows = list(csv_reader)\n # # print(pp_str(list_of_rows))\n # for i in list_of_rows:\n # print(f\"{i}\")\n\n df = pd.read_csv(args.infile)\n\n # get_and_plot_mppt(df)\n\n plot_df(df)\n\n\n\nif __name__ == '__main__':\n\n main(sys.argv[1:])\n # time.sleep(2.612)\n\n\n sys.exit(0)\n"
] | [
[
"matplotlib.ticker.MaxNLocator",
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
]
] |
jberkman/pyxfoil | [
"511cb0e2cdc7e2967bc53fa0276351ebf57a90bf"
] | [
"pyxfoil.py"
] | [
"\"\"\"PYXFOIL: XFOIL AUTOMATION USING PYTHON\nLogan Halstrom\nEAE 127\nUCD\nCREATED: 15 SEP 2015\nMODIFIED: 17 OCT 2018\n\nDESCRIPTION: Provides functions for automating XFOIL runs.\nEach function will iteratively build a list of inputs. When you are ready,\nuse the RunXfoil command to run the input list\n\nNOTE: Since input list is predetermined, runs cannot be reiterated.\nMake sure to set the iter limit high enough, that each simulation will\nwork on the first try\n\nTO CALL IN A SCRIPT:\nimport sys\nsys.path.append('path/to/pyxfoil.py')\nimport pyxfoil\n\nFUTURE IMPROVEMENTS:\n\n------------------------------------------------------------------------\nMIT License\n\nCopyright (c) 2017 Logan Halstrom\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n------------------------------------------------------------------------\n\"\"\"\n\nimport os\nimport sys\nimport re\nimport numpy as np\nimport subprocess\nimport pandas as pd\n\n########################################################################\n### GENERAL FILE AND PROCESS UTILITIES #################################\n########################################################################\n\ndef MakeOutputDir(savedir):\n \"\"\"make results output directory if it does not already exist.\n instring --> directory path from script containing folder\n \"\"\"\n #split individual directories\n splitstring = savedir.split('/')\n prestring = ''\n for string in splitstring:\n prestring += string + '/'\n try:\n os.mkdir(prestring)\n except Exception:\n pass\n\ndef GetParentDir(savename):\n \"\"\"Get parent directory from path of file\"\"\"\n #split individual directories\n splitstring = savename.split('/')\n parent = ''\n #concatenate all dirs except bottommost\n for string in splitstring[:-1]:\n parent += string + '/'\n return parent\n\ndef FindBetween(string, before='^', after=None):\n \"\"\"Search 'string' for characters between 'before' and 'after' characters\n If after=None, return everything after 'before'\n Default before is beginning of line\n \"\"\"\n if after == None and before != None:\n match = re.search('{}(.*)$'.format(before), string)\n if match != None:\n return match.group(1)\n else:\n return 'No Match'\n else:\n match = re.search('(?<={})(?P<value>.*?)(?={})'.format(before, after), string)\n if match != None:\n return match.group('value')\n else:\n return 'No Match'\n\ndef IsItWindows():\n \"\"\"Return true if operating system is windows\"\"\"\n return True if os.name == 'nt' else False\n\ndef ErrorMessage(text):\n \"\"\"Format an error output 
message\n \"\"\"\n return \"\\n\\n\" \\\n \"********************************************************************\\n\" \\\n \"{}\\n\" \\\n \"********************************************************************\" \\\n \"\\n\\n\".format(text)\n\n########################################################################\n### XFOIL AUTOMATION CLASS #############################################\n########################################################################\n\nclass Xfoil:\n def __init__(self, foil='0012', naca=True, Re=0, Iter=100,\n xfoilpath=None, headless=True):\n \"\"\"Initialize class for specific airfoil.\n foil --> airfoil name, either NACA digits or path to geometry file\n naca --> True for naca digits, False for geometry file\n Re --> Reynolds number (inviscid if zero)\n Iter --> number of iterations per simulation (XFOIL default: 20)\n xfoilpath --> path to xfoil executable file\n headless --> run xfoil without graphical output (avoids X11/XQuartz dependency)\n \"\"\"\n\n #DETERMINE OPERATING SYSTEM\n self.win = IsItWindows()\n\n #SET PATH TO XFOIL FOR CURRENT OPERATING SYSTEM\n if xfoilpath != None:\n #Manually specify path to Xfoil\n self.xfoilpath = xfoilpath\n elif self.win:\n #Windows default location is in same folder as python script\n self.xfoilpath = 'xfoil.exe'\n #check dependencies\n if not os.path.isfile(self.xfoilpath):\n txt = \"PYXFOIL ERROR: Put xfoil.exe in same folder as pyxfoil.py\"\n sys.exit(ErrorMessage(txt))\n else:\n #Mac Install location\n self.xfoilpath = \"/usr/local/bin/xfoil\"\n #check dependencies\n if not os.path.isfile(self.xfoilpath):\n txt = \"PYXFOIL ERROR: Xfoil is not installed\"\n sys.exit(ErrorMessage(txt))\n if not os.path.isfile('/opt/X11/bin/xquartz'):\n txt = \"PYXFOIL ERROR: X11/xquartz not installed\"\n print(ErrorMessage(txt))\n\n\n #SAVE RUN PARAMETERS\n #Reynolds number\n self.Re = Re\n #Maximum iteration\n self.Iter = Iter\n #MAKE AIRFOIL NAME\n self.naca = naca\n if self.naca:\n #4-digit NACA to be loaded from equation\n self.name = 'naca' + foil\n else:\n #Load airfoil from file\n #airfoil name is between parent path and file extension\n parent = GetParentDir(foil)\n self.name = FindBetween(foil, parent, '\\.')\n #CREATE SAVE DIRECTORY\n #Save in Data/airfoilname/\n self.savepath = 'Data/{}'.format(self.name)\n MakeOutputDir(self.savepath)\n\n #INITIALIZE COMMAND INPUT LIST\n self.input = ''\n\n #TURN OFF GRAPHICS (MAKE XFOIL \"HEADLESS\")\n #avoids XQuartz incompatibility\n if headless:\n self.TurnOffGraphics()\n\n #LOAD AIRFOIL (AND START INPUT LIST)\n self.foil = foil\n self.LoadGeom()\n\n def AddInput(self, cmd):\n \"\"\"Add input command to command list\n cmd --> string command to add\n \"\"\"\n self.input += '{}\\n'.format(cmd)\n\n def RunXfoil(self, quiet=True):\n \"\"\"Once input command list has been built, run all commands with this\n quiet --> true for no XFOIL output to screen\n \"\"\"\n #Supress output if quiet option, otherwise write XFOIl output to screen\n stdout = open(os.devnull, 'wb') if quiet else None\n\n #START XFOIL\n xf = subprocess.Popen(self.xfoilpath,\n stdin=subprocess.PIPE,\n stdout=stdout,\n stderr=None,)\n #XFOIL SUBPROCESS\n self.xf = xf\n #Pipe inputs into xfoil\n res = xf.communicate( self.input.encode('utf-8') )\n #Space output with a few newlines\n if not quiet:\n print('\\n\\n\\n')\n\n def LoadGeom(self):\n \"\"\"Load given airfoil, either NACA number or file path\n \"\"\"\n if self.naca:\n #Load NACA airfoil based on given digits\n self.AddInput( 'naca {}'.format(self.foil) )\n else:\n 
#check dependencies\n if not os.path.isfile(self.foil):\n txt = \"PYXFOIL ERROR: Geometry input file does not exist/\" \\\n \"in wrong location\\n({})\".format(self.foil)\n sys.exit(ErrorMessage(txt))\n if len([l for l in open(self.foil, 'r')]) < 2:\n txt = \"PYXFOIL ERROR: Geometry input file is empty (no data)\" \\\n \"\\nDownload or create new file: ({})\".format(self.foil)\n sys.exit(ErrorMessage(txt))\n\n #Load geometry from file path\n self.AddInput('load {}'.format( self.foil) )\n\n def SaveGeom(self, overwrite=True):\n \"\"\"Save airfoil geometry. MUST BE CALLED IN TOP MENU.\n overwrite --> Overwrite file if it exists\n \"\"\"\n savename = self.SaveNameGeom()\n if not os.path.isfile(savename) and overwrite:\n self.AddInput( 'save {}'.format( savename ) )\n\n def EnterOperMenu(self):\n \"\"\"Set up 'oper' menu for inviscid or viscous operations.\n Call from top menu after loading geometry.\n \"\"\"\n #ENTER OPERATIONS MENU\n self.AddInput('oper')\n if self.Re != 0:\n #VISCOULS SIMULATION WITH GIVEN REYNOLDS NUMBER\n self.AddInput('visc {}'.format( self.Re ) )\n #SET ITERATION NUMBER\n self.AddInput('iter {}'.format( self.Iter ))\n\n def SingleAlfa(self, alf, SaveCP=True):\n \"\"\"Simulate airfoil at a single angle of attack.\n Must be run in 'oper' menu.\n alf --> angle of attack to simulate\n SaveCP --> Save individual surface pressure distributions\n \"\"\"\n self.AddInput('alfa {}'.format( alf ) )\n if SaveCP:\n savename = self.SaveNameSurfCp(alf)\n self.AddInput('cpwr {}'.format(savename) )\n\n def Polar(self, alfs, SaveCP=True, overwrite=True):\n \"\"\"Create and save polar for airfoil. Call in top menu after\n loading geometry.\n alfs --> list of alphas to run\n SaveCP --> Save individual surface pressure distributions\n overwrite --> overwrite polar file (otherwise append new alphas)\n \"\"\"\n\n #STORE RUN INFO\n if type(alfs) == float or type(alfs) == int:\n #angle of attack input must be array-like\n alfs = [alfs]\n self.alfs = alfs\n #SET REYNOLDS NUMBER\n self.EnterOperMenu()\n\n #SET UP POLAR ACCUMULATION\n # if len(alfs) > 1:\n savename = self.SaveNamePolar(alfs)\n\n if os.path.isfile(savename) and overwrite:\n os.remove(savename) #Remove polar file if starting new\n #TURN POLAR ACCUMULATION ON\n self.AddInput('pacc')\n #Submit Polar File Name\n self.AddInput(savename)\n #Skip Polar Dumpfile Name\n self.AddInput('')\n # self.AddInput(self.savename + 'dump.dat')\n # self.AddInput('pacc'; savename; self.savename + 'dump.dat')\n\n #SIMULATE EACH ANGLE OF ATTACK\n for alf in alfs:\n self.SingleAlfa(alf, SaveCP)\n\n # if len(alfs) > 1:\n #TURN POLAR ACCUMULATION OFF\n self.AddInput('pacc')\n\n def Quit(self):\n \"\"\"Quit XFOIL by going to top-most menu and issuing 'quit' command\n \"\"\"\n self.AddInput('')\n self.AddInput('')\n self.AddInput('')\n self.AddInput('')\n self.AddInput('quit')\n\n def TurnOffGraphics(self,):\n \"\"\" Turn off XFOIL graphical output so that XFOIL can run 'headless'.\n Use this to avoid XQuartz compatibility issues and to simplify output to screen.\n \"\"\"\n #Enter Plotting Options Menu\n self.AddInput('plop')\n #Turn graphics option to False\n self.AddInput('g f')\n #Return to main menu\n self.AddInput('')\n\n def SaveNameGeom(self,):\n \"\"\"Make save filename for airfoil geometry\n \"\"\"\n return '{}/{}.dat'.format(self.savepath, self.name)\n\n def SaveNameSurfCp(self, alf):\n \"\"\"Make save filename for airfoil surface pressure based on current\n airfoil, Reynolds number, and angle of attack\n alf --> current angle of attack\n \"\"\"\n 
return '{}/{}_surfCP_Re{:1.2e}a{:1.1f}.dat'.format(\n self.savepath, self.name, self.Re, alf)\n\n def SaveNamePolar(self, alfs):\n \"\"\"Make save filename for airfoil polar based on\n airfoil, Reynolds number, and angle of attack\n alfs --> Range of angles of attack to run\n \"\"\"\n if type(alfs) == float or type(alfs) == int:\n #angle of attack input must be array-like\n alfs = [alfs]\n if len(alfs) == 1:\n #only one provided angle of attack\n alfrange = 'a{:1.2f}'.format(alfs[0])\n else:\n #use least and greatest angle of attack for name\n alfrange = 'a{:1.1f}-{:1.1f}'.format(alfs[0], alfs[-1])\n return '{}/{}_polar_Re{:1.2e}{}.dat'.format(\n self.savepath, self.name, self.Re, alfrange)\n\n\n\n########################################################################\n### XFOIL FILE I/O #####################################################\n########################################################################\n\ndef ReadXfoilAirfoilGeom(filename):\n \"\"\"Read in XFOIL airfoil geometry file data, skipping title lines\n filename --> path to file\n \"\"\"\n df = pd.read_csv(filename, delim_whitespace=True, skiprows=1,\n names=['x', 'z'])\n return df\n\ndef ReadXfoilSurfPress(filename):\n \"\"\"Read in XFOIL surface pressure coefficient data, skipping title lines\n filename --> path to file\n \"\"\"\n if IsItWindows():\n #Windows file format\n names = ['x', 'y', 'Cp']\n skip = 3\n else:\n #Mac file format\n names = ['x', 'Cp']\n skip = 1\n #read file\n df = pd.read_csv(filename, delim_whitespace=True, skiprows=skip, names=names)\n return df\n\ndef ReadXfoilPolar(filename):\n \"\"\"Read in XFOIL polar file data, skipping title lines\n filename --> path to polar data file\n \"\"\"\n df = pd.read_csv(filename, delim_whitespace=True, skiprows=12,\n names=['alpha', 'Cl', 'Cd', 'Cdp', 'Cm', 'Top_Xtr', 'Bot_Xtr'])\n return df\n\ndef WriteXfoilFile(name, x, z):\n \"\"\"Write 2-column XFOIL file with fixed-width separation.\n First line is 'name'. 
Works best for writting geometry.\n \"\"\"\n ofile = open(name, 'w')\n ofile.write('foil\\n')\n for xx, zz in zip(x, z):\n #XYZ POINTS FORMATED IN 3, 16-WIDE COLUMNS\n #< : left-aligned,\n #14 : 14 spaces reserved in column,\n #.7 : 7 spaces reserved after decimal point,\n #f : float\n ofile.write(' {:<14.7f}{:<14.7f}\\n'.format(xx, zz))\n ofile.close()\n\n########################################################################\n### MAIN ###############################################################\n########################################################################\n\ndef GetPolar(foil='0012', naca=True, alfs=[0], Re=0,\n SaveCP=True, Iter=100, pane=False,\n overwrite=True, quiet=True):\n \"\"\"For a single airfoil at a single Reynolds number,\n create a polar with given alphas.\n foil --> naca digits or path to geom file\n naca --> True for naca digits, False for file path\n alfs --> list of alphas to run\n Re --> Reynolds number (default invisc)\n SaveCp --> save each individual pressure distribution\n pane --> smooth geometry before simulation (can cause instability)\n overwrite --> overwrite existing save files\n quiet --> Supress XFOIL output\n \"\"\"\n #INITIALIZE XFOIL OBJECT\n obj = Xfoil(foil, naca, Re, Iter=Iter)\n #GEOMETRY\n #condition panel geometry (use for rough shapes, not on smooth shapes)\n if pane:\n obj.AddInput('pane')\n #Save geometry for later slope calculations\n obj.SaveGeom()\n #RUN AND SAVE ALL POLAR CASES\n obj.Polar(alfs, SaveCP=SaveCP, overwrite=overwrite)\n #Quit XFOIL\n obj.Quit()\n #Run Input List In XFOIL\n obj.RunXfoil(quiet=quiet)\n\n return obj\n\n\n\n\ndef main(foil, naca, alfs, Re, Iter=30):\n \"\"\"\n foil --> path to airfoil file or naca 4-digit number\n naca --> boolean if naca or not\n alfs --> list of angle of attacks for airfoils (deg)\n Re --> Reynolds number to run\n Iter --> maximum number of iterations for each simulation\n \"\"\"\n\n obj = Xfoil(foil, naca, Re, Iter) #initialize xfoil\n obj.SaveGeom() #save airfoil geometry\n obj.EnterOperMenu() #set up operations, reynolds, iteration number\n obj.SingleAlfa(alfs[0]) #command to run single alpha case\n obj.Polar(alfs) #Command to run polar case\n obj.Quit() #command to quit XFOIL when done\n\n obj.RunXfoil() #Run all commands at once\n\n print('done')\n\nif __name__ == \"__main__\":\n\n foils = ['0012', 'Data/s1223.dat']\n nacas = [True, False]\n alfs = [0, 10]\n Re = 2e5\n\n for foil, naca in zip(foils, nacas):\n main(foil, naca, alfs, Re)\n\n\n\n\n\n\n\n"
] | [
[
"pandas.read_csv"
]
] |
michael-kuhlmann/padertorch | [
"3925fa693299ffb166d82dd81deed997237e85c7"
] | [
"padertorch/contrib/je/data/mixup.py"
] | [
"from lazy_dataset import Dataset, FilterException\nimport numpy as np\nimport numbers\n\n\nclass MixUpDataset(Dataset):\n \"\"\"\n >>> ds = MixUpDataset(range(10), SampleMixupComponents((.0,1.)), (lambda x: x), buffer_size=2)\n >>> list(ds)\n \"\"\"\n def __init__(self, input_dataset, sample_fn, mixup_fn, buffer_size=100):\n \"\"\"\n Combines examples from input_dataset and mixin_dataset into tuples.\n\n Args:\n input_dataset: lazy dataset providing example dict with key audio_length.\n sample_fn: sample_fn(buffer) returning a list of examples from buffer for mixup.\n \"\"\"\n self.input_dataset = input_dataset\n self.buffer = []\n self.buffer_size = buffer_size\n self.sample_fn = sample_fn\n self.mixup_fn = mixup_fn\n\n def __len__(self):\n return len(self.input_dataset)\n\n def __iter__(self):\n for example in self.input_dataset:\n self.buffer.append(example)\n if len(self.buffer) > self.buffer_size:\n examples = self.sample_fn(self.buffer)\n if len(examples) == 1:\n yield examples[0]\n elif len(examples) > 1:\n yield self.mixup_fn(examples)\n else:\n raise ValueError('sample_fn has to return at least one example')\n self.buffer.pop(0)\n else:\n yield example\n\n def copy(self, freeze=False):\n return self.__class__(\n input_dataset=self.input_dataset.copy(freeze=freeze),\n sample_fn=self.sample_fn,\n mixup_fn=self.mixup_fn,\n buffer_size=self.buffer_size,\n )\n\n @property\n def indexable(self):\n return False\n\n\nclass SampleMixupComponents:\n \"\"\"\n >>> sample_fn = SampleMixupComponents((0,1.))\n >>> buffer = list(range(10))\n >>> sample_fn(buffer)\n >>> buffer\n \"\"\"\n def __init__(self, mixup_prob):\n self.mixup_prob = mixup_prob\n\n def __call__(self, buffer):\n examples = [buffer[-1]]\n num_mixins = np.random.choice(len(self.mixup_prob), p=self.mixup_prob)\n num_mixins = min(num_mixins, len(buffer) - 1)\n if num_mixins > 0:\n idx = np.random.choice(len(buffer)-1, num_mixins, replace=False)\n examples.extend(buffer[i] for i in idx)\n return examples\n\n\nclass SuperposeEvents:\n \"\"\"\n >>> mixup_fn = SuperposeEvents(min_overlap=0.5)\n >>> example1 = {'example_id': '0', 'dataset': '0', 'stft': np.ones((1, 10, 9, 2)), 'events': np.array([0,1,0,0,1]), 'events_alignment': np.array([0,1,0,0,1])[:,None].repeat(10,axis=1)}\n >>> example2 = {'example_id': '1', 'dataset': '1', 'stft': -np.ones((1, 8, 9, 2)), 'events': np.array([0,0,1,0,0]), 'events_alignment': np.array([0,0,1,0,0])[:,None].repeat(8,axis=1)}\n >>> mixup_fn([example1, example2])\n \"\"\"\n def __init__(self, min_overlap=1., max_length=None):\n self.min_overlap = min_overlap\n self.max_length = max_length\n\n def __call__(self, components):\n assert len(components) > 0\n start_indices = [0]\n stop_indices = [components[0]['stft'].shape[1]]\n for comp in components[1:]:\n l = comp['stft'].shape[1]\n min_start = -int(l*(1-self.min_overlap))\n max_start = components[0]['stft'].shape[1] - int(np.ceil(self.min_overlap*l))\n if self.max_length is not None:\n min_start = max(\n min_start, max(stop_indices) - self.max_length\n )\n max_start = min(\n max_start, min(start_indices) + self.max_length - l\n )\n if max_start < min_start:\n raise FilterException\n start_indices.append(\n int(min_start + np.random.rand() * (max_start - min_start + 1))\n )\n stop_indices.append(start_indices[-1] + l)\n start_indices = np.array(start_indices)\n stop_indices = np.array(stop_indices)\n stop_indices -= start_indices.min()\n start_indices -= start_indices.min()\n\n stft_shape = list(components[0]['stft'].shape)\n stft_shape[1] = 
stop_indices.max()\n mixed_stft = np.zeros(stft_shape, dtype=components[0]['stft'].dtype)\n if 'events_alignment' in components[0]:\n assert all(['events_alignment' in comp for comp in components])\n alignment_shape = list(components[0]['events_alignment'].shape)\n alignment_shape[1] = stop_indices.max()\n mixed_alignment = np.zeros(alignment_shape)\n else:\n mixed_alignment = None\n for comp, start, stop in zip(components, start_indices, stop_indices):\n mixed_stft[:, start:stop] += comp['stft']\n if mixed_alignment is not None:\n mixed_alignment[:, start:stop] += comp['events_alignment']\n\n mix = {\n 'example_id': '+'.join([comp['example_id'] for comp in components]),\n 'dataset': '+'.join(sorted(set([comp['dataset'] for comp in components]))),\n 'stft': mixed_stft,\n 'seq_len': mixed_stft.shape[1],\n }\n if all(['events' in comp for comp in components]):\n mix['events'] = (np.sum([comp['events'] for comp in components], axis=0) > .5).astype(components[0]['events'].dtype)\n if mixed_alignment is not None:\n mix['events_alignment'] = (mixed_alignment > .5).astype(components[0]['events_alignment'].dtype)\n return mix\n"
] | [
[
"numpy.sum",
"numpy.ceil",
"numpy.zeros",
"numpy.random.rand",
"numpy.array"
]
] |
achinta/CategoricalNF | [
"d8717a037e8f13641e9d9a89abf66fba38e23f91"
] | [
"experiments/graph_coloring/datasets/graph_coloring_generation.py"
] | [
"import random\nimport numpy as np\nimport networkx as nx\nimport sys, os, json, argparse, itertools\nimport grinpy as gp\nimport time\nfrom glob import glob\nfrom multiprocessing import Pool\nfrom ortools.sat.python import cp_model\n\n\"\"\"\nThis code is based on https://github.com/machine-reasoning-ufrgs/GNN-GCP\n\"\"\"\n\n\ndef solve_csp(M, n_colors, nmin=25):\n model = cp_model.CpModel()\n N = len(M)\n variables = []\n \n variables = [ model.NewIntVar(0, n_colors-1, '{i}'.format(i=i)) for i in range(N) ]\n \n for i in range(N):\n for j in range(i+1,N):\n if M[i][j] == 1:\n model.Add( variables[i] != variables [j] )\n \n solver = cp_model.CpSolver()\n solver.parameters.max_time_in_seconds = int( ((10.0 / nmin) * N) )\n status = solver.Solve(model)\n \n if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL :\n solution = dict()\n for k in range(N):\n solution[k] = solver.Value(variables[k])\n return solution\n elif status == cp_model.INFEASIBLE:\n return None\n else:\n raise Exception(\"CSP is unsure about the problem\")\n\n\ndef is_cn(Ma, cn_i):\n if solve_csp(Ma, cn_i-1) == None:\n return True\n else:\n return False\n\n\ndef multiprocessing_dataset_generation(nmin, nmax, ncolors, path, samples, seed, num_workers=8):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\t# For increasing sampling speed, we create multiple workers/processes in parallel\n\tsamples_per_worker = int(samples//num_workers)\n\tp = Pool()\n\targs_list = [(nmin, nmax, ncolors, path, samples_per_worker, samples_per_worker*i, seed+i) for i in range(num_workers)]\n\tp.map(_create_simple_dataset_tuple, args_list)\n\n\ndef _create_simple_dataset_tuple(args):\n\tnmin, nmax, ncolors, path, samples, start_idx, seed = args\n\tcreate_simple_dataset(nmin, nmax, ncolors, path, samples, start_idx, seed)\n\n\ndef create_simple_dataset(nmin, nmax, ncolors, path, samples, start_idx=0, seed=123):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\t\t\n\texport_pack = 500\n\tall_solutions = {\"N\": np.zeros((export_pack,), dtype=np.uint8), \n\t\t\t\t\t \"adjacency\": -np.ones((export_pack, nmax, nmax), dtype=np.int8), \n\t\t\t\t\t \"nodes\": -np.ones((export_pack, nmax), dtype=np.int8),\n\t\t\t\t\t \"graph_idx\": -np.ones((export_pack,), dtype=np.int32),\n\t\t\t\t\t \"idx\": 0}\n\n\tdef export_solution(Ma, init_sol, z, graph_idx=-1):\n\t\tN, Ma, sol = write_solution(Ma=Ma, init_sol=init_sol, save_path=None)\n\t\tsol_idx = all_solutions[\"idx\"]\n\t\tall_solutions[\"N\"][sol_idx] = N\n\t\tall_solutions[\"adjacency\"][sol_idx,:N,:N] = Ma.astype(np.uint8)\n\t\tall_solutions[\"nodes\"][sol_idx,:N] = sol\n\t\tall_solutions[\"graph_idx\"][sol_idx] = graph_idx\n\n\t\tall_solutions[\"idx\"] += 1\n\t\tif all_solutions[\"idx\"] >= export_pack:\n\t\t\tall_solutions.pop(\"idx\")\n\t\t\tnp.savez_compressed(os.path.join(path, \"samples_%s_%s.npz\" % (str(z-export_pack+2).zfill(7), str(z+1).zfill(7))), \n\t\t\t\t\t\t\t\t**all_solutions)\n\n\t\t\tall_solutions[\"N\"].fill(0)\n\t\t\tall_solutions[\"adjacency\"].fill(-1)\n\t\t\tall_solutions[\"nodes\"].fill(-1)\n\t\t\tall_solutions[\"graph_idx\"].fill(-1)\n\t\t\tall_solutions[\"idx\"] = 0\n\n\t# Adjacency density ratio to sample from. 
\n\tedge_prob_constraints = {3: (0.1, 0.3), 4: (0.15, 0.3)}\n\n\tnp.random.seed(seed)\n\trandom.seed(seed)\n\tz = start_idx\n\tN = np.random.randint(nmin, nmax+1)\n\twhile z in range(start_idx,samples+start_idx):\n\t\tN = np.random.randint(nmin, nmax+1)\n\t\tsave_path = os.path.join(path, \"sample_%s.npz\" % (str(z).zfill(6)))\n\t\tfound_sol = False\n\t\t\n\t\tCn = ncolors\n\t\tlim_inf, lim_sup = edge_prob_constraints[Cn][0], edge_prob_constraints[Cn][1]\n\t\tlim_sup = min(lim_sup, nmax/N*(lim_inf+lim_sup)/2.0)\n\n\t\tp_connected = random.uniform(lim_inf, lim_sup)\n\t\tMa = gen_matrix(N, p_connected)\n\n\t\tinit_sol = solve_csp(Ma, Cn)\n\t\tif init_sol is not None and is_cn(Ma,Cn):\n\t\t\texport_solution(Ma, init_sol, z)\n\t\t\tfound_sol = True\n\n\t\tif found_sol:\n\t\t\tz += 1\n\t\t\tif z % 100 == 0:\n\t\t\t\tprint(\"Completed %i (%4.2f%%) in [%i,%i] samples...\" % (z-start_idx, (z-start_idx)*100.0/samples, start_idx, start_idx+samples))\n\n\ndef write_solution(Ma, init_sol, save_path=None):\n\tN = Ma.shape[0]\n\tsol = np.zeros(N, dtype=np.uint8)\n\tfor i in range(N):\n\t\tsol[i] = int(init_sol[i])\n\tif save_path is not None:\n\t\tnp.savez_compressed(save_path, adjacency=Ma, nodes=sol)\n\telse:\n\t\treturn (N, Ma, sol)\n\n\ndef combine_solution_files(save_path):\n\tprint(\"Combining solution files...\")\n\tsample_files = sorted(glob(os.path.join(save_path, \"sample*.npz\")))\n\tnodes, adjacency = None, None\n\tfor filename in sample_files:\n\t\tdata_arr = np.load(filename)\n\t\tif nodes is None and adjacency is None:\n\t\t\tnodes, adjacency = data_arr[\"nodes\"], data_arr[\"adjacency\"]\n\t\telse:\n\t\t\tnodes = np.concatenate([nodes, data_arr[\"nodes\"]], axis=0)\n\t\t\tadjacency = np.concatenate([adjacency, data_arr[\"adjacency\"]], axis=0)\n\tnp.savez_compressed(os.path.join(save_path, \"samples_combined.npz\"), nodes=nodes, adjacency=adjacency)\n\n\ndef gen_matrix(N, prob):\n\tMa = np.zeros((N,N))\n\tMa = np.random.choice([0,1], size=(N, N), p=[1-prob,prob])\n\ti_lower = np.tril_indices(N, -1)\n\tMa[i_lower] = Ma.T[i_lower] # make the matrix symmetric\n\tnp.fill_diagonal(Ma, 0)\n\n\t# Ensuring that every node has at least 1 connection\n\twhile np.min(Ma.sum(axis=0)) == 0:\n\t\tidx = np.argmin(Ma.sum(axis=0))\n\t\tMa[idx,:] = np.random.choice([0,1], size=(N,), p=[1-prob,prob])\n\t\tMa[:,idx] = Ma[idx,:]\n\t\tMa[idx,idx] = 0\n\n\t# Test that the whole graph is connected\n\tconnect = np.zeros((N,))\n\tconnect[0] = 1\n\tMa_diag = np.eye(N) + Ma\n\twhile (1 - connect).sum() > 0:\n\t\tnew_connect = ((connect[None,:] * Ma_diag).sum(axis=1) > 0).astype(connect.dtype)\n\t\tif np.any(new_connect != connect):\n\t\t\tconnect = new_connect\n\t\telse:\n\t\t\tnum_choices = 3\n\t\t\tstart_nodes = np.random.choice(np.where(connect>0)[0], size=(num_choices,))\n\t\t\tend_nodes = np.random.choice(np.where(connect==0)[0], size=(num_choices,))\n\t\t\tMa[start_nodes, end_nodes] = 1\n\t\t\tMa[end_nodes, start_nodes] = 1\n\t\t\tMa_diag = np.eye(N) + Ma\n\n\treturn Ma\n\n\nif __name__ == '__main__':\n\t# Define argument parser\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--path', help='Path to which the files should be saved.', type=str, required=True)\n\tparser.add_argument('--samples', help='Number of samples to generate', type=int, default=2e5)\n\tparser.add_argument('--nmin', default=25, help='Minimum number of nodes in a graph', type=int)\n\tparser.add_argument('--nmax', default=50, help='Maximum number of nodes in a graph', type=int)\n\tparser.add_argument('--ncolor', default=3, 
help='Number of colors to use for the graph coloring', type=int)\n\tparser.add_argument('--train', help='If train is selected, we use a different seed', action='store_true')\n\n\t# Parse arguments from command line\n\targs = parser.parse_args()\n\tseed = 1327 if args.train else 3712\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\n\t# Start the generation process\n\tprint('Creating {} instances'.format(args.samples))\n\tmultiprocessing_dataset_generation(\n\t\t\targs.nmin, args.nmax,\n\t\t\tncolors=args.ncolor,\n\t\t\tsamples=args.samples,\n\t\t\tpath=args.path,\n\t\t\tseed=seed\n\t\t)\n\tcombine_solution_files(args.path)"
] | [
[
"numpy.load",
"numpy.eye",
"numpy.ones",
"numpy.zeros",
"numpy.any",
"numpy.tril_indices",
"numpy.random.seed",
"numpy.random.choice",
"numpy.where",
"numpy.fill_diagonal",
"numpy.concatenate",
"numpy.random.randint",
"numpy.savez_compressed"
]
] |
ARKseal/crawlingathome-gpu-hcloud | [
"73f185df36eb5e420e6513bcf1b518dd21499b76"
] | [
"helpers/bloom.py"
] | [
"# use this file inside every minute cron in order to recalculate bloom filters. location: staging server\n# folder structure\n# /home/archiveteam/CAH/\n# |_bloom archiveteam@IP::bloom contains bloom filters\n# |_clipped contains clipped lists\n# |_ds contains files ready to be sent to the eye\n# |_hashes contains list of hashes of files inserted into the dataset\n# |_results archiveteam@IP::CAH incoming folder for the final results from workers\n\n# Stacked bloom filters. Naming convention:\n# frozen filters: filter.bin, filter1.bin, filter2.bin\n# active filters: filter_active.bin\n#\n#\nimport sys\nimport time\nimport requests\nimport pandas as pd\nfrom glob import glob\nfrom pathlib import Path\nfrom datetime import datetime\nfrom bloom_filter2 import BloomFilter\n\n# update the bloom server filters too\nbloomip = \"116.202.162.146\"\n\nserverbloom = BloomFilter(max_elements=10000000, error_rate=0.01, filename=(f\"/home/archiveteam/bloom-{bloomip}.bin\",-1))\nserverclip = BloomFilter(max_elements=10000000, error_rate=0.01, filename=(f\"/home/archiveteam/clip-{bloomip}.bin\",-1))\n\nstart = time.time()\nnow = datetime.now().strftime(\"%Y/%m/%d_%H:%M\")\n\nfailed = BloomFilter(max_elements=10000000, error_rate=0.01, filename=(\"/home/archiveteam/CAH/bloom/failed-domains.bin\",-1))\nfilesfailed = BloomFilter(max_elements=100000, error_rate=0.01, filename=(\"/home/archiveteam/filesfailed.bin\",-1))\n\ntime.sleep(5)\ncounter = 0\nuniques = 0\nfor file in glob(\"/home/archiveteam/CAH/hashes/*.hsh\"):\n stem = Path(file).stem.strip(\".\")\n if stem not in serverbloom:\n with open(file,\"rt\") as f:\n for line in f.readlines():\n counter += 1\n post = {\n 'file': (stem, open(file, 'rb')),\n 'key': (None, 'main'),\n }\n response = requests.post(f'http://{bloomip}:8000/add/', files=post)\n if response.status_code == 200:\n serverbloom.add(stem)\n uniques = int(response.text)\n\nfailed_counter = 0\nfor file in glob(\"/home/archiveteam/CAH/bloom/*.txt\"):\n stem = Path(file).stem.strip(\".\")\n if stem not in filesfailed:\n with open(file,\"rt\") as f:\n for line in f.readlines():\n line = line.strip()\n if line not in failed:\n failed.add(line)\n failed_counter += 1\n filesfailed.add(stem)\n\nclipped_counter = 0\nfor file in glob(\"/home/archiveteam/CAH/clipped/*.clp\"):\n stem = Path(file).stem.strip(\".\")\n if stem not in serverclip:\n post = {\n 'file': (stem, open(file, 'rb')),\n 'key': (None, 'clipped'),\n }\n response = requests.post(f'http://{bloomip}:8000/add/', files=post)\n if response.status_code == 200:\n serverclip.add(stem)\n clipped_counter = int(response.text)\n\npd.set_option('precision', 2)\ndf = pd.read_csv(\"bloom.log\", sep=\" \",header=None, names=[\"Date\", \"a\", \"unique pairs (5%)\", \"b\", \"total including duplicates\",\"c\",\"clipped filter (5%)\",\"d\",\"failed filter\",\"e\"])\ndf[\"Date\"]=df.Date.apply(lambda x: datetime.strptime(x, \"[%Y/%m/%d_%H:%M]\"))\ndf[\"unique pairs (5%)\"]=df[\"unique pairs (5%)\"]/1000000\ndf[\"total including duplicates\"]=df[\"total including duplicates\"]/1000000\ndf[\"clipped filter (5%)\"]=df[\"clipped filter (5%)\"]/1000000\n\nif uniques > 0:\n print(f\"[{now}] added {uniques} \\\"from total of\\\" {counter} \\\"(i.e. 
{round((counter-uniques)*100/(counter+sys.float_info.epsilon),2)}% duplication in {round(time.time()-start,2)} sec) Also added \\\" {clipped_counter} \\\"clipped and\\\" {failed_counter} failed\")\n with open('dashboard.txt', 'w') as file:\n file.write(\"<h5><a href='http://cah.io.community'>Crawling at Home project</a></h5>\\n\")\n file.write(\"<h1>Bloom filters status</h1>\\n\")\n file.write(\"<h2>All time stats</h2>\\n\")\n file.write(\"<h5>initialized from first parquet files</h5>\\n\")\n file.write(str(df.sum(axis=0, numeric_only=True)).replace(\"\\n\",\"<br/>\"))\n file.write(\"<br/><br/>\")\n file.write(\"<h2>Last day stats</h2>\\n\")\n file.write(str(df[df.Date > datetime.now() - pd.to_timedelta(\"1day\")].sum(axis=0, numeric_only=True)).replace(\"\\n\",\"<br/>\"))\n file.write(\"<h2>Last week stats</h2>\\n\")\n file.write(\"<h5>Last reset date: 02 August 2021</h5>\\n\")\n file.write(str(df[df.Date > datetime.now() - pd.to_timedelta(\"7day\")].sum(axis=0, numeric_only=True)).replace(\"\\n\",\"<br/>\"))\n"
] | [
[
"pandas.read_csv",
"pandas.to_timedelta",
"pandas.set_option"
]
] |
ikingye/scikit-learn | [
"3254e98a79e5c1172c794ad38f222dc45f0fb65c"
] | [
"examples/applications/plot_tomography_l1_reconstruction.py"
] | [
"\"\"\"\n======================================================================\nCompressive sensing: tomography reconstruction with L1 prior (Lasso)\n======================================================================\n\nThis example shows the reconstruction of an image from a set of parallel\nprojections, acquired along different angles. Such a dataset is acquired in\n**computed tomography** (CT).\n\nWithout any prior information on the sample, the number of projections\nrequired to reconstruct the image is of the order of the linear size\n``l`` of the image (in pixels). For simplicity we consider here a sparse\nimage, where only pixels on the boundary of objects have a non-zero\nvalue. Such data could correspond for example to a cellular material.\nNote however that most images are sparse in a different basis, such as\nthe Haar wavelets. Only ``l/7`` projections are acquired, therefore it is\nnecessary to use prior information available on the sample (its\nsparsity): this is an example of **compressive sensing**.\n\nThe tomography projection operation is a linear transformation. In\naddition to the data-fidelity term corresponding to a linear regression,\nwe penalize the L1 norm of the image to account for its sparsity. The\nresulting optimization problem is called the :ref:`lasso`. We use the\nclass :class:`~sklearn.linear_model.Lasso`, that uses the coordinate descent\nalgorithm. Importantly, this implementation is more computationally efficient\non a sparse matrix, than the projection operator used here.\n\nThe reconstruction with L1 penalization gives a result with zero error\n(all pixels are successfully labeled with 0 or 1), even if noise was\nadded to the projections. In comparison, an L2 penalization\n(:class:`~sklearn.linear_model.Ridge`) produces a large number of labeling\nerrors for the pixels. Important artifacts are observed on the\nreconstructed image, contrary to the L1 penalization. 
Note in particular\nthe circular artifact separating the pixels in the corners, that have\ncontributed to fewer projections than the central disk.\n\"\"\"\n\nprint(__doc__)\n\n# Author: Emmanuelle Gouillart <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy import ndimage\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import Ridge\nimport matplotlib.pyplot as plt\n\n\ndef _weights(x, dx=1, orig=0):\n x = np.ravel(x)\n floor_x = np.floor((x - orig) / dx).astype(np.int64)\n alpha = (x - orig - floor_x * dx) / dx\n return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))\n\n\ndef _generate_center_coordinates(l_x):\n X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)\n center = l_x / 2.\n X += 0.5 - center\n Y += 0.5 - center\n return X, Y\n\n\ndef build_projection_operator(l_x, n_dir):\n \"\"\" Compute the tomography design matrix.\n\n Parameters\n ----------\n\n l_x : int\n linear size of image array\n\n n_dir : int\n number of angles at which projections are acquired.\n\n Returns\n -------\n p : sparse matrix of shape (n_dir l_x, l_x**2)\n \"\"\"\n X, Y = _generate_center_coordinates(l_x)\n angles = np.linspace(0, np.pi, n_dir, endpoint=False)\n data_inds, weights, camera_inds = [], [], []\n data_unravel_indices = np.arange(l_x ** 2)\n data_unravel_indices = np.hstack((data_unravel_indices,\n data_unravel_indices))\n for i, angle in enumerate(angles):\n Xrot = np.cos(angle) * X - np.sin(angle) * Y\n inds, w = _weights(Xrot, dx=1, orig=X.min())\n mask = np.logical_and(inds >= 0, inds < l_x)\n weights += list(w[mask])\n camera_inds += list(inds[mask] + i * l_x)\n data_inds += list(data_unravel_indices[mask])\n proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))\n return proj_operator\n\n\ndef generate_synthetic_data():\n \"\"\" Synthetic binary data \"\"\"\n rs = np.random.RandomState(0)\n n_pts = 36\n x, y = np.ogrid[0:l, 0:l]\n mask_outer = (x - l / 2.) ** 2 + (y - l / 2.) ** 2 < (l / 2.) ** 2\n mask = np.zeros((l, l))\n points = l * rs.rand(2, n_pts)\n mask[(points[0]).astype(int), (points[1]).astype(int)] = 1\n mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)\n res = np.logical_and(mask > mask.mean(), mask_outer)\n return np.logical_xor(res, ndimage.binary_erosion(res))\n\n\n# Generate synthetic images, and projections\nl = 128\nproj_operator = build_projection_operator(l, l // 7)\ndata = generate_synthetic_data()\nproj = proj_operator * data.ravel()[:, np.newaxis]\nproj += 0.15 * np.random.randn(*proj.shape)\n\n# Reconstruction with L2 (Ridge) penalization\nrgr_ridge = Ridge(alpha=0.2)\nrgr_ridge.fit(proj_operator, proj.ravel())\nrec_l2 = rgr_ridge.coef_.reshape(l, l)\n\n# Reconstruction with L1 (Lasso) penalization\n# the best value of alpha was determined using cross validation\n# with LassoCV\nrgr_lasso = Lasso(alpha=0.001)\nrgr_lasso.fit(proj_operator, proj.ravel())\nrec_l1 = rgr_lasso.coef_.reshape(l, l)\n\nplt.figure(figsize=(8, 3.3))\nplt.subplot(131)\nplt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')\nplt.axis('off')\nplt.title('original image')\nplt.subplot(132)\nplt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')\nplt.title('L2 penalization')\nplt.axis('off')\nplt.subplot(133)\nplt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')\nplt.title('L1 penalization')\nplt.axis('off')\n\nplt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,\n right=1)\n\nplt.show()\n"
] | [
[
"sklearn.linear_model.Ridge",
"numpy.random.RandomState",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplots_adjust",
"sklearn.linear_model.Lasso",
"matplotlib.pyplot.figure",
"numpy.logical_and",
"numpy.cos",
"matplotlib.pyplot.title",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.arange",
"scipy.sparse.coo_matrix",
"numpy.hstack",
"scipy.ndimage.binary_erosion",
"numpy.random.randn",
"numpy.floor",
"numpy.ravel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"scipy.ndimage.gaussian_filter",
"numpy.sin"
]
] |
physycom/slides | [
"ff73de94997e39673d6d5c82b1bb4d9d0069fee6"
] | [
"tools/dubrovnik/router_map.py"
] | [
"#! /usr/bin/env python3\n\nimport os\nimport json\nimport folium\nimport argparse\nimport pandas as pd\nimport mysql.connector\nfrom matplotlib import cm\nfrom matplotlib.colors import to_hex\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--cfg', type=str, required=True)\n\n args = parser.parse_args()\n\n with open(args.cfg, encoding='utf-8') as f:\n config = json.load(f)\n\n conf = config['model_data']['params']['dubrovnik']['mysql']\n db = mysql.connector.connect(\n host = conf['host'],\n port = conf['port'],\n user = conf['user'],\n passwd = conf['pwd'],\n database = conf['db']\n )\n cursor = db.cursor()\n try:\n query = f\"\"\"\n SELECT\n ds.id AS id,\n ds.name AS name,\n ds.serial AS serial,\n ds.lat AS lat,\n ds.lng AS lon,\n ds.networkId,\n ds.status as status\n FROM\t\n Devices ds \n \"\"\" \n # print(query)\n cursor.execute(query)\n result = cursor.fetchall()\n print(f'Received {len(result)} mysql data in {query}')\n stationsmeta = pd.DataFrame(result)\n stationsmeta.columns = cursor.column_names\n except Exception as e:\n print('Connection error : {}'.format(e))\n\n if 0 == 1: # To be activate if and when the longitude will be fixed from Meraki\n data = list(stationsmeta.T.to_dict().values())\n with open('dubrovnik_router.json', 'w') as out:\n json.dump(data, out, indent=2, ensure_ascii=False)\n else:\n station_json = os.path.join(os.environ['WORKSPACE'], 'slides', 'vars', 'extra', 'dubrovnik_router.json')\n stationsmeta = pd.DataFrame.from_dict(json.load(open(station_json)))\n\n map_file = 'dubrovnik_router.html'\n cmap = cm.get_cmap('viridis', len(stationsmeta))\n stationsmeta['color'] = [ to_hex(c) for c in cmap.colors ]\n stationsmeta.index = stationsmeta.id\n stationsmeta = stationsmeta.drop(columns='id')\n stationsmeta = stationsmeta[stationsmeta.lon > 0]\n print(len(stationsmeta))\n map_center = stationsmeta[['lat', 'lon']].mean()\n\n m = folium.Map(location=map_center, control_scale=True, tiles = 'Stamen Terrain')\n layerlabel = '<span style=\"color: {col};\">{txt}</span>'\n for sid, data in stationsmeta.iterrows():\n layer_sel = folium.FeatureGroup(name=layerlabel.format(col=f'{data.color}', txt=f'Router {sid}'))\n pt = folium.CircleMarker(\n location=[data.lat, data.lon],\n radius=5,\n color=f'{data.color}',\n fill=True,\n fill_color=f'{data.color}',\n fill_opacity=1,\n popup=folium.Popup(f'<p>Router <b>{sid}</b></br> Name <b>{data[0]}</b></br> Serial <b>{data.serial}</b></br></p>', show=False, sticky=True, max_width=300),\n )\n layer_sel.add_child(pt)\n m.add_child(layer_sel)\n \n folium.map.LayerControl(collapsed=False).add_to(m)\n s, w = stationsmeta[['lat', 'lon']].min()\n n, e = stationsmeta[['lat', 'lon']].max()\n m.fit_bounds([ [s,w], [n,e] ])\n m.save(f'dubrovnik_router_map.html')"
] | [
[
"pandas.DataFrame",
"matplotlib.colors.to_hex"
]
] |
applejenny66/snoopy | [
"916700661976aef121c16c3cf1418f395eff54a6"
] | [
"test.py"
] | [
"import numpy\nimport argparse\nimport cv2\n\nimage = cv2.imread('pikachu.jpg')\ncv2.imshow(\"Original\", image)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"Gray\", gray)\n\neq = cv2.equalizeHist(gray)\n##cv2.imshow(\"Gray EQ\", eq)\n\n#display two images in a figure\ncv2.imshow(\"Histogram Equalization\", numpy.hstack([gray, eq]))\n\ncv2.imwrite(\"pikachu_eq.jpg\", numpy.hstack([gray, eq]))\n\n\nif(cv2.waitKey(0)==27):\n cv2.destroyAllWindows()\n"
] | [
[
"numpy.hstack"
]
] |
tehcoderer/GamestonkTerminal | [
"54a1b6f545a0016c576e9e00eef5c003d229dacf",
"54a1b6f545a0016c576e9e00eef5c003d229dacf"
] | [
"openbb_terminal/cryptocurrency/discovery/coinmarketcap_model.py",
"bots/stocks/government/lastcontracts.py"
] | [
"\"\"\"CoinMarketCap model\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\n\nimport pandas as pd\nfrom coinmarketcapapi import CoinMarketCapAPI, CoinMarketCapAPIError\n\nimport openbb_terminal.config_terminal as cfg\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\nFILTERS = [\"Symbol\", \"CMC_Rank\", \"LastPrice\", \"DayPctChange\", \"MarketCap\"]\n\n\n@log_start_end(log=logger)\ndef get_cmc_top_n() -> pd.DataFrame:\n \"\"\"Shows top n coins. [Source: CoinMarketCap]\n\n Returns\n -------\n pd.DataFrame\n Top coin on CoinMarketCap\n\n \"\"\"\n df = pd.DataFrame()\n\n try:\n cmc = CoinMarketCapAPI(cfg.API_CMC_KEY)\n ratings = cmc.cryptocurrency_listings_latest().data\n\n symbol, rank, price, pchange1d, mkt_cap = [], [], [], [], []\n\n for coin in ratings:\n symbol.append(coin[\"symbol\"])\n rank.append(coin[\"cmc_rank\"])\n price.append(coin[\"quote\"][\"USD\"][\"price\"])\n pchange1d.append(coin[\"quote\"][\"USD\"][\"percent_change_24h\"])\n mkt_cap.append(coin[\"quote\"][\"USD\"][\"market_cap\"] / (10**9))\n\n df = pd.DataFrame(data=[symbol, rank, price, pchange1d, mkt_cap]).transpose()\n df.columns = [\n \"Symbol\",\n \"CMC_Rank\",\n \"Last Price\",\n \"1 Day Pct Change\",\n \"Market Cap ($B)\",\n ]\n except CoinMarketCapAPIError as e:\n if \"API Key\" in str(e):\n console.print(\"[red]Invalid API Key[/red]\\n\")\n else:\n console.print(e)\n\n return df\n",
"import logging\n\nimport disnake\nimport pandas as pd\n\nfrom bots import imps\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.stocks.government import quiverquant_model\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef lastcontracts_command(past_transactions_days: int = 2, num: int = 20):\n \"\"\"Displays last government contracts [quiverquant.com]\"\"\"\n\n # Debug user input\n if imps.DEBUG:\n logger.debug(\"gov lastcontracts %s %s\", past_transactions_days, num)\n\n df_contracts = quiverquant_model.get_government_trading(\"contracts\")\n\n if df_contracts.empty:\n logger.debug(\"No government contracts found\")\n raise Exception(\"No government contracts found\")\n\n df_contracts.sort_values(\"Date\", ascending=False)\n\n df_contracts[\"Date\"] = pd.to_datetime(df_contracts[\"Date\"])\n df_contracts[\"Date\"] = df_contracts[\"Date\"].dt.date\n\n df_contracts.drop_duplicates(inplace=True)\n df_contracts = df_contracts[\n df_contracts[\"Date\"].isin(\n df_contracts[\"Date\"].unique()[:past_transactions_days]\n )\n ]\n\n df_contracts = df_contracts[[\"Date\", \"Ticker\", \"Amount\", \"Agency\"]][:num]\n choices = [\n disnake.SelectOption(label=\"Overview\", value=\"0\", emoji=\"🟢\"),\n ]\n title = \"Stocks: [quiverquant.com] Top buy government trading\"\n initial_str = \"Overview\"\n i = 1\n for col_name in df_contracts[\"Ticker\"].values:\n menu = f\"\\nPage {i}: {col_name}\"\n initial_str += f\"\\nPage {i}: {col_name}\"\n choices.append(\n disnake.SelectOption(label=menu, value=f\"{i}\", emoji=\"🟢\"),\n )\n i += 1\n\n embeds = []\n df_contracts = df_contracts.T\n reports = [f\"{initial_str}\"]\n embeds.append(\n disnake.Embed(\n title=title,\n description=initial_str,\n colour=imps.COLOR,\n ).set_author(\n name=imps.AUTHOR_NAME,\n icon_url=imps.AUTHOR_ICON_URL,\n )\n )\n for column in df_contracts.columns.values:\n description = \"```\" + df_contracts[column].fillna(\"\").to_string() + \"```\"\n embeds.append(\n disnake.Embed(description=description, colour=imps.COLOR,).set_author(\n name=imps.AUTHOR_NAME,\n icon_url=imps.AUTHOR_ICON_URL,\n )\n )\n reports.append(f\"{description}\")\n\n return {\n \"view\": imps.Menu,\n \"title\": title,\n \"description\": reports,\n \"embed\": embeds,\n \"choices\": choices,\n }\n"
] | [
[
"pandas.DataFrame"
],
[
"pandas.to_datetime"
]
] |
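A minimal pandas sketch (sample values invented, not taken from either file above) of the two patterns this record's listed APIs cover: building a frame from parallel lists with transpose() as get_cmc_top_n() does, and the to_datetime / .dt.date normalisation plus recent-date filter used in lastcontracts_command():

```python
# Illustrative only; column values and tickers are made up.
import pandas as pd

symbol = ["BTC", "ETH", "SOL"]
rank = [1, 2, 5]
price = [64000.0, 3100.0, 145.0]

df = pd.DataFrame(data=[symbol, rank, price]).transpose()
df.columns = ["Symbol", "CMC_Rank", "Last Price"]

contracts = pd.DataFrame(
    {"Date": ["2022-03-02", "2022-03-01", "2022-02-25"], "Ticker": ["LMT", "BA", "RTX"]}
)
contracts["Date"] = pd.to_datetime(contracts["Date"]).dt.date
# keep only the two most recent distinct dates, mirroring past_transactions_days
recent_dates = sorted(contracts["Date"].unique(), reverse=True)[:2]
recent = contracts[contracts["Date"].isin(recent_dates)]

print(df)
print(recent)  # the 2022-02-25 row is filtered out
```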
techkang/mmcv | [
"333eb6a8f964f005d4c0f34c3214ac2036bb228c"
] | [
"mmcv/parallel/collate.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections.abc import Mapping, Sequence\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data.dataloader import default_collate\n\nfrom .data_container import DataContainer\n\n\ndef collate(batch, samples_per_gpu=1):\n \"\"\"Puts each data field into a tensor/DataContainer with outer dimension\n batch size.\n\n Extend default_collate to add support for\n :type:`~mmcv.parallel.DataContainer`. There are 3 cases.\n\n 1. cpu_only = True, e.g., meta data\n 2. cpu_only = False, stack = True, e.g., images tensors\n 3. cpu_only = False, stack = False, e.g., gt bboxes\n \"\"\"\n\n if not isinstance(batch, Sequence):\n raise TypeError(f'{batch.dtype} is not supported.')\n\n if isinstance(batch[0], DataContainer):\n stacked = []\n if batch[0].cpu_only:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(\n stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)\n elif batch[0].stack:\n for i in range(0, len(batch), samples_per_gpu):\n assert isinstance(batch[i].data, torch.Tensor)\n\n if batch[i].pad_dims is not None:\n ndim = batch[i].dim()\n assert ndim > batch[i].pad_dims\n max_shape = [0 for _ in range(batch[i].pad_dims)]\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = batch[i].size(-dim)\n for sample in batch[i:i + samples_per_gpu]:\n for dim in range(0, ndim - batch[i].pad_dims):\n assert batch[i].size(dim) == sample.size(dim)\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = max(max_shape[dim - 1],\n sample.size(-dim))\n padded_samples = []\n for sample in batch[i:i + samples_per_gpu]:\n pad = [0 for _ in range(batch[i].pad_dims * 2)]\n for dim in range(1, batch[i].pad_dims + 1):\n pad[2 * dim -\n 1] = max_shape[dim - 1] - sample.size(-dim)\n padded_samples.append(\n F.pad(\n sample.data, pad, value=sample.padding_value))\n stacked.append(default_collate(padded_samples))\n elif batch[i].pad_dims is None:\n stacked.append(\n default_collate([\n sample.data\n for sample in batch[i:i + samples_per_gpu]\n ]))\n else:\n raise ValueError(\n 'pad_dims should be either None or integers (1-3)')\n\n else:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(stacked, batch[0].stack, batch[0].padding_value)\n elif isinstance(batch[0], Sequence) and not isinstance(batch[0], (str, bytes)):\n transposed = zip(*batch)\n return [collate(samples, samples_per_gpu) for samples in transposed]\n elif isinstance(batch[0], Mapping):\n return {\n key: collate([d[key] for d in batch], samples_per_gpu)\n for key in batch[0]\n }\n else:\n return default_collate(batch)\n"
] | [
[
"torch.utils.data.dataloader.default_collate",
"torch.nn.functional.pad"
]
] |
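The stack=True / pad_dims branch above is the heart of collate(); a simplified, DataContainer-free sketch of that padding-and-stacking step (tensor shapes chosen arbitrarily) might look like this:

```python
# Pads variable-sized 2-D tensors up to a shared shape and stacks them with
# default_collate, mirroring the core of mmcv's collate() without DataContainer.
import torch
import torch.nn.functional as F
from torch.utils.data.dataloader import default_collate

samples = [torch.ones(3, 5), torch.ones(4, 2), torch.ones(2, 7)]
pad_dims, padding_value = 2, 0

max_shape = [max(s.size(-d) for s in samples) for d in range(1, pad_dims + 1)]
padded = []
for sample in samples:
    pad = [0] * (pad_dims * 2)
    for dim in range(1, pad_dims + 1):
        # F.pad expects (last_dim_left, last_dim_right, ..., first_dim_left, first_dim_right)
        pad[2 * dim - 1] = max_shape[dim - 1] - sample.size(-dim)
    padded.append(F.pad(sample, pad, value=padding_value))

batch = default_collate(padded)
print(batch.shape)  # torch.Size([3, 4, 7])
```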
Tejas-Nanaware/Learning-OpenCV | [
"4956db7f7f90d9d4a44327aebc2f809e4d9b2ca3"
] | [
"corner detection.py"
] | [
"import cv2\nimport numpy as np\n\nimg = cv2.imread('corner detection.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ngray = np.float32(gray)\n\ncorners = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)\ncorners = np.int0(corners)\n\nfor corner in corners:\n\tx, y = corner.ravel()\n\tcv2.circle(img, (x,y), 3, 255, -1)\n\ncv2.imshow('Corner', img)\ncv2.waitKey(0)"
] | [
[
"numpy.int0",
"numpy.float32"
]
] |
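A headless sketch of the same Shi-Tomasi corner detection, run on a synthetic image so it needs neither 'corner detection.jpg' nor a display window (the drawn white square is our own stand-in input):

```python
import cv2
import numpy as np

# Synthetic test image: a white square on black has four strong corners.
img = np.zeros((200, 200, 3), dtype=np.uint8)
cv2.rectangle(img, (50, 50), (150, 150), (255, 255, 255), -1)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)

corners = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)
corners = corners.astype(int)  # equivalent to the np.int0 cast in the script above

for corner in corners:
    x, y = corner.ravel()
    cv2.circle(img, (int(x), int(y)), 3, (0, 0, 255), -1)

print(len(corners), "corners found")
```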
Irme/MONAI | [
"49e693c4e7df83dc1f8ab87349373de9263188a9"
] | [
"tests/test_mask_intensityd.py"
] | [
"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import MaskIntensityd\n\nTEST_CASE_1 = [\n {\"keys\": \"img\", \"mask_data\": np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]]])},\n {\"img\": np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, 6, 6]]])},\n np.array([[[0, 0, 0], [0, 2, 0], [0, 0, 0]], [[0, 0, 0], [0, 5, 0], [0, 0, 0]]]),\n]\n\nTEST_CASE_2 = [\n {\"keys\": \"img\", \"mask_data\": np.array([[[0, 0, 0], [0, 5, 0], [0, 0, 0]]])},\n {\"img\": np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, 6, 6]]])},\n np.array([[[0, 0, 0], [0, 2, 0], [0, 0, 0]], [[0, 0, 0], [0, 5, 0], [0, 0, 0]]]),\n]\n\nTEST_CASE_3 = [\n {\"keys\": \"img\", \"mask_data\": np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 0], [0, 1, 0], [0, 1, 0]]])},\n {\"img\": np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[4, 4, 4], [5, 5, 5], [6, 6, 6]]])},\n np.array([[[0, 0, 0], [0, 2, 0], [0, 0, 0]], [[0, 4, 0], [0, 5, 0], [0, 6, 0]]]),\n]\n\n\nclass TestMaskIntensityd(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])\n def test_value(self, argments, image, expected_data):\n result = MaskIntensityd(**argments)(image)\n np.testing.assert_allclose(result[\"img\"], expected_data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.testing.assert_allclose"
]
] |
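The three test cases encode the expected behaviour: image values survive only where the broadcast mask is non-zero. A plain-NumPy sketch of that rule, reusing the TEST_CASE_1 arrays (standing in for, not calling, MaskIntensityd):

```python
import numpy as np

mask = np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]]])
img = np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]],
                [[4, 4, 4], [5, 5, 5], [6, 6, 6]]])

masked = img * (mask > 0)  # the (1, 3, 3) mask broadcasts over both channels
expected = np.array([[[0, 0, 0], [0, 2, 0], [0, 0, 0]],
                     [[0, 0, 0], [0, 5, 0], [0, 0, 0]]])
np.testing.assert_allclose(masked, expected)
```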
Ditwoo/catalyst | [
"3126390f9f679ebcfedbe01707b416678a2732ac"
] | [
"catalyst/metrics/accuracy.py"
] | [
"\"\"\"\nVarious accuracy metrics:\n * :func:`accuracy`\n * :func:`multi_label_accuracy`\n\"\"\"\nfrom typing import Optional, Sequence, Union\n\nimport numpy as np\n\nimport torch\n\nfrom catalyst.metrics.functional import process_multilabel_components\nfrom catalyst.utils.torch import get_activation_fn\n\n\ndef accuracy(\n outputs: torch.Tensor,\n targets: torch.Tensor,\n topk: Sequence[int] = (1,),\n activation: Optional[str] = None,\n) -> Sequence[torch.Tensor]:\n \"\"\"\n Computes multi-class accuracy@topk for the specified values of `topk`.\n\n Args:\n outputs: model outputs, logits\n with shape [bs; num_classes]\n targets: ground truth, labels\n with shape [bs; 1]\n activation: activation to use for model output\n topk: `topk` for accuracy@topk computing\n\n Returns:\n list with computed accuracy@topk\n \"\"\"\n activation_fn = get_activation_fn(activation)\n outputs = activation_fn(outputs)\n\n max_k = max(topk)\n batch_size = targets.size(0)\n\n if len(outputs.shape) == 1 or outputs.shape[1] == 1:\n # binary accuracy\n pred = outputs.t()\n else:\n # multi-class accuracy\n _, pred = outputs.topk(max_k, 1, True, True) # noqa: WPS425\n pred = pred.t()\n correct = pred.eq(targets.long().view(1, -1).expand_as(pred))\n\n output = []\n for k in topk:\n correct_k = (\n correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)\n )\n output.append(correct_k.mul_(1.0 / batch_size))\n return output\n\n\ndef multi_label_accuracy(\n outputs: torch.Tensor,\n targets: torch.Tensor,\n threshold: Union[float, torch.Tensor],\n activation: Optional[str] = None,\n) -> torch.Tensor:\n \"\"\"\n Computes multi-label accuracy for the specified activation and threshold.\n\n Args:\n outputs: NxK tensor that for each of the N examples\n indicates the probability of the example belonging to each of\n the K classes, according to the model.\n targets: binary NxK tensort that encodes which of the K\n classes are associated with the N-th input\n (eg: a row [0, 1, 0, 1] indicates that the example is\n associated with classes 2 and 4)\n threshold: threshold for for model output\n activation: activation to use for model output\n\n Returns:\n computed multi-label accuracy\n \"\"\"\n outputs, targets, _ = process_multilabel_components(\n outputs=outputs, targets=targets\n )\n activation_fn = get_activation_fn(activation)\n outputs = activation_fn(outputs)\n\n outputs = (outputs > threshold).long()\n output = (targets.long() == outputs.long()).sum().float() / np.prod(\n targets.shape\n )\n return output\n\n\n__all__ = [\"accuracy\", \"multi_label_accuracy\"]\n"
] | [
[
"numpy.prod"
]
] |
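A small self-contained check of the top-k bookkeeping used in accuracy() above, with hand-verifiable logits (tensor values are invented): the top-1 prediction is right for two of the three samples, and the correct class is always within the top-2.

```python
import torch

outputs = torch.tensor([[0.1, 0.9, 0.0],
                        [0.8, 0.1, 0.1],
                        [0.2, 0.3, 0.5]])
targets = torch.tensor([1, 0, 1])

max_k, batch_size = 2, targets.size(0)
_, pred = outputs.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(targets.long().view(1, -1).expand_as(pred))

for k in (1, 2):
    acc_k = correct[:k].contiguous().view(-1).float().sum(0) / batch_size
    print(f"accuracy@{k}: {acc_k.item():.3f}")  # 0.667, then 1.000
```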
ackness/GazeFlow | [
"ca6b7d548571f85af84bdec77292758ab5d36449"
] | [
"layers/spadebn.py"
] | [
"#!/usr/bin/env python3\n\nimport tensorflow as tf\n\nfrom layers.spectral_normalization import SpectralNormalization\n\n\nclass SpadeBN(tf.keras.layers.Layer):\n \"\"\"SPADE BatchNormalization\n\n Sources:\n\n https://towardsdatascience.com/implementing-spade-using-fastai-6ad86b94030a\n \"\"\"\n\n def __init__(self, width: int = 128, kernel_size=3, **kwargs):\n self.bn = tf.keras.layers.experimental.SyncBatchNormalization()\n self.conv0 = SpectralNormalization(\n tf.keras.layers.Conv2D(width, kernel_size=kernel_size, activation=\"relu\")\n )\n self.conv1 = SpectralNormalization(\n tf.keras.layers.Conv2D(width, kernel_size=kernel_size, activation=\"relu\")\n )\n self.conv2 = SpectralNormalization(\n tf.keras.layers.Conv2D(width, kernel_size=kernel_size, activation=\"relu\")\n )\n\n def call(self, x: tf.Tensor, cond: tf.Tensor):\n interim_conv = self.conv0(cond)\n gamma = self.conv1(interim_conv)\n beta = self.conv2(interim_conv)\n outputs = self.bn(x) * gamma + beta\n return outputs\n\n def get_config(self):\n config = super().get_config()\n config_update = {\"width\": self.width, \"kernel_size\": 3}\n config.update(config_update)\n return config\n"
] | [
[
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.experimental.SyncBatchNormalization"
]
] |
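Numerically, SpadeBN.call() applies a spatially varying affine transform to a normalised feature map: out = BN(x) * gamma + beta. A NumPy sketch of just that arithmetic, with random arrays standing in for the convolution outputs and a plain per-channel normalisation standing in for SyncBatchNormalization:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(2, 8, 8, 4))        # NHWC feature map
gamma = rng.normal(size=(2, 8, 8, 4))    # stands in for conv1(conv0(cond))
beta = rng.normal(size=(2, 8, 8, 4))     # stands in for conv2(conv0(cond))

mean = x.mean(axis=(0, 1, 2), keepdims=True)
var = x.var(axis=(0, 1, 2), keepdims=True)
x_norm = (x - mean) / np.sqrt(var + 1e-5)

out = x_norm * gamma + beta
print(out.shape)  # (2, 8, 8, 4)
```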
woojinsong/PyTorch-tutorials-kr | [
"36fefd556f45c2b1f5db912793172c0369430fd4"
] | [
"docs/_downloads/295945daa9a2749eebb39cf0af107ee2/polynomial_custom_function.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nPyTorch: 새 autograd Function 정의하기\n----------------------------------------\n\n:math:`y=\\sin(x)` 을 예측할 수 있도록, :math:`-\\pi` 부터 :math:`pi` 까지\n유클리드 거리(Euclidean distance)를 최소화하도록 3차 다항식을 학습합니다.\n다항식을 :math:`y=a+bx+cx^2+dx^3` 라고 쓰는 대신 :math:`y=a+b P_3(c+dx)` 로 다항식을 적겠습니다.\n여기서 :math:`P_3(x)=\\frac{1}{2}\\left(5x^3-3x\\right)` 은 3차\n`르장드르 다항식(Legendre polynomial)`_ 입니다.\n\n.. _르장드르 다항식(Legendre polynomial):\n https://en.wikipedia.org/wiki/Legendre_polynomials\n\n이 구현은 PyTorch 텐서 연산을 사용하여 순전파 단계를 계산하고, PyTorch autograd를 사용하여\n변화도(gradient)를 계산합니다.\n\n아래 구현에서는 :math:`P_3'(x)` 을 수행하기 위해 사용자 정의 autograd Function를 구현합니다.\n수학적으로는 :math:`P_3'(x)=\\frac{3}{2}\\left(5x^2-1\\right)` 입니다.\n\"\"\"\nimport torch\nimport math\n\n\nclass LegendrePolynomial3(torch.autograd.Function):\n \"\"\"\n torch.autograd.Function을 상속받아 사용자 정의 autograd Function을 구현하고,\n 텐서 연산을 하는 순전파 단계와 역전파 단계를 구현해보겠습니다.\n \"\"\"\n\n @staticmethod\n def forward(ctx, input):\n \"\"\"\n 순전파 단계에서는 입력을 갖는 텐서를 받아 출력을 갖는 텐서를 반환합니다.\n ctx는 컨텍스트 객체(context object)로 역전파 연산을 위한 정보 저장에 사용합니다.\n ctx.save_for_backward 메소드를 사용하여 역전파 단계에서 사용할 어떤 객체도\n 저장(cache)해 둘 수 있습니다.\n \"\"\"\n ctx.save_for_backward(input)\n return 0.5 * (5 * input ** 3 - 3 * input)\n\n @staticmethod\n def backward(ctx, grad_output):\n \"\"\"\n 역전파 단계에서는 출력에 대한 손실(loss)의 변화도(gradient)를 갖는 텐서를 받고,\n 입력에 대한 손실의 변화도를 계산해야 합니다.\n \"\"\"\n input, = ctx.saved_tensors\n return grad_output * 1.5 * (5 * input ** 2 - 1)\n\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") # GPU에서 실행하려면 이 주석을 제거하세요\n\n# 입력값과 출력값을 갖는 텐서들을 생성합니다.\n# requires_grad=False가 기본값으로 설정되어 역전파 단계 중에 이 텐서들에 대한 변화도를 계산할\n# 필요가 없음을 나타냅니다.\nx = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)\ny = torch.sin(x)\n\n# 가중치를 갖는 임의의 텐서를 생성합니다. 3차 다항식이므로 4개의 가중치가 필요합니다:\n# y = a + b * P3(c + d * x) \n# 이 가중치들이 수렴(convergence)하기 위해서는 정답으로부터 너무 멀리 떨어지지 않은 값으로\n# 초기화가 되어야 합니다. \n# requires_grad=True로 설정하여 역전파 단계 중에 이 텐서들에 대한 변화도를 계산할 필요가\n# 있음을 나타냅니다. \na = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)\nb = torch.full((), -1.0, device=device, dtype=dtype, requires_grad=True)\nc = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)\nd = torch.full((), 0.3, device=device, dtype=dtype, requires_grad=True)\n\nlearning_rate = 5e-6\nfor t in range(2000):\n # 사용자 정의 Function을 적용하기 위해 Function.apply 메소드를 사용합니다.\n # 여기에 'P3'라고 이름을 붙였습니다.\n P3 = LegendrePolynomial3.apply\n\n # 순전파 단계: 연산을 하여 예측값 y를 계산합니다; \n # 사용자 정의 autograd 연산을 사용하여 P3를 계산합니다.\n y_pred = a + b * P3(c + d * x)\n\n # 손실을 계산하고 출력합니다.\n loss = (y_pred - y).pow(2).sum()\n if t % 100 == 99:\n print(t, loss.item())\n\n # autograd를 사용하여 역전파 단계를 계산합니다.\n loss.backward()\n\n # 경사하강법(gradient descent)을 사용하여 가중치를 갱신합니다.\n with torch.no_grad():\n a -= learning_rate * a.grad\n b -= learning_rate * b.grad\n c -= learning_rate * c.grad\n d -= learning_rate * d.grad\n\n # 가중치 갱신 후에는 변화도를 직접 0으로 만듭니다.\n a.grad = None\n b.grad = None\n c.grad = None\n d.grad = None\n\nprint(f'Result: y = {a.item()} + {b.item()} * P3({c.item()} + {d.item()} x)')\n"
] | [
[
"torch.linspace",
"torch.no_grad",
"torch.full",
"torch.sin",
"torch.device"
]
] |
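One way to sanity-check a hand-written backward such as the one above is torch.autograd.gradcheck, which compares it against numerical gradients in double precision; the Function is repeated here only so the snippet stays self-contained:

```python
import torch


class LegendrePolynomial3(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return 0.5 * (5 * input ** 3 - 3 * input)

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        return grad_output * 1.5 * (5 * input ** 2 - 1)


# gradcheck needs double precision inputs with requires_grad=True.
x = torch.randn(8, dtype=torch.double, requires_grad=True)
print(torch.autograd.gradcheck(LegendrePolynomial3.apply, (x,)))  # True if backward matches
```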
sarnthil/emotion-classification-roles | [
"1fdd3a8cbdac5ab2ad9598a101b763882df78280"
] | [
"scripts/indicator_experiment/calculate-fscores-from-aggregations.py"
] | [
"import json\nfrom pathlib import Path\nfrom collections import defaultdict, Counter\n\nfrom sklearn.metrics import precision_recall_fscore_support\n\nPRECISION_FALLBACK = RECALL_FALLBACK = 1\n\n# dataset -> setting -> emotion -> measure -> score\nresults = {}\n\nfor dataset_path in Path(\"workdata/indicator-experiment/predictions\").glob(\"*\"):\n dataset = dataset_path.name\n for file in dataset_path.glob(\"*.aggregated\"):\n part = file.stem\n results.setdefault(dataset, {}).setdefault(part, {})\n confusion_matrix = defaultdict(Counter)\n y_true, y_pred = [], []\n instances = 0\n with file.open() as f:\n for line in f:\n data = json.loads(line)\n confusion_matrix[data[\"gold\"]][data[\"prediction\"]] += 1\n y_true.append(data[\"gold\"])\n y_pred.append(data[\"prediction\"])\n instances += 1\n for emotion in confusion_matrix:\n tp = confusion_matrix[emotion][emotion]\n fn = sum(\n confusion_matrix[emotion][other]\n for other in confusion_matrix[emotion]\n if other != emotion\n )\n fp = sum(\n confusion_matrix[other][emotion]\n for other in confusion_matrix\n if other != emotion\n )\n tn = instances - tp - fn - fp\n precision = tp / (tp + fp) if tp + fp else PRECISION_FALLBACK\n recall = tp / (tp + fn) if tp + fn else RECALL_FALLBACK\n f1 = (\n 2 * ((precision * recall) / (precision + recall))\n if precision and recall\n else 0\n )\n results[dataset][part][emotion] = {\n \"precision\": precision,\n \"recall\": recall,\n \"f1\": f1,\n }\n emos = list(results[dataset][part].keys())\n\n for average in [\"macro\", \"micro\"]:\n p, r, f, s = precision_recall_fscore_support(\n y_true, y_pred, zero_division=1, average=average\n )\n results[dataset][part][f\"all_{average}\"] = {\n \"precision\": p,\n \"recall\": r,\n \"f1\": f,\n }\n results[dataset][part][\"all_unweighted_mean\"] = {\n \"precision\": sum(\n results[dataset][part][emo][\"precision\"] for emo in emos\n )\n / len(emos),\n \"recall\": sum(results[dataset][part][emo][\"recall\"] for emo in emos)\n / len(emos),\n \"f1\": sum(results[dataset][part][emo][\"f1\"] for emo in emos)\n / len(emos),\n }\n results[dataset][part][\"all_weighted_mean\"] = {\n \"precision\": sum(\n results[dataset][part][emo][\"precision\"]\n * sum(\n confusion_matrix[emo][other]\n for other in confusion_matrix[emo]\n )\n for emo in emos\n )\n / instances,\n \"recall\": sum(\n results[dataset][part][emo][\"recall\"]\n * sum(\n confusion_matrix[emo][other]\n for other in confusion_matrix[emo]\n )\n for emo in emos\n )\n / instances,\n \"f1\": sum(\n results[dataset][part][emo][\"f1\"]\n * sum(\n confusion_matrix[emo][other]\n for other in confusion_matrix[emo]\n )\n for emo in emos\n )\n / instances,\n }\n\n with Path(\"workdata/indicator-experiment/results.json\").open(\"w\") as f:\n json.dump(results, f)\n"
] | [
[
"sklearn.metrics.precision_recall_fscore_support"
]
] |
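A toy illustration of the sklearn call used above (labels invented): macro averaging weights every emotion class equally, while micro averaging pools all individual decisions, so the two diverge when the classes are imbalanced.

```python
from sklearn.metrics import precision_recall_fscore_support

y_true = ["joy", "joy", "joy", "anger", "fear"]
y_pred = ["joy", "joy", "anger", "anger", "joy"]

for average in ["macro", "micro"]:
    p, r, f, _ = precision_recall_fscore_support(
        y_true, y_pred, zero_division=1, average=average
    )
    print(f"{average}: precision={p:.3f} recall={r:.3f} f1={f:.3f}")
```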
RikPi/bluerov2 | [
"811dcca43241221b918425cf8351219d183f4c03"
] | [
"bluerov2_hmi/src/bluerov2_hmi/__init__.py"
] | [
"import rospy\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import Range, BatteryState, CameraInfo, Temperature, NavSatFix, Image\nfrom geometry_msgs.msg import PoseWithCovarianceStamped, PointStamped\nfrom std_msgs.msg import Float64, Header\nfrom mavros_msgs.msg import State\nimport numpy as np\nfrom image_geometry import PinholeCameraModel\nfrom tf2_ros import TransformListener, Buffer\nimport tf2_geometry_msgs\nfrom message_filters import TimeSynchronizer, Subscriber\nfrom bluerov2_navigation.helpers import math\nfrom pathlib import Path\nfrom PyQt5.QtWidgets import QWidget, QMessageBox\nfrom qt_gui.plugin import Plugin\nfrom python_qt_binding import loadUi\nimport rospkg\nfrom bluerov2_msgs.srv import SetAutopilot, SetAutopilotResponse, SetAutopilotRequest\nfrom bluerov2_msgs.msg import Autopilot\n\n\nclass MsgMonitor:\n \"\"\" Checks if a message has been received within a timeout. \"\"\"\n def __init__(self, timeout: float = 5.0):\n self._last_time = rospy.Time.now()\n self._timeout = timeout\n\n def is_valid(self):\n return (rospy.Time.now() - self._last_time).to_sec() < self._timeout\n\n def __call__(self, msg: rospy.AnyMsg):\n self._last_time = rospy.Time.now()\n\n\nclass AnnotationFormat:\n def __init__(self, color: tuple = (255, 255, 255), thickness: int = 2, font: int = cv2.FONT_HERSHEY_SIMPLEX):\n self.color = color\n self.thickness = thickness\n self.font = font\n\n\nclass HudOverlay:\n def __init__(self):\n self._c = 0\n self._cvbridge = CvBridge()\n # --------- Params ---------\n self._fontsize = rospy.get_param(\"fontsize\", 1.5)\n self._skip_frames = int(rospy.get_param(\"skip_frames\", 5))\n # --------- Topic Defs -------\n self._info_in_topic = \"image_in/camera_info\"\n self._image_in_topic = \"image_in/image_raw\"\n self._image_out_topic = \"image_out/image_raw\"\n self._heading_topic = \"mavros/global_position/compass_hdg\"\n self._alt_topic = \"mavros/distance_sensor/rangefinder_pub\"\n self._state_topic = \"mavros/state\"\n self._batt_topic = \"mavros/battery\"\n self._temp_topic = \"mavros/imu/temperature_baro\"\n self._pose_topic = \"waterlinked/pose_with_cov_stamped\"\n self._latlon_topic = \"mavros/global_position/global\"\n self._sog_topic = \"guidance/sog\"\n self._cog_topic = \"guidance/cog\"\n self._depth_topic = \"mavros/global_position/rel_alt\"\n # ------- MSG Defs ---------\n self._heading = Float64()\n self._sog = Float64()\n self._cog = Float64\n self._alt = Range()\n self._depth = Float64()\n self._battery = BatteryState()\n self._state = State()\n self._temperature = Temperature()\n self._pose = PoseWithCovarianceStamped()\n self._latlon = NavSatFix()\n # ------- MSG Monitors --------\n self._heading_mon = MsgMonitor()\n self._sog_mon = MsgMonitor()\n self._cog_mon = MsgMonitor()\n self._alt_mon = MsgMonitor()\n self._depth_mon = MsgMonitor()\n self._battery_mon = MsgMonitor()\n self._state_mon = MsgMonitor()\n self._temperature_mon = MsgMonitor()\n self._pose_mon = MsgMonitor()\n self._latlon_mon = MsgMonitor()\n # ------- Image Geometry ----------\n self._cam_model = PinholeCameraModel()\n self._home = None # Frame projected onto camera\n # ------- TF\n self._tf_buffer = Buffer()\n TransformListener(self._tf_buffer)\n # ---------- Annotation -------\n self._valid_annotation = AnnotationFormat()\n self._invalid_annotation = AnnotationFormat(color=(0, 0, 255))\n # ------- Subscribers -------\n self._image_in_sub = Subscriber(self._image_in_topic, Image)\n self._info_in_sub = Subscriber(self._info_in_topic, 
CameraInfo)\n ts = TimeSynchronizer([self._image_in_sub, self._info_in_sub], 10)\n ts.registerCallback(self._annotate_img)\n rospy.Subscriber(self._heading_topic, Float64, self._update_heading)\n rospy.Subscriber(self._depth_topic, Float64, self._update_depth)\n rospy.Subscriber(self._alt_topic, Range, self._update_alt)\n rospy.Subscriber(self._state_topic, State, self._update_state)\n rospy.Subscriber(self._batt_topic, BatteryState, self._update_bat)\n rospy.Subscriber(self._temp_topic, Temperature, self._update_temperature)\n rospy.Subscriber(self._pose_topic, PoseWithCovarianceStamped, self._update_pose)\n rospy.Subscriber(self._latlon_topic, NavSatFix, self._update_latlon)\n rospy.Subscriber(self._cog_topic, Float64, self._update_cog)\n rospy.Subscriber(self._sog_topic, Float64, self._update_sog)\n # ---------- Publisher --------\n self._pub = rospy.Publisher(self._image_out_topic, Image, queue_size=10)\n\n\n def _update_cog(self, msg: Float64):\n self._cog_mon(msg)\n self._cog = msg\n\n def _update_sog(self, msg: Float64):\n self._sog_mon(msg)\n self._sog = msg\n\n def _update_heading(self, msg: Float64):\n self._heading_mon(msg)\n self._heading = msg\n\n def _update_state(self, msg: State):\n self._state_mon(msg)\n self._state = msg\n\n def _update_depth(self, msg: Float64):\n self._depth_mon(msg)\n self._depth = msg\n\n def _update_alt(self, msg: Range):\n self._alt_mon(msg)\n self._alt = msg\n\n def _update_bat(self, msg: BatteryState):\n self._battery_mon(msg)\n self._battery = msg\n\n def _update_temperature(self, msg: Temperature):\n self._temperature_mon(msg)\n self._temperature = msg\n\n def _update_pose(self, msg: PoseWithCovarianceStamped):\n self._pose_mon(msg)\n self._pose = msg\n\n def _update_latlon(self, msg: NavSatFix):\n self._latlon_mon(msg)\n self._latlon = msg\n\n def _degToCompass(self, num):\n val = int((num / 22.5) + .5)\n arr = [\"N\", \"NNE\", \"NE\", \"ENE\", \"E\", \"ESE\", \"SE\", \"SSE\", \"S\", \"SSW\", \"SW\", \"WSW\", \"W\", \"WNW\", \"NW\", \"NNW\"]\n return arr[(val % 16)]\n\n def _gen_annotation(self, fmt: str=\"{}\", value: tuple = (np.inf,), anno: AnnotationFormat = AnnotationFormat()):\n st = fmt.format(*value) if value is not None else fmt.format(None)\n box, _ = cv2.getTextSize(st, anno.font, self._fontsize, anno.thickness)\n w, h = box\n b, g, r = anno.color\n return st, w, h, b, g, r\n\n def _annotate_img(self, image_msg: Image, info_msg: CameraInfo):\n if self._c % self._skip_frames == 0:\n # Convert image to opencv format\n img = self._cvbridge.imgmsg_to_cv2(image_msg)\n # Construct pinhole camera model\n self._cam_model.fromCameraInfo(info_msg)\n # Get the frame origin projected into image coordiantes\n self._home = None\n try:\n if self._tf_buffer.can_transform(self._cam_model.tf_frame, \"waterlinked\", rospy.Time.from_sec(0)):\n point = self._tf_buffer.transform(PointStamped(Header(0, \"waterlinked\", rospy.Time.now()), None),\n self._cam_model.tf_frame)\n self._home = self._cam_model.project3dToPixel(point.point.x, point.point.y, point.point.z)\n except Exception as e:\n rospy.logerr_throttle(10.0, f\"{rospy.get_name()} | {e}\")\n\n # TOP CENTRAL BOX: IMPORTANT DATA (Altitude, Heading, Depth)\n st, w, h, b, g, r = self._gen_annotation(\"Alt: {:02.1f} m\", (self._alt.range,), self._valid_annotation) if self._alt_mon.is_valid() else self._gen_annotation(\"Alt: {:02.1f} m\", (self._alt.range,), self._invalid_annotation)\n pos = (int(img.shape[1] - w/2), 10)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, 
(b,g,r), self._valid_annotation.thickness, cv2.LINE_AA)\n\n st, w1, h1, b, g, r = self._gen_annotation(\"Hdg: {:03d} {}\", (int(self._heading.data), self._degToCompass(self._heading.data)),\n self._valid_annotation) if self._heading_mon.is_valid() else self._gen_annotation(\n \"Hdg: {:03d} {}\", (self._heading.data, self._degToCompass(self._heading.data)), self._invalid_annotation)\n pos = (int(img.shape[1] - w1/2) + w + 5, 10)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b,g,r), self._valid_annotation.thickness, cv2.LINE_AA)\n\n st, w2, h2, b, g, r = self._gen_annotation(\"Dpt: {:02.1f} m\",\n (self._depth.data,),\n self._valid_annotation) if self._depth_mon.is_valid() else self._gen_annotation(\n \"Dpt: {:02.1f} m\", (self._depth.data), self._invalid_annotation)\n pos = (int(img.shape[1] - w2 / 2) + w1 + w2 + 10, 10)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n\n #BOTTOM LEFT BOX: SYSTEM HEALTH (State, Battery voltage, Temperature)\n # State ROV\n st, w, h, b, g, r = self._gen_annotation(\"Bat: {:02.1f} V\", (self._battery.voltage,),\n self._valid_annotation) if self._alt_mon.is_valid() else self._gen_annotation(\n \"Bat: {:02.1f} V\", (self._battery.voltage,), self._invalid_annotation)\n pos = (0, img.shape[0] - 3*h)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n fmt = \"St: Armed\" if self._state.armed else \"St: Disarmed\"\n st, w1, h1, b, g, r = self._gen_annotation(fmt, None,\n self._valid_annotation) if self._alt_mon.is_valid() else self._gen_annotation(\n fmt, None, self._invalid_annotation)\n pos = (0, img.shape[0] - 2*h + 5 )\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n # Temperature\n st, w2, h2, b, g, r = self._gen_annotation(\"Temp: {:02.1f} degC\", (self._temperature.temperature,),\n self._valid_annotation) if self._alt_mon.is_valid() else self._gen_annotation(\n \"Temp: {:02.1f} degC\", (self._temperature.temperature,), self._invalid_annotation)\n pos = (0, img.shape[0] - 2*h + h1 + 10 )\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n\n # TOP LEFT BOX: POSITION RELATIVE TO BOAT (LOS distance, LOS angle)\n distance = np.sqrt(self._pose.pose.pose.position.x ** 2 + self._pose.pose.pose.position.y ** 2)\n\n\n st, w, h, b, g, r = self._gen_annotation(\"Ship Distance: {:02.1f} m\", (distance,),\n self._valid_annotation) if self._pose_mon.is_valid() else self._gen_annotation(\n \"Ship distance: {:02.1f} m\", (distance,), self._invalid_annotation)\n pos = (0, 150)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n\n bearing = math.rad2deg(math.atan2(self._pose.pose.pose.position.y,\n self._pose.pose.pose.position.x)) # This is ENU, convert to NED\n bearing = 90 - bearing\n bearing = 360 + bearing if bearing < 0 else bearing\n relative_bearing = bearing - self._heading.data\n relative_bearing = relative_bearing + 360 if abs(relative_bearing) > 180 and relative_bearing < 0 else relative_bearing\n relative_bearing = relative_bearing - 360 if abs(relative_bearing) > 180 and relative_bearing > 0 else relative_bearing\n st, w1, h1, b, g, r = self._gen_annotation(\"Ship Bearing: {:02.1f} deg\", 
(relative_bearing,),\n self._valid_annotation) if self._pose_mon.is_valid() else self._gen_annotation(\n \"Ship Bearing: {:02.1f} deg\", (relative_bearing,), self._invalid_annotation)\n pos = (0, 150 + h + 20)\n img = cv2.putText(img, st, pos, self._valid_annotation.font, self._fontsize, (b, g, r),\n self._valid_annotation.thickness, cv2.LINE_AA)\n\n # TOP RIGHT BOX: POSITION GLOBAL (Latitude, Longitude, SoG, CoG)\n # # Latitude\n # pos = (img.shape[1] - w/3 - 100, 150)\n # st = \"Lat: {:02.2f}deg\".format(self._latlon.latitude)\n # img = cv2.putText(img, st, (pos[0] + 2, pos[1]), font, self._fontsize, color, thickness, cv2.LINE_AA)\n # # Longitude\n # pos = (img.shape[1] - w/3 - 100, 150 + h + 20)\n # st = \"Lon: {:02.2f}deg\".format(self._latlon.longitude)\n # img = cv2.putText(img, st, (pos[0] + 2, pos[1] + 2), font, self._fontsize, color, thickness, cv2.LINE_AA)\n # # SoG\n # pos = (img.shape[1] - w/3 - 100, 150 + 2*(h + 20))\n # st = \"SoG: {:02.2f}km/h\".format(self._sogcog.sog)\n # img = cv2.putText(img, st, (pos[0] + 2, pos[1] + 2), font, self._fontsize, color, thickness, cv2.LINE_AA)\n # # CoG\n # pos = (img.shape[1] - w/3 - 100, 150 + 3*(h + 20))\n # st = \"CoG: {:02.2f}deg\".format(self._sogcog.cog)\n # img = cv2.putText(img, st, (pos[0] + 2, pos[1] + 2), font, self._fontsize, color, thickness, cv2.LINE_AA)\n\n if self._home is not None:\n self._home = [int(i) for i in self._home]\n img = cv2.circle(img, self._home, 5, (0, 255, 0), -1)\n box, baseline = cv2.getTextSize(\"SHIP\", cv2.FONT_HERSHEY_PLAIN, self._fontsize, 2)\n pos = (int(self._home[0] - box[0] / 2), self._home[1] - 5 - baseline - 3)\n img = cv2.putText(img, \"SHIP\", pos, cv2.FONT_HERSHEY_PLAIN, self._fontsize, (0, 255, 0), 2)\n\n out = self._cvbridge.cv2_to_imgmsg(img, encoding=\"bgr8\")\n self._pub.publish(out)\n self._c = 0\n else :\n self._c = self._c + 1\n\n\nclass AutopilotInterface(Plugin):\n def __init__(self, context):\n super().__init__(context)\n # Give QObjects reasonable names\n self.setObjectName('Autopilot Interface')\n # Create QWidget\n self._widget = QWidget()\n # Get path to UI file which should be in the \"resource\" folder of this package\n ui_file = str(Path(rospkg.RosPack().get_path('bluerov2_hmi') + '/resource/AutopilotInterface.ui'))\n # Extend the widget with all attributes and children from UI file\n loadUi(ui_file, self._widget)\n # Give QObjects reasonable names\n self._widget.setObjectName('AutopilotInterface')\n\n self._autopilot_msg = Autopilot()\n\n self._widget.SetAutopilotButton.clicked.connect(self.on_button_clicked)\n self._widget.AMSLRadio.clicked.connect(self.amsl_radio_clicked)\n self._widget.BottomRadio.clicked.connect(self.bottom_radio_clicked)\n self._widget.DepthRadio.clicked.connect(self.depth_radio_clicked)\n self._widget.HeightSetText.editingFinished.connect(self.height_text_finished)\n self._widget.SpeedSetText.editingFinished.connect(self.speed_text_finished)\n self._widget.HeadingDial.valueChanged.connect(self.height_dial_changed)\n\n # Show _widget.windowTitle on left-top of each plugin (when\n # it's set in _widget). This is useful when you open multiple\n # plugins at once. 
Also if you open multiple instances of your\n # plugin at once, these lines add number to make it easy to\n # tell from pane to pane.\n self._autopilot_service = rospy.ServiceProxy(\"autopilot/set\", SetAutopilot)\n if context.serial_number() > 1:\n self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))\n # Add widget to the user interface\n context.add_widget(self._widget)\n\n def amsl_radio_clicked(self):\n self._autopilot_msg.height_reference = self._autopilot_msg.MSL\n\n def bottom_radio_clicked(self):\n self._autopilot_msg.height_reference = self._autopilot_msg.BTM\n\n def depth_radio_clicked(self):\n self._autopilot_msg.height_reference = self._autopilot_msg.DPT\n\n def height_text_finished(self):\n try:\n self._autopilot_msg.Z = float(self._widget.HeightSetText.text())\n except ValueError as e:\n alert = QMessageBox()\n alert.setText(str(e))\n alert.exec()\n\n def speed_text_finished(self):\n try:\n self._autopilot_msg.U = float(self._widget.SpeedSetText.text())\n except ValueError as e:\n alert = QMessageBox()\n alert.setText(str(e))\n alert.exec()\n\n def height_dial_changed(self):\n value = self._widget.HeadingDial.value()\n if value > 180:\n value = value - 180\n else:\n value = value + 180\n self._autopilot_msg.heading = value\n self._widget.HeadingLabel.setText(\"{:03d}N deg\".format(int(self._autopilot_msg.heading)))\n\n def on_button_clicked(self):\n req = SetAutopilotRequest()\n req.settings = self._autopilot_msg\n try:\n self._autopilot_service.wait_for_service(1.0)\n res = self._autopilot_service.call(req)\n except Exception as e:\n alert = QMessageBox()\n alert.setText(str(e))\n alert.exec()\n if res.success:\n alert = QMessageBox()\n alert.setText(\"Autopilot Active\\nSpeed: {:2f} m/s\\nHeading: {:03d} deg\\nHeight: {:.2f} m {}\".format(self._autopilot_msg.U,\n int(self._autopilot_msg.heading),\n self._autopilot_msg.Z,\n self._autopilot_msg.height_reference))\n alert.exec()\n else:\n alert = QMessageBox()\n alert.setText(\"Autopilot Not Set.\")\n alert.exec()\n\n\n def shutdown_plugin(self):\n # TODO unregister all publishers here\n pass\n\n def save_settings(self, plugin_settings, instance_settings):\n # TODO save intrinsic configuration, usually using:\n # instance_settings.set_value(k, v)\n pass\n\n def restore_settings(self, plugin_settings, instance_settings):\n # TODO restore intrinsic configuration, usually using:\n # v = instance_settings.value(k)\n pass\n\n # def trigger_configuration(self):\n # Comment in to signal that the plugin has a way to configure\n # This will enable a setting button (gear icon) in each dock widget title bar\n # Usually used to open a modal configuration dialog\n"
] | [
[
"numpy.sqrt"
]
] |
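The overlay's "Ship Bearing" converts an ENU position into a NED compass bearing and wraps the difference with the vehicle heading into [-180, 180]; the same arithmetic extracted into a standalone helper (the function name and example values are ours, not from the package):

```python
import math

import numpy as np


def relative_bearing_deg(x_east: float, y_north: float, heading_deg: float) -> float:
    """ENU position of the ship -> bearing relative to the vehicle heading, in [-180, 180]."""
    bearing = 90.0 - math.degrees(math.atan2(y_north, x_east))  # ENU angle -> NED compass bearing
    if bearing < 0:
        bearing += 360.0
    rel = bearing - heading_deg
    if rel < -180.0:
        rel += 360.0
    elif rel > 180.0:
        rel -= 360.0
    return rel


distance = np.sqrt(10.0 ** 2 + 10.0 ** 2)  # line-of-sight distance, the numpy.sqrt call listed above
print(f"{distance:.1f} m at {relative_bearing_deg(10.0, 10.0, 90.0):.1f} deg relative")  # 14.1 m at -45.0 deg
```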
neptune-ml/pytorch-lightning | [
"3bcaed52454f3e6c3bce5513032e34302e5b1bb6"
] | [
"tests/strategies/test_deepspeed_strategy.py"
] | [
"import contextlib\nimport json\nimport logging\nimport os\nfrom typing import Any, Dict, Optional\nfrom unittest import mock\n\nimport pytest\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\nfrom torchmetrics import Accuracy\n\nfrom pytorch_lightning import LightningDataModule, LightningModule, seed_everything, Trainer\nfrom pytorch_lightning.callbacks import Callback, LearningRateMonitor, ModelCheckpoint\nfrom pytorch_lightning.plugins import DeepSpeedPrecisionPlugin\nfrom pytorch_lightning.strategies import DeepSpeedStrategy\nfrom pytorch_lightning.strategies.deepspeed import LightningDeepSpeedModule\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _DEEPSPEED_AVAILABLE\nfrom pytorch_lightning.utilities.meta import init_meta_context\nfrom tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset\nfrom tests.helpers.datamodules import ClassifDataModule\nfrom tests.helpers.runif import RunIf\n\nif _DEEPSPEED_AVAILABLE:\n import deepspeed\n from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict\n\n\nclass ModelParallelBoringModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.layer = None\n\n def configure_sharded_model(self) -> None:\n self.layer = torch.nn.Linear(32, 2)\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n self.configure_sharded_model()\n\n\nclass ModelParallelBoringModelNoSchedulers(ModelParallelBoringModel):\n def configure_optimizers(self):\n return torch.optim.SGD(self.layer.parameters(), lr=0.1)\n\n\nclass ModelParallelBoringModelManualOptim(BoringModel):\n def __init__(self):\n super().__init__()\n self.layer = None\n\n def training_step(self, batch, batch_idx):\n opt = self.optimizers()\n output = self(batch)\n loss = self.loss(batch, output)\n opt.zero_grad()\n self.manual_backward(loss)\n opt.step()\n\n def configure_sharded_model(self) -> None:\n self.layer = torch.nn.Linear(32, 2)\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n self.configure_sharded_model()\n\n @property\n def automatic_optimization(self) -> bool:\n return False\n\n\ndef test_deepspeed_lightning_module(tmpdir):\n \"\"\"Test to ensure that a model wrapped in `LightningDeepSpeedModule` moves types and device correctly.\"\"\"\n\n model = BoringModel()\n module = LightningDeepSpeedModule(model, precision=16)\n\n module.half()\n assert module.dtype == torch.half\n assert model.dtype == torch.half\n\n module.to(torch.double)\n assert module.dtype == torch.double\n assert model.dtype == torch.double\n\n\n@RunIf(min_gpus=1)\ndef test_deepspeed_lightning_module_precision(tmpdir):\n \"\"\"Test to ensure that a model wrapped in `LightningDeepSpeedModule` moves tensors to half when precision\n 16.\"\"\"\n\n model = BoringModel()\n module = LightningDeepSpeedModule(model, precision=16)\n\n module.cuda().half()\n assert module.dtype == torch.half\n assert model.dtype == torch.half\n\n x = torch.randn((1, 32), dtype=torch.float).cuda()\n out = module(x)\n\n assert out.dtype == torch.half\n\n module.to(torch.double)\n assert module.dtype == torch.double\n assert model.dtype == torch.double\n\n\[email protected]\ndef deepspeed_config():\n return {\n \"optimizer\": {\"type\": \"SGD\", \"params\": {\"lr\": 3e-5}},\n \"scheduler\": {\n \"type\": \"WarmupLR\",\n \"params\": {\"last_batch_iteration\": -1, \"warmup_min_lr\": 
0, \"warmup_max_lr\": 3e-5, \"warmup_num_steps\": 100},\n },\n }\n\n\[email protected]\ndef deepspeed_zero_config(deepspeed_config):\n return {**deepspeed_config, \"zero_allow_untested_optimizer\": True, \"zero_optimization\": {\"stage\": 2}}\n\n\n@RunIf(deepspeed=True)\[email protected](\"strategy\", (\"deepspeed\", DeepSpeedStrategy))\ndef test_deepspeed_strategy_string(tmpdir, strategy):\n \"\"\"Test to ensure that the strategy can be passed via string or instance, and parallel devices is correctly\n set.\"\"\"\n\n trainer = Trainer(\n fast_dev_run=True, default_root_dir=tmpdir, strategy=strategy if isinstance(strategy, str) else strategy()\n )\n\n assert isinstance(trainer.strategy, DeepSpeedStrategy)\n assert trainer.strategy.parallel_devices == [torch.device(\"cpu\")]\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_strategy_env(tmpdir, monkeypatch, deepspeed_config):\n \"\"\"Test to ensure that the strategy can be passed via a string with an environment variable.\"\"\"\n config_path = os.path.join(tmpdir, \"temp.json\")\n with open(config_path, \"w\") as f:\n f.write(json.dumps(deepspeed_config))\n monkeypatch.setenv(\"PL_DEEPSPEED_CONFIG_PATH\", config_path)\n\n trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, strategy=\"deepspeed\")\n\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n assert strategy.parallel_devices == [torch.device(\"cpu\")]\n assert strategy.config == deepspeed_config\n\n\n@RunIf(deepspeed=True)\[email protected](\"precision\", [16, \"mixed\"])\[email protected](\n \"amp_backend\",\n [\"native\", pytest.param(\"apex\", marks=RunIf(amp_apex=True))],\n)\ndef test_deepspeed_precision_choice(amp_backend, precision, tmpdir):\n \"\"\"Test to ensure precision plugin is also correctly chosen.\n\n DeepSpeed handles precision via Custom DeepSpeedPrecisionPlugin\n \"\"\"\n\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n strategy=\"deepspeed\",\n amp_backend=amp_backend,\n precision=precision,\n )\n\n assert isinstance(trainer.strategy, DeepSpeedStrategy)\n assert isinstance(trainer.strategy.precision_plugin, DeepSpeedPrecisionPlugin)\n assert trainer.strategy.precision_plugin.precision == precision\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_with_invalid_config_path(tmpdir):\n \"\"\"Test to ensure if we pass an invalid config path we throw an exception.\"\"\"\n\n with pytest.raises(\n MisconfigurationException, match=\"You passed in a path to a DeepSpeed config but the path does not exist\"\n ):\n DeepSpeedStrategy(config=\"invalid_path.json\")\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_with_env_path(tmpdir, monkeypatch, deepspeed_config):\n \"\"\"Test to ensure if we pass an env variable, we load the config from the path.\"\"\"\n config_path = os.path.join(tmpdir, \"temp.json\")\n with open(config_path, \"w\") as f:\n f.write(json.dumps(deepspeed_config))\n monkeypatch.setenv(\"PL_DEEPSPEED_CONFIG_PATH\", config_path)\n strategy = DeepSpeedStrategy()\n assert strategy.config == deepspeed_config\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_defaults(tmpdir):\n \"\"\"Ensure that defaults are correctly set as a config for DeepSpeed if no arguments are passed.\"\"\"\n strategy = DeepSpeedStrategy()\n assert strategy.config is not None\n assert isinstance(strategy.config[\"zero_optimization\"], dict)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_warn_deepspeed_ignored(tmpdir):\n class TestModel(BoringModel):\n def backward(self, loss: Tensor, optimizer: 
Optimizer, optimizer_idx: int, *args, **kwargs) -> None:\n return loss.backward()\n\n model = TestModel()\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(),\n accelerator=\"gpu\",\n devices=1,\n precision=16,\n track_grad_norm=2,\n )\n from pytorch_lightning.plugins.precision.deepspeed import warning_cache\n\n with pytest.warns(UserWarning, match=\"will be ignored since DeepSpeed handles the backward\"):\n trainer.fit(model)\n assert any(\"track_grad_norm=2.0)' but this is not supported\" in w for w in warning_cache)\n\n\n@RunIf(min_gpus=1, deepspeed=True)\[email protected](\n [\"dataset_cls\", \"value\"],\n [(RandomDataset, \"auto\"), (RandomDataset, 10), (RandomIterableDataset, \"auto\"), (RandomIterableDataset, 10)],\n)\[email protected](\"deepspeed.init_distributed\", autospec=True)\[email protected](\"pytorch_lightning.Trainer.log_dir\", new_callable=mock.PropertyMock, return_value=\"abc\")\ndef test_deepspeed_auto_batch_size_config_select(mock_deepspeed_distributed, mock_log_dir, tmpdir, dataset_cls, value):\n \"\"\"Test to ensure that the batch size is correctly set as expected for deepspeed logging purposes.\"\"\"\n\n class TestModel(BoringModel):\n def train_dataloader(self):\n return DataLoader(dataset_cls(32, 64))\n\n class AssertCallback(Callback):\n def setup(self, trainer, pl_module, stage: Optional[str] = None) -> None:\n assert isinstance(trainer.strategy, DeepSpeedStrategy)\n config = trainer.strategy.config\n\n # int value overrides auto mode\n expected_value = value if isinstance(value, int) else 1\n if dataset_cls == RandomDataset:\n expected_value = pl_module.train_dataloader().batch_size if value == \"auto\" else value\n\n assert config[\"train_micro_batch_size_per_gpu\"] == expected_value\n raise SystemExit\n\n ck = AssertCallback()\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n callbacks=ck,\n gpus=1,\n strategy=DeepSpeedStrategy(logging_batch_size_per_gpu=value, zero_optimization=False),\n )\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_run_configure_optimizers(tmpdir):\n \"\"\"Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation), whilst using\n configure_optimizers for optimizers and schedulers.\"\"\"\n\n class TestCB(Callback):\n def on_train_start(self, trainer, pl_module) -> None:\n from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer\n\n assert isinstance(trainer.optimizers[0], FP16_DeepSpeedZeroOptimizer)\n assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)\n assert isinstance(trainer.lr_scheduler_configs[0].scheduler, torch.optim.lr_scheduler.StepLR)\n # check that the lr_scheduler config was preserved\n assert trainer.lr_scheduler_configs[0].name == \"Sean\"\n\n class TestModel(BoringModel):\n def configure_optimizers(self):\n [optimizer], [scheduler] = super().configure_optimizers()\n return {\"optimizer\": optimizer, \"lr_scheduler\": {\"scheduler\": scheduler, \"name\": \"Sean\"}}\n\n model = TestModel()\n lr_monitor = LearningRateMonitor()\n trainer = Trainer(\n strategy=DeepSpeedStrategy(), # disable ZeRO so our optimizers are not wrapped\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n callbacks=[TestCB(), lr_monitor],\n )\n trainer.fit(model)\n\n assert lr_monitor.lrs == {\"Sean\": [0.1]}\n\n _assert_save_model_is_equal(model, tmpdir, 
trainer)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_config(tmpdir, deepspeed_zero_config):\n \"\"\"Test to ensure deepspeed works correctly when passed a DeepSpeed config object including\n optimizers/schedulers and saves the model weights to load correctly.\"\"\"\n\n class TestCB(Callback):\n def on_train_start(self, trainer, pl_module) -> None:\n from deepspeed.runtime.lr_schedules import WarmupLR\n from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer\n\n assert isinstance(trainer.optimizers[0], FP16_DeepSpeedZeroOptimizer)\n assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)\n assert isinstance(trainer.lr_scheduler_configs[0].scheduler, WarmupLR)\n assert trainer.lr_scheduler_configs[0].interval == \"step\"\n assert trainer.lr_scheduler_configs[0].opt_idx == 0\n\n model = BoringModel()\n lr_monitor = LearningRateMonitor()\n trainer = Trainer(\n strategy=DeepSpeedStrategy(config=deepspeed_zero_config),\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n devices=1,\n log_every_n_steps=1,\n limit_train_batches=4,\n limit_val_batches=4,\n limit_test_batches=4,\n max_epochs=2,\n precision=16,\n callbacks=[TestCB(), lr_monitor],\n )\n\n trainer.fit(model)\n trainer.test(model)\n assert list(lr_monitor.lrs) == [\"lr-SGD\"]\n assert len(set(lr_monitor.lrs[\"lr-SGD\"])) == 8\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_custom_precision_params(tmpdir):\n \"\"\"Ensure if we modify the FP16 parameters via the DeepSpeedStrategy, the deepspeed config contains these\n changes.\"\"\"\n\n class TestCB(Callback):\n def on_train_start(self, trainer, pl_module) -> None:\n assert trainer.strategy.config[\"fp16\"][\"loss_scale\"] == 10\n assert trainer.strategy.config[\"fp16\"][\"initial_scale_power\"] == 10\n assert trainer.strategy.config[\"fp16\"][\"loss_scale_window\"] == 10\n assert trainer.strategy.config[\"fp16\"][\"hysteresis\"] == 10\n assert trainer.strategy.config[\"fp16\"][\"min_loss_scale\"] == 10\n raise SystemExit()\n\n model = BoringModel()\n ds = DeepSpeedStrategy(\n loss_scale=10, initial_scale_power=10, loss_scale_window=10, hysteresis=10, min_loss_scale=10\n )\n trainer = Trainer(\n default_root_dir=tmpdir, strategy=ds, precision=16, accelerator=\"gpu\", devices=1, callbacks=[TestCB()]\n )\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(deepspeed=True)\ndef test_deepspeed_custom_activation_checkpointing_params(tmpdir):\n \"\"\"Ensure if we modify the activation checkpointing parameters, the deepspeed config contains these changes.\"\"\"\n ds = DeepSpeedStrategy(\n partition_activations=True,\n cpu_checkpointing=True,\n contiguous_memory_optimization=True,\n synchronize_checkpoint_boundary=True,\n )\n checkpoint_config = ds.config[\"activation_checkpointing\"]\n assert checkpoint_config[\"partition_activations\"]\n assert checkpoint_config[\"cpu_checkpointing\"]\n assert checkpoint_config[\"contiguous_memory_optimization\"]\n assert checkpoint_config[\"synchronize_checkpoint_boundary\"]\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_custom_activation_checkpointing_params_forwarded(tmpdir):\n \"\"\"Ensure if we modify the activation checkpointing parameters, we pass these to\n deepspeed.checkpointing.configure correctly.\"\"\"\n ds = DeepSpeedStrategy(\n partition_activations=True,\n cpu_checkpointing=True,\n contiguous_memory_optimization=True,\n synchronize_checkpoint_boundary=True,\n )\n\n model = BoringModel()\n trainer = Trainer(\n 
default_root_dir=tmpdir,\n enable_progress_bar=False,\n fast_dev_run=1,\n strategy=ds,\n precision=16,\n accelerator=\"gpu\",\n devices=1,\n )\n with mock.patch(\n \"deepspeed.checkpointing.configure\", wraps=deepspeed.checkpointing.configure\n ) as deepspeed_checkpointing_configure:\n trainer.fit(model)\n\n deepspeed_checkpointing_configure.assert_called_with(\n mpu_=None, partition_activations=True, contiguous_checkpointing=True, checkpoint_in_cpu=True, profile=None\n )\n\n\n@RunIf(min_gpus=1, deepspeed=True)\ndef test_deepspeed_assert_config_zero_offload_disabled(tmpdir, deepspeed_zero_config):\n \"\"\"Ensure if we use a config and turn off offload_optimizer, that this is set to False within the config.\"\"\"\n\n deepspeed_zero_config[\"zero_optimization\"][\"offload_optimizer\"] = False\n\n class TestCallback(Callback):\n def setup(self, trainer, pl_module, stage=None) -> None:\n assert trainer.strategy.config[\"zero_optimization\"][\"offload_optimizer\"] is False\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n enable_progress_bar=False,\n max_epochs=1,\n strategy=DeepSpeedStrategy(config=deepspeed_zero_config),\n precision=16,\n gpus=1,\n callbacks=[TestCallback()],\n )\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu(tmpdir):\n \"\"\"Test to ensure that DeepSpeed with multiple GPUs works and deepspeed distributed is initialized\n correctly.\"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n with mock.patch(\"deepspeed.init_distributed\", wraps=deepspeed.init_distributed) as mock_deepspeed_distributed:\n trainer.fit(model)\n mock_deepspeed_distributed.assert_called_once()\n trainer.test(model)\n\n _assert_save_model_is_equal(model, tmpdir, trainer)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_fp32_works(tmpdir):\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir, accelerator=\"gpu\", devices=1, strategy=\"deepspeed_stage_3\", fast_dev_run=True\n )\n trainer.fit(model)\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_stage_3_save_warning(tmpdir):\n \"\"\"Test to ensure that DeepSpeed Stage 3 gives a warning when saving on rank zero.\"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n\n # both ranks need to call save checkpoint, however only rank 0 needs to check the warning\n context_manager = (\n pytest.warns(UserWarning, match=\"each worker will save a shard of the checkpoint within a directory.\")\n if trainer.is_global_zero\n else contextlib.suppress()\n )\n with context_manager:\n trainer.save_checkpoint(checkpoint_path)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_single_file(tmpdir):\n \"\"\"Test to ensure that DeepSpeed loads from a single file checkpoint.\"\"\"\n model = BoringModel()\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n trainer.fit(model)\n trainer.save_checkpoint(checkpoint_path)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n 
accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n assert not strategy.load_full_weights\n with pytest.raises(MisconfigurationException, match=\"DeepSpeed was unable to load the checkpoint.\"):\n trainer.test(model, ckpt_path=checkpoint_path)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3, load_full_weights=True),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n assert strategy.load_full_weights\n trainer.test(model, ckpt_path=checkpoint_path)\n\n\nclass ModelParallelClassificationModel(LightningModule):\n def __init__(self, lr: float = 0.01, num_blocks: int = 5):\n super().__init__()\n self.lr = lr\n self.num_blocks = num_blocks\n self.prepare_data_per_node = True\n\n self.train_acc = Accuracy()\n self.valid_acc = Accuracy()\n self.test_acc = Accuracy()\n\n def make_block(self):\n return nn.Sequential(nn.Linear(32, 32, bias=False), nn.ReLU())\n\n def configure_sharded_model(self) -> None:\n self.model = nn.Sequential(*(self.make_block() for x in range(self.num_blocks)), nn.Linear(32, 3))\n\n def forward(self, x):\n x = self.model(x)\n # Ensure output is in float32 for softmax operation\n x = x.float()\n logits = F.softmax(x, dim=1)\n return logits\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n loss = F.cross_entropy(logits, y)\n self.log(\"train_loss\", loss, prog_bar=True)\n self.log(\"train_acc\", self.train_acc(logits, y), prog_bar=True, sync_dist=True)\n return {\"loss\": loss}\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n self.log(\"val_loss\", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)\n self.log(\"val_acc\", self.valid_acc(logits, y), prog_bar=True, sync_dist=True)\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n self.log(\"test_loss\", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)\n self.log(\"test_acc\", self.test_acc(logits, y), prog_bar=True, sync_dist=True)\n\n def predict_step(self, batch, batch_idx, dataloader_idx=0):\n x, y = batch\n logits = self.forward(x)\n self.test_acc(logits, y)\n return self.test_acc.compute()\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)\n return [optimizer], [{\"scheduler\": lr_scheduler, \"interval\": \"step\"}]\n\n def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n if not hasattr(self, \"model\"):\n self.configure_sharded_model()\n\n # Lightning saves the lr schedulers, but DeepSpeed saves the optimizer states separately\n assert len(checkpoint[\"lr_schedulers\"]) == 1\n assert \"optimizer_states\" not in checkpoint\n\n\nclass ManualModelParallelClassificationModel(ModelParallelClassificationModel):\n @property\n def automatic_optimization(self) -> bool:\n return False\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n loss = F.cross_entropy(logits, y)\n opt = self.optimizers()\n self.log(\"train_loss\", loss, prog_bar=True)\n self.log(\"train_acc\", self.train_acc(logits, y), prog_bar=True, sync_dist=True)\n opt.zero_grad()\n self.manual_backward(loss)\n opt.step()\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef 
test_deepspeed_multigpu_stage_3(tmpdir, deepspeed_config):\n \"\"\"Test to ensure ZeRO Stage 3 works with a parallel model.\"\"\"\n model = ModelParallelBoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n trainer.test(model)\n\n _assert_save_model_is_equal(model, tmpdir, trainer)\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_3_manual_optimization(tmpdir, deepspeed_config):\n \"\"\"Test to ensure ZeRO Stage 3 works with a parallel model.\"\"\"\n model = ModelParallelBoringModelManualOptim()\n model.training_epoch_end = None\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n trainer.test(model)\n\n _assert_save_model_is_equal(model, tmpdir, trainer)\n\n\[email protected]((\"accumulate_grad_batches\", \"automatic_optimization\"), [(1, False), (2, True)])\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_3_checkpointing(tmpdir, automatic_optimization, accumulate_grad_batches):\n seed_everything(1)\n if automatic_optimization:\n model = ModelParallelClassificationModel()\n else:\n model = ManualModelParallelClassificationModel()\n dm = ClassifDataModule()\n ck = ModelCheckpoint(monitor=\"val_acc\", mode=\"max\", save_last=True, save_top_k=-1)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=10,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n precision=16,\n accumulate_grad_batches=accumulate_grad_batches,\n callbacks=[ck],\n )\n trainer.fit(model, datamodule=dm)\n\n results = trainer.test(datamodule=dm)\n assert results[0][\"test_acc\"] > 0.7\n saved_results = trainer.test(ckpt_path=ck.best_model_path, datamodule=dm)\n assert saved_results[0][\"test_acc\"] > 0.7\n assert saved_results == results\n\n if automatic_optimization:\n model = ModelParallelClassificationModel()\n else:\n model = ManualModelParallelClassificationModel()\n trainer = Trainer(default_root_dir=tmpdir, gpus=2, strategy=DeepSpeedStrategy(stage=3), precision=16)\n\n results = trainer.test(model, datamodule=dm, ckpt_path=ck.best_model_path)\n assert results[0][\"test_acc\"] > 0.7\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_3_warns_resume_training(tmpdir):\n \"\"\"Test to ensure with Stage 3 and multiple GPUs that we can resume from training, throwing a warning that the\n optimizer state and scheduler states cannot be restored.\"\"\"\n dm = ClassifDataModule()\n model = BoringModel()\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n trainer.fit(model)\n trainer.save_checkpoint(checkpoint_path)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n strategy=DeepSpeedStrategy(stage=3, load_full_weights=True),\n accelerator=\"gpu\",\n devices=1,\n precision=16,\n )\n with pytest.warns(\n UserWarning,\n match=\"A single checkpoint file has been given. This means optimizer states cannot be restored. 
\"\n \"If you'd like to restore these states, you must \"\n \"provide a path to the originally saved DeepSpeed checkpoint.\",\n ):\n trainer.fit(model, datamodule=dm, ckpt_path=checkpoint_path)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_3_resume_training(tmpdir):\n \"\"\"Test to ensure with Stage 3 and single GPU that we can resume training.\"\"\"\n initial_model = ModelParallelClassificationModel()\n dm = ClassifDataModule()\n\n ck = ModelCheckpoint(monitor=\"val_acc\", mode=\"max\", save_last=True, save_top_k=-1)\n initial_trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=2,\n limit_val_batches=2,\n limit_test_batches=2,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=1,\n precision=16,\n callbacks=[ck],\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n initial_trainer.fit(initial_model, datamodule=dm)\n\n class TestCallback(Callback):\n def on_train_batch_start(\n self, trainer: Trainer, pl_module: LightningModule, batch: Any, batch_idx: int\n ) -> None:\n original_deepspeed_strategy = initial_trainer.strategy\n current_deepspeed_strategy = trainer.strategy\n\n assert isinstance(original_deepspeed_strategy, DeepSpeedStrategy)\n assert isinstance(current_deepspeed_strategy, DeepSpeedStrategy)\n # assert optimizer states are the correctly loaded\n original_optimizer_dict = original_deepspeed_strategy.deepspeed_engine.optimizer.state_dict()\n current_optimizer_dict = current_deepspeed_strategy.deepspeed_engine.optimizer.state_dict()\n for orig_tensor, current_tensor in zip(\n original_optimizer_dict[\"fp32_flat_groups\"], current_optimizer_dict[\"fp32_flat_groups\"]\n ):\n assert torch.all(orig_tensor.eq(current_tensor))\n # assert model state is loaded correctly\n for current_param, initial_param in zip(pl_module.parameters(), initial_model.parameters()):\n assert torch.equal(current_param.cpu(), initial_param.cpu())\n # assert epoch has correctly been restored\n assert trainer.current_epoch == 1\n\n # assert lr-scheduler states are loaded correctly\n original_lr_scheduler = initial_trainer.lr_scheduler_configs[0].scheduler\n current_lr_scheduler = trainer.lr_scheduler_configs[0].scheduler\n assert original_lr_scheduler.state_dict() == current_lr_scheduler.state_dict()\n\n model = ModelParallelClassificationModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=1,\n precision=16,\n callbacks=TestCallback(),\n enable_progress_bar=False,\n enable_model_summary=False,\n )\n trainer.fit(model, datamodule=dm, ckpt_path=ck.best_model_path)\n\n\[email protected](\"offload_optimizer\", [False, True])\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer):\n \"\"\"Test to ensure with Stage 2 and multiple GPUs, accumulated grad batches works.\"\"\"\n seed_everything(42)\n\n class VerificationCallback(Callback):\n def __init__(self):\n self.on_train_batch_start_called = False\n\n def on_train_batch_start(self, trainer, pl_module: LightningModule, batch: Any, batch_idx: int) -> None:\n deepspeed_engine = trainer.strategy.model\n assert trainer.global_step == deepspeed_engine.global_steps\n self.on_train_batch_start_called = True\n\n model = ModelParallelClassificationModel()\n dm = ClassifDataModule()\n verification_callback = VerificationCallback()\n trainer = Trainer(\n default_root_dir=tmpdir,\n 
enable_progress_bar=False,\n # TODO: this test fails with max_epochs >1 as there are leftover batches per epoch.\n # there's divergence in how Lightning handles the last batch of the epoch with how DeepSpeed does it.\n # we step the optimizers on the last batch but DeepSpeed keeps the accumulation for the next epoch\n max_epochs=1,\n strategy=DeepSpeedStrategy(stage=2, offload_optimizer=offload_optimizer),\n accelerator=\"gpu\",\n devices=2,\n limit_train_batches=5,\n limit_val_batches=2,\n precision=16,\n accumulate_grad_batches=2,\n callbacks=[verification_callback],\n )\n assert trainer.limit_train_batches % trainer.accumulate_grad_batches != 0, \"leftover batches should be tested\"\n trainer.fit(model, datamodule=dm)\n assert verification_callback.on_train_batch_start_called\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_test(tmpdir):\n \"\"\"Test to ensure we can use DeepSpeed with just test using ZeRO Stage 3.\"\"\"\n model = ModelParallelBoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.test(model)\n\n\n# TODO(Sean): Once partial parameter partitioning is supported this test should be re-enabled\[email protected](\"Partial parameter partitioning for DeepSpeed is currently broken.\")\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_partial_partition_parameters(tmpdir):\n \"\"\"Test to ensure that a module that defines a layer inside the ``__init__`` and ``configure_sharded_model``\n correctly converts all parameters to float16 when ``precision=16`` and runs successfully.\"\"\"\n\n class TestModel(ModelParallelBoringModel):\n def __init__(self):\n super().__init__()\n self.layer_2 = torch.nn.Linear(32, 32)\n\n def configure_sharded_model(self) -> None:\n self.layer = torch.nn.Linear(32, 2)\n\n def forward(self, x):\n x = self.layer_2(x)\n return self.layer(x)\n\n def on_train_epoch_start(self) -> None:\n assert all([x.dtype == torch.float16 for x in self.parameters()])\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_test_rnn(tmpdir):\n \"\"\"Test to ensure that turning off explicit partitioning of the entire module for ZeRO Stage 3 works when\n training with certain layers which will crash with explicit partitioning.\"\"\"\n\n class TestModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.rnn = torch.nn.GRU(32, 32)\n\n def on_train_epoch_start(self) -> None:\n assert all([x.dtype == torch.float16 for x in self.parameters()])\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n\n\n@RunIf(deepspeed=True)\[email protected](\"deepspeed.init_distributed\", autospec=True)\[email protected](\"platform\", [\"Linux\", \"Windows\"])\ndef test_deepspeed_strategy_env_variables(mock_deepspeed_distributed, tmpdir, platform):\n \"\"\"Test to ensure that we setup distributed communication using correctly.\n\n When using windows, ranks environment variables should not be set, and deepspeed should handle this.\n \"\"\"\n trainer = Trainer(default_root_dir=tmpdir, 
strategy=DeepSpeedStrategy(stage=3))\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n with mock.patch(\"platform.system\", return_value=platform) as mock_platform:\n strategy._init_deepspeed_distributed()\n mock_deepspeed_distributed.assert_called()\n mock_platform.assert_called()\n if platform == \"Windows\":\n # assert no env variables have been set within the DeepSpeedStrategy\n assert all(k not in os.environ for k in (\"MASTER_PORT\", \"MASTER_ADDR\", \"RANK\", \"WORLD_SIZE\", \"LOCAL_RANK\"))\n else:\n assert os.environ[\"MASTER_ADDR\"] == str(trainer.strategy.cluster_environment.main_address)\n assert os.environ[\"MASTER_PORT\"] == str(trainer.strategy.cluster_environment.main_port)\n assert os.environ[\"RANK\"] == str(trainer.strategy.global_rank)\n assert os.environ[\"WORLD_SIZE\"] == str(trainer.strategy.world_size)\n assert os.environ[\"LOCAL_RANK\"] == str(trainer.strategy.local_rank)\n\n\ndef _assert_save_model_is_equal(model, tmpdir, trainer):\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n checkpoint_path = trainer.strategy.broadcast(checkpoint_path)\n trainer.save_checkpoint(checkpoint_path)\n trainer.strategy.barrier()\n\n # carry out the check only on rank 0\n if trainer.is_global_zero:\n single_ckpt_path = os.path.join(tmpdir, \"single_model.pt\")\n convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, single_ckpt_path)\n state_dict = torch.load(single_ckpt_path)\n\n model = model.cpu()\n # Assert model parameters are identical after loading\n for orig_param, saved_model_param in zip(model.parameters(), state_dict.values()):\n if model.dtype == torch.half:\n # moved model to float32 for comparison with single fp32 saved weights\n saved_model_param = saved_model_param.half()\n assert torch.equal(orig_param, saved_model_param)\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multigpu_no_schedulers(tmpdir):\n \"\"\"Test to ensure ZeRO Stage 3 works with a parallel model and no schedulers.\"\"\"\n model = ModelParallelBoringModelNoSchedulers()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n\n _assert_save_model_is_equal(model, tmpdir, trainer)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_skip_backward_raises(tmpdir):\n class TestModel(BoringModel):\n def training_step(self, batch, batch_idx):\n return None\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n precision=16,\n )\n with pytest.raises(MisconfigurationException, match=\"returning `None` .* is not supported\"):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_setup_train_dataloader(tmpdir):\n \"\"\"Test DeepSpeed works when setup is required to call in the DataModule.\"\"\"\n\n class TestSetupIsCalledDataModule(LightningDataModule):\n def __init__(self):\n super().__init__()\n self._setup = False\n\n def setup(self, stage: Optional[str] = None) -> None:\n self._setup = True\n\n def train_dataloader(self):\n assert self._setup\n return DataLoader(RandomDataset(32, 64), batch_size=2)\n\n def val_dataloader(self):\n assert self._setup\n return DataLoader(RandomDataset(32, 64), batch_size=2)\n\n def test_dataloader(self):\n assert self._setup\n return DataLoader(RandomDataset(32, 64), batch_size=2)\n\n model = 
BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(logging_level=logging.INFO),\n accelerator=\"gpu\",\n devices=1,\n fast_dev_run=True,\n )\n dm = TestSetupIsCalledDataModule()\n with mock.patch(\"deepspeed.utils.logging.logger.warning\", autospec=True) as mock_object:\n trainer.fit(model, datamodule=dm)\n assert any(\"Tried to infer the batch size\" in str(arg) for arg in mock_object.call_args_list)\n\n\[email protected](\"torch.optim.lr_scheduler.StepLR.step\", autospec=True)\[email protected](\"interval\", [\"step\", \"epoch\"])\[email protected](\"max_epoch\", [2])\[email protected](\"limit_train_batches\", [2])\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_scheduler_step_count(mock_step, max_epoch, limit_train_batches, interval):\n \"\"\"Test to ensure that the scheduler is called the correct amount of times during training when scheduler is\n set to step or epoch.\"\"\"\n\n class TestModel(BoringModel):\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\"scheduler\": scheduler, \"interval\": interval},\n }\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=os.getcwd(),\n limit_train_batches=limit_train_batches,\n limit_val_batches=0,\n max_epochs=max_epoch,\n accelerator=\"gpu\",\n devices=1,\n strategy=\"deepspeed\",\n )\n trainer.fit(model)\n if interval == \"epoch\":\n # assert called once at init and once during training\n assert mock_step.call_count == 1 + max_epoch\n else:\n # assert called once at init and once during training\n assert mock_step.call_count == 1 + (max_epoch * limit_train_batches)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_configure_gradient_clipping(tmpdir):\n \"\"\"Test to ensure that a warning is raised when `LightningModule.configure_gradient_clipping` is overridden in\n case of deepspeed.\"\"\"\n\n class TestModel(BoringModel):\n def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm):\n if optimizer_idx == 0:\n self.clip_gradients(optimizer, gradient_clip_val, gradient_clip_algorithm)\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n devices=1,\n strategy=\"deepspeed\",\n fast_dev_run=True,\n )\n with pytest.warns(UserWarning, match=\"handles gradient clipping internally\"):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_deepspeed_gradient_clip_by_value(tmpdir):\n \"\"\"Test to ensure that an exception is raised when using `gradient_clip_algorithm='value'`.\"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n accelerator=\"gpu\",\n devices=1,\n strategy=\"deepspeed\",\n gradient_clip_algorithm=\"value\",\n )\n with pytest.raises(MisconfigurationException, match=\"does not support clipping gradients by value\"):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1, standalone=True, deepspeed=True)\ndef test_different_accumulate_grad_batches_fails(tmpdir):\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir, accumulate_grad_batches={1: 2}, accelerator=\"gpu\", devices=1, strategy=\"deepspeed\"\n )\n with pytest.raises(\n MisconfigurationException, match=\"DeepSpeed currently does not support different `accumulate_grad_batches`\"\n ):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=2, standalone=True, 
deepspeed=True)\ndef test_specific_gpu_device_id(tmpdir):\n class TestCallback(Callback):\n def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:\n assert model.device.index == 1\n\n def on_train_batch_start(\n self,\n trainer: Trainer,\n pl_module: LightningModule,\n batch: Any,\n batch_idx: int,\n ) -> None:\n assert batch.device.index == 1\n\n def on_test_start(self, trainer: Trainer, pl_module: LightningModule) -> None:\n assert model.device.index == 1\n\n def on_test_batch_start(\n self,\n trainer: Trainer,\n pl_module: LightningModule,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int,\n ) -> None:\n assert batch.device.index == 1\n\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n accelerator=\"gpu\",\n devices=[1],\n strategy=\"deepspeed\",\n callbacks=TestCallback(),\n )\n trainer.fit(model)\n trainer.test(model)\n\n\n@RunIf(min_gpus=2, min_torch=\"1.10.0\", standalone=True, deepspeed=True)\ndef test_deepspeed_with_meta_device(tmpdir):\n with init_meta_context():\n model = BoringModel()\n assert model.layer.weight.device.type == \"meta\"\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3),\n accelerator=\"gpu\",\n devices=2,\n fast_dev_run=True,\n precision=16,\n )\n trainer.fit(model)\n assert model.layer.weight.device.type == \"cpu\"\n\n\n@RunIf(min_gpus=2, standalone=True, deepspeed=True)\ndef test_deepspeed_multi_save_same_filepath(tmpdir):\n \"\"\"Test that verifies that deepspeed saves only latest checkpoint in the specified path and deletes the old\n sharded checkpoints.\"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=\"deepspeed\",\n accelerator=\"gpu\",\n devices=2,\n callbacks=[ModelCheckpoint(save_top_k=1, save_last=True)],\n limit_train_batches=1,\n limit_val_batches=0,\n num_sanity_val_steps=0,\n max_epochs=2,\n )\n trainer.fit(model)\n ckpt_path = os.path.join(trainer.checkpoint_callback.dirpath, \"last.ckpt\")\n expected = [\"latest\", \"zero_to_fp32.py\", \"checkpoint\"]\n assert set(expected) == set(os.listdir(ckpt_path))\n"
] | [
[
"torch.optim.lr_scheduler.ExponentialLR",
"torch.nn.Linear",
"torch.load",
"torch.randn",
"torch.nn.functional.softmax",
"torch.equal",
"torch.nn.GRU",
"torch.nn.ReLU",
"torch.nn.functional.cross_entropy",
"torch.optim.lr_scheduler.StepLR",
"torch.device"
]
] |
groussea/custom_colormap | [
"e92f88d1b5be18509fa6e6204387b10c74b4c8ad"
] | [
"custom_colormaps.py"
] | [
"\"\"\"\nNAME\n Custom Colormaps for Matplotlib\nPURPOSE\n This program shows how to implement make_cmap which is a function that\n generates a colorbar\nPROGRAMMER(S)\n Chris Slocum\n Gauthier Rousseau\nREVISION HISTORY\n 20130411 -- Initial version created\n 20140313 -- Small changes made and code posted online\n 20140320 -- Added the ability to set the position of each color\n 20150724 -- Attempted to make this more Pythonic\n 20180307 -- Changed license to BSD 3-clause\n 20190711 -- Added transprency (Alpha component) + different customized color maps + resolution of the colormap as an argument\n\"\"\"\nimport numpy as np\n\n\ndef create_colormap(colors, position=None, bit=False, reverse=False, res=256, name='custom_colormap'):\n \"\"\"\n returns a linear custom colormap\n\n Parameters\n ----------\n colors : array-like\n contain RGBA values. The RGBA values may either be in 8-bit [0 to 255]\n or arithmetic [0 to 1] (default).\n Arrange your tuples so that the first color is the lowest value for the\n colorbar and the last is the highest.\n position : array like\n contains values from 0 to 1 to dictate the location of each color.\n bit : Boolean\n 8-bit [0 to 255] (in which bit must be set to\n True when called) or arithmetic [0 to 1] (default)\n reverse : Boolean\n If you want to flip the scheme\n res : integer\n Resolution of the colormap\n name : string\n name of the scheme if you plan to save it\n\n Returns\n -------\n cmap : matplotlib.colors.LinearSegmentedColormap\n cmap with equally spaced colors\n \"\"\"\n from matplotlib.colors import LinearSegmentedColormap\n if not isinstance(colors, np.ndarray):\n colors = np.array(colors, dtype='f')\n if reverse:\n colors = colors[::-1]\n if position is not None and not isinstance(position, np.ndarray):\n position = np.array(position)\n elif position is None:\n position = np.linspace(0, 1, colors.shape[0])\n else:\n if position.size != colors.shape[0]:\n raise ValueError(\"position length must be the same as colors\")\n elif not np.isclose(position[0], 0) and not np.isclose(position[-1], 1):\n raise ValueError(\"position must start with 0 and end with 1\")\n if bit:\n colors[:] = [tuple(map(lambda x: x / 255., color)) for color in colors]\n cdict = {'red':[], 'green':[], 'blue':[], 'alpha':[]}\n for pos, color in zip(position, colors):\n cdict['red'].append((pos, color[0], color[0]))\n cdict['green'].append((pos, color[1], color[1]))\n cdict['blue'].append((pos, color[2], color[2]))\n cdict['alpha'].append((pos, color[3], color[3]))\n return LinearSegmentedColormap(name, cdict,res)\n\ndef make_cmap_customized(Palette='mountain',position=[0.0, 0.16, 0.2, 0.24, 0.4, 0.7, 0.8, 1],reverse=False,alpha=255):\n if Palette=='sunrise':\n couleur7=(0,0,0,alpha)\n couleur6=(64,50,79,alpha)\n couleur5=(107,64,110,alpha)\n couleur4=(141,76,125,alpha)\n couleur3=(172,85,122,alpha)\n couleur2=(210,124,124,alpha)\n couleur1=(240,206,125,alpha) \n couleur0=(255,255,255,alpha)\n elif Palette=='green':\n couleur7=(0,0,0,alpha)\n couleur6=(6,49,50,alpha)\n couleur5=(28,78,78,alpha)\n couleur4=(55,140,129,alpha)\n couleur3=(172,185,153,alpha)\n couleur2=(199,205,181,alpha)\n couleur1=(232,219,194,alpha) \n couleur0=(255,255,255,alpha)\n elif Palette=='mountain':\n couleur7=(0,0,0,alpha)\n couleur6=(45,52,70,alpha)\n couleur5=(89,76,96,alpha)\n couleur4=(145,101,118,alpha)\n couleur3=(212,119,127,alpha)\n couleur2=(212,153,154,alpha)\n couleur1=(238,189,184,alpha) \n couleur0=(255,255,255,alpha) \n elif Palette=='prune':\n couleur7=(0,0,0,alpha)\n 
couleur6=(66,37,67,alpha)\n couleur5=(125,58,91,alpha)\n couleur4=(107,77,131,alpha)\n couleur3=(205,179,214,alpha)\n couleur2=(164,173,154,alpha)\n couleur1=(207,213,199,alpha) \n couleur0=(255,255,255,alpha)\n elif Palette=='asym_mountain5':\n couleur7=(45,52,70,alpha)\n couleur6=(110,86,96,alpha)\n couleur5=(135,90,115,alpha)\n couleur4=(145,101,118,alpha) \n couleur3=(212,119,127,alpha)\n couleur2=(232,219,194,alpha) \n couleur1=(167,213,229,alpha) \n couleur0=(121,175,204,alpha)\n \n colors = [couleur0,couleur1,couleur2,couleur3,couleur4,couleur5,couleur6,couleur7]\n\n return create_colormap(colors, bit=True ,position=position,reverse=reverse,res=1000) \n\n\n\n\nif __name__ == \"__main__\":\n # An example of how to use make_cmap\n import matplotlib.pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(311)\n # Create a list of RGB tuples\n colors = [(255, 0, 0,10), (255, 255, 0,70), (255, 255, 255,80), (0, 157, 0,255), (0, 0, 255,255)] # This example uses the 8-bit RGB\n # Call the function make_cmap which returns your colormap\n my_cmap = create_colormap(colors, bit=True)\n # Use your colormap\n plt.plot([0,50],[0,25],color='k',zorder=0)\n\n plt.text(25,12.5,'colormaps',zorder=0,horizontalalignment='center',verticalalignment='center',fontsize=30)\n plt.pcolor(np.random.rand(25, 50), cmap=my_cmap)\n\n plt.colorbar()\n ax = fig.add_subplot(312)\n\n plt.plot([0,50],[0,25],color='k',zorder=0)\n plt.text(25,12.5,'with',zorder=0,horizontalalignment='center',verticalalignment='center',fontsize=30)\n plt.pcolor(np.random.rand(25, 50), cmap=make_cmap_customized(Palette='green',alpha=255/4))\n plt.colorbar()\n\n ax = fig.add_subplot(313)\n colors = [(0.4, 0.2, 0.0,0.5), (1, 1, 1,0.2), (0, 0.3, 0.4,0.8)]\n # Create an array or list of positions from 0 to 1.\n position = [0, 0.3, 1]\n plt.plot([0,50],[0,25],color='k',zorder=0)\n plt.text(25,12.5,'transparency',zorder=0,horizontalalignment='center',verticalalignment='center',fontsize=30)\n plt.pcolor(np.random.rand(25, 50), cmap=make_cmap_customized(Palette='mountain',alpha=255/2))\n plt.colorbar()\n plt.savefig(\"example_custom_colormap.png\")\n plt.show()\n"
] | [
[
"matplotlib.colors.LinearSegmentedColormap",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.isclose",
"matplotlib.pyplot.show",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.text",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.linspace"
]
] |
fedesigno/solar-panel-segmentation | [
"75856be3361bb4904387e6abc986627d1cc98ebb"
] | [
"solarnet/models/segmenter.py"
] | [
"import torch\nfrom torch import nn\n\nfrom typing import List\n\nfrom .base import ResnetBase\n\n\nclass Segmenter(ResnetBase):\n \"\"\"A ResNet34 U-Net model, as described in\n https://github.com/fastai/fastai/blob/master/courses/dl2/carvana-unet-lrg.ipynb\n\n Attributes:\n imagenet_base: boolean, default: False\n Whether or not to load weights pretrained on imagenet\n \"\"\"\n\n def __init__(self, imagenet_base: bool = False) -> None:\n super().__init__(imagenet_base=imagenet_base)\n\n self.target_modules = [str(x) for x in [2, 4, 5, 6]]\n self.hooks = self.add_hooks()\n\n self.relu = nn.ReLU()\n self.upsamples = nn.ModuleList([\n UpBlock(2048, 1024, 512),\n UpBlock(512, 512, 256),\n UpBlock(256, 256, 64),\n UpBlock(64, 64, 32),\n UpBlock(32, 3, 16),\n ])\n self.conv_transpose = nn.ConvTranspose2d(16, 1, 1)\n self.sigmoid = nn.Sigmoid()\n\n def add_hooks(self) -> List[torch.utils.hooks.RemovableHandle]:\n hooks = []\n for name, child in self.pretrained.named_children():\n if name in self.target_modules:\n hooks.append(child.register_forward_hook(self.save_output))\n return hooks\n\n def retrieve_hooked_outputs(self) -> List[torch.Tensor]:\n # to be called in the forward pass, this method returns the tensors\n # which were saved by the forward hooks\n outputs = []\n for name, child in self.pretrained.named_children():\n if name in self.target_modules:\n outputs.append(child.output)\n return outputs\n\n def cleanup(self) -> None:\n # removes the hooks, and the tensors which were added\n for name, child in self.pretrained.named_children():\n if name in self.target_modules:\n # allows the method to be safely called even if\n # the hooks aren't there\n try:\n del child.output\n except AttributeError:\n continue\n for hook in self.hooks:\n hook.remove()\n\n @staticmethod\n def save_output(module, input, output):\n # the hook to add to the target modules\n module.output = output\n\n def load_base(self, state_dict: dict) -> None:\n # This allows a model trained on the classifier to be loaded\n # into the model used for segmentation, even though their state_dicts\n # differ\n self.load_state_dict(state_dict, strict=False)\n\n def forward(self, x):\n org_input = x\n x = self.relu(self.pretrained(x))\n # we reverse the outputs so that the smallest output\n # is the first one we get, and the largest the last\n interim = self.retrieve_hooked_outputs()[::-1]\n\n for upsampler, interim_output in zip(self.upsamples[:-1], interim):\n x = upsampler(x, interim_output)\n x = self.upsamples[-1](x, org_input)\n return self.sigmoid(self.conv_transpose(x))\n\n\nclass UpBlock(nn.Module):\n\n def __init__(self, in_channels: int, across_channels: int, out_channels: int) -> None:\n super().__init__()\n up_out = across_out = out_channels // 2\n self.conv_across = nn.Conv2d(across_channels, across_out, 1)\n # alternative: ConvTranspose2d(in_channels, up_out, 2, stride=2)\n self.upsample = nn.Sequential(nn.Upsample(scale_factor=2, mode=\"bilinear\", align_corners=True),\n nn.Conv2d(in_channels, up_out, kernel_size=1))\n self.batchnorm = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU()\n\n def forward(self, x_up, x_across):\n upsampled = self.upsample(x_up)\n skipped = self.conv_across(x_across)\n joint = torch.cat((upsampled, skipped), dim=1)\n return self.batchnorm(self.relu(joint))\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.cat",
"torch.nn.ConvTranspose2d"
]
] |
rajat315315/pandas | [
"2eec4f7cfa1c45671b9875062343521a53ae8b28"
] | [
"pandas/core/series.py"
] | [
"\"\"\"\nData structure for 1-dimensional cross-sectional and time series data\n\"\"\"\nfrom __future__ import annotations\n\nfrom io import StringIO\nfrom shutil import get_terminal_size\nfrom textwrap import dedent\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n Callable,\n Hashable,\n Iterable,\n List,\n Optional,\n Tuple,\n Type,\n Union,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib, properties, reshape, tslibs\nfrom pandas._libs.lib import no_default\nfrom pandas._typing import (\n AggFuncType,\n ArrayLike,\n Axis,\n Dtype,\n DtypeObj,\n FrameOrSeriesUnion,\n IndexKeyFunc,\n NpDtype,\n StorageOptions,\n ValueKeyFunc,\n)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import Appender, Substitution, doc\nfrom pandas.util._validators import validate_bool_kwarg, validate_percentile\n\nfrom pandas.core.dtypes.cast import (\n convert_dtypes,\n maybe_cast_to_extension_array,\n validate_numeric_casting,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_bool,\n is_categorical_dtype,\n is_dict_like,\n is_extension_array_dtype,\n is_integer,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_scalar,\n validate_all_hashable,\n)\nfrom pandas.core.dtypes.generic import ABCDataFrame\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import (\n isna,\n na_value_for_dtype,\n notna,\n remove_na_arraylike,\n)\n\nfrom pandas.core import algorithms, base, generic, missing, nanops, ops\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.aggregation import transform\nfrom pandas.core.apply import series_apply\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.arrays.categorical import CategoricalAccessor\nfrom pandas.core.arrays.sparse import SparseAccessor\nimport pandas.core.common as com\nfrom pandas.core.construction import (\n create_series_with_explicit_dtype,\n extract_array,\n is_empty_data,\n sanitize_array,\n)\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.indexers import deprecate_ndim_indexing, unpack_1tuple\nfrom pandas.core.indexes.accessors import CombinedDatetimelikeProperties\nfrom pandas.core.indexes.api import (\n CategoricalIndex,\n Float64Index,\n Index,\n MultiIndex,\n ensure_index,\n)\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import PeriodIndex\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex\nfrom pandas.core.indexing import check_bool_indexer\nfrom pandas.core.internals import SingleBlockManager\nfrom pandas.core.internals.construction import sanitize_index\nfrom pandas.core.shared_docs import _shared_docs\nfrom pandas.core.sorting import ensure_key_mapped, nargsort\nfrom pandas.core.strings import StringMethods\nfrom pandas.core.tools.datetimes import to_datetime\n\nimport pandas.io.formats.format as fmt\nimport pandas.plotting\n\nif TYPE_CHECKING:\n from pandas._typing import TimedeltaConvertibleTypes, TimestampConvertibleTypes\n\n from pandas.core.frame import DataFrame\n from pandas.core.groupby.generic import SeriesGroupBy\n from pandas.core.resample import Resampler\n\n__all__ = [\"Series\"]\n\n_shared_doc_kwargs = {\n \"axes\": \"index\",\n \"klass\": \"Series\",\n \"axes_single_arg\": \"{0 or 'index'}\",\n \"axis\": \"\"\"axis : {0 or 'index'}\n Parameter needed for compatibility with DataFrame.\"\"\",\n \"inplace\": 
\"\"\"inplace : boolean, default False\n If True, performs operation inplace and returns None.\"\"\",\n \"unique\": \"np.ndarray\",\n \"duplicated\": \"Series\",\n \"optional_by\": \"\",\n \"optional_mapper\": \"\",\n \"optional_labels\": \"\",\n \"optional_axis\": \"\",\n \"replace_iloc\": \"\"\"\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\"\"\",\n}\n\n\ndef _coerce_method(converter):\n \"\"\"\n Install the scalar coercion methods.\n \"\"\"\n\n def wrapper(self):\n if len(self) == 1:\n return converter(self.iloc[0])\n raise TypeError(f\"cannot convert the series to {converter}\")\n\n wrapper.__name__ = f\"__{converter.__name__}__\"\n return wrapper\n\n\n# ----------------------------------------------------------------------\n# Series class\n\n\nclass Series(base.IndexOpsMixin, generic.NDFrame):\n \"\"\"\n One-dimensional ndarray with axis labels (including time series).\n\n Labels need not be unique but must be a hashable type. The object\n supports both integer- and label-based indexing and provides a host of\n methods for performing operations involving the index. Statistical\n methods from ndarray have been overridden to automatically exclude\n missing data (currently represented as NaN).\n\n Operations between Series (+, -, /, *, **) align values based on their\n associated index values-- they need not be the same length. The result\n index will be the sorted union of the two indexes.\n\n Parameters\n ----------\n data : array-like, Iterable, dict, or scalar value\n Contains data stored in Series. If data is a dict, argument order is\n maintained.\n index : array-like or Index (1d)\n Values must be hashable and have the same length as `data`.\n Non-unique index values are allowed. Will default to\n RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like\n and index is None, then the values in the index are used to\n reindex the Series after it is created using the keys in the data.\n dtype : str, numpy.dtype, or ExtensionDtype, optional\n Data type for the output Series. 
If not specified, this will be\n inferred from `data`.\n See the :ref:`user guide <basics.dtypes>` for more usages.\n name : str, optional\n The name to give to the Series.\n copy : bool, default False\n Copy input data.\n \"\"\"\n\n _typ = \"series\"\n _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)\n\n _name: Hashable\n _metadata: List[str] = [\"name\"]\n _internal_names_set = {\"index\"} | generic.NDFrame._internal_names_set\n _accessors = {\"dt\", \"cat\", \"str\", \"sparse\"}\n _hidden_attrs = (\n base.IndexOpsMixin._hidden_attrs\n | generic.NDFrame._hidden_attrs\n | frozenset([\"compress\", \"ptp\"])\n )\n\n # Override cache_readonly bc Series is mutable\n hasnans = property(\n base.IndexOpsMixin.hasnans.func, doc=base.IndexOpsMixin.hasnans.__doc__\n )\n __hash__ = generic.NDFrame.__hash__\n _mgr: SingleBlockManager\n div: Callable[[Series, Any], Series]\n rdiv: Callable[[Series, Any], Series]\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data=None,\n index=None,\n dtype: Optional[Dtype] = None,\n name=None,\n copy: bool = False,\n fastpath: bool = False,\n ):\n\n if (\n isinstance(data, SingleBlockManager)\n and index is None\n and dtype is None\n and copy is False\n ):\n # GH#33357 called with just the SingleBlockManager\n NDFrame.__init__(self, data)\n self.name = name\n return\n\n # we are called internally, so short-circuit\n if fastpath:\n\n # data is an ndarray, index is defined\n if not isinstance(data, SingleBlockManager):\n data = SingleBlockManager.from_array(data, index)\n if copy:\n data = data.copy()\n if index is None:\n index = data.index\n\n else:\n\n name = ibase.maybe_extract_name(name, data, type(self))\n\n if is_empty_data(data) and dtype is None:\n # gh-17261\n warnings.warn(\n \"The default dtype for empty Series will be 'object' instead \"\n \"of 'float64' in a future version. Specify a dtype explicitly \"\n \"to silence this warning.\",\n DeprecationWarning,\n stacklevel=2,\n )\n # uncomment the line below when removing the DeprecationWarning\n # dtype = np.dtype(object)\n\n if index is not None:\n index = ensure_index(index)\n\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, MultiIndex):\n raise NotImplementedError(\n \"initializing a Series from a MultiIndex is not supported\"\n )\n elif isinstance(data, Index):\n\n if dtype is not None:\n # astype copies\n data = data.astype(dtype)\n else:\n # GH#24096 we need to ensure the index remains immutable\n data = data._values.copy()\n copy = False\n\n elif isinstance(data, np.ndarray):\n if len(data.dtype):\n # GH#13296 we are dealing with a compound dtype, which\n # should be treated as 2D\n raise ValueError(\n \"Cannot construct a Series from an ndarray with \"\n \"compound dtype. Use DataFrame instead.\"\n )\n elif isinstance(data, Series):\n if index is None:\n index = data.index\n else:\n data = data.reindex(index, copy=copy)\n copy = False\n data = data._mgr\n elif is_dict_like(data):\n data, index = self._init_dict(data, index, dtype)\n dtype = None\n copy = False\n elif isinstance(data, SingleBlockManager):\n if index is None:\n index = data.index\n elif not data.index.equals(index) or copy:\n # GH#19275 SingleBlockManager input should only be called\n # internally\n raise AssertionError(\n \"Cannot pass both SingleBlockManager \"\n \"`data` argument and a different \"\n \"`index` argument. 
`copy` must be False.\"\n )\n\n elif is_extension_array_dtype(data):\n pass\n elif isinstance(data, (set, frozenset)):\n raise TypeError(f\"'{type(data).__name__}' type is unordered\")\n else:\n data = com.maybe_iterable_to_list(data)\n\n if index is None:\n if not is_list_like(data):\n data = [data]\n index = ibase.default_index(len(data))\n elif is_list_like(data):\n sanitize_index(data, index)\n\n # create/copy the manager\n if isinstance(data, SingleBlockManager):\n if dtype is not None:\n data = data.astype(dtype=dtype, errors=\"ignore\", copy=copy)\n elif copy:\n data = data.copy()\n else:\n data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)\n\n data = SingleBlockManager.from_array(data, index)\n\n generic.NDFrame.__init__(self, data)\n self.name = name\n self._set_axis(0, index, fastpath=True)\n\n def _init_dict(self, data, index=None, dtype: Optional[Dtype] = None):\n \"\"\"\n Derive the \"_mgr\" and \"index\" attributes of a new Series from a\n dictionary input.\n\n Parameters\n ----------\n data : dict or dict-like\n Data used to populate the new Series.\n index : Index or index-like, default None\n Index for the new Series: if None, use dict keys.\n dtype : dtype, default None\n The dtype for the new Series: if None, infer from data.\n\n Returns\n -------\n _data : BlockManager for the new Series\n index : index for the new Series\n \"\"\"\n # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]\n # raises KeyError), so we iterate the entire dict, and align\n if data:\n # GH:34717, issue was using zip to extract key and values from data.\n # using generators in effects the performance.\n # Below is the new way of extracting the keys and values\n\n keys = tuple(data.keys())\n values = list(data.values()) # Generating list of values- faster way\n elif index is not None:\n # fastpath for Series(data=None). Just use broadcasting a scalar\n # instead of reindexing.\n values = na_value_for_dtype(dtype)\n keys = index\n else:\n keys, values = (), []\n\n # Input is now list-like, so rely on \"standard\" construction:\n\n # TODO: passing np.float64 to not break anything yet. See GH-17261\n s = create_series_with_explicit_dtype(\n values, index=keys, dtype=dtype, dtype_if_empty=np.float64\n )\n\n # Now we just make sure the order is respected, if any\n if data and index is not None:\n s = s.reindex(index, copy=False)\n return s._mgr, s.index\n\n # ----------------------------------------------------------------------\n\n @property\n def _constructor(self) -> Type[Series]:\n return Series\n\n @property\n def _constructor_expanddim(self) -> Type[DataFrame]:\n from pandas.core.frame import DataFrame\n\n return DataFrame\n\n # types\n @property\n def _can_hold_na(self) -> bool:\n return self._mgr._can_hold_na\n\n _index = None\n\n def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None:\n \"\"\"\n Override generic, we want to set the _typ here.\n\n This is called from the cython code when we set the `index` attribute\n directly, e.g. 
`series.index = [1, 2, 3]`.\n \"\"\"\n if not fastpath:\n labels = ensure_index(labels)\n\n if labels._is_all_dates:\n deep_labels = labels\n if isinstance(labels, CategoricalIndex):\n deep_labels = labels.categories\n\n if not isinstance(\n deep_labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)\n ):\n try:\n labels = DatetimeIndex(labels)\n # need to set here because we changed the index\n if fastpath:\n self._mgr.set_axis(axis, labels)\n except (tslibs.OutOfBoundsDatetime, ValueError):\n # labels may exceeds datetime bounds,\n # or not be a DatetimeIndex\n pass\n\n object.__setattr__(self, \"_index\", labels)\n if not fastpath:\n # The ensure_index call above ensures we have an Index object\n self._mgr.set_axis(axis, labels)\n\n # ndarray compatibility\n @property\n def dtype(self) -> DtypeObj:\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n return self._mgr.dtype\n\n @property\n def dtypes(self) -> DtypeObj:\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n # DataFrame compatibility\n return self.dtype\n\n @property\n def name(self) -> Hashable:\n \"\"\"\n Return the name of the Series.\n\n The name of a Series becomes its index or column name if it is used\n to form a DataFrame. It is also used whenever displaying the Series\n using the interpreter.\n\n Returns\n -------\n label (hashable object)\n The name of the Series, also the column name if part of a DataFrame.\n\n See Also\n --------\n Series.rename : Sets the Series name when given a scalar input.\n Index.name : Corresponding Index property.\n\n Examples\n --------\n The Series name can be set initially when calling the constructor.\n\n >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')\n >>> s\n 0 1\n 1 2\n 2 3\n Name: Numbers, dtype: int64\n >>> s.name = \"Integers\"\n >>> s\n 0 1\n 1 2\n 2 3\n Name: Integers, dtype: int64\n\n The name of a Series within a DataFrame is its column name.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],\n ... columns=[\"Odd Numbers\", \"Even Numbers\"])\n >>> df\n Odd Numbers Even Numbers\n 0 1 2\n 1 3 4\n 2 5 6\n >>> df[\"Even Numbers\"].name\n 'Even Numbers'\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value: Hashable) -> None:\n validate_all_hashable(value, error_name=f\"{type(self).__name__}.name\")\n object.__setattr__(self, \"_name\", value)\n\n @property\n def values(self):\n \"\"\"\n Return Series as ndarray or ndarray-like depending on the dtype.\n\n .. warning::\n\n We recommend using :attr:`Series.array` or\n :meth:`Series.to_numpy`, depending on whether you need\n a reference to the underlying data or a NumPy array.\n\n Returns\n -------\n numpy.ndarray or ndarray-like\n\n See Also\n --------\n Series.array : Reference to the underlying data.\n Series.to_numpy : A NumPy array representing the underlying data.\n\n Examples\n --------\n >>> pd.Series([1, 2, 3]).values\n array([1, 2, 3])\n\n >>> pd.Series(list('aabc')).values\n array(['a', 'a', 'b', 'c'], dtype=object)\n\n >>> pd.Series(list('aabc')).astype('category').values\n ['a', 'a', 'b', 'c']\n Categories (3, object): ['a', 'b', 'c']\n\n Timezone aware datetime data is converted to UTC:\n\n >>> pd.Series(pd.date_range('20130101', periods=3,\n ... 
tz='US/Eastern')).values\n array(['2013-01-01T05:00:00.000000000',\n '2013-01-02T05:00:00.000000000',\n '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')\n \"\"\"\n return self._mgr.external_values()\n\n @property\n def _values(self):\n \"\"\"\n Return the internal repr of this data (defined by Block.interval_values).\n This are the values as stored in the Block (ndarray or ExtensionArray\n depending on the Block class), with datetime64[ns] and timedelta64[ns]\n wrapped in ExtensionArrays to match Index._values behavior.\n\n Differs from the public ``.values`` for certain data types, because of\n historical backwards compatibility of the public attribute (e.g. period\n returns object ndarray and datetimetz a datetime64[ns] ndarray for\n ``.values`` while it returns an ExtensionArray for ``._values`` in those\n cases).\n\n Differs from ``.array`` in that this still returns the numpy array if\n the Block is backed by a numpy array (except for datetime64 and\n timedelta64 dtypes), while ``.array`` ensures to always return an\n ExtensionArray.\n\n Overview:\n\n dtype | values | _values | array |\n ----------- | ------------- | ------------- | ------------- |\n Numeric | ndarray | ndarray | PandasArray |\n Category | Categorical | Categorical | Categorical |\n dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray |\n dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray |\n td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] |\n Period | ndarray[obj] | PeriodArray | PeriodArray |\n Nullable | EA | EA | EA |\n\n \"\"\"\n return self._mgr.internal_values()\n\n # error: Decorated property not supported\n @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc]\n @property\n def array(self) -> ExtensionArray:\n return self._mgr._block.array_values()\n\n # ops\n def ravel(self, order=\"C\"):\n \"\"\"\n Return the flattened underlying data as an ndarray.\n\n Returns\n -------\n numpy.ndarray or ndarray-like\n Flattened data of the Series.\n\n See Also\n --------\n numpy.ndarray.ravel : Return a flattened array.\n \"\"\"\n return self._values.ravel(order=order)\n\n def __len__(self) -> int:\n \"\"\"\n Return the length of the Series.\n \"\"\"\n return len(self._mgr)\n\n def view(self, dtype: Optional[Dtype] = None) -> Series:\n \"\"\"\n Create a new view of the Series.\n\n This function will return a new Series with a view of the same\n underlying values in memory, optionally reinterpreted with a new data\n type. The new data type must preserve the same size in bytes as to not\n cause index misalignment.\n\n Parameters\n ----------\n dtype : data type\n Data type object or one of their string representations.\n\n Returns\n -------\n Series\n A new Series object as a view of the same data in memory.\n\n See Also\n --------\n numpy.ndarray.view : Equivalent numpy function to create a new view of\n the same data in memory.\n\n Notes\n -----\n Series are instantiated with ``dtype=float64`` by default. 
While\n ``numpy.ndarray.view()`` will return a view with the same data type as\n the original array, ``Series.view()`` (without specified dtype)\n will try using ``float64`` and may fail if the original data type size\n in bytes is not the same.\n\n Examples\n --------\n >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')\n >>> s\n 0 -2\n 1 -1\n 2 0\n 3 1\n 4 2\n dtype: int8\n\n The 8 bit signed integer representation of `-1` is `0b11111111`, but\n the same bytes represent 255 if read as an 8 bit unsigned integer:\n\n >>> us = s.view('uint8')\n >>> us\n 0 254\n 1 255\n 2 0\n 3 1\n 4 2\n dtype: uint8\n\n The views share the same underlying values:\n\n >>> us[0] = 128\n >>> s\n 0 -128\n 1 -1\n 2 0\n 3 1\n 4 2\n dtype: int8\n \"\"\"\n return self._constructor(\n self._values.view(dtype), index=self.index\n ).__finalize__(self, method=\"view\")\n\n # ----------------------------------------------------------------------\n # NDArray Compat\n _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)\n\n def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:\n \"\"\"\n Return the values as a NumPy array.\n\n Users should not call this directly. Rather, it is invoked by\n :func:`numpy.array` and :func:`numpy.asarray`.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to use for the resulting NumPy array. By default,\n the dtype is inferred from the data.\n\n Returns\n -------\n numpy.ndarray\n The values in the series converted to a :class:`numpy.ndarray`\n with the specified `dtype`.\n\n See Also\n --------\n array : Create a new array from data.\n Series.array : Zero-copy view to the array backing the Series.\n Series.to_numpy : Series method for similar behavior.\n\n Examples\n --------\n >>> ser = pd.Series([1, 2, 3])\n >>> np.asarray(ser)\n array([1, 2, 3])\n\n For timezone-aware data, the timezones may be retained with\n ``dtype='object'``\n\n >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz=\"CET\"))\n >>> np.asarray(tzser, dtype=\"object\")\n array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),\n Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],\n dtype=object)\n\n Or the values may be localized to UTC and the tzinfo discarded with\n ``dtype='datetime64[ns]'``\n\n >>> np.asarray(tzser, dtype=\"datetime64[ns]\") # doctest: +ELLIPSIS\n array(['1999-12-31T23:00:00.000000000', ...],\n dtype='datetime64[ns]')\n \"\"\"\n return np.asarray(self.array, dtype)\n\n # ----------------------------------------------------------------------\n # Unary Methods\n\n # coercion\n __float__ = _coerce_method(float)\n __long__ = _coerce_method(int)\n __int__ = _coerce_method(int)\n\n # ----------------------------------------------------------------------\n\n # indexers\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return a list of the row axis labels.\n \"\"\"\n return [self.index]\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n @Appender(generic.NDFrame.take.__doc__)\n def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series:\n if is_copy is not None:\n warnings.warn(\n \"is_copy is deprecated and will be removed in a future version. 
\"\n \"'take' always returns a copy, so there is no need to specify this.\",\n FutureWarning,\n stacklevel=2,\n )\n nv.validate_take((), kwargs)\n\n indices = ensure_platform_int(indices)\n new_index = self.index.take(indices)\n new_values = self._values.take(indices)\n\n result = self._constructor(new_values, index=new_index, fastpath=True)\n return result.__finalize__(self, method=\"take\")\n\n def _take_with_is_copy(self, indices, axis=0):\n \"\"\"\n Internal version of the `take` method that sets the `_is_copy`\n attribute to keep track of the parent dataframe (using in indexing\n for the SettingWithCopyWarning). For Series this does the same\n as the public take (it never sets `_is_copy`).\n\n See the docstring of `take` for full explanation of the parameters.\n \"\"\"\n return self.take(indices=indices, axis=axis)\n\n def _ixs(self, i: int, axis: int = 0):\n \"\"\"\n Return the i-th value or values in the Series by location.\n\n Parameters\n ----------\n i : int\n\n Returns\n -------\n scalar (int) or Series (slice, sequence)\n \"\"\"\n return self._values[i]\n\n def _slice(self, slobj: slice, axis: int = 0) -> Series:\n # axis kwarg is retained for compat with NDFrame method\n # _slice is *always* positional\n return self._get_values(slobj)\n\n def __getitem__(self, key):\n key = com.apply_if_callable(key, self)\n\n if key is Ellipsis:\n return self\n\n key_is_scalar = is_scalar(key)\n if isinstance(key, (list, tuple)):\n key = unpack_1tuple(key)\n\n if is_integer(key) and self.index._should_fallback_to_positional():\n return self._values[key]\n\n elif key_is_scalar:\n return self._get_value(key)\n\n if is_hashable(key):\n # Otherwise index.get_value will raise InvalidIndexError\n try:\n # For labels that don't resolve as scalars like tuples and frozensets\n result = self._get_value(key)\n\n return result\n\n except (KeyError, TypeError):\n if isinstance(key, tuple) and isinstance(self.index, MultiIndex):\n # We still have the corner case where a tuple is a key\n # in the first level of our MultiIndex\n return self._get_values_tuple(key)\n\n if is_iterator(key):\n key = list(key)\n\n if com.is_bool_indexer(key):\n key = check_bool_indexer(self.index, key)\n key = np.asarray(key, dtype=bool)\n return self._get_values(key)\n\n return self._get_with(key)\n\n def _get_with(self, key):\n # other: fancy integer or otherwise\n if isinstance(key, slice):\n # _convert_slice_indexer to determine if this slice is positional\n # or label based, and if the latter, convert to positional\n slobj = self.index._convert_slice_indexer(key, kind=\"getitem\")\n return self._slice(slobj)\n elif isinstance(key, ABCDataFrame):\n raise TypeError(\n \"Indexing a Series with DataFrame is not \"\n \"supported, use the appropriate DataFrame column\"\n )\n elif isinstance(key, tuple):\n return self._get_values_tuple(key)\n\n elif not is_list_like(key):\n # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684\n return self.loc[key]\n\n if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)):\n key = list(key)\n\n if isinstance(key, Index):\n key_type = key.inferred_type\n else:\n key_type = lib.infer_dtype(key, skipna=False)\n\n # Note: The key_type == \"boolean\" case should be caught by the\n # com.is_bool_indexer check in __getitem__\n if key_type == \"integer\":\n # We need to decide whether to treat this as a positional indexer\n # (i.e. self.iloc) or label-based (i.e. 
self.loc)\n if not self.index._should_fallback_to_positional():\n return self.loc[key]\n else:\n return self.iloc[key]\n\n # handle the dup indexing case GH#4246\n return self.loc[key]\n\n def _get_values_tuple(self, key):\n # mpl hackaround\n if com.any_none(*key):\n result = self._get_values(key)\n deprecate_ndim_indexing(result, stacklevel=5)\n return result\n\n if not isinstance(self.index, MultiIndex):\n raise KeyError(\"key of type tuple not found and not a MultiIndex\")\n\n # If key is contained, would have returned by now\n indexer, new_index = self.index.get_loc_level(key)\n return self._constructor(self._values[indexer], index=new_index).__finalize__(\n self\n )\n\n def _get_values(self, indexer):\n try:\n return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self)\n except ValueError:\n # mpl compat if we look up e.g. ser[:, np.newaxis];\n # see tests.series.timeseries.test_mpl_compat_hack\n # the asarray is needed to avoid returning a 2D DatetimeArray\n return np.asarray(self._values[indexer])\n\n def _get_value(self, label, takeable: bool = False):\n \"\"\"\n Quickly retrieve single value at passed index label.\n\n Parameters\n ----------\n label : object\n takeable : interpret the index as indexers, default False\n\n Returns\n -------\n scalar value\n \"\"\"\n if takeable:\n return self._values[label]\n\n # Similar to Index.get_value, but we do not fall back to positional\n loc = self.index.get_loc(label)\n return self.index._get_values_for_loc(self, loc, label)\n\n def __setitem__(self, key, value):\n key = com.apply_if_callable(key, self)\n cacher_needs_updating = self._check_is_chained_assignment_possible()\n\n if key is Ellipsis:\n key = slice(None)\n\n try:\n self._set_with_engine(key, value)\n except (KeyError, ValueError):\n values = self._values\n if is_integer(key) and self.index.inferred_type != \"integer\":\n # positional setter\n values[key] = value\n else:\n # GH#12862 adding a new key to the Series\n self.loc[key] = value\n\n except TypeError as err:\n if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):\n raise KeyError(\n \"key of type tuple not found and not a MultiIndex\"\n ) from err\n\n if com.is_bool_indexer(key):\n key = check_bool_indexer(self.index, key)\n key = np.asarray(key, dtype=bool)\n try:\n self._where(~key, value, inplace=True)\n except InvalidIndexError:\n self.iloc[key] = value\n return\n\n else:\n self._set_with(key, value)\n\n if cacher_needs_updating:\n self._maybe_update_cacher()\n\n def _set_with_engine(self, key, value):\n # fails with AttributeError for IntervalIndex\n loc = self.index._engine.get_loc(key)\n validate_numeric_casting(self.dtype, value)\n self._values[loc] = value\n\n def _set_with(self, key, value):\n # other: fancy integer or otherwise\n if isinstance(key, slice):\n indexer = self.index._convert_slice_indexer(key, kind=\"getitem\")\n return self._set_values(indexer, value)\n\n else:\n assert not isinstance(key, tuple)\n\n if is_scalar(key):\n key = [key]\n\n if isinstance(key, Index):\n key_type = key.inferred_type\n key = key._values\n else:\n key_type = lib.infer_dtype(key, skipna=False)\n\n # Note: key_type == \"boolean\" should not occur because that\n # should be caught by the is_bool_indexer check in __setitem__\n if key_type == \"integer\":\n if not self.index._should_fallback_to_positional():\n self._set_labels(key, value)\n else:\n self._set_values(key, value)\n else:\n self.loc[key] = value\n\n def _set_labels(self, key, value):\n key = com.asarray_tuplesafe(key)\n indexer: 
np.ndarray = self.index.get_indexer(key)\n mask = indexer == -1\n if mask.any():\n raise KeyError(f\"{key[mask]} not in index\")\n self._set_values(indexer, value)\n\n def _set_values(self, key, value):\n if isinstance(key, Series):\n key = key._values\n self._mgr = self._mgr.setitem( # type: ignore[assignment]\n indexer=key, value=value\n )\n self._maybe_update_cacher()\n\n def _set_value(self, label, value, takeable: bool = False):\n \"\"\"\n Quickly set single value at passed label.\n\n If label is not contained, a new object is created with the label\n placed at the end of the result index.\n\n Parameters\n ----------\n label : object\n Partial indexing with MultiIndex not allowed.\n value : object\n Scalar value.\n takeable : interpret the index as indexers, default False\n \"\"\"\n try:\n if takeable:\n self._values[label] = value\n else:\n loc = self.index.get_loc(label)\n validate_numeric_casting(self.dtype, value)\n self._values[loc] = value\n except KeyError:\n\n # set using a non-recursive method\n self.loc[label] = value\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n @property\n def _is_mixed_type(self):\n return False\n\n def repeat(self, repeats, axis=None) -> Series:\n \"\"\"\n Repeat elements of a Series.\n\n Returns a new Series where each element of the current Series\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n Series.\n axis : None\n Must be ``None``. Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n Series\n Newly created Series with repeated elements.\n\n See Also\n --------\n Index.repeat : Equivalent function for Index.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'b', 'c'])\n >>> s\n 0 a\n 1 b\n 2 c\n dtype: object\n >>> s.repeat(2)\n 0 a\n 0 a\n 1 b\n 1 b\n 2 c\n 2 c\n dtype: object\n >>> s.repeat([1, 2, 3])\n 0 a\n 1 b\n 1 b\n 2 c\n 2 c\n 2 c\n dtype: object\n \"\"\"\n nv.validate_repeat((), {\"axis\": axis})\n new_index = self.index.repeat(repeats)\n new_values = self._values.repeat(repeats)\n return self._constructor(new_values, index=new_index).__finalize__(\n self, method=\"repeat\"\n )\n\n def reset_index(self, level=None, drop=False, name=None, inplace=False):\n \"\"\"\n Generate a new DataFrame or Series with the index reset.\n\n This is useful when the index needs to be treated as a column, or\n when the index is meaningless and needs to be reset to the default\n before another operation.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default optional\n For a Series with a MultiIndex, only remove the specified levels\n from the index. Removes all levels by default.\n drop : bool, default False\n Just reset the index, without inserting it as a column in\n the new DataFrame.\n name : object, optional\n The name to use for the column containing the original Series\n values. Uses ``self.name`` by default. 
This argument is ignored\n when `drop` is True.\n inplace : bool, default False\n Modify the Series in place (do not create a new object).\n\n Returns\n -------\n Series or DataFrame or None\n When `drop` is False (the default), a DataFrame is returned.\n The newly created columns will come first in the DataFrame,\n followed by the original Series values.\n When `drop` is True, a `Series` is returned.\n In either case, if ``inplace=True``, no value is returned.\n\n See Also\n --------\n DataFrame.reset_index: Analogous function for DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4], name='foo',\n ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))\n\n Generate a DataFrame with default index.\n\n >>> s.reset_index()\n idx foo\n 0 a 1\n 1 b 2\n 2 c 3\n 3 d 4\n\n To specify the name of the new column use `name`.\n\n >>> s.reset_index(name='values')\n idx values\n 0 a 1\n 1 b 2\n 2 c 3\n 3 d 4\n\n To generate a new Series with the default set `drop` to True.\n\n >>> s.reset_index(drop=True)\n 0 1\n 1 2\n 2 3\n 3 4\n Name: foo, dtype: int64\n\n To update the Series in place, without generating a new one\n set `inplace` to True. Note that it also requires ``drop=True``.\n\n >>> s.reset_index(inplace=True, drop=True)\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n Name: foo, dtype: int64\n\n The `level` parameter is interesting for Series with a multi-level\n index.\n\n >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),\n ... np.array(['one', 'two', 'one', 'two'])]\n >>> s2 = pd.Series(\n ... range(4), name='foo',\n ... index=pd.MultiIndex.from_arrays(arrays,\n ... names=['a', 'b']))\n\n To remove a specific level from the Index, use `level`.\n\n >>> s2.reset_index(level='a')\n a foo\n b\n one bar 0\n two bar 1\n one baz 2\n two baz 3\n\n If `level` is not set, all levels are removed from the Index.\n\n >>> s2.reset_index()\n a b foo\n 0 bar one 0\n 1 bar two 1\n 2 baz one 2\n 3 baz two 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if drop:\n new_index = ibase.default_index(len(self))\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < self.index.nlevels:\n new_index = self.index.droplevel(level)\n\n if inplace:\n self.index = new_index\n # set name if it was passed, otherwise, keep the previous name\n self.name = name or self.name\n else:\n return self._constructor(\n self._values.copy(), index=new_index\n ).__finalize__(self, method=\"reset_index\")\n elif inplace:\n raise TypeError(\n \"Cannot reset_index inplace on a Series to create a DataFrame\"\n )\n else:\n df = self.to_frame(name)\n return df.reset_index(level=level, drop=drop)\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __repr__(self) -> str:\n \"\"\"\n Return a string representation for a particular Series.\n \"\"\"\n buf = StringIO(\"\")\n width, height = get_terminal_size()\n max_rows = (\n height\n if get_option(\"display.max_rows\") == 0\n else get_option(\"display.max_rows\")\n )\n min_rows = (\n height\n if get_option(\"display.max_rows\") == 0\n else get_option(\"display.min_rows\")\n )\n show_dimensions = get_option(\"display.show_dimensions\")\n\n self.to_string(\n buf=buf,\n name=self.name,\n dtype=self.dtype,\n min_rows=min_rows,\n max_rows=max_rows,\n length=show_dimensions,\n )\n return buf.getvalue()\n\n def to_string(\n self,\n buf=None,\n na_rep=\"NaN\",\n float_format=None,\n header=True,\n index=True,\n 
length=False,\n dtype=False,\n name=False,\n max_rows=None,\n min_rows=None,\n ):\n \"\"\"\n Render a string representation of the Series.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n na_rep : str, optional\n String representation of NaN to use, default 'NaN'.\n float_format : one-parameter function, optional\n Formatter function to apply to columns' elements if they are\n floats, default None.\n header : bool, default True\n Add the Series header (index name).\n index : bool, optional\n Add index (row) labels, default True.\n length : bool, default False\n Add the Series length.\n dtype : bool, default False\n Add the Series dtype.\n name : bool, default False\n Add the Series name if not None.\n max_rows : int, optional\n Maximum number of rows to show before truncating. If None, show\n all.\n min_rows : int, optional\n The number of rows to display in a truncated repr (when number\n of rows is above `max_rows`).\n\n Returns\n -------\n str or None\n String representation of Series if ``buf=None``, otherwise None.\n \"\"\"\n formatter = fmt.SeriesFormatter(\n self,\n name=name,\n length=length,\n header=header,\n index=index,\n dtype=dtype,\n na_rep=na_rep,\n float_format=float_format,\n min_rows=min_rows,\n max_rows=max_rows,\n )\n result = formatter.to_string()\n\n # catch contract violations\n if not isinstance(result, str):\n raise AssertionError(\n \"result must be of type str, type \"\n f\"of result is {repr(type(result).__name__)}\"\n )\n\n if buf is None:\n return result\n else:\n try:\n buf.write(result)\n except AttributeError:\n with open(buf, \"w\") as f:\n f.write(result)\n\n @doc(\n klass=_shared_doc_kwargs[\"klass\"],\n storage_options=generic._shared_docs[\"storage_options\"],\n examples=dedent(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([\"elk\", \"pig\", \"dog\", \"quetzal\"], name=\"animal\")\n >>> print(s.to_markdown())\n | | animal |\n |---:|:---------|\n | 0 | elk |\n | 1 | pig |\n | 2 | dog |\n | 3 | quetzal |\n \"\"\"\n ),\n )\n def to_markdown(\n self,\n buf: Optional[IO[str]] = None,\n mode: str = \"wt\",\n index: bool = True,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[str]:\n \"\"\"\n Print {klass} in Markdown-friendly format.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n mode : str, optional\n Mode in which file is opened, \"wt\" by default.\n index : bool, optional, default True\n Add index (row) labels.\n\n .. versionadded:: 1.1.0\n {storage_options}\n\n .. 
versionadded:: 1.2.0\n\n **kwargs\n These parameters will be passed to `tabulate \\\n <https://pypi.org/project/tabulate>`_.\n\n Returns\n -------\n str\n {klass} in Markdown-friendly format.\n\n Notes\n -----\n Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.\n\n Examples\n --------\n >>> s = pd.Series([\"elk\", \"pig\", \"dog\", \"quetzal\"], name=\"animal\")\n >>> print(s.to_markdown())\n | | animal |\n |---:|:---------|\n | 0 | elk |\n | 1 | pig |\n | 2 | dog |\n | 3 | quetzal |\n\n Output markdown with a tabulate option.\n\n >>> print(s.to_markdown(tablefmt=\"grid\"))\n +----+----------+\n | | animal |\n +====+==========+\n | 0 | elk |\n +----+----------+\n | 1 | pig |\n +----+----------+\n | 2 | dog |\n +----+----------+\n | 3 | quetzal |\n +----+----------+\n \"\"\"\n return self.to_frame().to_markdown(\n buf, mode, index, storage_options=storage_options, **kwargs\n )\n\n # ----------------------------------------------------------------------\n\n def items(self) -> Iterable[Tuple[Hashable, Any]]:\n \"\"\"\n Lazily iterate over (index, value) tuples.\n\n This method returns an iterable tuple (index, value). This is\n convenient if you want to create a lazy iterator.\n\n Returns\n -------\n iterable\n Iterable of tuples containing the (index, value) pairs from a\n Series.\n\n See Also\n --------\n DataFrame.items : Iterate over (column name, Series) pairs.\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.\n\n Examples\n --------\n >>> s = pd.Series(['A', 'B', 'C'])\n >>> for index, value in s.items():\n ... print(f\"Index : {index}, Value : {value}\")\n Index : 0, Value : A\n Index : 1, Value : B\n Index : 2, Value : C\n \"\"\"\n return zip(iter(self.index), iter(self))\n\n @Appender(items.__doc__)\n def iteritems(self) -> Iterable[Tuple[Hashable, Any]]:\n return self.items()\n\n # ----------------------------------------------------------------------\n # Misc public methods\n\n def keys(self) -> Index:\n \"\"\"\n Return alias for index.\n\n Returns\n -------\n Index\n Index of the Series.\n \"\"\"\n return self.index\n\n def to_dict(self, into=dict):\n \"\"\"\n Convert Series to {label -> value} dict or dict-like object.\n\n Parameters\n ----------\n into : class, default dict\n The collections.abc.Mapping subclass to use as the return\n object. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n collections.abc.Mapping\n Key-value representation of Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_dict()\n {0: 1, 1: 2, 2: 3, 3: 4}\n >>> from collections import OrderedDict, defaultdict\n >>> s.to_dict(OrderedDict)\n OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])\n >>> dd = defaultdict(list)\n >>> s.to_dict(dd)\n defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})\n \"\"\"\n # GH16122\n into_c = com.standardize_mapping(into)\n return into_c(self.items())\n\n def to_frame(self, name=None) -> DataFrame:\n \"\"\"\n Convert Series to DataFrame.\n\n Parameters\n ----------\n name : object, default None\n The passed name should substitute for the series name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame representation of Series.\n\n Examples\n --------\n >>> s = pd.Series([\"a\", \"b\", \"c\"],\n ... 
name=\"vals\")\n >>> s.to_frame()\n vals\n 0 a\n 1 b\n 2 c\n \"\"\"\n if name is None:\n df = self._constructor_expanddim(self)\n else:\n df = self._constructor_expanddim({name: self})\n\n return df\n\n def _set_name(self, name, inplace=False) -> Series:\n \"\"\"\n Set the Series name.\n\n Parameters\n ----------\n name : str\n inplace : bool\n Whether to modify `self` directly or return a copy.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n ser = self if inplace else self.copy()\n ser.name = name\n return ser\n\n @Appender(\n \"\"\"\nExamples\n--------\n>>> ser = pd.Series([390., 350., 30., 20.],\n... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name=\"Max Speed\")\n>>> ser\nFalcon 390.0\nFalcon 350.0\nParrot 30.0\nParrot 20.0\nName: Max Speed, dtype: float64\n>>> ser.groupby([\"a\", \"b\", \"a\", \"b\"]).mean()\na 210.0\nb 185.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(level=0).mean()\nFalcon 370.0\nParrot 25.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(ser > 100).mean()\nMax Speed\nFalse 25.0\nTrue 370.0\nName: Max Speed, dtype: float64\n\n**Grouping by Indexes**\n\nWe can groupby different levels of a hierarchical index\nusing the `level` parameter:\n\n>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n... ['Captive', 'Wild', 'Captive', 'Wild']]\n>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n>>> ser = pd.Series([390., 350., 30., 20.], index=index, name=\"Max Speed\")\n>>> ser\nAnimal Type\nFalcon Captive 390.0\n Wild 350.0\nParrot Captive 30.0\n Wild 20.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(level=0).mean()\nAnimal\nFalcon 370.0\nParrot 25.0\nName: Max Speed, dtype: float64\n>>> ser.groupby(level=\"Type\").mean()\nType\nCaptive 210.0\nWild 185.0\nName: Max Speed, dtype: float64\n\nWe can also choose to include `NA` in group keys or not by defining\n`dropna` parameter, the default setting is `True`:\n\n>>> ser = pd.Series([1, 2, 3, 3], index=[\"a\", 'a', 'b', np.nan])\n>>> ser.groupby(level=0).sum()\na 3\nb 3\ndtype: int64\n\n>>> ser.groupby(level=0, dropna=False).sum()\na 3\nb 3\nNaN 3\ndtype: int64\n\n>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']\n>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name=\"Max Speed\")\n>>> ser.groupby([\"a\", \"b\", \"a\", np.nan]).mean()\na 210.0\nb 350.0\nName: Max Speed, dtype: float64\n\n>>> ser.groupby([\"a\", \"b\", \"a\", np.nan], dropna=False).mean()\na 210.0\nb 350.0\nNaN 20.0\nName: Max Speed, dtype: float64\n\"\"\"\n )\n @Appender(generic._shared_docs[\"groupby\"] % _shared_doc_kwargs)\n def groupby(\n self,\n by=None,\n axis=0,\n level=None,\n as_index: bool = True,\n sort: bool = True,\n group_keys: bool = True,\n squeeze: bool = no_default,\n observed: bool = False,\n dropna: bool = True,\n ) -> SeriesGroupBy:\n from pandas.core.groupby.generic import SeriesGroupBy\n\n if squeeze is not no_default:\n warnings.warn(\n (\n \"The `squeeze` parameter is deprecated and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n else:\n squeeze = False\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n\n return SeriesGroupBy(\n obj=self,\n keys=by,\n axis=axis,\n level=level,\n as_index=as_index,\n sort=sort,\n group_keys=group_keys,\n squeeze=squeeze,\n observed=observed,\n dropna=dropna,\n )\n\n # ----------------------------------------------------------------------\n # Statistics, overridden ndarray methods\n\n # TODO: 
integrate bottleneck\n\n def count(self, level=None):\n \"\"\"\n Return number of non-NA/null observations in the Series.\n\n Parameters\n ----------\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a smaller Series.\n\n Returns\n -------\n int or Series (if level specified)\n Number of non-null values in the Series.\n\n See Also\n --------\n DataFrame.count : Count non-NA cells for each column or row.\n\n Examples\n --------\n >>> s = pd.Series([0.0, 1.0, np.nan])\n >>> s.count()\n 2\n \"\"\"\n if level is None:\n return notna(self.array).sum()\n elif not isinstance(self.index, MultiIndex):\n raise ValueError(\"Series.count level is only valid with a MultiIndex\")\n\n index = self.index\n assert isinstance(index, MultiIndex) # for mypy\n\n if isinstance(level, str):\n level = index._get_level_number(level)\n\n lev = index.levels[level]\n level_codes = np.array(index.codes[level], subok=False, copy=True)\n\n mask = level_codes == -1\n if mask.any():\n level_codes[mask] = cnt = len(lev)\n lev = lev.insert(cnt, lev._na_value)\n\n obs = level_codes[notna(self._values)]\n out = np.bincount(obs, minlength=len(lev) or None)\n return self._constructor(out, index=lev, dtype=\"int64\").__finalize__(\n self, method=\"count\"\n )\n\n def mode(self, dropna=True) -> Series:\n \"\"\"\n Return the mode(s) of the Series.\n\n The mode is the value that appears most often. There can be multiple modes.\n\n Always returns Series even if only one value is returned.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't consider counts of NaN/NaT.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series\n Modes of the Series in sorted order.\n \"\"\"\n # TODO: Add option for bins like value_counts()\n return algorithms.mode(self, dropna=dropna)\n\n def unique(self):\n \"\"\"\n Return unique values of Series object.\n\n Uniques are returned in order of appearance. Hash table-based unique,\n therefore does NOT sort.\n\n Returns\n -------\n ndarray or ExtensionArray\n The unique values returned as a NumPy array. See Notes.\n\n See Also\n --------\n unique : Top-level unique method for any 1-d array-like object.\n Index.unique : Return Index with unique values from an Index object.\n\n Notes\n -----\n Returns the unique values as a NumPy array. In case of an\n extension-array backed Series, a new\n :class:`~api.extensions.ExtensionArray` of that type with just\n the unique values is returned. This includes\n\n * Categorical\n * Period\n * Datetime with Timezone\n * Interval\n * Sparse\n * IntegerNA\n\n See Examples section.\n\n Examples\n --------\n >>> pd.Series([2, 1, 3, 3], name='A').unique()\n array([2, 1, 3])\n\n >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()\n array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')\n\n >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')\n ... for _ in range(3)]).unique()\n <DatetimeArray>\n ['2016-01-01 00:00:00-05:00']\n Length: 1, dtype: datetime64[ns, US/Eastern]\n\n An unordered Categorical will return categories in the order of\n appearance.\n\n >>> pd.Series(pd.Categorical(list('baabc'))).unique()\n ['b', 'a', 'c']\n Categories (3, object): ['b', 'a', 'c']\n\n An ordered Categorical preserves the category ordering.\n\n >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),\n ... 
ordered=True)).unique()\n ['b', 'a', 'c']\n Categories (3, object): ['a' < 'b' < 'c']\n \"\"\"\n return super().unique()\n\n def drop_duplicates(self, keep=\"first\", inplace=False) -> Optional[Series]:\n \"\"\"\n Return Series with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n Method to handle dropping duplicates:\n\n - 'first' : Drop duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n\n inplace : bool, default ``False``\n If ``True``, performs operation inplace and returns None.\n\n Returns\n -------\n Series or None\n Series with duplicates dropped or None if ``inplace=True``.\n\n See Also\n --------\n Index.drop_duplicates : Equivalent method on Index.\n DataFrame.drop_duplicates : Equivalent method on DataFrame.\n Series.duplicated : Related method on Series, indicating duplicate\n Series values.\n\n Examples\n --------\n Generate a Series with duplicated entries.\n\n >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],\n ... name='animal')\n >>> s\n 0 lama\n 1 cow\n 2 lama\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n With the 'keep' parameter, the selection behaviour of duplicated values\n can be changed. The value 'first' keeps the first occurrence for each\n set of duplicated entries. The default value of keep is 'first'.\n\n >>> s.drop_duplicates()\n 0 lama\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object\n\n The value 'last' for parameter 'keep' keeps the last occurrence for\n each set of duplicated entries.\n\n >>> s.drop_duplicates(keep='last')\n 1 cow\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n The value ``False`` for parameter 'keep' discards all sets of\n duplicated entries. Setting the value of 'inplace' to ``True`` performs\n the operation inplace and returns ``None``.\n\n >>> s.drop_duplicates(keep=False, inplace=True)\n >>> s\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n result = super().drop_duplicates(keep=keep)\n if inplace:\n self._update_inplace(result)\n return None\n else:\n return result\n\n def duplicated(self, keep=\"first\") -> Series:\n \"\"\"\n Indicate duplicate Series values.\n\n Duplicated values are indicated as ``True`` values in the resulting\n Series. 
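Two methods defined just above, `count(level=...)` and `mode`, have no examples for these paths in their docstrings; a small sketch on illustrative data, following the implementations shown:

import numpy as np
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [("a", 1), ("a", 2), ("b", 1)], names=["outer", "inner"]
)
s = pd.Series([1.0, np.nan, 3.0], index=idx)

# count(level=...) counts non-NA values per group of the requested level.
s.count(level="outer")
# outer
# a    1
# b    1
# dtype: int64

# mode() returns every most-frequent value, in sorted order, as a Series.
pd.Series([2, 2, 3, 3, 4]).mode()
# 0    2
# 1    3
# dtype: int64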
Either all duplicates, all except the first or all except the\n last occurrence of duplicates can be indicated.\n\n Parameters\n ----------\n keep : {'first', 'last', False}, default 'first'\n Method to handle dropping duplicates:\n\n - 'first' : Mark duplicates as ``True`` except for the first\n occurrence.\n - 'last' : Mark duplicates as ``True`` except for the last\n occurrence.\n - ``False`` : Mark all duplicates as ``True``.\n\n Returns\n -------\n Series\n Series indicating whether each value has occurred in the\n preceding values.\n\n See Also\n --------\n Index.duplicated : Equivalent method on pandas.Index.\n DataFrame.duplicated : Equivalent method on pandas.DataFrame.\n Series.drop_duplicates : Remove duplicate values from Series.\n\n Examples\n --------\n By default, for each set of duplicated values, the first occurrence is\n set on False and all others on True:\n\n >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])\n >>> animals.duplicated()\n 0 False\n 1 False\n 2 True\n 3 False\n 4 True\n dtype: bool\n\n which is equivalent to\n\n >>> animals.duplicated(keep='first')\n 0 False\n 1 False\n 2 True\n 3 False\n 4 True\n dtype: bool\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True:\n\n >>> animals.duplicated(keep='last')\n 0 True\n 1 False\n 2 True\n 3 False\n 4 False\n dtype: bool\n\n By setting keep on ``False``, all duplicates are True:\n\n >>> animals.duplicated(keep=False)\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n dtype: bool\n \"\"\"\n res = base.IndexOpsMixin.duplicated(self, keep=keep)\n result = self._constructor(res, index=self.index)\n return result.__finalize__(self, method=\"duplicated\")\n\n def idxmin(self, axis=0, skipna=True, *args, **kwargs):\n \"\"\"\n Return the row label of the minimum value.\n\n If multiple values equal the minimum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n axis : int, default 0\n For compatibility with DataFrame.idxmin. Redundant for application\n on Series.\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Index\n Label of the minimum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n numpy.argmin : Return indices of the minimum values\n along the given axis.\n DataFrame.idxmin : Return index of first occurrence of minimum\n over requested axis.\n Series.idxmax : Return index *label* of the first occurrence\n of maximum of values.\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmin``. This method\n returns the label of the minimum, while ``ndarray.argmin`` returns\n the position. To get the position, use ``series.values.argmin()``.\n\n Examples\n --------\n >>> s = pd.Series(data=[1, None, 4, 1],\n ... 
index=['A', 'B', 'C', 'D'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 1.0\n dtype: float64\n\n >>> s.idxmin()\n 'A'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmin(skipna=False)\n nan\n \"\"\"\n i = self.argmin(axis, skipna, *args, **kwargs)\n if i == -1:\n return np.nan\n return self.index[i]\n\n def idxmax(self, axis=0, skipna=True, *args, **kwargs):\n \"\"\"\n Return the row label of the maximum value.\n\n If multiple values equal the maximum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n axis : int, default 0\n For compatibility with DataFrame.idxmax. Redundant for application\n on Series.\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Index\n Label of the maximum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n numpy.argmax : Return indices of the maximum values\n along the given axis.\n DataFrame.idxmax : Return index of first occurrence of maximum\n over requested axis.\n Series.idxmin : Return index *label* of the first occurrence\n of minimum of values.\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmax``. This method\n returns the label of the maximum, while ``ndarray.argmax`` returns\n the position. To get the position, use ``series.values.argmax()``.\n\n Examples\n --------\n >>> s = pd.Series(data=[1, None, 4, 3, 4],\n ... index=['A', 'B', 'C', 'D', 'E'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 3.0\n E 4.0\n dtype: float64\n\n >>> s.idxmax()\n 'C'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmax(skipna=False)\n nan\n \"\"\"\n i = self.argmax(axis, skipna, *args, **kwargs)\n if i == -1:\n return np.nan\n return self.index[i]\n\n def round(self, decimals=0, *args, **kwargs) -> Series:\n \"\"\"\n Round each value in a Series to the given number of decimals.\n\n Parameters\n ----------\n decimals : int, default 0\n Number of decimal places to round to. 
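The Notes for `idxmin`/`idxmax` above distinguish the returned index *label* from the *position* that `ndarray.argmin`/`argmax` would give; a short sketch of that distinction on illustrative data:

import pandas as pd

s = pd.Series([10, 3, 7], index=["x", "y", "z"])

s.idxmin()           # 'y'  -- label of the minimum
s.values.argmin()    # 1    -- position of the minimum
s.idxmax()           # 'x'
s.values.argmax()    # 0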
If decimals is negative,\n it specifies the number of positions to the left of the decimal point.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Series\n Rounded values of the Series.\n\n See Also\n --------\n numpy.around : Round values of an np.array.\n DataFrame.round : Round values of a DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([0.1, 1.3, 2.7])\n >>> s.round()\n 0 0.0\n 1 1.0\n 2 3.0\n dtype: float64\n \"\"\"\n nv.validate_round(args, kwargs)\n result = self._values.round(decimals)\n result = self._constructor(result, index=self.index).__finalize__(\n self, method=\"round\"\n )\n\n return result\n\n def quantile(self, q=0.5, interpolation=\"linear\"):\n \"\"\"\n Return value at the given quantile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n The quantile(s) to compute, which can lie in range: 0 <= q <= 1.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n\n Returns\n -------\n float or Series\n If ``q`` is an array, a Series will be returned where the\n index is ``q`` and the values are the quantiles, otherwise\n a float will be returned.\n\n See Also\n --------\n core.window.Rolling.quantile : Calculate the rolling quantile.\n numpy.percentile : Returns the q-th percentile(s) of the array elements.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.quantile(.5)\n 2.5\n >>> s.quantile([.25, .5, .75])\n 0.25 1.75\n 0.50 2.50\n 0.75 3.25\n dtype: float64\n \"\"\"\n validate_percentile(q)\n\n # We dispatch to DataFrame so that core.internals only has to worry\n # about 2D cases.\n df = self.to_frame()\n\n result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)\n if result.ndim == 2:\n result = result.iloc[:, 0]\n\n if is_list_like(q):\n result.name = self.name\n return self._constructor(result, index=Float64Index(q), name=self.name)\n else:\n # scalar\n return result.iloc[0]\n\n def corr(self, other, method=\"pearson\", min_periods=None) -> float:\n \"\"\"\n Compute correlation with `other` Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n Series with which to compute the correlation.\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method used to compute correlation:\n\n - pearson : Standard correlation coefficient\n - kendall : Kendall Tau correlation coefficient\n - spearman : Spearman rank correlation\n - callable: Callable with input two 1d ndarrays and returning a float.\n\n .. versionadded:: 0.24.0\n Note that the returned matrix from corr will have 1 along the\n diagonals and will be symmetric regardless of the callable's\n behavior.\n min_periods : int, optional\n Minimum number of observations needed to have a valid result.\n\n Returns\n -------\n float\n Correlation with other.\n\n See Also\n --------\n DataFrame.corr : Compute pairwise correlation between columns.\n DataFrame.corrwith : Compute pairwise correlation with another\n DataFrame or Series.\n\n Examples\n --------\n >>> def histogram_intersection(a, b):\n ... 
v = np.minimum(a, b).sum().round(decimals=1)\n ... return v\n >>> s1 = pd.Series([.2, .0, .6, .2])\n >>> s2 = pd.Series([.3, .6, .0, .1])\n >>> s1.corr(s2, method=histogram_intersection)\n 0.3\n \"\"\"\n this, other = self.align(other, join=\"inner\", copy=False)\n if len(this) == 0:\n return np.nan\n\n if method in [\"pearson\", \"spearman\", \"kendall\"] or callable(method):\n return nanops.nancorr(\n this.values, other.values, method=method, min_periods=min_periods\n )\n\n raise ValueError(\n \"method must be either 'pearson', \"\n \"'spearman', 'kendall', or a callable, \"\n f\"'{method}' was supplied\"\n )\n\n def cov(\n self,\n other: Series,\n min_periods: Optional[int] = None,\n ddof: Optional[int] = 1,\n ) -> float:\n \"\"\"\n Compute covariance with Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n Series with which to compute the covariance.\n min_periods : int, optional\n Minimum number of observations needed to have a valid result.\n ddof : int, default 1\n Delta degrees of freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n float\n Covariance between Series and other normalized by N-1\n (unbiased estimator).\n\n See Also\n --------\n DataFrame.cov : Compute pairwise covariance of columns.\n\n Examples\n --------\n >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])\n >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])\n >>> s1.cov(s2)\n -0.01685762652715874\n \"\"\"\n this, other = self.align(other, join=\"inner\", copy=False)\n if len(this) == 0:\n return np.nan\n return nanops.nancov(\n this.values, other.values, min_periods=min_periods, ddof=ddof\n )\n\n @doc(\n klass=\"Series\",\n extra_params=\"\",\n other_klass=\"DataFrame\",\n examples=dedent(\n \"\"\"\n Difference with previous row\n\n >>> s = pd.Series([1, 1, 2, 3, 5, 8])\n >>> s.diff()\n 0 NaN\n 1 0.0\n 2 1.0\n 3 1.0\n 4 2.0\n 5 3.0\n dtype: float64\n\n Difference with 3rd previous row\n\n >>> s.diff(periods=3)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 2.0\n 4 4.0\n 5 6.0\n dtype: float64\n\n Difference with following row\n\n >>> s.diff(periods=-1)\n 0 0.0\n 1 -1.0\n 2 -1.0\n 3 -2.0\n 4 -3.0\n 5 NaN\n dtype: float64\n\n Overflow in input dtype\n\n >>> s = pd.Series([1, 0], dtype=np.uint8)\n >>> s.diff()\n 0 NaN\n 1 255.0\n dtype: float64\"\"\"\n ),\n )\n def diff(self, periods: int = 1) -> Series:\n \"\"\"\n First discrete difference of element.\n\n Calculates the difference of a {klass} element compared with another\n element in the {klass} (default is element in previous row).\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculating difference, accepts negative\n values.\n {extra_params}\n Returns\n -------\n {klass}\n First differences of the Series.\n\n See Also\n --------\n {klass}.pct_change: Percent change over given number of periods.\n {klass}.shift: Shift index by desired number of periods with an\n optional time freq.\n {other_klass}.diff: First discrete difference of object.\n\n Notes\n -----\n For boolean dtypes, this uses :meth:`operator.xor` rather than\n :meth:`operator.sub`.\n The result is calculated according to current dtype in {klass},\n however dtype of the result is always float64.\n\n Examples\n --------\n {examples}\n \"\"\"\n result = algorithms.diff(self.array, periods)\n return self._constructor(result, index=self.index).__finalize__(\n self, method=\"diff\"\n )\n\n def autocorr(self, lag=1) -> float:\n \"\"\"\n Compute the 
lag-N autocorrelation.\n\n This method computes the Pearson correlation between\n the Series and its shifted self.\n\n Parameters\n ----------\n lag : int, default 1\n Number of lags to apply before performing autocorrelation.\n\n Returns\n -------\n float\n The Pearson correlation between self and self.shift(lag).\n\n See Also\n --------\n Series.corr : Compute the correlation between two Series.\n Series.shift : Shift index by desired number of periods.\n DataFrame.corr : Compute pairwise correlation of columns.\n DataFrame.corrwith : Compute pairwise correlation between rows or\n columns of two DataFrame objects.\n\n Notes\n -----\n If the Pearson correlation is not well defined return 'NaN'.\n\n Examples\n --------\n >>> s = pd.Series([0.25, 0.5, 0.2, -0.05])\n >>> s.autocorr() # doctest: +ELLIPSIS\n 0.10355...\n >>> s.autocorr(lag=2) # doctest: +ELLIPSIS\n -0.99999...\n\n If the Pearson correlation is not well defined, then 'NaN' is returned.\n\n >>> s = pd.Series([1, 0, 0, 0])\n >>> s.autocorr()\n nan\n \"\"\"\n return self.corr(self.shift(lag))\n\n def dot(self, other):\n \"\"\"\n Compute the dot product between the Series and the columns of other.\n\n This method computes the dot product between the Series and another\n one, or the Series and each columns of a DataFrame, or the Series and\n each columns of an array.\n\n It can also be called using `self @ other` in Python >= 3.5.\n\n Parameters\n ----------\n other : Series, DataFrame or array-like\n The other object to compute the dot product with its columns.\n\n Returns\n -------\n scalar, Series or numpy.ndarray\n Return the dot product of the Series and other if other is a\n Series, the Series of the dot product of Series and each rows of\n other if other is a DataFrame or a numpy.ndarray between the Series\n and each columns of the numpy array.\n\n See Also\n --------\n DataFrame.dot: Compute the matrix product with the DataFrame.\n Series.mul: Multiplication of series and other, element-wise.\n\n Notes\n -----\n The Series and other has to share the same index if other is a Series\n or a DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([0, 1, 2, 3])\n >>> other = pd.Series([-1, 2, -3, 4])\n >>> s.dot(other)\n 8\n >>> s @ other\n 8\n >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])\n >>> s.dot(df)\n 0 24\n 1 14\n dtype: int64\n >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])\n >>> s.dot(arr)\n array([24, 14])\n \"\"\"\n if isinstance(other, (Series, ABCDataFrame)):\n common = self.index.union(other.index)\n if len(common) > len(self.index) or len(common) > len(other.index):\n raise ValueError(\"matrices are not aligned\")\n\n left = self.reindex(index=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right.values\n else:\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[0] != rvals.shape[0]:\n raise Exception(\n f\"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}\"\n )\n\n if isinstance(other, ABCDataFrame):\n return self._constructor(\n np.dot(lvals, rvals), index=other.columns\n ).__finalize__(self, method=\"dot\")\n elif isinstance(other, Series):\n return np.dot(lvals, rvals)\n elif isinstance(rvals, np.ndarray):\n return np.dot(lvals, rvals)\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def __matmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def __rmatmul__(self, other):\n \"\"\"\n Matrix 
multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(np.transpose(other))\n\n @doc(base.IndexOpsMixin.searchsorted, klass=\"Series\")\n def searchsorted(self, value, side=\"left\", sorter=None):\n return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)\n\n # -------------------------------------------------------------------\n # Combination\n\n def append(self, to_append, ignore_index=False, verify_integrity=False):\n \"\"\"\n Concatenate two or more Series.\n\n Parameters\n ----------\n to_append : Series or list/tuple of Series\n Series to append with self.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n verify_integrity : bool, default False\n If True, raise Exception on creating index with duplicates.\n\n Returns\n -------\n Series\n Concatenated Series.\n\n See Also\n --------\n concat : General function to concatenate DataFrame or Series objects.\n\n Notes\n -----\n Iteratively appending to a Series can be more computationally intensive\n than a single concatenate. A better solution is to append values to a\n list and then concatenate the list with the original Series all at\n once.\n\n Examples\n --------\n >>> s1 = pd.Series([1, 2, 3])\n >>> s2 = pd.Series([4, 5, 6])\n >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])\n >>> s1.append(s2)\n 0 1\n 1 2\n 2 3\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s1.append(s3)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n\n With `ignore_index` set to True:\n\n >>> s1.append(s2, ignore_index=True)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n\n With `verify_integrity` set to True:\n\n >>> s1.append(s2, verify_integrity=True)\n Traceback (most recent call last):\n ...\n ValueError: Indexes have overlapping values: [0, 1, 2]\n \"\"\"\n from pandas.core.reshape.concat import concat\n\n if isinstance(to_append, (list, tuple)):\n to_concat = [self]\n to_concat.extend(to_append)\n else:\n to_concat = [self, to_append]\n if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]):\n msg = \"to_append should be a Series or list/tuple of Series, got DataFrame\"\n raise TypeError(msg)\n return concat(\n to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity\n )\n\n def _binop(self, other, func, level=None, fill_value=None):\n \"\"\"\n Perform generic binary operation with optional fill value.\n\n Parameters\n ----------\n other : Series\n func : binary operator\n fill_value : float or object\n Value to substitute for NA/null values. 
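`_binop` above documents outer alignment and `fill_value` but has no example. The same behaviour is reachable through the public flex arithmetic (`Series.add` is used here; it is defined outside this section and is assumed to route Series operands through this alignment):

import pandas as pd

s1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
s2 = pd.Series([10, 20], index=["b", "d"])

# Plain `+` aligns on the union of the two indexes; labels present on only
# one side produce NaN.
s1 + s2
# a     NaN
# b    22.0
# c     NaN
# d     NaN
# dtype: float64

# fill_value substitutes for the missing side before the operator is applied;
# labels missing from both sides would still be NaN.
s1.add(s2, fill_value=0)
# a     1.0
# b    22.0
# c     3.0
# d    20.0
# dtype: float64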
If both Series are NA in a\n location, the result will be NA regardless of the passed fill value.\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n\n Returns\n -------\n Series\n \"\"\"\n if not isinstance(other, Series):\n raise AssertionError(\"Other operand must be Series\")\n\n this = self\n\n if not self.index.equals(other.index):\n this, other = self.align(other, level=level, join=\"outer\", copy=False)\n\n this_vals, other_vals = ops.fill_binop(this.values, other.values, fill_value)\n\n with np.errstate(all=\"ignore\"):\n result = func(this_vals, other_vals)\n\n name = ops.get_op_result_name(self, other)\n return this._construct_result(result, name)\n\n def _construct_result(\n self, result: Union[ArrayLike, Tuple[ArrayLike, ArrayLike]], name: Hashable\n ) -> Union[Series, Tuple[Series, Series]]:\n \"\"\"\n Construct an appropriately-labelled Series from the result of an op.\n\n Parameters\n ----------\n result : ndarray or ExtensionArray\n name : Label\n\n Returns\n -------\n Series\n In the case of __divmod__ or __rdivmod__, a 2-tuple of Series.\n \"\"\"\n if isinstance(result, tuple):\n # produced by divmod or rdivmod\n\n res1 = self._construct_result(result[0], name=name)\n res2 = self._construct_result(result[1], name=name)\n\n # GH#33427 assertions to keep mypy happy\n assert isinstance(res1, Series)\n assert isinstance(res2, Series)\n return (res1, res2)\n\n # We do not pass dtype to ensure that the Series constructor\n # does inference in the case where `result` has object-dtype.\n out = self._constructor(result, index=self.index)\n out = out.__finalize__(self)\n\n # Set the result's name after __finalize__ is called because __finalize__\n # would set it back to self.name\n out.name = name\n return out\n\n @doc(\n generic._shared_docs[\"compare\"],\n \"\"\"\nReturns\n-------\nSeries or DataFrame\n If axis is 0 or 'index' the result will be a Series.\n The resulting index will be a MultiIndex with 'self' and 'other'\n stacked alternately at the inner level.\n\n If axis is 1 or 'columns' the result will be a DataFrame.\n It will have two columns namely 'self' and 'other'.\n\nSee Also\n--------\nDataFrame.compare : Compare with another DataFrame and show differences.\n\nNotes\n-----\nMatching NaNs will not appear as a difference.\n\nExamples\n--------\n>>> s1 = pd.Series([\"a\", \"b\", \"c\", \"d\", \"e\"])\n>>> s2 = pd.Series([\"a\", \"a\", \"c\", \"b\", \"e\"])\n\nAlign the differences on columns\n\n>>> s1.compare(s2)\n self other\n1 b a\n3 d b\n\nStack the differences on indices\n\n>>> s1.compare(s2, align_axis=0)\n1 self b\n other a\n3 self d\n other b\ndtype: object\n\nKeep all original rows\n\n>>> s1.compare(s2, keep_shape=True)\n self other\n0 NaN NaN\n1 b a\n2 NaN NaN\n3 d b\n4 NaN NaN\n\nKeep all original rows and also all original values\n\n>>> s1.compare(s2, keep_shape=True, keep_equal=True)\n self other\n0 a a\n1 b a\n2 c c\n3 d b\n4 e e\n\"\"\",\n klass=_shared_doc_kwargs[\"klass\"],\n )\n def compare(\n self,\n other: Series,\n align_axis: Axis = 1,\n keep_shape: bool = False,\n keep_equal: bool = False,\n ) -> FrameOrSeriesUnion:\n return super().compare(\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n )\n\n def combine(self, other, func, fill_value=None) -> Series:\n \"\"\"\n Combine the Series with a Series or scalar according to `func`.\n\n Combine the Series and `other` using `func` to perform elementwise\n selection for combined Series.\n 
`fill_value` is assumed when value is missing at some index\n from one of the two objects being combined.\n\n Parameters\n ----------\n other : Series or scalar\n The value(s) to be combined with the `Series`.\n func : function\n Function that takes two scalars as inputs and returns an element.\n fill_value : scalar, optional\n The value to assume when an index is missing from\n one Series or the other. The default specifies to use the\n appropriate NaN value for the underlying dtype of the Series.\n\n Returns\n -------\n Series\n The result of combining the Series with the other object.\n\n See Also\n --------\n Series.combine_first : Combine Series values, choosing the calling\n Series' values first.\n\n Examples\n --------\n Consider 2 Datasets ``s1`` and ``s2`` containing\n highest clocked speeds of different birds.\n\n >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})\n >>> s1\n falcon 330.0\n eagle 160.0\n dtype: float64\n >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})\n >>> s2\n falcon 345.0\n eagle 200.0\n duck 30.0\n dtype: float64\n\n Now, to combine the two datasets and view the highest speeds\n of the birds across the two datasets\n\n >>> s1.combine(s2, max)\n duck NaN\n eagle 200.0\n falcon 345.0\n dtype: float64\n\n In the previous example, the resulting value for duck is missing,\n because the maximum of a NaN and a float is a NaN.\n So, in the example, we set ``fill_value=0``,\n so the maximum value returned will be the value from some dataset.\n\n >>> s1.combine(s2, max, fill_value=0)\n duck 30.0\n eagle 200.0\n falcon 345.0\n dtype: float64\n \"\"\"\n if fill_value is None:\n fill_value = na_value_for_dtype(self.dtype, compat=False)\n\n if isinstance(other, Series):\n # If other is a Series, result is based on union of Series,\n # so do this element by element\n new_index = self.index.union(other.index)\n new_name = ops.get_op_result_name(self, other)\n new_values = []\n for idx in new_index:\n lv = self.get(idx, fill_value)\n rv = other.get(idx, fill_value)\n with np.errstate(all=\"ignore\"):\n new_values.append(func(lv, rv))\n else:\n # Assume that other is a scalar, so apply the function for\n # each element in the Series\n new_index = self.index\n with np.errstate(all=\"ignore\"):\n new_values = [func(lv, other) for lv in self._values]\n new_name = self.name\n\n if is_categorical_dtype(self.dtype):\n pass\n elif is_extension_array_dtype(self.dtype):\n # TODO: can we do this for only SparseDtype?\n # The function can return something of any type, so check\n # if the type is compatible with the calling EA.\n new_values = maybe_cast_to_extension_array(type(self._values), new_values)\n return self._constructor(new_values, index=new_index, name=new_name)\n\n def combine_first(self, other) -> Series:\n \"\"\"\n Combine Series values, choosing the calling Series's values first.\n\n Parameters\n ----------\n other : Series\n The value(s) to be combined with the `Series`.\n\n Returns\n -------\n Series\n The result of combining the Series with the other object.\n\n See Also\n --------\n Series.combine : Perform elementwise operation on two Series\n using a given function.\n\n Notes\n -----\n Result index will be the union of the two indexes.\n\n Examples\n --------\n >>> s1 = pd.Series([1, np.nan])\n >>> s2 = pd.Series([3, 4])\n >>> s1.combine_first(s2)\n 0 1.0\n 1 4.0\n dtype: float64\n \"\"\"\n new_index = self.index.union(other.index)\n this = self.reindex(new_index, copy=False)\n other = other.reindex(new_index, copy=False)\n if this.dtype.kind == 
\"M\" and other.dtype.kind != \"M\":\n other = to_datetime(other)\n\n return this.where(notna(this), other)\n\n def update(self, other) -> None:\n \"\"\"\n Modify Series in place using values from passed Series.\n\n Uses non-NA values from passed Series to make updates. Aligns\n on index.\n\n Parameters\n ----------\n other : Series, or object coercible into Series\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, 5, 6]))\n >>> s\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s = pd.Series(['a', 'b', 'c'])\n >>> s.update(pd.Series(['d', 'e'], index=[0, 2]))\n >>> s\n 0 d\n 1 b\n 2 e\n dtype: object\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, 5, 6, 7, 8]))\n >>> s\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n If ``other`` contains NaNs the corresponding values are not updated\n in the original Series.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update(pd.Series([4, np.nan, 6]))\n >>> s\n 0 4\n 1 2\n 2 6\n dtype: int64\n\n ``other`` can also be a non-Series object type\n that is coercible into a Series\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update([4, np.nan, 6])\n >>> s\n 0 4\n 1 2\n 2 6\n dtype: int64\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.update({1: 9})\n >>> s\n 0 1\n 1 9\n 2 3\n dtype: int64\n \"\"\"\n\n if not isinstance(other, Series):\n other = Series(other)\n\n other = other.reindex_like(self)\n mask = notna(other)\n\n self._mgr = self._mgr.putmask(mask=mask, new=other)\n self._maybe_update_cacher()\n\n # ----------------------------------------------------------------------\n # Reindexing, sorting\n\n def sort_values(\n self,\n axis=0,\n ascending=True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool = False,\n key: ValueKeyFunc = None,\n ):\n \"\"\"\n Sort by the values.\n\n Sort a Series in ascending or descending order by some\n criterion.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Axis to direct sorting. The value 'index' is accepted for\n compatibility with DataFrame.sort_values.\n ascending : bool, default True\n If True, sort values in ascending order, otherwise descending.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See also :func:`numpy.sort` for more\n information. 'mergesort' and 'stable' are the only stable algorithms.\n na_position : {'first' or 'last'}, default 'last'\n Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at\n the end.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the series values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect a\n ``Series`` and return an array-like.\n\n .. 
versionadded:: 1.1.0\n\n Returns\n -------\n Series or None\n Series ordered by values or None if ``inplace=True``.\n\n See Also\n --------\n Series.sort_index : Sort by the Series indices.\n DataFrame.sort_values : Sort DataFrame by the values along either axis.\n DataFrame.sort_index : Sort DataFrame by indices.\n\n Examples\n --------\n >>> s = pd.Series([np.nan, 1, 3, 10, 5])\n >>> s\n 0 NaN\n 1 1.0\n 2 3.0\n 3 10.0\n 4 5.0\n dtype: float64\n\n Sort values ascending order (default behaviour)\n\n >>> s.sort_values(ascending=True)\n 1 1.0\n 2 3.0\n 4 5.0\n 3 10.0\n 0 NaN\n dtype: float64\n\n Sort values descending order\n\n >>> s.sort_values(ascending=False)\n 3 10.0\n 4 5.0\n 2 3.0\n 1 1.0\n 0 NaN\n dtype: float64\n\n Sort values inplace\n\n >>> s.sort_values(ascending=False, inplace=True)\n >>> s\n 3 10.0\n 4 5.0\n 2 3.0\n 1 1.0\n 0 NaN\n dtype: float64\n\n Sort values putting NAs first\n\n >>> s.sort_values(na_position='first')\n 0 NaN\n 1 1.0\n 2 3.0\n 4 5.0\n 3 10.0\n dtype: float64\n\n Sort a series of strings\n\n >>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])\n >>> s\n 0 z\n 1 b\n 2 d\n 3 a\n 4 c\n dtype: object\n\n >>> s.sort_values()\n 3 a\n 1 b\n 4 c\n 2 d\n 0 z\n dtype: object\n\n Sort using a key function. Your `key` function will be\n given the ``Series`` of values and should return an array-like.\n\n >>> s = pd.Series(['a', 'B', 'c', 'D', 'e'])\n >>> s.sort_values()\n 1 B\n 3 D\n 0 a\n 2 c\n 4 e\n dtype: object\n >>> s.sort_values(key=lambda x: x.str.lower())\n 0 a\n 1 B\n 2 c\n 3 D\n 4 e\n dtype: object\n\n NumPy ufuncs work well here. For example, we can\n sort by the ``sin`` of the value\n\n >>> s = pd.Series([-4, -2, 0, 2, 4])\n >>> s.sort_values(key=np.sin)\n 1 -2\n 4 4\n 2 0\n 0 -4\n 3 2\n dtype: int64\n\n More complicated user-defined functions can be used,\n as long as they expect a Series and return an array-like\n\n >>> s.sort_values(key=lambda x: (np.tan(x.cumsum())))\n 0 -4\n 3 2\n 4 4\n 1 -2\n 2 0\n dtype: int64\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n # Validate the axis parameter\n self._get_axis_number(axis)\n\n # GH 5856/5853\n if inplace and self._is_cached:\n raise ValueError(\n \"This Series is a view of some other array, to \"\n \"sort in-place you must create a copy\"\n )\n\n if is_list_like(ascending):\n if len(ascending) != 1:\n raise ValueError(\n f\"Length of ascending ({len(ascending)}) must be 1 for Series\"\n )\n ascending = ascending[0]\n\n if not is_bool(ascending):\n raise ValueError(\"ascending must be boolean\")\n\n if na_position not in [\"first\", \"last\"]:\n raise ValueError(f\"invalid na_position: {na_position}\")\n\n # GH 35922. 
Make sorting stable by leveraging nargsort\n values_to_sort = ensure_key_mapped(self, key)._values if key else self._values\n sorted_index = nargsort(values_to_sort, kind, ascending, na_position)\n\n result = self._constructor(\n self._values[sorted_index], index=self.index[sorted_index]\n )\n\n if ignore_index:\n result.index = ibase.default_index(len(sorted_index))\n\n if inplace:\n self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"sort_values\")\n\n def sort_index(\n self,\n axis=0,\n level=None,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n sort_remaining: bool = True,\n ignore_index: bool = False,\n key: IndexKeyFunc = None,\n ):\n \"\"\"\n Sort Series by index labels.\n\n Returns a new Series sorted by label if `inplace` argument is\n ``False``, otherwise updates the original series and returns None.\n\n Parameters\n ----------\n axis : int, default 0\n Axis to direct sorting. This can only be 0 for Series.\n level : int, optional\n If not None, sort on values in specified index level(s).\n ascending : bool or list of bools, default True\n Sort ascending vs. descending. When the index is a MultiIndex the\n sort direction can be controlled for each level individually.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See also :func:`numpy.sort` for more\n information. 'mergesort' and 'stable' are the only stable algorithms. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the index values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect an\n ``Index`` and return an ``Index`` of the same shape.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n Series or None\n The original Series sorted by the labels or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.sort_index: Sort DataFrame by the index.\n DataFrame.sort_values: Sort DataFrame by the value.\n Series.sort_values : Sort Series by the value.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])\n >>> s.sort_index()\n 1 c\n 2 b\n 3 a\n 4 d\n dtype: object\n\n Sort Descending\n\n >>> s.sort_index(ascending=False)\n 4 d\n 3 a\n 2 b\n 1 c\n dtype: object\n\n Sort Inplace\n\n >>> s.sort_index(inplace=True)\n >>> s\n 1 c\n 2 b\n 3 a\n 4 d\n dtype: object\n\n By default NaNs are put at the end, but use `na_position` to place\n them at the beginning\n\n >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])\n >>> s.sort_index(na_position='first')\n NaN d\n 1.0 c\n 2.0 b\n 3.0 a\n dtype: object\n\n Specify index level to sort\n\n >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',\n ... 'baz', 'baz', 'bar', 'bar']),\n ... np.array(['two', 'one', 'two', 'one',\n ... 
'two', 'one', 'two', 'one'])]\n >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)\n >>> s.sort_index(level=1)\n bar one 8\n baz one 6\n foo one 4\n qux one 2\n bar two 7\n baz two 5\n foo two 3\n qux two 1\n dtype: int64\n\n Does not sort by remaining levels when sorting by levels\n\n >>> s.sort_index(level=1, sort_remaining=False)\n qux one 2\n foo one 4\n baz one 6\n bar one 8\n qux two 1\n foo two 3\n baz two 5\n bar two 7\n dtype: int64\n\n Apply a key function before sorting\n\n >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd'])\n >>> s.sort_index(key=lambda x : x.str.lower())\n A 1\n b 2\n C 3\n d 4\n dtype: int64\n \"\"\"\n\n return super().sort_index(\n axis,\n level,\n ascending,\n inplace,\n kind,\n na_position,\n sort_remaining,\n ignore_index,\n key,\n )\n\n def argsort(self, axis=0, kind=\"quicksort\", order=None) -> Series:\n \"\"\"\n Return the integer indices that would sort the Series values.\n\n Override ndarray.argsort. Argsorts the value, omitting NA/null values,\n and places the result in the same locations as the non-NA values.\n\n Parameters\n ----------\n axis : {0 or \"index\"}\n Has no effect but is accepted for compatibility with numpy.\n kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See :func:`numpy.sort` for more\n information. 'mergesort' and 'stable' are the only stable algorithms.\n order : None\n Has no effect but is accepted for compatibility with numpy.\n\n Returns\n -------\n Series\n Positions of values within the sort order with -1 indicating\n nan values.\n\n See Also\n --------\n numpy.ndarray.argsort : Returns the indices that would sort this array.\n \"\"\"\n values = self._values\n mask = isna(values)\n\n if mask.any():\n result = Series(-1, index=self.index, name=self.name, dtype=\"int64\")\n notmask = ~mask\n result[notmask] = np.argsort(values[notmask], kind=kind)\n return self._constructor(result, index=self.index).__finalize__(\n self, method=\"argsort\"\n )\n else:\n return self._constructor(\n np.argsort(values, kind=kind), index=self.index, dtype=\"int64\"\n ).__finalize__(self, method=\"argsort\")\n\n def nlargest(self, n=5, keep=\"first\") -> Series:\n \"\"\"\n Return the largest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n Return this many descending sorted values.\n keep : {'first', 'last', 'all'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Series of `n` elements:\n\n - ``first`` : return the first `n` occurrences in order\n of appearance.\n - ``last`` : return the last `n` occurrences in reverse\n order of appearance.\n - ``all`` : keep all occurrences. This can result in a Series of\n size larger than `n`.\n\n Returns\n -------\n Series\n The `n` largest values in the Series, sorted in decreasing order.\n\n See Also\n --------\n Series.nsmallest: Get the `n` smallest elements.\n Series.sort_values: Sort Series by values.\n Series.head: Return the first `n` rows.\n\n Notes\n -----\n Faster than ``.sort_values(ascending=False).head(n)`` for small `n`\n relative to the size of the ``Series`` object.\n\n Examples\n --------\n >>> countries_population = {\"Italy\": 59000000, \"France\": 65000000,\n ... \"Malta\": 434000, \"Maldives\": 434000,\n ... \"Brunei\": 434000, \"Iceland\": 337000,\n ... \"Nauru\": 11300, \"Tuvalu\": 11300,\n ... 
\"Anguilla\": 11300, \"Montserrat\": 5200}\n >>> s = pd.Series(countries_population)\n >>> s\n Italy 59000000\n France 65000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n Iceland 337000\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Montserrat 5200\n dtype: int64\n\n The `n` largest elements where ``n=5`` by default.\n\n >>> s.nlargest()\n France 65000000\n Italy 59000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n dtype: int64\n\n The `n` largest elements where ``n=3``. Default `keep` value is 'first'\n so Malta will be kept.\n\n >>> s.nlargest(3)\n France 65000000\n Italy 59000000\n Malta 434000\n dtype: int64\n\n The `n` largest elements where ``n=3`` and keeping the last duplicates.\n Brunei will be kept since it is the last with value 434000 based on\n the index order.\n\n >>> s.nlargest(3, keep='last')\n France 65000000\n Italy 59000000\n Brunei 434000\n dtype: int64\n\n The `n` largest elements where ``n=3`` with all duplicates kept. Note\n that the returned Series has five elements due to the three duplicates.\n\n >>> s.nlargest(3, keep='all')\n France 65000000\n Italy 59000000\n Malta 434000\n Maldives 434000\n Brunei 434000\n dtype: int64\n \"\"\"\n return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()\n\n def nsmallest(self, n=5, keep=\"first\") -> Series:\n \"\"\"\n Return the smallest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n Return this many ascending sorted values.\n keep : {'first', 'last', 'all'}, default 'first'\n When there are duplicate values that cannot all fit in a\n Series of `n` elements:\n\n - ``first`` : return the first `n` occurrences in order\n of appearance.\n - ``last`` : return the last `n` occurrences in reverse\n order of appearance.\n - ``all`` : keep all occurrences. This can result in a Series of\n size larger than `n`.\n\n Returns\n -------\n Series\n The `n` smallest values in the Series, sorted in increasing order.\n\n See Also\n --------\n Series.nlargest: Get the `n` largest elements.\n Series.sort_values: Sort Series by values.\n Series.head: Return the first `n` rows.\n\n Notes\n -----\n Faster than ``.sort_values().head(n)`` for small `n` relative to\n the size of the ``Series`` object.\n\n Examples\n --------\n >>> countries_population = {\"Italy\": 59000000, \"France\": 65000000,\n ... \"Brunei\": 434000, \"Malta\": 434000,\n ... \"Maldives\": 434000, \"Iceland\": 337000,\n ... \"Nauru\": 11300, \"Tuvalu\": 11300,\n ... \"Anguilla\": 11300, \"Montserrat\": 5200}\n >>> s = pd.Series(countries_population)\n >>> s\n Italy 59000000\n France 65000000\n Brunei 434000\n Malta 434000\n Maldives 434000\n Iceland 337000\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Montserrat 5200\n dtype: int64\n\n The `n` smallest elements where ``n=5`` by default.\n\n >>> s.nsmallest()\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n Iceland 337000\n dtype: int64\n\n The `n` smallest elements where ``n=3``. Default `keep` value is\n 'first' so Nauru and Tuvalu will be kept.\n\n >>> s.nsmallest(3)\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n dtype: int64\n\n The `n` smallest elements where ``n=3`` and keeping the last\n duplicates. Anguilla and Tuvalu will be kept since they are the last\n with value 11300 based on the index order.\n\n >>> s.nsmallest(3, keep='last')\n Montserrat 5200\n Anguilla 11300\n Tuvalu 11300\n dtype: int64\n\n The `n` smallest elements where ``n=3`` with all duplicates kept. 
Note\n that the returned Series has four elements due to the three duplicates.\n\n >>> s.nsmallest(3, keep='all')\n Montserrat 5200\n Nauru 11300\n Tuvalu 11300\n Anguilla 11300\n dtype: int64\n \"\"\"\n return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()\n\n def swaplevel(self, i=-2, j=-1, copy=True) -> Series:\n \"\"\"\n Swap levels i and j in a :class:`MultiIndex`.\n\n Default is to swap the two innermost levels of the index.\n\n Parameters\n ----------\n i, j : int, str\n Level of the indices to be swapped. Can pass level name as string.\n copy : bool, default True\n Whether to copy underlying data.\n\n Returns\n -------\n Series\n Series with levels swapped in MultiIndex.\n \"\"\"\n assert isinstance(self.index, MultiIndex)\n new_index = self.index.swaplevel(i, j)\n return self._constructor(self._values, index=new_index, copy=copy).__finalize__(\n self, method=\"swaplevel\"\n )\n\n def reorder_levels(self, order) -> Series:\n \"\"\"\n Rearrange index levels using input order.\n\n May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int representing new level order\n Reference level by number or key.\n\n Returns\n -------\n type of caller (new object)\n \"\"\"\n if not isinstance(self.index, MultiIndex): # pragma: no cover\n raise Exception(\"Can only reorder levels on a hierarchical axis.\")\n\n result = self.copy()\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.reorder_levels(order)\n return result\n\n def explode(self, ignore_index: bool = False) -> Series:\n \"\"\"\n Transform each element of a list-like to a row.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n Series\n Exploded lists to rows; index will be duplicated for these rows.\n\n See Also\n --------\n Series.str.split : Split string values on specified separator.\n Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex\n to produce DataFrame.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n DataFrame.explode : Explode a DataFrame from list-like\n columns to long format.\n\n Notes\n -----\n This routine will explode list-likes including lists, tuples, sets,\n Series, and np.ndarray. The result dtype of the subset rows will\n be object. Scalars will be returned unchanged, and empty list-likes will\n result in a np.nan for that row. 
In addition, the ordering of elements in\n the output will be non-deterministic when exploding sets.\n\n Examples\n --------\n >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])\n >>> s\n 0 [1, 2, 3]\n 1 foo\n 2 []\n 3 [3, 4]\n dtype: object\n\n >>> s.explode()\n 0 1\n 0 2\n 0 3\n 1 foo\n 2 NaN\n 3 3\n 3 4\n dtype: object\n \"\"\"\n if not len(self) or not is_object_dtype(self):\n return self.copy()\n\n values, counts = reshape.explode(np.asarray(self.array))\n\n if ignore_index:\n index = ibase.default_index(len(values))\n else:\n index = self.index.repeat(counts)\n\n return self._constructor(values, index=index, name=self.name)\n\n def unstack(self, level=-1, fill_value=None):\n \"\"\"\n Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.\n\n Parameters\n ----------\n level : int, str, or list of these, default last level\n Level(s) to unstack, can pass level name.\n fill_value : scalar value, default None\n Value to use when replacing NaN values.\n\n Returns\n -------\n DataFrame\n Unstacked Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4],\n ... index=pd.MultiIndex.from_product([['one', 'two'],\n ... ['a', 'b']]))\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: int64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n \"\"\"\n from pandas.core.reshape.reshape import unstack\n\n return unstack(self, level, fill_value)\n\n # ----------------------------------------------------------------------\n # function application\n\n def map(self, arg, na_action=None) -> Series:\n \"\"\"\n Map values of Series according to input correspondence.\n\n Used for substituting each value in a Series with another value,\n that may be derived from a function, a ``dict`` or\n a :class:`Series`.\n\n Parameters\n ----------\n arg : function, collections.abc.Mapping subclass or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}, default None\n If 'ignore', propagate NaN values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n Series\n Same index as caller.\n\n See Also\n --------\n Series.apply : For applying more complex functions on a Series.\n DataFrame.apply : Apply a function row-/column-wise.\n DataFrame.applymap : Apply a function elementwise on a whole DataFrame.\n\n Notes\n -----\n When ``arg`` is a dictionary, values in Series that are not in the\n dictionary (as keys) are converted to ``NaN``. However, if the\n dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.\n provides a method for default values), then this default is used\n rather than ``NaN``.\n\n Examples\n --------\n >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])\n >>> s\n 0 cat\n 1 dog\n 2 NaN\n 3 rabbit\n dtype: object\n\n ``map`` accepts a ``dict`` or a ``Series``. Values that are not found\n in the ``dict`` are converted to ``NaN``, unless the dict has a default\n value (e.g. 
``defaultdict``):\n\n >>> s.map({'cat': 'kitten', 'dog': 'puppy'})\n 0 kitten\n 1 puppy\n 2 NaN\n 3 NaN\n dtype: object\n\n It also accepts a function:\n\n >>> s.map('I am a {}'.format)\n 0 I am a cat\n 1 I am a dog\n 2 I am a nan\n 3 I am a rabbit\n dtype: object\n\n To avoid applying the function to missing values (and keep them as\n ``NaN``) ``na_action='ignore'`` can be used:\n\n >>> s.map('I am a {}'.format, na_action='ignore')\n 0 I am a cat\n 1 I am a dog\n 2 NaN\n 3 I am a rabbit\n dtype: object\n \"\"\"\n new_values = super()._map_values(arg, na_action=na_action)\n return self._constructor(new_values, index=self.index).__finalize__(\n self, method=\"map\"\n )\n\n def _gotitem(self, key, ndim, subset=None) -> Series:\n \"\"\"\n Sub-classes to define. Return a sliced object.\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n Requested ndim of result.\n subset : object, default None\n Subset to act on.\n \"\"\"\n return self\n\n _agg_see_also_doc = dedent(\n \"\"\"\n See Also\n --------\n Series.apply : Invoke function on a Series.\n Series.transform : Transform function producing a Series with like indexes.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.agg('min')\n 1\n\n >>> s.agg(['min', 'max'])\n min 1\n max 4\n dtype: int64\n \"\"\"\n )\n\n @doc(\n generic._shared_docs[\"aggregate\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n see_also=_agg_see_also_doc,\n examples=_agg_examples_doc,\n )\n def aggregate(self, func=None, axis=0, *args, **kwargs):\n # Validate the axis parameter\n self._get_axis_number(axis)\n\n # if func is None, will switch to user-provided \"named aggregation\" kwargs\n if func is None:\n func = dict(kwargs.items())\n\n op = series_apply(self, func, args=args, kwds=kwargs)\n result, how = op.agg()\n if result is None:\n\n # we can be called from an inner function which\n # passes this meta-data\n kwargs.pop(\"_axis\", None)\n kwargs.pop(\"_level\", None)\n\n # try a regular apply, this evaluates lambdas\n # row-by-row; however if the lambda is expected a Series\n # expression, e.g.: lambda x: x-x.quantile(0.25)\n # this will fail, so we can try a vectorized evaluation\n\n # we cannot FIRST try the vectorized evaluation, because\n # then .agg and .apply would have different semantics if the\n # operation is actually defined on the Series, e.g. str\n try:\n result = self.apply(func, *args, **kwargs)\n except (ValueError, AttributeError, TypeError):\n result = func(self, *args, **kwargs)\n\n return result\n\n agg = aggregate\n\n @doc(\n _shared_docs[\"transform\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n )\n def transform(\n self, func: AggFuncType, axis: Axis = 0, *args, **kwargs\n ) -> FrameOrSeriesUnion:\n return transform(self, func, axis, *args, **kwargs)\n\n def apply(\n self,\n func: AggFuncType,\n convert_dtype: bool = True,\n args: Tuple[Any, ...] = (),\n **kwds,\n ) -> FrameOrSeriesUnion:\n \"\"\"\n Invoke function on values of Series.\n\n Can be ufunc (a NumPy function that applies to the entire Series)\n or a Python function that only works on single values.\n\n Parameters\n ----------\n func : function\n Python function or NumPy ufunc to apply.\n convert_dtype : bool, default True\n Try to find better dtype for elementwise function results. 
If\n False, leave as dtype=object.\n args : tuple\n Positional arguments passed to func after the series value.\n **kwds\n Additional keyword arguments passed to func.\n\n Returns\n -------\n Series or DataFrame\n If func returns a Series object the result will be a DataFrame.\n\n See Also\n --------\n Series.map: For element-wise operations.\n Series.agg: Only perform aggregating type operations.\n Series.transform: Only perform transforming type operations.\n\n Examples\n --------\n Create a series with typical summer temperatures for each city.\n\n >>> s = pd.Series([20, 21, 12],\n ... index=['London', 'New York', 'Helsinki'])\n >>> s\n London 20\n New York 21\n Helsinki 12\n dtype: int64\n\n Square the values by defining a function and passing it as an\n argument to ``apply()``.\n\n >>> def square(x):\n ... return x ** 2\n >>> s.apply(square)\n London 400\n New York 441\n Helsinki 144\n dtype: int64\n\n Square the values by passing an anonymous function as an\n argument to ``apply()``.\n\n >>> s.apply(lambda x: x ** 2)\n London 400\n New York 441\n Helsinki 144\n dtype: int64\n\n Define a custom function that needs additional positional\n arguments and pass these additional arguments using the\n ``args`` keyword.\n\n >>> def subtract_custom_value(x, custom_value):\n ... return x - custom_value\n\n >>> s.apply(subtract_custom_value, args=(5,))\n London 15\n New York 16\n Helsinki 7\n dtype: int64\n\n Define a custom function that takes keyword arguments\n and pass these arguments to ``apply``.\n\n >>> def add_custom_values(x, **kwargs):\n ... for month in kwargs:\n ... x += kwargs[month]\n ... return x\n\n >>> s.apply(add_custom_values, june=30, july=20, august=25)\n London 95\n New York 96\n Helsinki 87\n dtype: int64\n\n Use a function from the Numpy library.\n\n >>> s.apply(np.log)\n London 2.995732\n New York 3.044522\n Helsinki 2.484907\n dtype: float64\n \"\"\"\n op = series_apply(self, func, convert_dtype, args, kwds)\n return op.apply()\n\n def _reduce(\n self,\n op,\n name: str,\n *,\n axis=0,\n skipna=True,\n numeric_only=None,\n filter_type=None,\n **kwds,\n ):\n \"\"\"\n Perform a reduction operation.\n\n If we have an ndarray as a value, then simply perform the operation,\n otherwise delegate to the object.\n \"\"\"\n delegate = self._values\n\n if axis is not None:\n self._get_axis_number(axis)\n\n if isinstance(delegate, ExtensionArray):\n # dispatch to ExtensionArray interface\n return delegate._reduce(name, skipna=skipna, **kwds)\n\n else:\n # dispatch to numpy arrays\n if numeric_only:\n raise NotImplementedError(\n f\"Series.{name} does not implement numeric_only.\"\n )\n with np.errstate(all=\"ignore\"):\n return op(delegate, skipna=skipna, **kwds)\n\n def _reindex_indexer(self, new_index, indexer, copy):\n if indexer is None:\n if copy:\n return self.copy()\n return self\n\n new_values = algorithms.take_1d(\n self._values, indexer, allow_fill=True, fill_value=None\n )\n return self._constructor(new_values, index=new_index)\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\"\n Check if we do need a multi reindex; this is for compat with\n higher dims.\n \"\"\"\n return False\n\n @doc(\n NDFrame.align,\n klass=_shared_doc_kwargs[\"klass\"],\n axes_single_arg=_shared_doc_kwargs[\"axes_single_arg\"],\n )\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n return super().align(\n other,\n join=join,\n axis=axis,\n level=level,\n 
copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n broadcast_axis=broadcast_axis,\n )\n\n def rename(\n self,\n index=None,\n *,\n axis=None,\n copy=True,\n inplace=False,\n level=None,\n errors=\"ignore\",\n ):\n \"\"\"\n Alter Series index labels or name.\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don't throw an\n error.\n\n Alternatively, change ``Series.name`` with a scalar value.\n\n See the :ref:`user guide <basics.rename>` for more.\n\n Parameters\n ----------\n axis : {0 or \"index\"}\n Unused. Accepted for compatibility with DataFrame method only.\n index : scalar, hashable sequence, dict-like or function, optional\n Functions or dict-like are transformations to apply to\n the index.\n Scalar or hashable sequence-like will alter the ``Series.name``\n attribute.\n\n **kwargs\n Additional keyword arguments passed to the function. Only the\n \"inplace\" keyword is used.\n\n Returns\n -------\n Series or None\n Series with index labels or name altered or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.rename : Corresponding DataFrame method.\n Series.rename_axis : Set the name of the axis.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n \"\"\"\n if callable(index) or is_dict_like(index):\n return super().rename(\n index, copy=copy, inplace=inplace, level=level, errors=errors\n )\n else:\n return self._set_name(index, inplace=inplace)\n\n @Appender(\n \"\"\"\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.set_axis(['a', 'b', 'c'], axis=0)\n a 1\n b 2\n c 3\n dtype: int64\n \"\"\"\n )\n @Substitution(\n **_shared_doc_kwargs,\n extended_summary_sub=\"\",\n axis_description_sub=\"\",\n see_also_sub=\"\",\n )\n @Appender(generic.NDFrame.set_axis.__doc__)\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n return super().set_axis(labels, axis=axis, inplace=inplace)\n\n @doc(\n NDFrame.reindex,\n klass=_shared_doc_kwargs[\"klass\"],\n axes=_shared_doc_kwargs[\"axes\"],\n optional_labels=_shared_doc_kwargs[\"optional_labels\"],\n optional_axis=_shared_doc_kwargs[\"optional_axis\"],\n )\n def reindex(self, index=None, **kwargs):\n return super().reindex(index=index, **kwargs)\n\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace=False,\n errors=\"raise\",\n ) -> Series:\n \"\"\"\n Return Series with specified index labels removed.\n\n Remove elements of a Series based on specifying the index labels.\n When using a multi-index, labels on different levels can be removed\n by specifying the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index labels to drop.\n axis : 0, default 0\n Redundant for application on Series.\n index : single label or list-like\n Redundant for application on Series, but 'index' can be used instead\n of 'labels'.\n columns : single label or list-like\n No change is made to the Series; use 'index' or 'labels' instead.\n level : int or level name, optional\n For MultiIndex, level for which the labels will be removed.\n inplace : bool, default False\n If 
True, do operation inplace and return None.\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and only existing labels are dropped.\n\n Returns\n -------\n Series or None\n Series with specified index labels removed or None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If none of the labels are found in the index.\n\n See Also\n --------\n Series.reindex : Return only specified index labels of Series.\n Series.dropna : Return series without null values.\n Series.drop_duplicates : Return Series with duplicate values removed.\n DataFrame.drop : Drop specified labels from rows or columns.\n\n Examples\n --------\n >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])\n >>> s\n A 0\n B 1\n C 2\n dtype: int64\n\n Drop labels B en C\n\n >>> s.drop(labels=['B', 'C'])\n A 0\n dtype: int64\n\n Drop 2nd level label in MultiIndex Series\n\n >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],\n ... index=midx)\n >>> s\n lama speed 45.0\n weight 200.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.drop(labels='weight', level=1)\n lama speed 45.0\n length 1.2\n cow speed 30.0\n length 1.5\n falcon speed 320.0\n length 0.3\n dtype: float64\n \"\"\"\n return super().drop(\n labels=labels,\n axis=axis,\n index=index,\n columns=columns,\n level=level,\n inplace=inplace,\n errors=errors,\n )\n\n @doc(NDFrame.fillna, **_shared_doc_kwargs)\n def fillna(\n self,\n value=None,\n method=None,\n axis=None,\n inplace=False,\n limit=None,\n downcast=None,\n ) -> Optional[Series]:\n return super().fillna(\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n\n def pop(self, item: Hashable) -> Any:\n \"\"\"\n Return item and drops from series. 
Raise KeyError if not found.\n\n Parameters\n ----------\n item : label\n Index of the element that needs to be removed.\n\n Returns\n -------\n Value that is popped from series.\n\n Examples\n --------\n >>> ser = pd.Series([1,2,3])\n\n >>> ser.pop(0)\n 1\n\n >>> ser\n 1 2\n 2 3\n dtype: int64\n \"\"\"\n return super().pop(item=item)\n\n @doc(\n NDFrame.replace,\n klass=_shared_doc_kwargs[\"klass\"],\n inplace=_shared_doc_kwargs[\"inplace\"],\n replace_iloc=_shared_doc_kwargs[\"replace_iloc\"],\n )\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ):\n return super().replace(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n limit=limit,\n regex=regex,\n method=method,\n )\n\n def _replace_single(self, to_replace, method: str, inplace: bool, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n\n orig_dtype = self.dtype\n result = self if inplace else self.copy()\n fill_f = missing.get_fill_func(method)\n\n mask = missing.mask_missing(result.values, to_replace)\n values = fill_f(result.values, limit=limit, mask=mask)\n\n if values.dtype == orig_dtype and inplace:\n return\n\n result = self._constructor(values, index=self.index, dtype=self.dtype)\n result = result.__finalize__(self)\n\n if inplace:\n self._update_inplace(result)\n return\n\n return result\n\n @doc(NDFrame.shift, klass=_shared_doc_kwargs[\"klass\"])\n def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> Series:\n return super().shift(\n periods=periods, freq=freq, axis=axis, fill_value=fill_value\n )\n\n def memory_usage(self, index=True, deep=False):\n \"\"\"\n Return the memory usage of the Series.\n\n The memory usage can optionally include the contribution of\n the index and of elements of `object` dtype.\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the Series index.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n `object` dtypes for system-level memory consumption, and include\n it in the returned value.\n\n Returns\n -------\n int\n Bytes of memory consumed.\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of the\n array.\n DataFrame.memory_usage : Bytes consumed by a DataFrame.\n\n Examples\n --------\n >>> s = pd.Series(range(3))\n >>> s.memory_usage()\n 152\n\n Not including the index gives the size of the rest of the data, which\n is necessarily smaller:\n\n >>> s.memory_usage(index=False)\n 24\n\n The memory footprint of `object` values is ignored by default:\n\n >>> s = pd.Series([\"a\", \"b\"])\n >>> s.values\n array(['a', 'b'], dtype=object)\n >>> s.memory_usage()\n 144\n >>> s.memory_usage(deep=True)\n 244\n \"\"\"\n v = super().memory_usage(deep=deep)\n if index:\n v += self.index.memory_usage(deep=deep)\n return v\n\n def isin(self, values) -> Series:\n \"\"\"\n Whether elements in Series are contained in `values`.\n\n Return a boolean Series showing whether each element in the Series\n matches an element in the passed sequence of `values` exactly.\n\n Parameters\n ----------\n values : set or list-like\n The sequence of values to test. Passing in a single string will\n raise a ``TypeError``. 
Instead, turn a single string into a\n list of one element.\n\n Returns\n -------\n Series\n Series of booleans indicating if each element is in values.\n\n Raises\n ------\n TypeError\n * If `values` is a string\n\n See Also\n --------\n DataFrame.isin : Equivalent method on DataFrame.\n\n Examples\n --------\n >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'], name='animal')\n >>> s.isin(['cow', 'lama'])\n 0 True\n 1 True\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Passing a single string as ``s.isin('lama')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Strings and integers are distinct and are therefore not comparable:\n\n >>> pd.Series([1]).isin(['1'])\n 0 False\n dtype: bool\n >>> pd.Series([1.1]).isin(['1.1'])\n 0 False\n dtype: bool\n \"\"\"\n result = algorithms.isin(self._values, values)\n return self._constructor(result, index=self.index).__finalize__(\n self, method=\"isin\"\n )\n\n def between(self, left, right, inclusive=True) -> Series:\n \"\"\"\n Return boolean Series equivalent to left <= series <= right.\n\n This function returns a boolean vector containing `True` wherever the\n corresponding Series element is between the boundary values `left` and\n `right`. NA values are treated as `False`.\n\n Parameters\n ----------\n left : scalar or list-like\n Left boundary.\n right : scalar or list-like\n Right boundary.\n inclusive : bool, default True\n Include boundaries.\n\n Returns\n -------\n Series\n Series representing whether each element is between left and\n right (inclusive).\n\n See Also\n --------\n Series.gt : Greater than of series and other.\n Series.lt : Less than of series and other.\n\n Notes\n -----\n This function is equivalent to ``(left <= ser) & (ser <= right)``\n\n Examples\n --------\n >>> s = pd.Series([2, 0, 4, 8, np.nan])\n\n Boundary values are included by default:\n\n >>> s.between(1, 4)\n 0 True\n 1 False\n 2 True\n 3 False\n 4 False\n dtype: bool\n\n With `inclusive` set to ``False`` boundary values are excluded:\n\n >>> s.between(1, 4, inclusive=False)\n 0 True\n 1 False\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n `left` and `right` can be any scalar value:\n\n >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])\n >>> s.between('Anna', 'Daniel')\n 0 False\n 1 True\n 2 True\n 3 False\n dtype: bool\n \"\"\"\n if inclusive:\n lmask = self >= left\n rmask = self <= right\n else:\n lmask = self > left\n rmask = self < right\n\n return lmask & rmask\n\n # ----------------------------------------------------------------------\n # Convert to types that support pd.NA\n\n def _convert_dtypes(\n self,\n infer_objects: bool = True,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n ) -> Series:\n input_series = self\n if infer_objects:\n input_series = input_series.infer_objects()\n if is_object_dtype(input_series):\n input_series = input_series.copy()\n\n if convert_string or convert_integer or convert_boolean or convert_floating:\n inferred_dtype = convert_dtypes(\n input_series._values,\n convert_string,\n convert_integer,\n convert_boolean,\n convert_floating,\n )\n try:\n result = input_series.astype(inferred_dtype)\n except TypeError:\n result = input_series.copy()\n else:\n result = input_series.copy()\n return result\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isna(self) 
-> Series:\n return generic.NDFrame.isna(self)\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isnull(self) -> Series:\n return super().isnull()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notna(self) -> Series:\n return super().notna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notnull(self) -> Series:\n return super().notnull()\n\n def dropna(self, axis=0, inplace=False, how=None):\n \"\"\"\n Return a new Series with missing values removed.\n\n See the :ref:`User Guide <missing_data>` for more on which values are\n considered missing, and how to work with missing data.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n There is only one axis to drop values from.\n inplace : bool, default False\n If True, do operation inplace and return None.\n how : str, optional\n Not in use. Kept for compatibility.\n\n Returns\n -------\n Series or None\n Series with NA entries dropped from it or None if ``inplace=True``.\n\n See Also\n --------\n Series.isna: Indicate missing values.\n Series.notna : Indicate existing (non-missing) values.\n Series.fillna : Replace missing values.\n DataFrame.dropna : Drop rows or columns which contain NA values.\n Index.dropna : Drop missing indices.\n\n Examples\n --------\n >>> ser = pd.Series([1., 2., np.nan])\n >>> ser\n 0 1.0\n 1 2.0\n 2 NaN\n dtype: float64\n\n Drop NA values from a Series.\n\n >>> ser.dropna()\n 0 1.0\n 1 2.0\n dtype: float64\n\n Keep the Series with valid entries in the same variable.\n\n >>> ser.dropna(inplace=True)\n >>> ser\n 0 1.0\n 1 2.0\n dtype: float64\n\n Empty strings are not considered NA values. ``None`` is considered an\n NA value.\n\n >>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])\n >>> ser\n 0 NaN\n 1 2\n 2 NaT\n 3\n 4 None\n 5 I stay\n dtype: object\n >>> ser.dropna()\n 1 2\n 3\n 5 I stay\n dtype: object\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n # Validate the axis parameter\n self._get_axis_number(axis or 0)\n\n if self._can_hold_na:\n result = remove_na_arraylike(self)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n else:\n if inplace:\n # do nothing\n pass\n else:\n return self.copy()\n\n # ----------------------------------------------------------------------\n # Time series-oriented methods\n\n @doc(NDFrame.asfreq, **_shared_doc_kwargs)\n def asfreq(\n self,\n freq,\n method=None,\n how: Optional[str] = None,\n normalize: bool = False,\n fill_value=None,\n ) -> Series:\n return super().asfreq(\n freq=freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n @doc(NDFrame.resample, **_shared_doc_kwargs)\n def resample(\n self,\n rule,\n axis=0,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n convention: str = \"start\",\n kind: Optional[str] = None,\n loffset=None,\n base: Optional[int] = None,\n on=None,\n level=None,\n origin: Union[str, TimestampConvertibleTypes] = \"start_day\",\n offset: Optional[TimedeltaConvertibleTypes] = None,\n ) -> Resampler:\n return super().resample(\n rule=rule,\n axis=axis,\n closed=closed,\n label=label,\n convention=convention,\n kind=kind,\n loffset=loffset,\n base=base,\n on=on,\n level=level,\n origin=origin,\n offset=offset,\n )\n\n def to_timestamp(self, freq=None, how=\"start\", copy=True) -> Series:\n \"\"\"\n Cast to DatetimeIndex of Timestamps, at *beginning* of period.\n\n Parameters\n ----------\n freq : str, default frequency of PeriodIndex\n Desired frequency.\n how : {'s', 'e', 'start', 
'end'}\n Convention for converting period to timestamp; start of period\n vs. end.\n copy : bool, default True\n Whether or not to return a copy.\n\n Returns\n -------\n Series with DatetimeIndex\n \"\"\"\n new_values = self._values\n if copy:\n new_values = new_values.copy()\n\n if not isinstance(self.index, PeriodIndex):\n raise TypeError(f\"unsupported Type {type(self.index).__name__}\")\n new_index = self.index.to_timestamp(freq=freq, how=how)\n return self._constructor(new_values, index=new_index).__finalize__(\n self, method=\"to_timestamp\"\n )\n\n def to_period(self, freq=None, copy=True) -> Series:\n \"\"\"\n Convert Series from DatetimeIndex to PeriodIndex.\n\n Parameters\n ----------\n freq : str, default None\n Frequency associated with the PeriodIndex.\n copy : bool, default True\n Whether or not to return a copy.\n\n Returns\n -------\n Series\n Series with index converted to PeriodIndex.\n \"\"\"\n new_values = self._values\n if copy:\n new_values = new_values.copy()\n\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(f\"unsupported Type {type(self.index).__name__}\")\n new_index = self.index.to_period(freq=freq)\n return self._constructor(new_values, index=new_index).__finalize__(\n self, method=\"to_period\"\n )\n\n # ----------------------------------------------------------------------\n # Add index\n _AXIS_ORDERS = [\"index\"]\n _AXIS_REVERSED = False\n _AXIS_LEN = len(_AXIS_ORDERS)\n _info_axis_number = 0\n _info_axis_name = \"index\"\n\n index: Index = properties.AxisProperty(\n axis=0, doc=\"The index (axis labels) of the Series.\"\n )\n\n # ----------------------------------------------------------------------\n # Accessor Methods\n # ----------------------------------------------------------------------\n str = CachedAccessor(\"str\", StringMethods)\n dt = CachedAccessor(\"dt\", CombinedDatetimelikeProperties)\n cat = CachedAccessor(\"cat\", CategoricalAccessor)\n plot = CachedAccessor(\"plot\", pandas.plotting.PlotAccessor)\n sparse = CachedAccessor(\"sparse\", SparseAccessor)\n\n # ----------------------------------------------------------------------\n # Add plotting methods to Series\n hist = pandas.plotting.hist_series\n\n # ----------------------------------------------------------------------\n # Template-Based Arithmetic/Comparison Methods\n\n def _cmp_method(self, other, op):\n res_name = ops.get_op_result_name(self, other)\n\n if isinstance(other, Series) and not self._indexed_same(other):\n raise ValueError(\"Can only compare identically-labeled Series objects\")\n\n lvalues = extract_array(self, extract_numpy=True)\n rvalues = extract_array(other, extract_numpy=True)\n\n res_values = ops.comparison_op(lvalues, rvalues, op)\n\n return self._construct_result(res_values, name=res_name)\n\n def _logical_method(self, other, op):\n res_name = ops.get_op_result_name(self, other)\n self, other = ops.align_method_SERIES(self, other, align_asobject=True)\n\n lvalues = extract_array(self, extract_numpy=True)\n rvalues = extract_array(other, extract_numpy=True)\n\n res_values = ops.logical_op(lvalues, rvalues, op)\n return self._construct_result(res_values, name=res_name)\n\n def _arith_method(self, other, op):\n res_name = ops.get_op_result_name(self, other)\n self, other = ops.align_method_SERIES(self, other)\n\n lvalues = extract_array(self, extract_numpy=True)\n rvalues = extract_array(other, extract_numpy=True)\n result = ops.arithmetic_op(lvalues, rvalues, op)\n\n return self._construct_result(result, 
name=res_name)\n\n\nSeries._add_numeric_operations()\n\n# Add arithmetic!\nops.add_flex_arithmetic_methods(Series)\n"
] | [
[
"pandas.core.reshape.concat.concat",
"numpy.asarray",
"pandas.core.nanops.nancorr",
"numpy.transpose",
"pandas.core.dtypes.missing.na_value_for_dtype",
"pandas.core.generic.NDFrame.isna",
"pandas._libs.lib.infer_dtype",
"pandas.core.indexing.check_bool_indexer",
"pandas.core.dtypes.common.is_scalar",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.cast.convert_dtypes",
"pandas.core.dtypes.inference.is_hashable",
"pandas.core.algorithms.diff",
"numpy.errstate",
"pandas.core.reshape.reshape.unstack",
"numpy.array",
"numpy.dot",
"pandas.core.indexers.unpack_1tuple",
"pandas.core.dtypes.common.is_integer",
"pandas.core.apply.series_apply",
"pandas.core.common.any_none",
"pandas.compat.numpy.function.validate_round",
"pandas.core.missing.get_fill_func",
"pandas.core.algorithms.take_1d",
"pandas.core.indexers.deprecate_ndim_indexing",
"pandas._libs.properties.AxisProperty",
"pandas.core.aggregation.transform",
"pandas.core.ops.fill_binop",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.dtypes.missing.isna",
"pandas.core.common.maybe_iterable_to_list",
"pandas.core.internals.SingleBlockManager.from_array",
"pandas.core.dtypes.cast.validate_numeric_casting",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.construction.sanitize_array",
"pandas.core.dtypes.common.is_dict_like",
"numpy.argsort",
"pandas.util._decorators.doc",
"pandas.core.construction.extract_array",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.tools.datetimes.to_datetime",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas.core.ops.arithmetic_op",
"pandas.core.dtypes.missing.remove_na_arraylike",
"pandas.core.common.asarray_tuplesafe",
"pandas.util._validators.validate_percentile",
"pandas.core.base.IndexOpsMixin.duplicated",
"pandas.core.common.apply_if_callable",
"pandas.core.internals.construction.sanitize_index",
"pandas.core.groupby.generic.SeriesGroupBy",
"pandas.core.dtypes.common.is_bool",
"pandas.core.ops.add_flex_arithmetic_methods",
"pandas.core.missing.mask_missing",
"pandas.core.ops.get_op_result_name",
"pandas.core.ops.logical_op",
"pandas.core.algorithms.searchsorted",
"pandas.core.sorting.nargsort",
"pandas.core.ops.comparison_op",
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.indexes.api.Float64Index",
"pandas.core.algorithms.SelectNSeries",
"pandas.core.construction.is_empty_data",
"pandas.core.accessor.CachedAccessor",
"pandas.compat.numpy.function.validate_repeat",
"pandas.core.ops.align_method_SERIES",
"pandas.core.indexes.api.ensure_index",
"pandas.core.sorting.ensure_key_mapped",
"pandas.core.dtypes.common.is_iterator",
"pandas.core.nanops.nancov",
"pandas.core.dtypes.missing.notna",
"pandas.core.generic.NDFrame.__init__",
"pandas._config.get_option",
"pandas.core.common.is_bool_indexer",
"pandas.core.algorithms.isin",
"pandas.core.indexes.datetimes.DatetimeIndex",
"pandas.io.formats.format.SeriesFormatter",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.util._decorators.Substitution",
"pandas.core.common.standardize_mapping",
"pandas.core.algorithms.mode",
"pandas.compat.numpy.function.validate_take"
]
] |
krodyush/nncf | [
"476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a"
] | [
"nncf/quantization/metrics.py"
] | [
"import numpy as np\nimport networkx as nx\nfrom copy import deepcopy\nfrom texttable import Texttable\nfrom collections import deque\n\nfrom nncf.quantization.layers import SymmetricQuantizer\nfrom nncf.nncf_network import NNCFNetwork, NNCFGraph\nfrom nncf.dynamic_graph.transform_graph import is_nncf_module\nfrom nncf.quantization.quantizer_propagation import DEFAULT_QUANT_TRAIT_TO_OP_DICT, QuantizationTrait\n\nclass BaseMetric:\n def __init__(self):\n pass\n\n def collect(self):\n pass\n\n def get_metric_table(self):\n pass\n\n\nclass NetworkQuantizationShareMetric(BaseMetric):\n \"\"\"\n This is a metric representing the share of the model that has been quantized.\n It includes the calculation of the following numbers:\n - Percentage of symmetric/asymmetric/per-channel/per-tensor weight quantizers relative\n to the number of placed weight quantizers\n - Percentage of symmetric/asymmetric/per-channel/per-tensor non weight quantizers relative\n to the number of placed non weight quantizers\n - Percentage of weight quantizers and non weight quantizers for each precision relative\n to the number potential* quantizers / placed quantizers\n Bitwidth distribution data is also collected.\n\n * The maximum possible number of potential quantizers depends on the presence of ignored\n scopes and the mode of quantizer setup that is used at the time of collecting the metric.\n\n \"\"\"\n NAME_STR = 'NetworkQuantizationShare'\n\n WEIGHTS_RATIO_STR = ' WQs / All placed WQs' # WQ - weight quantizer\n ACTIVATIONS_RATIO_STR = ' AQs / All placed AQs' # AQ - activation quantizer\n TOTAL_RATIO_STR = ' Qs (out of total placed)'\n\n PARAMS_STR = 'Quantizer parameter'\n SYMMETRIC_STR = 'Symmetric'\n ASYMMETRIC_STR = 'Asymmetric'\n PER_CHANNEL_STR = 'Per-channel'\n SIGNED_STR = 'Signed'\n PER_TENSOR_STR = 'Per-tensor'\n UNSIGNED_STR = 'Unsigned'\n SHARE_WEIGHT_QUANTIZERS_STR = 'Placed WQs / Potential WQs'\n SHARE_ACTIVATION_QUANTIZERS_STR = 'Placed AQs / Potential AQs'\n\n def __init__(self, compressed_model, weights_quantizers, non_weights_quantizers, quantizer_setup_type):\n super().__init__()\n self._compressed_model = compressed_model\n self._quantizer_setup_type = quantizer_setup_type # type: QuantizerSetupType\n self.non_weights_quantizers = {k: v.quantizer_module_ref for k, v in non_weights_quantizers.items()}\n self.weights_quantizers = weights_quantizers\n self._all_quantizations = {**self.weights_quantizers, **self.non_weights_quantizers}\n self.header = [self.PARAMS_STR, self.WEIGHTS_RATIO_STR, self.ACTIVATIONS_RATIO_STR, self.TOTAL_RATIO_STR]\n self.params = {self.PER_CHANNEL_STR, self.PER_TENSOR_STR, self.UNSIGNED_STR, self.SIGNED_STR,\n self.SYMMETRIC_STR, self.ASYMMETRIC_STR}\n self.params_bits_stat = set()\n self.num_potential_quantized_weights = len(compressed_model.get_nncf_modules())\n self.num_potential_quantized_activations = self._get_num_potential_quantized_activations()\n self.num_placed_weight_quantizers = len(self.weights_quantizers)\n self.num_placed_activation_quantizers = len(self.non_weights_quantizers)\n self.num_all_potential_quantizer = self.num_potential_quantized_weights +\\\n self.num_potential_quantized_activations\n self.stat = {}\n self._ratio = {\n self.WEIGHTS_RATIO_STR: len(self.weights_quantizers),\n self.ACTIVATIONS_RATIO_STR: len(self.non_weights_quantizers),\n self.TOTAL_RATIO_STR: len(self._all_quantizations)}\n\n def _get_num_potential_quantized_activations(self):\n from nncf.quantization.algo import QuantizerSetupType\n retval = 0\n if self._quantizer_setup_type 
== QuantizerSetupType.PATTERN_BASED:\n from nncf.quantization.algo import QuantizationBuilder\n # pylint: disable=protected-access\n default_pattern = QuantizationBuilder._make_default_quantizable_subgraph_pattern()\n retval = len(self._compressed_model.get_post_pattern_insertion_points(default_pattern))\n else:\n from nncf.quantization.algo import QuantizerPropagationSolver\n insertion_point_graph = self._compressed_model.get_insertion_point_graph()\n prop_graph_solver = QuantizerPropagationSolver()\n insertion_data = prop_graph_solver.run_on_ip_graph(insertion_point_graph)\n retval = len(insertion_data)\n return retval\n\n def collect(self):\n for quantizer in self._all_quantizations.values():\n self.params_bits_stat.add(quantizer.num_bits)\n\n for h in self.header:\n self.stat[h] = {}\n for p in self.params:\n self.stat[h][p] = 0\n for p in self.params_bits_stat:\n self.stat[h][p] = 0\n\n for quantizer in self._all_quantizations.values(): # type: BaseQuantizer\n num_bits = quantizer.num_bits\n self.stat[self.TOTAL_RATIO_STR][num_bits] += 1\n type_ = self.WEIGHTS_RATIO_STR if quantizer.is_weights else self.ACTIVATIONS_RATIO_STR\n self.stat[type_][num_bits] += 1\n if quantizer.per_channel:\n self.stat[type_][self.PER_CHANNEL_STR] += 1\n else:\n self.stat[type_][self.PER_TENSOR_STR] += 1\n if quantizer.signed:\n self.stat[type_][self.SIGNED_STR] += 1\n else:\n self.stat[type_][self.UNSIGNED_STR] += 1\n if isinstance(quantizer, SymmetricQuantizer):\n self.stat[type_][self.SYMMETRIC_STR] += 1\n else:\n self.stat[type_][self.ASYMMETRIC_STR] += 1\n\n def _get_copy_statistics(self):\n statistics = deepcopy(self.stat)\n for h in self.header[1:]:\n for key, _ in statistics[h].items():\n try:\n statistics[h][key] /= self._ratio[h]\n statistics[h][key] *= 100\n except ZeroDivisionError:\n statistics[h][key] = 0\n return statistics\n\n def get_metric_table(self):\n table_with_bits_stats = Texttable()\n table_with_other_stats = Texttable()\n data = [['Metric type', 'Value']]\n for h in (self.WEIGHTS_RATIO_STR, self.ACTIVATIONS_RATIO_STR):\n for p in self.params:\n try:\n row = ['{} '.format(p) + str(h), '{:.2f} % ({} / {}) '.format(\\\n self.stat[h][p] / self._ratio[h] * 100, self.stat[h][p], self._ratio[h])]\n except ZeroDivisionError:\n row = ['{} '.format(p) + h, 0]\n data.append(row)\n try:\n row = [self.SHARE_WEIGHT_QUANTIZERS_STR, '{:.2f} % ({} / {}) '.format(\\\n self.num_placed_weight_quantizers / self.num_potential_quantized_weights * 100,\n self.num_placed_weight_quantizers, self.num_potential_quantized_weights)]\n except ZeroDivisionError:\n row = [self.SHARE_WEIGHT_QUANTIZERS_STR, '{} % '.format(0)]\n\n data.append(row)\n try:\n row = [self.SHARE_ACTIVATION_QUANTIZERS_STR, '{:.2f} % ({} / {}) '.format(\\\n self.num_placed_activation_quantizers / self.num_potential_quantized_activations * 100,\n self.num_placed_activation_quantizers, self.num_potential_quantized_activations)]\n except ZeroDivisionError:\n row = [self.SHARE_ACTIVATION_QUANTIZERS_STR, '{} % '.format(0)]\n data.append(row)\n\n table_with_other_stats.add_rows(data)\n\n data = [['Num bits (N)', 'N-bits WQs / Placed WQs', 'N-bits AQs / Placed AQs', 'N-bits Qs / Placed Qs']]\n for p in self.params_bits_stat:\n row = [p]\n for h in (self.WEIGHTS_RATIO_STR, self.ACTIVATIONS_RATIO_STR, self.TOTAL_RATIO_STR):\n try:\n row.append('{:.2f} % ({} / {}) '.format(\\\n self.stat[h][p] / self._ratio[h] * 100, self.stat[h][p], self._ratio[h]))\n except ZeroDivisionError:\n row.append(0)\n data.append(row)\n 
table_with_bits_stats.add_rows(data)\n\n retval = {\n \"Share quantization statistics:\" : table_with_other_stats,\n \"Bitwidth distribution:\" : table_with_bits_stats\n }\n return retval\n\n def get_bits_stat(self):\n table = Texttable()\n data = [['Num bits (N)', 'N-bits WQs / Placed Qs', 'N-bits AQs / Placed Qs', 'N-bits Qs / Placed Qs']]\n for p in self.params_bits_stat:\n row = [p]\n for h in (self.WEIGHTS_RATIO_STR, self.ACTIVATIONS_RATIO_STR, self.TOTAL_RATIO_STR):\n try:\n row.append(self.stat[h][p] / self._ratio[self.TOTAL_RATIO_STR] * 100)\n except ZeroDivisionError:\n row.append(0)\n data.append(row)\n table.add_rows(data)\n return table\n\nclass MemoryCostMetric(BaseMetric):\n \"\"\"\n\n This metric considers:\n - how many times memory consumption for network weights will decrease.\n - how many times memory consumption* for activations tensor will decrease.\n\n * Reflects host memory consumption, assuming only the final low-precision output activation tensors are stored\n in host memory (i.e. assuming intermediate accumulation results are only stored in device memory)\n\n \"\"\"\n PARAMS_STR = 'params'\n NAME_STR = 'MemoryCost'\n\n EXPECTED_MEMORY_CONSUMPTION_DECREASE_STR = 'Memory consumption decrease for weights'\n SIZE_MEMORY_FP_WEIGHTS_STR = 'Memory consumption for full-precision weights'\n SIZE_MEMORY_COMPRESSED_WEIGHTS_STR = 'Memory consumption for quantized weights'\n MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_FP32_MODEL_STR =\\\n 'Max memory consumption for an activation tensor in FP32 model'\n MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_COMPRESSED_MODEL_STR =\\\n 'Max memory consumption for an activation tensor in compressed model'\n\n def __init__(self, compressed_model: NNCFNetwork, weights_quantizers, non_weight_quantizers):\n super().__init__()\n self._compressed_model = compressed_model\n self._weights_quantizers = weights_quantizers\n self._non_weight_quantizers = {k: v.quantizer_module_ref for k, v in non_weight_quantizers.items()}\n self.header = [self.EXPECTED_MEMORY_CONSUMPTION_DECREASE_STR, self.SIZE_MEMORY_FP_WEIGHTS_STR,\\\n self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR,\\\n self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_FP32_MODEL_STR,\\\n self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_COMPRESSED_MODEL_STR]\n self.stat = {}\n\n def collect(self):\n self.stat[self.SIZE_MEMORY_FP_WEIGHTS_STR] = 0\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR] = 0\n fp_num_bits = 32\n nncf_modules = self._compressed_model.get_nncf_modules()\n\n for scope_module, nncf_module in nncf_modules.items():\n count_el = np.prod(nncf_module.weight.shape)\n self.stat[self.SIZE_MEMORY_FP_WEIGHTS_STR] += count_el * fp_num_bits\n status, quantizer = self._get_quantizer_for_scope(scope_module, self._weights_quantizers)\n if status > 0:\n num_bits = quantizer.num_bits\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR] += count_el * num_bits\n else:\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR] += count_el * fp_num_bits\n try:\n self.stat[self.EXPECTED_MEMORY_CONSUMPTION_DECREASE_STR] = self.stat[self.SIZE_MEMORY_FP_WEIGHTS_STR] /\\\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR]\n except ZeroDivisionError:\n self.stat[self.EXPECTED_MEMORY_CONSUMPTION_DECREASE_STR] = 0\n self.stat[self.SIZE_MEMORY_COMPRESSED_WEIGHTS_STR] /= 2**23\n self.stat[self.SIZE_MEMORY_FP_WEIGHTS_STR] /= 2**23\n\n original_graph = deepcopy(self._compressed_model.get_original_graph())\n\n memory_consumption_fp_model = {}\n memory_consumption_compressed_model = {}\n # pylint: disable=protected-access\n 
original_nx_graph = original_graph._nx_graph\n nx.set_edge_attributes(original_nx_graph, 32, \"precision\")\n input_nodes = original_graph.get_input_nodes()\n input_node_keys = []\n for input_node in input_nodes:\n input_node_key = original_graph.get_node_key_by_id(input_node.node_id)\n input_node_keys.append(input_node_key)\n next_nodes = original_graph.get_next_nodes(input_node)\n for next_node in next_nodes:\n scope = next_node.op_exec_context.scope_in_model\n status, quantizer = self._get_quantizer_for_scope(scope, self._non_weight_quantizers)\n if status:\n next_node_key = original_graph.get_node_key_by_id(next_node.node_id)\n num_bits = quantizer.num_bits\n original_nx_graph.edges[input_node_key, next_node_key]['precision'] = num_bits\n\n for u, v in original_nx_graph.edges:\n if u in input_node_keys:\n continue\n\n shape = original_nx_graph.edges[u, v][NNCFGraph.ACTIVATION_SHAPE_EDGE_ATTR]\n u_node_scope_str = str(original_nx_graph.nodes[u]['op_exec_context'].input_agnostic)\n num_bits = self.get_precision_for_activation_tensor(u, v, original_nx_graph)\n original_nx_graph.edges[u, v]['precision'] = num_bits\n memory_consumption_fp_model[u_node_scope_str] = np.prod(shape) * fp_num_bits\n memory_consumption_compressed_model[u_node_scope_str] = np.prod(shape) * num_bits\n try:\n self.stat[self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_FP32_MODEL_STR] =\\\n max(memory_consumption_fp_model.values()) / 2**23\n self.stat[self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_COMPRESSED_MODEL_STR] =\\\n max(memory_consumption_compressed_model.values()) / 2**23\n except ValueError:\n self.stat[self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_FP32_MODEL_STR] = 0\n self.stat[self.MAX_MEMORY_CONSUMPTION_ACTIVATION_TENSOR_IN_COMPRESSED_MODEL_STR] = 0\n\n def get_precision_for_activation_tensor(self, u_node, v_node, original_nx_graph):\n scope_u_node = original_nx_graph.nodes[u_node][NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].scope_in_model\n # pylint: disable=protected-access\n pred_u_nodes = original_nx_graph._pred[u_node]\n precision_enter_activation_tensor =\\\n max([0] + [original_nx_graph.edges[pred_u_node, u_node]['precision'] for pred_u_node in pred_u_nodes])\n module = self._compressed_model.get_module_by_scope(scope_u_node)\n if is_nncf_module(module):\n status, quantizer = self._get_quantizer_for_scope(scope_u_node, self._weights_quantizers)\n if status:\n precision = max(quantizer.num_bits, precision_enter_activation_tensor)\n else:\n precision = 32\n return precision\n\n u_node_scope_str = str(original_nx_graph.nodes[u_node][NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].input_agnostic)\n if u_node_scope_str in self._compressed_model.activation_quantizers:\n precision = self._compressed_model.activation_quantizers[u_node_scope_str].num_bits\n else:\n precision = precision_enter_activation_tensor\n return precision\n\n def _get_quantizer_for_scope(self, scope, quatizers):\n for quantizer_id, quantizer in quatizers.items():\n if quantizer_id.get_scope() == scope:\n return True, quantizer\n return False, None\n\n def get_metric_table(self):\n table = Texttable()\n data = [['Metric type', 'Value']]\n data.append([self.header[0], self.stat[self.header[0]]])\n for h in self.header[1:]:\n data.append([h + ' (Mbyte)', self.stat[h]])\n table.add_rows(data)\n\n retval = {\"Memory consumption statistics:\": table}\n return retval\n\n\nclass ShareEdgesQuantizedDataPath(BaseMetric):\n \"\"\"\n\n This metric calculates the percentage of quantized edges relative to the total number of edges\n in the original network 
graph. \"Quantized edge\" is an edge representing a quantized activation tensor.\n\n \"\"\"\n NAME_STR = 'ShareEdgesQuantizedDataPath'\n COUNT_QUANTIZED_EDGES_STR = 'Share edges of the quantized data path'\n QUANTIZED_EDGES_ATTR = 'quantized'\n PASSED_EDGES_ATTR = 'passed'\n NODES_GRAPH_ATTR = 'nodes'\n IS_MERGED_GRAPH_ATTR = 'is_merged'\n\n\n def __init__(self, compressed_model: NNCFNetwork):\n super().__init__()\n self._compressed_model = compressed_model\n self.stat = {}\n\n def collect(self):\n # pylint: disable=too-many-branches\n merged_original_graph =\\\n self.get_merged_original_graph_with_patterns(self._compressed_model.get_original_graph())\n self.stat[self.COUNT_QUANTIZED_EDGES_STR] = 0\n self.header = [self.COUNT_QUANTIZED_EDGES_STR]\n nx.set_edge_attributes(merged_original_graph, False, self.QUANTIZED_EDGES_ATTR)\n nx.set_edge_attributes(merged_original_graph, False, self.PASSED_EDGES_ATTR)\n # pylint: disable=protected-access\n input_nodes = [node for node in merged_original_graph.nodes if len(merged_original_graph._pred[node]) == 0]\n queue = deque()\n for input_node in input_nodes:\n # pylint: disable=protected-access\n next_nodes = merged_original_graph._succ[input_node]\n for next_node_key in next_nodes:\n edge = merged_original_graph.edges[input_node, next_node_key]\n edge[self.PASSED_EDGES_ATTR] = True\n edge[self.QUANTIZED_EDGES_ATTR] = True\n self.stat[self.COUNT_QUANTIZED_EDGES_STR] += 1\n queue.appendleft(next_node_key)\n visited_nodes = {}\n #pylint: disable=too-many-nested-blocks\n while len(queue) != 0:\n node_key = queue.pop()\n if node_key in visited_nodes:\n continue\n if self._all_enter_edges_in_node_of_type(merged_original_graph, node_key, self.PASSED_EDGES_ATTR):\n visited_nodes[node_key] = True\n node = merged_original_graph.nodes[node_key]\n if node[self.IS_MERGED_GRAPH_ATTR]:\n last_node = node[self.NODES_GRAPH_ATTR][-1]\n scope_str = str(last_node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].input_agnostic)\n if scope_str in self._compressed_model.activation_quantizers:\n self._marking_edges(merged_original_graph, node_key, queue)\n else:\n self._marking_edges(merged_original_graph, node_key, queue, False)\n else:\n scope_str = str(node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].input_agnostic)\n if scope_str in self._compressed_model.activation_quantizers:\n self._marking_edges(merged_original_graph, node_key, queue)\n else:\n is_op_non_change_precision_activation_tensor = True\n node_op_name = node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].operator_name\n for op in DEFAULT_QUANT_TRAIT_TO_OP_DICT[QuantizationTrait.INPUTS_QUANTIZABLE]:\n op_names = [op.name]\n if op.torch_tensor_patch_spec is not None:\n op_names = op.torch_tensor_patch_spec.underlying_function_names\n if node_op_name in op_names:\n is_op_non_change_precision_activation_tensor = False\n break\n status = is_op_non_change_precision_activation_tensor and\\\n self._all_enter_edges_in_node_of_type(merged_original_graph,\\\n node_key, self.QUANTIZED_EDGES_ATTR)\n self._marking_edges(merged_original_graph, node_key, queue, status)\n else:\n queue.appendleft(node_key)\n self.num_merged_original_graph_edges = len(merged_original_graph.edges)\n\n def _get_copy_statistics(self):\n statistics = deepcopy(self.stat)\n try:\n statistics[self.COUNT_QUANTIZED_EDGES_STR] /= self.num_merged_original_graph_edges\n statistics[self.COUNT_QUANTIZED_EDGES_STR] *= 100\n except ZeroDivisionError:\n statistics[self.COUNT_QUANTIZED_EDGES_STR] = 0\n\n return statistics\n\n def _all_enter_edges_in_node_of_type(self, graph, node_key, 
type_edge):\n # pylint: disable=protected-access\n prev_nodes = graph._pred[node_key]\n retval = True\n for prev_node_key in prev_nodes:\n edge = graph.edges[prev_node_key, node_key]\n if not edge[type_edge]:\n retval = False\n break\n return retval\n\n def _marking_edges(self, graph, node_key, queue, mark=True):\n # pylint: disable=protected-access\n next_nodes = graph._succ[node_key]\n for next_node_key in next_nodes:\n edge = graph.edges[node_key, next_node_key]\n edge[self.QUANTIZED_EDGES_ATTR] = mark\n edge[self.PASSED_EDGES_ATTR] = True\n queue.appendleft(next_node_key)\n if mark:\n self.stat[self.COUNT_QUANTIZED_EDGES_STR] += 1\n\n def get_metric_table(self):\n table = Texttable()\n data = [['Metric type', 'Value']]\n try:\n data.append([self.header[0], '{:.2f} % ({} / {})'.format(\n self.stat[self.COUNT_QUANTIZED_EDGES_STR] / self.num_merged_original_graph_edges * 100,\n self.stat[self.COUNT_QUANTIZED_EDGES_STR], self.num_merged_original_graph_edges)])\n except ZeroDivisionError:\n data.append([self.header[0], '{} % '.format(0)])\n table.add_rows(data)\n\n retval = {\"Quantization configuration statistics:\" : table}\n return retval\n\n def get_merged_original_graph_with_patterns(self, original_graph: NNCFGraph):\n import nncf.dynamic_graph.patterns as p\n from nncf.dynamic_graph.graph_matching import search_all\n\n pattern = p.LINEAR_OPS + p.ANY_BN_ACT_COMBO | p.LINEAR_OPS + p.ELTWISE_UNIFORM_OPS\n # pylint: disable=protected-access\n matches = search_all(original_graph._nx_graph, pattern)\n merged_graph = deepcopy(original_graph._nx_graph)\n nx.set_node_attributes(merged_graph, False, self.IS_MERGED_GRAPH_ATTR)\n for match in matches:\n if len(match) == 1:\n continue\n\n input_node_key = match[0]\n output_node_key = match[-1]\n in_edges = list(merged_graph.in_edges(input_node_key))\n out_edges = list(merged_graph.out_edges(output_node_key))\n\n in_edge_copies_dict = {}\n for in_edge_key in in_edges:\n in_edge_copies_dict[in_edge_key] = deepcopy(merged_graph.edges[in_edge_key])\n out_edge_copies_dict = {}\n for out_edge_key in out_edges:\n out_edge_copies_dict[out_edge_key] = deepcopy(merged_graph.edges[out_edge_key])\n\n merged_node_key = \"\"\n merged_nodes = []\n for node_key in match:\n merged_node_key += node_key + '\\n'\n # pylint: disable=protected-access\n merged_nodes.append(original_graph._nx_graph.nodes[node_key])\n merged_graph.remove_node(node_key)\n merged_node_attrs = {\n NNCFGraph.KEY_NODE_ATTR: merged_node_key,\n self.NODES_GRAPH_ATTR: merged_nodes,\n self.IS_MERGED_GRAPH_ATTR: True\n }\n merged_graph.add_node(merged_node_key, **merged_node_attrs)\n for in_edge_key, in_edge_attrs in in_edge_copies_dict.items():\n merged_graph.add_edge(in_edge_key[0], merged_node_key, **in_edge_attrs)\n for out_edge_key, out_edge_attrs in out_edge_copies_dict.items():\n merged_graph.add_edge(merged_node_key, out_edge_key[1], **out_edge_attrs)\n\n return merged_graph\n\n @staticmethod\n def visualize_marked_graph(merged_original_graph):\n out_graph = nx.DiGraph()\n for node_key, _ in merged_original_graph.nodes.items():\n out_graph.add_node(node_key)\n for u, v in merged_original_graph.edges:\n edge = merged_original_graph.edges[u, v]\n if edge[ShareEdgesQuantizedDataPath.QUANTIZED_EDGES_ATTR]:\n attrs = {\"color\": \"blue\"}\n out_graph.add_edge(u, v, **attrs)\n return out_graph\n"
] | [
[
"numpy.prod"
]
] |
nyw-pathfinder/Deep-Learning-Bootcamp-with-PyTorch | [
"5bf2efd3f921dc95461026df8f105ff7a5822fb5"
] | [
"generation/DCGAN/train.py"
] | [
"if __name__ == '__main__':\n import os\n from torchvision.transforms import Compose, Normalize, Resize, ToTensor\n from torch.utils.data import DataLoader\n from models import Discriminator, Generator, weights_init\n import torch\n import torch.nn as nn\n import matplotlib.pyplot as plt\n from time import time\n from tqdm import tqdm\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n BETA1, BETA2 = 0.5, 0.99\n BATCH_SIZE = 16\n DATASET_NAME = 'MNIST'\n DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0')\n EPOCHS = 1\n ITER_REPORT = 10\n LATENT_DIM = 100\n LR = 2e-4\n N_D_STEP = 1\n\n if DATASET_NAME == 'CIFAR10':\n from torchvision.datasets import CIFAR10\n transforms = Compose([ToTensor(), Normalize(mean=[0.5], std=[0.5])])\n dataset = CIFAR10(root='./datasets', train=True, transform=transforms, download=True)\n elif DATASET_NAME == 'LSUN':\n from torchvision.datasets import LSUN\n transforms = Compose([Resize(64), ToTensor(), Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])\n dataset = LSUN(root='./datasets/LSUN', classes=['bedroom_train'], transform=transforms)\n elif DATASET_NAME == 'MNIST':\n from torchvision.datasets import MNIST\n transforms = Compose([ToTensor(), Normalize(mean=[0.5], std=[0.5])])\n dataset = MNIST(root='./datasets', train=True, transform=transforms, download=True)\n else:\n raise NotImplementedError\n\n data_loader = DataLoader(dataset=dataset, batch_size=BATCH_SIZE, num_workers=0, shuffle=True)\n\n D = Discriminator(DATASET_NAME).apply(weights_init).to(DEVICE)\n G = Generator(DATASET_NAME).apply(weights_init).to(DEVICE)\n print(D, G)\n criterion = nn.BCELoss()\n\n optim_D = torch.optim.Adam(D.parameters(), lr=LR, betas=(BETA1, BETA2))\n optim_G = torch.optim.Adam(G.parameters(), lr=LR, betas=(BETA1, BETA2))\n\n list_D_loss = list()\n list_G_loss = list()\n total_step = 0\n\n st = time()\n for epoch in range(EPOCHS):\n for data in tqdm(data_loader):\n total_step += 1\n real, label = data[0].to(DEVICE), data[1].to(DEVICE)\n z = torch.randn(BATCH_SIZE, LATENT_DIM).to(DEVICE)\n\n fake = G(z)\n\n fake_score = D(fake.detach())\n real_score = D(real)\n\n D_loss = 0.5 * (criterion(fake_score, torch.zeros_like(fake_score).to(DEVICE))\n + criterion(real_score, torch.ones_like(real_score).to(DEVICE)))\n optim_D.zero_grad()\n D_loss.backward()\n optim_D.step()\n list_D_loss.append(D_loss.detach().cpu().item())\n\n if total_step % N_D_STEP == 0:\n fake_score = D(fake)\n G_loss = criterion(fake_score, torch.ones_like(fake_score))\n optim_G.zero_grad()\n G_loss.backward()\n optim_G.step()\n list_G_loss.append(G_loss.detach().cpu().item())\n\n if total_step % ITER_REPORT == 0:\n print(\"Epoch: {}, D_loss: {:.{prec}} G_loss: {:.{prec}}\"\n .format(epoch, D_loss.detach().cpu().item(), G_loss.detach().cpu().item(), prec=4))\n\n torch.save(D.state_dict(), '{}_D.pt'.format(DATASET_NAME))\n torch.save(G.state_dict(), '{}_G.pt'.format(DATASET_NAME))\n\n plt.figure()\n plt.plot(range(0, len(list_D_loss)), list_D_loss, linestyle='--', color='r', label='Discriminator loss')\n plt.plot(range(0, len(list_G_loss) * N_D_STEP, N_D_STEP), list_G_loss, linestyle='--', color='g',\n label='Generator loss')\n plt.xlabel('Iteration')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig('Loss.png')\n\n print(time() - st)\n"
] | [
[
"torch.ones_like",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.legend",
"torch.randn",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"torch.zeros_like",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"torch.nn.BCELoss",
"matplotlib.pyplot.xlabel"
]
] |
rioyokotalab/RAFT | [
"d718fe86d11f8ab0d4d6d0b0b5c45fa029104444"
] | [
"core/utils/flow_viz.py"
] | [
"# Flow visualization code\n# used from https://github.com/tomrunia/OpticalFlow_Visualization\n\n# MIT License\n#\n# Copyright (c) 2018 Tom Runia\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to conditions.\n#\n# Author: Tom Runia\n# Date Created: 2018-08-03\n\nimport numpy as np\n\n\ndef make_colorwheel():\n \"\"\"\n Generates a color wheel for optical flow visualization as presented in:\n Baker et al. \"A Database and Evaluation Methodology for Optical Flow\"\n (ICCV, 2007)\n URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf\n\n Code follows the original C++ source code of Daniel Scharstein.\n Code follows the the Matlab source code of Deqing Sun.\n\n Returns:\n np.ndarray: Color wheel\n \"\"\"\n\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n colorwheel = np.zeros((ncols, 3))\n col = 0\n\n # RY\n colorwheel[0:RY, 0] = 255\n colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY)\n col = col + RY\n # YG\n colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG)\n colorwheel[col:col + YG, 1] = 255\n col = col + YG\n # GC\n colorwheel[col:col + GC, 1] = 255\n colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC)\n col = col + GC\n # CB\n colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB)\n colorwheel[col:col + CB, 2] = 255\n col = col + CB\n # BM\n colorwheel[col:col + BM, 2] = 255\n colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM)\n col = col + BM\n # MR\n colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR)\n colorwheel[col:col + MR, 0] = 255\n return colorwheel\n\n\ndef flow_uv_to_colors(u, v, convert_to_bgr=False):\n \"\"\"\n Applies the flow color wheel to (possibly clipped) flow components u and v.\n\n According to the C++ source code of Daniel Scharstein\n According to the Matlab source code of Deqing Sun\n\n Args:\n u (np.ndarray): Input horizontal flow of shape [H,W]\n v (np.ndarray): Input vertical flow of shape [H,W]\n convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.\n\n Returns:\n np.ndarray: Flow visualization image of shape [H,W,3]\n \"\"\"\n flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)\n colorwheel = make_colorwheel() # shape [55x3]\n ncols = colorwheel.shape[0]\n rad = np.sqrt(np.square(u) + np.square(v))\n a = np.arctan2(-v, -u) / np.pi\n fk = (a + 1) / 2 * (ncols - 1)\n k0 = np.floor(fk).astype(np.int32)\n k1 = k0 + 1\n k1[k1 == ncols] = 0\n f = fk - k0\n for i in range(colorwheel.shape[1]):\n tmp = colorwheel[:, i]\n col0 = tmp[k0] / 255.0\n col1 = tmp[k1] / 255.0\n col = (1 - f) * col0 + f * col1\n idx = (rad <= 1)\n col[idx] = 1 - rad[idx] * (1 - col[idx])\n col[~idx] = col[~idx] * 0.75 # out of range\n # Note the 2-i => BGR instead of RGB\n ch_idx = 2 - i if convert_to_bgr else i\n flow_image[:, :, ch_idx] = np.floor(255 * col)\n return flow_image\n\n\ndef flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):\n \"\"\"\n Expects a two dimensional flow image of shape.\n\n Args:\n flow_uv (np.ndarray): Flow UV image of shape [H,W,2]\n clip_flow (float, optional): Clip maximum of flow values. 
Defaults to None.\n convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.\n\n Returns:\n np.ndarray: Flow visualization image of shape [H,W,3]\n \"\"\"\n assert flow_uv.ndim == 3, 'input flow must have three dimensions'\n assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'\n if clip_flow is not None:\n flow_uv = np.clip(flow_uv, 0, clip_flow)\n u = flow_uv[:, :, 0]\n v = flow_uv[:, :, 1]\n rad = np.sqrt(np.square(u) + np.square(v))\n rad_max = np.max(rad)\n epsilon = 1e-5\n u = u / (rad_max + epsilon)\n v = v / (rad_max + epsilon)\n return flow_uv_to_colors(u, v, convert_to_bgr)\n"
] | [
[
"numpy.arctan2",
"numpy.zeros",
"numpy.floor",
"numpy.arange",
"numpy.max",
"numpy.clip",
"numpy.square"
]
] |
magreiner/TractSeg | [
"5ac5278fc3a6d3262f9f06924dbdde01b399ccf6"
] | [
"tractseg/models/UNet_Pytorch_DeepSup.py"
] | [
"# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport glob\nfrom os.path import join\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import Adamax\nfrom torch.optim import Adam\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom torch.autograd import Variable\n\nfrom tractseg.libs.PytorchUtils import PytorchUtils\nfrom tractseg.libs.ExpUtils import ExpUtils\nfrom tractseg.models.BaseModel import BaseModel\nfrom tractseg.libs.PytorchUtils import conv2d\nfrom tractseg.libs.PytorchUtils import deconv2d\n\n\nclass UNet_Pytorch_DeepSup(torch.nn.Module):\n def __init__(self, n_input_channels=3, n_classes=7, n_filt=64, batchnorm=False, dropout=False):\n super(UNet_Pytorch_DeepSup, self).__init__()\n\n self.dropout = dropout\n\n self.in_channel = n_input_channels\n self.n_classes = n_classes\n\n self.contr_1_1 = conv2d(n_input_channels, n_filt)\n self.contr_1_2 = conv2d(n_filt, n_filt)\n self.pool_1 = nn.MaxPool2d((2, 2))\n\n self.contr_2_1 = conv2d(n_filt, n_filt * 2)\n self.contr_2_2 = conv2d(n_filt * 2, n_filt * 2)\n self.pool_2 = nn.MaxPool2d((2, 2))\n\n self.contr_3_1 = conv2d(n_filt * 2, n_filt * 4)\n self.contr_3_2 = conv2d(n_filt * 4, n_filt * 4)\n self.pool_3 = nn.MaxPool2d((2, 2))\n\n self.contr_4_1 = conv2d(n_filt * 4, n_filt * 8)\n self.contr_4_2 = conv2d(n_filt * 8, n_filt * 8)\n self.pool_4 = nn.MaxPool2d((2, 2))\n\n self.dropout = nn.Dropout(p=0.4)\n\n self.encode_1 = conv2d(n_filt * 8, n_filt * 16)\n self.encode_2 = conv2d(n_filt * 16, n_filt * 16)\n self.deconv_1 = deconv2d(n_filt * 16, n_filt * 16, kernel_size=2, stride=2)\n # self.deconv_1 = nn.Upsample(scale_factor=2) #does only upscale width and height #Similar results to deconv2d\n\n self.expand_1_1 = conv2d(n_filt * 8 + n_filt * 16, n_filt * 8)\n self.expand_1_2 = conv2d(n_filt * 8, n_filt * 8)\n self.deconv_2 = deconv2d(n_filt * 8, n_filt * 8, kernel_size=2, stride=2)\n # self.deconv_2 = nn.Upsample(scale_factor=2)\n\n self.expand_2_1 = conv2d(n_filt * 4 + n_filt * 8, n_filt * 4, stride=1)\n self.expand_2_2 = conv2d(n_filt * 4, n_filt * 4, stride=1)\n self.deconv_3 = deconv2d(n_filt * 4, n_filt * 4, kernel_size=2, stride=2)\n # self.deconv_3 = nn.Upsample(scale_factor=2)\n\n self.output_2 = nn.Conv2d(n_filt * 4 + n_filt * 8, n_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.output_2_up = nn.Upsample(scale_factor=2, mode='bilinear') # does only upscale width and height\n\n self.expand_3_1 = conv2d(n_filt * 2 + n_filt * 4, n_filt * 2, stride=1)\n self.expand_3_2 = conv2d(n_filt * 2, n_filt * 2, stride=1)\n self.deconv_4 = deconv2d(n_filt * 2, n_filt * 2, kernel_size=2, stride=2)\n # self.deconv_4 = nn.Upsample(scale_factor=2)\n\n self.output_3 = nn.Conv2d(n_filt * 2 + n_filt * 4, n_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.output_3_up = nn.Upsample(scale_factor=2, mode='bilinear') # does only upscale width and height\n\n 
self.expand_4_1 = conv2d(n_filt + n_filt * 2, n_filt, stride=1)\n self.expand_4_2 = conv2d(n_filt, n_filt, stride=1)\n\n self.conv_5 = nn.Conv2d(n_filt, n_classes, kernel_size=1, stride=1, padding=0, bias=True) # no activation function, because is in LossFunction (...WithLogits)\n\n def forward(self, inpt):\n contr_1_1 = self.contr_1_1(inpt)\n contr_1_2 = self.contr_1_2(contr_1_1)\n pool_1 = self.pool_1(contr_1_2)\n\n contr_2_1 = self.contr_2_1(pool_1)\n contr_2_2 = self.contr_2_2(contr_2_1)\n pool_2 = self.pool_2(contr_2_2)\n\n contr_3_1 = self.contr_3_1(pool_2)\n contr_3_2 = self.contr_3_2(contr_3_1)\n pool_3 = self.pool_3(contr_3_2)\n\n contr_4_1 = self.contr_4_1(pool_3)\n contr_4_2 = self.contr_4_2(contr_4_1)\n pool_4 = self.pool_4(contr_4_2)\n\n # pool_4 = self.dropout(pool_4)\n\n encode_1 = self.encode_1(pool_4)\n encode_2 = self.encode_2(encode_1)\n deconv_1 = self.deconv_1(encode_2)\n\n concat1 = torch.cat([deconv_1, contr_4_2], 1)\n expand_1_1 = self.expand_1_1(concat1)\n expand_1_2 = self.expand_1_2(expand_1_1)\n deconv_2 = self.deconv_2(expand_1_2)\n\n concat2 = torch.cat([deconv_2, contr_3_2], 1)\n expand_2_1 = self.expand_2_1(concat2)\n expand_2_2 = self.expand_2_2(expand_2_1)\n deconv_3 = self.deconv_3(expand_2_2)\n\n output_2 = self.output_2(concat2)\n output_2_up = self.output_2_up(output_2)\n\n concat3 = torch.cat([deconv_3, contr_2_2], 1)\n expand_3_1 = self.expand_3_1(concat3)\n expand_3_2 = self.expand_3_2(expand_3_1)\n deconv_4 = self.deconv_4(expand_3_2)\n\n output_3 = output_2_up + self.output_3(concat3)\n output_3_up = self.output_3_up(output_3)\n\n concat4 = torch.cat([deconv_4, contr_1_2], 1)\n expand_4_1 = self.expand_4_1(concat4)\n expand_4_2 = self.expand_4_2(expand_4_1)\n\n conv_5 = self.conv_5(expand_4_2)\n\n final = output_3_up + conv_5\n\n # return conv_51\n # return final\n return final, F.sigmoid(final)"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.functional.sigmoid",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.cat",
"torch.nn.Dropout"
]
] |
ruinunca/data_tooling | [
"297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff"
] | [
"kenlm_training/cc_net/flat_hash_set.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport sys\nimport time\nimport warnings\nfrom typing import Iterable, Iterator, Sequence, Sized, Tuple, Type\n\nimport numpy as np\n\nHASH_TYPE: Type[np.uint64] = np.uint64\n\nGETPY_WARNING = False\n\n\nclass AbstractDedupHashSet(Sized, Iterable[np.uint64]):\n \"\"\"A dict-like that returns `True` for keys that have been added more than once.\n\n The API is batched and expect np.array as input. This batching grants better\n perf when using the C++ implementation.\n \"\"\"\n\n dtype: Type[np.uint64] = HASH_TYPE\n\n def __repr__(self):\n implementation = type(self).__name__\n return f\"[{implementation}, len: {len(self)}\"\n\n def __len__(self) -> int:\n ...\n\n def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:\n ...\n\n def __getitem__(self, values) -> np.ndarray:\n ...\n\n def __setitem__(self, keys, values) -> None:\n ...\n\n def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:\n ...\n\n def keys(self) -> Iterable[np.uint64]:\n ...\n\n def __iter__(self) -> Iterator[np.uint64]:\n return iter(self.keys())\n\n def add(self, h, contains=None):\n \"\"\"Add the given keys. First time a key is added the value is set to 0,\n then it's set to one.\"\"\"\n if not isinstance(h, np.ndarray):\n h = np.array(h, dtype=HASH_TYPE)\n if contains is None:\n contains = self.__contains__(h)\n\n self.__setitem__(h, contains)\n return contains\n\n def merge(self, keys, values):\n contains = self.__contains__(keys)\n self.__setitem__(keys, contains | values)\n\n def dump(self, filename):\n return self.dump_np(filename)\n\n def load(self, filename):\n return self.load_np(filename)\n\n def dump_np(self, filename):\n kv_type = np.dtype([(\"k\", HASH_TYPE), (\"v\", np.uint8)])\n items = np.fromiter(self.items(), dtype=kv_type, count=len(self))\n with open(filename, \"wb\") as f:\n np.save(f, items)\n\n def load_np(self, filename):\n items = np.load(str(filename))\n keys = items[\"k\"].copy()\n values = items[\"v\"].copy()\n self.merge(keys, values)\n\n def dump_np2(self, filename):\n keys = np.fromiter(\n (k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)\n )\n with open(filename, \"wb\") as f:\n np.save(f, keys)\n\n values = np.fromiter(\n (v for (k, v) in self.items()), dtype=np.uint8, count=len(self)\n )\n with open(str(filename) + \".val\", \"wb\") as f:\n np.save(f, values)\n\n def load_np2(self, filename):\n keys = np.load(filename)\n values = np.load(str(filename) + \".val\")\n self.merge(keys, values)\n\n\nclass NaiveHashSet(dict, AbstractDedupHashSet):\n \"\"\"Pure python implementation of AbstractDedupHashSet.\n\n This implementation is quite fast, since Python dict are heavily optimized.\n \"\"\"\n\n def __init__(self, iterable=None):\n super().__init__()\n global GETPY_WARNING\n if GETPY_WARNING:\n warnings.warn(\n \"Module 'getpy' not found. 
Deduplication will take more RAM.\"\n \" Try `pip install cc_net[getpy]\"\n )\n GETPY_WARNING = False\n\n def __contains__(self, values):\n \"\"\"Returns `True` if the object has been added at list once.\"\"\"\n contains_point = super().__contains__\n return np.fromiter(\n map(contains_point, values), count=len(values), dtype=np.uint8\n )\n\n def __getitem__(self, values):\n \"\"\"Returns `True` if the object has been added at list twice.\"\"\"\n get_point = super().get\n return np.fromiter(\n map(lambda x: get_point(x, False), values),\n count=len(values),\n dtype=np.uint8,\n )\n\n def __setitem__(self, keys, values):\n assert len(keys) == len(values)\n for k, v in zip(keys, values):\n dict.__setitem__(self, k, v)\n\n\ntry:\n import getpy as gp # type: ignore\n\n class _FlatHashSet(gp.Dict, AbstractDedupHashSet):\n \"\"\"C++ backed implementation of AbstractDedupHashSet.\n\n This implementation is slightly slower than the Python one but uses\n 3x less RAM.\n See https://github.com/atom-moyer/getpy.\n \"\"\"\n\n def __init__(self):\n super().__init__(HASH_TYPE, np.uint8, default_value=False)\n\n def __contains__(self, h):\n \"\"\"Returns `True` if the object has been added at list once.\"\"\"\n if not isinstance(h, np.ndarray):\n h = np.array(h, dtype=HASH_TYPE)\n c = gp.Dict.__contains__(self, h)\n c.dtype = np.uint8\n return c\n\n def dump(self, filename):\n return self.dump_gp(filename)\n\n def load(self, filename):\n return self.load_gp(filename)\n\n def dump_gp(self, filename):\n return gp.Dict.dump(self, str(filename))\n\n def load_gp(self, filename):\n \"\"\"Override gp.Dict.load, to correctly merge values instead of overwriting.\"\"\"\n other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)\n other.load(str(filename))\n n = len(other)\n keys = np.fromiter(\n (k for (k, v) in other.items()), dtype=HASH_TYPE, count=n\n )\n values = np.fromiter(\n (v for (k, v) in other.items()), dtype=np.uint8, count=n\n )\n self.merge(keys, values)\n\n FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet\nexcept ImportError:\n GETPY_WARNING = True\n FlatHashSet = NaiveHashSet\n\n\ndef timeit(message, function, *args):\n start = time.time()\n function(*args)\n end = time.time()\n print(message, f\"took {end - start:.0f}s\")\n\n\ndef compare_load(*filenames):\n assert filenames, \"No file given\"\n\n def load_list():\n hashes = []\n for f in filenames:\n h = FlatHashSet()\n h.load(f)\n print(f\"Loaded {h} from {f}.\")\n hashes.append(h)\n return hashes\n\n def load_all(load, ext):\n hashes = FlatHashSet()\n for f in filenames:\n load(hashes, f + ext)\n\n def dump_all(hashes, dump, ext):\n for h, f in zip(hashes, filenames):\n dump(h, f + ext)\n\n hashes = load_list()\n dump_gp = getattr(FlatHashSet, \"dump_gp\")\n if dump_gp is not None:\n timeit(\"Dumping using gp.dump\", dump_all, hashes, dump_gp, \".gp.test\")\n timeit(\"Dumping using dump_np\", dump_all, hashes, FlatHashSet.dump_np, \".npy.test\")\n timeit(\n \"Dumping using dump_np2\", dump_all, hashes, FlatHashSet.dump_np2, \".npy2.test\"\n )\n\n load_gp = getattr(FlatHashSet, \"load_gp\")\n if load_gp is not None:\n timeit(\"Loading using gp.load\", load_all, load_gp, \".gp.test\")\n timeit(\"Loading using load_np\", load_all, FlatHashSet.load_np, \".npy.test\")\n timeit(\"Loading using load_np2\", load_all, FlatHashSet.load_np2, \".npy2.test\")\n\n # Loading 10 shards:\n # [dedup] Dumping using gp.dump took 52s\n # [dedup] Dumping using dump_np took 270s\n # [dedup] Dumping using dump_np2 took 483s\n #\n # [dedup] Loading using gp.load 
took 654s\n # [dedup] Loading using load_np took 82s\n # [dedup] Loading using load_np2 took 76s\n\n\nif __name__ == \"__main__\":\n compare_load(*sys.argv[1:])\n"
] | [
[
"numpy.array",
"numpy.dtype",
"numpy.save",
"numpy.load"
]
] |
wdkwyf/mars | [
"3f750e360e64380eab779301a5103994d4886b6a"
] | [
"mars/tensor/merge/concatenate.py"
] | [
"# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport operator\nfrom collections import Iterable\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...compat import six, lrange, lmap\nfrom ...serialize import AnyField\nfrom ..array_utils import device, as_same_device\nfrom ..utils import validate_axis, unify_chunks\nfrom ..datasource import tensor as astensor\nfrom ..operands import TensorOperand, TensorOperandMixin\nfrom ..indexing.slice import TensorSlice\n\n\ndef _get_index(chunk):\n try:\n return chunk.index\n except AttributeError:\n if isinstance(chunk.op, TensorSlice):\n return chunk.inputs[0].index\n raise\n\n\ndef _norm_axis(axis):\n if isinstance(axis, six.integer_types):\n return axis, True\n if isinstance(axis, Iterable):\n axis = sorted(tuple(axis))\n if len(axis) == 1:\n return axis[0], True\n return axis, False\n\n assert axis is None\n return None, False\n\n\nclass TensorConcatenate(TensorOperand, TensorOperandMixin):\n _op_type_ = OperandDef.CONCATENATE\n\n _axis = AnyField('axis')\n\n def __init__(self, axis=None, dtype=None, sparse=False, **kw):\n super(TensorConcatenate, self).__init__(_axis=axis, _dtype=dtype,\n _sparse=sparse, **kw)\n\n @property\n def axis(self):\n return getattr(self, '_axis', None)\n\n def __call__(self, tensors):\n if len(set(t.ndim for t in tensors)) != 1:\n raise ValueError('all the input tensors must have same number of dimensions')\n\n axis = self._axis\n shapes = [t.shape[:axis] + t.shape[axis + 1:] for t in tensors]\n if len(set(shapes)) != 1:\n raise ValueError('all the input tensor dimensions '\n 'except for the concatenation axis must match exactly')\n\n shape = [0 if i == axis else tensors[0].shape[i] for i in range(tensors[0].ndim)]\n shape[axis] = sum(t.shape[axis] for t in tensors)\n\n if any(np.isnan(s) for i, s in enumerate(shape) if i != axis):\n raise ValueError('cannot concatenate tensor with unknown shape')\n\n return self.new_tensor(tensors, shape=tuple(shape))\n\n @classmethod\n def tile(cls, op):\n from ..indexing.slice import TensorSlice\n\n inputs = op.inputs\n axis = op.axis\n\n c = itertools.count(inputs[0].ndim)\n tensor_axes = [(t, tuple(i if i != axis else next(c) for i in range(t.ndim)))\n for t in inputs]\n inputs = unify_chunks(*tensor_axes)\n\n out_chunk_shape = [0 if i == axis else inputs[0].chunk_shape[i]\n for i in range(inputs[0].ndim)]\n out_chunk_shape[axis] = sum(t.chunk_shape[axis] for t in inputs)\n out_nsplits = [None if i == axis else inputs[0].nsplits[i]\n for i in range(inputs[0].ndim)]\n out_nsplits[axis] = tuple(itertools.chain(*[t.nsplits[axis] for t in inputs]))\n\n out_chunks = []\n axis_cum_chunk_shape = np.cumsum([t.chunk_shape[axis] for t in inputs])\n for out_idx in itertools.product(*[range(s) for s in out_chunk_shape]):\n axis_index = np.searchsorted(axis_cum_chunk_shape, out_idx[axis], side='right')\n t = inputs[axis_index]\n axis_inner_index = out_idx[axis] - \\\n (0 if axis_index < 1 else 
axis_cum_chunk_shape[axis_index - 1])\n idx = out_idx[:axis] + (axis_inner_index,) + out_idx[axis + 1:]\n in_chunk = t.cix[idx]\n if idx == out_idx:\n # if index is the same, just use the input chunk\n out_chunks.append(in_chunk)\n else:\n chunk_op = TensorSlice(slices=[slice(None) for _ in range(in_chunk.ndim)],\n dtype=in_chunk.dtype, sparse=in_chunk.op.sparse)\n out_chunk = chunk_op.new_chunk([in_chunk], shape=in_chunk.shape, index=out_idx)\n\n out_chunks.append(out_chunk)\n\n new_op = op.copy()\n return new_op.new_tensors(op.inputs, op.outputs[0].shape,\n nsplits=out_nsplits, chunks=out_chunks)\n\n @classmethod\n def execute(cls, ctx, op):\n def _base_concatenate(chunk, inputs):\n inputs, device_id, xp = as_same_device(inputs, device=chunk.op.device, ret_extra=True)\n\n axis, single_axis = _norm_axis(chunk.op.axis)\n if single_axis:\n with device(device_id):\n res = xp.concatenate(tuple(inputs), axis=axis)\n else:\n axes = axis or lrange(chunk.ndim)\n chunks = [(_get_index(input), data) for input, data in zip(chunk.inputs, inputs)]\n with device(device_id):\n for i in range(len(axes) - 1):\n new_chunks = []\n for idx, cs in itertools.groupby(chunks, key=lambda t: t[0][:-1]):\n cs = lmap(operator.itemgetter(1), cs)\n new_chunks.append((idx, xp.concatenate(cs, axis=len(axes) - i - 1)))\n chunks = new_chunks\n res = xp.concatenate(lmap(operator.itemgetter(1), chunks), axis=axes[0])\n return res\n\n chunk = op.outputs[0]\n inputs = [ctx[input.key] for input in op.inputs]\n\n if isinstance(inputs[0], tuple):\n ctx[chunk.key] = tuple(_base_concatenate(chunk, [input[i] for input in inputs])\n for i in range(len(inputs[0])))\n else:\n ctx[chunk.key] = _base_concatenate(chunk, inputs)\n\n\ndef concatenate(tensors, axis=0):\n \"\"\"\n Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... : sequence of array_like\n The tensors must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the tensors will be joined. Default is 0.\n\n Returns\n -------\n res : Tensor\n The concatenated tensor.\n\n See Also\n --------\n array_split : Split a tensor into multiple sub-arrays of equal or\n near-equal size.\n split : Split tensor into a list of multiple sub-tensors of equal size.\n hsplit : Split tensor into multiple sub-tensors horizontally (column wise)\n vsplit : Split tensor into multiple sub-tensors vertically (row wise)\n dsplit : Split tensor into multiple sub-tensors along the 3rd axis (depth).\n stack : Stack a sequence of tensors along a new axis.\n hstack : Stack tensors in sequence horizontally (column wise)\n vstack : Stack tensors in sequence vertically (row wise)\n dstack : Stack tensors in sequence depth wise (along third dimension)\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> a = mt.array([[1, 2], [3, 4]])\n >>> b = mt.array([[5, 6]])\n >>> mt.concatenate((a, b), axis=0).execute()\n array([[1, 2],\n [3, 4],\n [5, 6]])\n >>> mt.concatenate((a, b.T), axis=1).execute()\n array([[1, 2, 5],\n [3, 4, 6]])\n\n \"\"\"\n tensors = [astensor(t) for t in tensors]\n\n axis = validate_axis(tensors[0].ndim, axis)\n dtype = np.result_type(*(t.dtype for t in tensors))\n sparse = all(t.issparse() for t in tensors)\n\n op = TensorConcatenate(axis=axis, dtype=dtype, sparse=sparse)\n return op(tensors)\n"
] | [
[
"numpy.result_type",
"numpy.cumsum",
"numpy.searchsorted",
"numpy.isnan"
]
] |
ash-vs/tensorflow | [
"303dc341a6300a4a2eee820679bca30547426aa6"
] | [
"tensorflow/contrib/learn/python/learn/estimators/estimator.py"
] | [
"# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Base Estimator class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport os\nimport tempfile\nimport time\n\nimport six\n\nfrom tensorflow.contrib import framework as contrib_framework\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib import losses\nfrom tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn\nfrom tensorflow.contrib.learn.python.learn.estimators import run_config\nfrom tensorflow.contrib.learn.python.learn.estimators import tensor_signature\nfrom tensorflow.contrib.learn.python.learn.graph_actions import evaluate\nfrom tensorflow.contrib.learn.python.learn.graph_actions import infer\nfrom tensorflow.contrib.learn.python.learn.graph_actions import train\nfrom tensorflow.contrib.learn.python.learn.io import data_feeder\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import device_setter\nfrom tensorflow.python.training import saver\n\n\n# Default metrics for evaluation.\n_EVAL_METRICS = {\n 'regression': {\n 'mean_squared_error': losses.sum_of_squares,\n },\n 'classification': {\n 'logistic': losses.sigmoid_cross_entropy,\n },}\n\n\nclass ModeKeys(object):\n \"\"\"Standard names for model modes.\n\n The following standard keys are defined:\n\n * `TRAIN`: training mode.\n * `EVAL`: evaluation mode.\n * `INFER`: inference mode.\n \"\"\"\n\n TRAIN = 'train'\n EVAL = 'eval'\n INFER = 'infer'\n\n\ndef _get_input_fn(x, y, batch_size):\n # TODO(ipoloshukin): Remove this when refactor of data_feeder is done\n if hasattr(x, 'create_graph') and hasattr(y, 'create_graph'):\n def input_fn():\n return x.create_graph(), y.create_graph()\n return input_fn, None\n\n df = data_feeder.setup_train_data_feeder(x, y,\n n_classes=None,\n batch_size=batch_size)\n return df.input_builder, df.get_feed_dict_fn()\n\n\ndef _get_predict_input_fn(x, batch_size):\n # TODO(ipoloshukin): Remove this when refactor of data_feeder is done\n if hasattr(x, 'create_graph'):\n def input_fn():\n return x.create_graph()\n return input_fn, None\n\n df = data_feeder.setup_train_data_feeder(x, None,\n n_classes=None,\n batch_size=batch_size)\n return df.input_builder, df.get_feed_dict_fn()\n\n\nclass BaseEstimator(sklearn.BaseEstimator):\n \"\"\"Abstract BaseEstimator class to train and evaluate TensorFlow models.\n\n Concrete implementation of this class should provide following functions:\n * _get_train_ops\n * _get_eval_ops\n * _get_predict_ops\n It may override _get_default_metric_functions.\n\n `Estimator` implemented below is a good example of how to use this class.\n\n Parameters:\n model_dir: Directory to save model parameters, graph and etc.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n # TODO(wicke): Remove this once launcher takes over 
config functionality\n _Config = run_config.RunConfig # pylint: disable=invalid-name\n\n def __init__(self, model_dir=None):\n # Model directory.\n self._model_dir = model_dir\n if self._model_dir is None:\n self._model_dir = tempfile.mkdtemp()\n logging.info('Using temporary folder as model directory: %s',\n self._model_dir)\n\n # Create a run configuration\n self._config = BaseEstimator._Config()\n\n # Set device function depending if there are replicas or not.\n if self._config.num_ps_replicas > 0:\n ps_ops = ['Variable', 'AutoReloadVariable']\n self._device_fn = device_setter.replica_device_setter(\n ps_tasks=self._config.num_ps_replicas,\n merge_devices=False, ps_ops=ps_ops)\n else:\n self._device_fn = None\n\n # Features and targets TensorSingature objects.\n self._features_info = None\n self._targets_info = None\n\n @abc.abstractproperty\n def _get_train_ops(self, features, targets):\n \"\"\"Method that builds model graph and returns trainer ops.\n\n Expected to be overriden by sub-classes that require custom support.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n targets: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n Tuple of train `Operation` and loss `Tensor`.\n \"\"\"\n pass\n\n @abc.abstractproperty\n def _get_predict_ops(self, features):\n \"\"\"Method that builds model graph and returns prediction ops.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n predictions: `Tensor` or `dict` of `Tensor` objects.\n \"\"\"\n pass\n\n def _get_eval_ops(self, features, targets, metrics):\n \"\"\"Method that builds model graph and returns evaluation ops.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n targets: `Tensor` or `dict` of `Tensor` objects.\n metrics: `dict` of functions that take predictions and targets.\n\n Returns:\n metrics: `dict` of `Tensor` objects.\n \"\"\"\n predictions = self._get_predict_ops(features)\n result = {}\n for name, metric in six.iteritems(metrics):\n result[name] = metric(predictions, targets)\n return result\n\n def _get_feature_ops_from_example(self, examples_batch):\n \"\"\"Method that returns features given the batch of examples.\n\n This method will be used to export model into a server.\n\n Args:\n examples_batch: batch of tf.Example\n\n Returns:\n features: `Tensor` or `dict` of `Tensor` objects.\n \"\"\"\n raise NotImplementedError('_get_feature_ops_from_example not implemented '\n 'in BaseEstimator')\n\n def _get_default_metric_functions(self):\n \"\"\"Method that provides default metric operations.\n\n This functions is intented to be overridden by sub-classes.\n Returns:\n `dict` of functions that take predictions and targets `Tensor` objects and\n return `Tensor`.\n \"\"\"\n return {}\n\n def fit(self, x, y, steps, batch_size=32, monitor=None):\n \"\"\"Trains a model given training data X and y.\n\n Args:\n x: matrix or tensor of shape [n_samples, n_features...]. Can be\n iterator that returns arrays of features. The training input\n samples for fitting the model.\n y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be\n iterator that returns array of targets. 
The training target values\n (class labels in classification, real numbers in regression).\n steps: number of steps to train model for.\n batch_size: minibatch size to use on the input, defaults to 32.\n monitor: monitor object to print training progress and invoke\n early stopping.\n\n Returns:\n Returns self.\n \"\"\"\n input_fn, feed_fn = _get_input_fn(x, y, batch_size)\n return self._train_model(input_fn=input_fn,\n feed_fn=feed_fn,\n steps=steps,\n monitor=monitor)\n\n def train(self, input_fn, steps, monitor=None):\n \"\"\"Trains a model given input builder function.\n\n Args:\n input_fn: Input builder function, returns tuple of dicts or\n dict and Tensor.\n steps: number of steps to train model for.\n monitor: monitor object to print training progress and invoke\n early stopping.\n\n Returns:\n Returns self.\n \"\"\"\n return self._train_model(input_fn=input_fn, steps=steps, monitor=monitor)\n\n def partial_fit(self, x, y, steps=1, batch_size=32, monitor=None):\n \"\"\"Incremental fit on a batch of samples.\n\n This method is expected to be called several times consecutively\n on different or the same chunks of the dataset. This either can\n implement iterative training or out-of-core/online training.\n\n This is especially useful when the whole dataset is too big to\n fit in memory at the same time. Or when model is taking long time\n to converge, and you want to split up training into subparts.\n\n Args:\n x: matrix or tensor of shape [n_samples, n_features...]. Can be\n iterator that returns arrays of features. The training input\n samples for fitting the model.\n y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be\n iterator that returns array of targets. The training target values\n (class label in classification, real numbers in regression).\n steps: number of steps to train model for.\n batch_size: minibatch size to use on the input, defaults to 32.\n monitor: Monitor object to print training progress and invoke\n early stopping.\n\n Returns:\n Returns self.\n \"\"\"\n input_fn, feed_fn = _get_input_fn(x, y, batch_size)\n return self._train_model(input_fn=input_fn,\n feed_fn=feed_fn,\n steps=steps,\n monitor=monitor)\n\n def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,\n batch_size=32, steps=100, metrics=None):\n \"\"\"Evaluates given model with provided evaluation data.\n\n Args:\n x: features.\n y: targets.\n input_fn: Input function. If set, x and y must be None.\n feed_fn: Function creating a feed dict every time it is called. Called\n once per iteration.\n batch_size: minibatch size to use on the input, defaults to 32. Ignored\n if input_fn is set.\n steps: Number of steps to evalute for.\n metrics: Dict of metric ops to run.\n\n Returns:\n Returns self.\n\n Raises:\n ValueError: If x or y are not None while input_fn or feed_fn is not None.\n \"\"\"\n if (x is not None or y is not None) and input_fn is not None:\n raise ValueError('Either x and y or input_fn must be None.')\n if input_fn is None:\n assert x is not None\n input_fn, feed_fn = _get_input_fn(x, y, batch_size)\n return self._evaluate_model(input_fn=input_fn, feed_fn=feed_fn,\n steps=steps, metrics=metrics)\n\n def predict(self, x, axis=None, batch_size=None):\n \"\"\"Returns predictions for given features.\n\n Args:\n x: features.\n axis: Axis on which to argmax. 
(for classification).\n batch_size: Override default batch size.\n\n Returns:\n Numpy array of predicted classes or regression values.\n \"\"\"\n return self._infer_model(x=x, batch_size=batch_size, axis=axis)\n\n def predict_proba(self, x, batch_size=None):\n \"\"\"Returns prediction probabilities for given features (classification).\n\n Args:\n x: features.\n batch_size: OVerride default batch size.\n\n Returns:\n Numpy array of predicted probabilities.\n \"\"\"\n return self._infer_model(x=x, batch_size=batch_size, proba=True)\n\n def _check_inputs(self, features, targets):\n if self._features_info is not None:\n if not tensor_signature.tensors_compatible(features, self._features_info):\n raise ValueError('Features are incompatible with given information. '\n 'Given features: %s, required signatures: %s.' %\n (str(features), str(self._features_info)))\n else:\n self._features_info = tensor_signature.create_signatures(features)\n if self._targets_info is not None:\n if not tensor_signature.tensors_compatible(targets, self._targets_info):\n raise ValueError('Targets are incompatible with given information. '\n 'Given targets: %s, required signatures: %s.' %\n (str(targets), str(self._targets_info)))\n else:\n self._targets_info = tensor_signature.create_signatures(targets)\n\n def _train_model(self,\n input_fn,\n steps,\n feed_fn=None,\n device_fn=None,\n monitor=None,\n log_every_steps=100,\n fail_on_nan_loss=True):\n if self._config.execution_mode not in ('all', 'train'):\n return\n\n # Stagger startup of worker sessions based on task id.\n sleep_secs = min(self._config.training_worker_max_startup_secs,\n self._config.task *\n self._config.training_worker_session_startup_stagger_secs)\n if sleep_secs:\n logging.info('Waiting %d secs before starting task %d.', sleep_secs,\n self._config.task)\n time.sleep(sleep_secs)\n\n # Device allocation\n device_fn = device_fn or self._device_fn\n\n with ops.Graph().as_default() as g, g.device(device_fn):\n random_seed.set_random_seed(self._config.tf_random_seed)\n global_step = contrib_framework.create_global_step(g)\n features, targets = input_fn()\n self._check_inputs(features, targets)\n train_op, loss_op = self._get_train_ops(features, targets)\n return train(\n graph=g,\n output_dir=self._model_dir,\n train_op=train_op,\n loss_op=loss_op,\n global_step_tensor=global_step,\n log_every_steps=log_every_steps,\n supervisor_is_chief=(self._config.task == 0),\n supervisor_master=self._config.master,\n feed_fn=feed_fn,\n max_steps=steps,\n fail_on_nan_loss=fail_on_nan_loss)\n\n def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None):\n if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'):\n return\n\n checkpoint_path = saver.latest_checkpoint(self._model_dir)\n eval_dir = os.path.join(self._model_dir, 'eval')\n with ops.Graph().as_default() as g:\n random_seed.set_random_seed(self._config.tf_random_seed)\n global_step = contrib_framework.create_global_step(g)\n features, targets = input_fn()\n self._check_inputs(features, targets)\n eval_dict = self._get_eval_ops(features, targets, metrics or\n self._get_default_metric_functions())\n eval_results, _ = evaluate(\n graph=g,\n output_dir=eval_dir,\n checkpoint_path=checkpoint_path,\n eval_dict=eval_dict,\n global_step_tensor=global_step,\n supervisor_master=self._config.master,\n feed_fn=feed_fn,\n max_steps=steps)\n return eval_results\n\n def _infer_model(self, x, batch_size=None, axis=None, proba=False):\n # Converts inputs into tf.DataFrame / tf.Series.\n 
batch_size = -1 if batch_size is None else batch_size\n input_fn, feed_fn = _get_predict_input_fn(x, batch_size)\n\n checkpoint_path = saver.latest_checkpoint(self._model_dir)\n with ops.Graph().as_default() as g:\n random_seed.set_random_seed(self._config.tf_random_seed)\n contrib_framework.create_global_step(g)\n features, _ = input_fn()\n feed_dict = feed_fn() if feed_fn is not None else None\n predictions = self._get_predict_ops(features)\n if not isinstance(predictions, dict):\n predictions = {'predictions': predictions}\n # TODO(ipolosukhin): Support batching\n return infer(checkpoint_path, predictions, feed_dict=feed_dict)\n\n\nclass Estimator(BaseEstimator):\n \"\"\"Estimator class is the basic TensorFlow model trainer/evaluator.\n\n Parameters:\n model_fn: Model function, takes features and targets tensors or dicts of\n tensors and returns predictions and loss tensors.\n E.g. `(features, targets) -> (predictions, loss)`.\n model_dir: Directory to save model parameters, graph and etc.\n classification: boolean, true if classification problem.\n learning_rate: learning rate for the model.\n optimizer: optimizer for the model, can be:\n string: name of optimizer, like 'SGD', 'Adam', 'Adagrad', 'Ftl',\n 'Momentum', 'RMSProp', 'Momentum').\n Full list in contrib/layers/optimizers.py\n class: sub-class of Optimizer\n (like tf.train.GradientDescentOptimizer).\n clip_gradients: clip_norm value for call to `clip_by_global_norm`. None\n denotes no gradient clipping.\n \"\"\"\n\n def __init__(self,\n model_fn=None,\n model_dir=None,\n classification=True,\n learning_rate=0.01,\n optimizer='SGD',\n clip_gradients=None):\n super(Estimator, self).__init__(model_dir=model_dir)\n\n self._model_fn = model_fn\n self._classification = classification\n if isinstance(optimizer, six.string_types):\n if optimizer not in layers.OPTIMIZER_CLS_NAMES:\n raise ValueError(\n 'Optimizer name should be one of [%s], you provided %s.' 
%\n (', '.join(layers.OPTIMIZER_CLS_NAMES), optimizer))\n self.optimizer = optimizer\n self.learning_rate = learning_rate\n self.clip_gradients = clip_gradients\n\n def _get_train_ops(self, features, targets):\n \"\"\"Method that builds model graph and returns trainer ops.\n\n Expected to be overriden by sub-classes that require custom support.\n This implementation uses `model_fn` passed as parameter to constructor to\n build model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n targets: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n Tuple of train `Operation` and loss `Tensor`.\n \"\"\"\n _, loss = self._model_fn(features, targets, ModeKeys.TRAIN)\n train_op = layers.optimize_loss(\n loss,\n contrib_framework.get_global_step(),\n learning_rate=self.learning_rate,\n optimizer=self.optimizer,\n clip_gradients=self.clip_gradients)\n return train_op, loss\n\n def _get_eval_ops(self, features, targets, metrics):\n \"\"\"Method that builds model graph and returns evaluation ops.\n\n Expected to be overriden by sub-classes that require custom support.\n This implementation uses `model_fn` passed as parameter to constructor to\n build model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n targets: `Tensor` or `dict` of `Tensor` objects.\n metrics: `dict` of functions that take predictions and targets.\n\n Returns:\n metrics: `dict` of `Tensor` objects.\n \"\"\"\n predictions, loss = self._model_fn(features, targets, ModeKeys.EVAL)\n result = {'loss': loss}\n if isinstance(targets, dict) and len(targets) == 1:\n # Unpack single target into just tensor.\n targets = targets[targets.keys()[0]]\n for name, metric in six.iteritems(metrics):\n # TODO(ipolosukhin): Add support for multi-head metrics.\n result[name] = metric(predictions, targets)\n return result\n\n def _get_predict_ops(self, features):\n \"\"\"Method that builds model graph and returns prediction ops.\n\n Expected to be overriden by sub-classes that require custom support.\n This implementation uses `model_fn` passed as parameter to constructor to\n build model.\n\n Args:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Returns:\n predictions: `Tensor` or `dict` of `Tensor` objects.\n \"\"\"\n targets = tensor_signature.create_placeholders_from_signatures(\n self._targets_info)\n predictions, _ = self._model_fn(features, targets, ModeKeys.INFER)\n return predictions\n\n def _get_default_metric_functions(self):\n \"\"\"Method that provides default metric operations.\n\n Returns:\n a dictionary of metric operations.\n \"\"\"\n return _EVAL_METRICS[\n 'classification' if self._classification else 'regression']\n\n def _get_feature_ops_from_example(self, examples_batch):\n \"\"\"Unimplemented.\n\n TODO(vihanjain): We need a way to parse tf.Example into features.\n\n Args:\n examples_batch: batch of tf.Example\n\n Returns:\n features: `Tensor` or `dict` of `Tensor` objects.\n\n Raises:\n Exception: Unimplemented\n \"\"\"\n raise NotImplementedError('_get_feature_ops_from_example not yet '\n 'implemented')\n"
] | [
[
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.training.device_setter.replica_device_setter",
"tensorflow.python.framework.ops.Graph",
"tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_placeholders_from_signatures",
"tensorflow.contrib.learn.python.learn.graph_actions.train",
"tensorflow.contrib.framework.get_global_step",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.contrib.framework.create_global_step",
"tensorflow.contrib.learn.python.learn.estimators.tensor_signature.tensors_compatible",
"tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_signatures",
"tensorflow.contrib.learn.python.learn.graph_actions.evaluate",
"tensorflow.contrib.learn.python.learn.graph_actions.infer",
"tensorflow.python.training.saver.latest_checkpoint",
"tensorflow.contrib.learn.python.learn.io.data_feeder.setup_train_data_feeder"
]
] |
ThomsonTan/nnvm | [
"dab5ce8ab6adbf4edd8bd2fa89f1a99f343b6e38"
] | [
"tests/python/compiler/test_top_level2.py"
] | [
"import numpy as np\n\nimport tvm\nfrom tvm.contrib import graph_runtime\nimport topi\nimport topi.testing\nimport nnvm.symbol as sym\nimport nnvm.compiler\nfrom nnvm.testing.config import ctx_list\n\n\ndef test_conv2d():\n x = sym.Variable(\"x\")\n y = sym.conv2d(x, channels=10, kernel_size=(3,3),\n name=\"y\", padding=(1,1))\n dtype = \"float32\"\n dshape = (1, 3, 18, 18)\n kshape = (10, 3, 3, 3)\n oshape = (1, 10, 18, 18)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))\n bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))\n m.run(x=data, y_weight=kernel, y_bias=bias)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n c_np = topi.testing.conv2d_nchw_python(\n data.asnumpy(), kernel.asnumpy(), 1, 1)\n c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1)\n np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)\n\n\ndef test_dilated_conv2d():\n dilation = 3\n x = sym.Variable(\"x\")\n y = sym.conv2d(x, channels=10, kernel_size=(3, 3), dilation=(dilation, dilation),\n name=\"y\", padding=(1, 1))\n dtype = \"float32\"\n dshape = (1, 3, 18, 18)\n kshape = (10, 3, 3, 3)\n oshape = (1, 10, 14, 14)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))\n kernel_np = np.random.uniform(size=kshape).astype(dtype)\n kernel = tvm.nd.array(kernel_np)\n dkernel_np = topi.testing.dilate_python(kernel_np, (1, 1, dilation, dilation))\n m.run(x=data, y_weight=kernel, y_bias=bias)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n c_np = topi.testing.conv2d_nchw_python(\n data.asnumpy(), dkernel_np, 1, 1)\n c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1)\n np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)\n\n\ndef test_grouped_conv2d():\n x = sym.Variable(\"x\")\n y = sym.conv2d(x, channels=32, kernel_size=(3,3), groups=32,\n name=\"y\", padding=(1,1))\n dtype = \"float32\"\n dshape = (1, 32, 18, 18)\n kshape = (32, 1, 3, 3)\n oshape = (1, 32, 18, 18)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))\n bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))\n m.run(x=data, y_weight=kernel, y_bias=bias)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n c_np = topi.testing.depthwise_conv2d_python_nchw(\n data.asnumpy(), kernel.asnumpy(), (1,1), 'SAME')\n c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1)\n np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)\n\n\ndef test_conv2d_transpose():\n x = sym.Variable(\"x\")\n y = sym.conv2d_transpose(x, channels=10, kernel_size=(3,3), strides=(2,2),\n name=\"y\", padding=(1,1), output_padding=(2,2))\n dtype = \"float32\"\n dshape = (1, 3, 18, 18)\n kshape = (3, 10, 3, 3)\n oshape = (1, 10, 37, 37)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = 
graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))\n bias = tvm.nd.array(np.random.uniform(size=kshape[1]).astype(dtype))\n m.run(x=data, y_weight=kernel, y_bias=bias)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n c_np = topi.testing.conv2d_transpose_nchw_python(\n data.asnumpy(), kernel.asnumpy(), 2, 1)\n c_np = c_np + bias.asnumpy().reshape(kshape[1], 1, 1)\n d_np = np.zeros(shape=oshape)\n d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np\n np.testing.assert_allclose(out.asnumpy(), d_np, rtol=1e-5)\n\n\ndef test_max_pool2d():\n x = sym.Variable(\"x\")\n y = sym.max_pool2d(x, pool_size=(2,2), strides=(2,2),\n padding=(0,0), name=\"y\", ceil_mode=True)\n dtype = \"float32\"\n dshape = (1, 3, 28, 28)\n oshape = (1, 3, 14, 14)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = np.max(data.asnumpy().reshape(1,3,14,2,14,2), axis=(3,5))\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_avg_pool2d():\n x = sym.Variable(\"x\")\n y = sym.avg_pool2d(x, pool_size=(2,2), strides=(2,2), padding=(0,0), name=\"y\")\n dtype = \"float32\"\n dshape = (1, 3, 28, 28)\n oshape = (1, 3, 14, 14)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = np.mean(data.asnumpy().reshape(1,3,14,2,14,2), axis=(3,5))\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_avg_pool2d_no_count_pad():\n kh, kw = (4, 4)\n sh, sw = (2, 2)\n ph, pw = (2, 2)\n \n x = sym.Variable(\"x\")\n y = sym.avg_pool2d(x, pool_size=(kh, kw), strides=(sw, sw), padding=(ph, pw),\n name=\"y\", count_include_pad=False)\n dtype = \"float32\"\n n = 1\n (ic, ih, iw) = (3, 28, 28)\n (oc, oh, ow) = (3, 15, 15)\n\n a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)\n pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)\n no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))\n pad_np[np.ix_(*no_zero)] = a_np\n b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)\n \n for i in range(oh):\n for j in range(ow):\n pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))\n b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw],\n axis=(2,3)) / np.maximum(pad_count, 1)\n b_np = np.maximum(b_np, 0.0)\n shape_dict = {\"x\": (n, ic, ih, iw)}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(a_np)\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty((n, oc, oh, ow), dtype))\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_global_max_pool2d():\n x = sym.Variable(\"x\")\n y = sym.global_max_pool2d(x, name=\"y\")\n dtype = \"float32\"\n dshape = (1, 1024, 7, 7)\n oshape = (1, 1024, 1, 1)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = 
tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = np.max(data.asnumpy(), axis=(2,3), keepdims=True)\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_global_avg_pool2d():\n x = sym.Variable(\"x\")\n y = sym.global_avg_pool2d(x, name=\"y\")\n dtype = \"float32\"\n dshape = (1, 1024, 7, 7)\n oshape = (1, 1024, 1, 1)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = np.mean(data.asnumpy(), axis=(2,3), keepdims=True)\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_upsampling():\n x = sym.Variable(\"x\")\n scale = 2\n y = sym.upsampling(x, scale=scale, name=\"y\")\n dtype = \"float32\"\n dshape = (1, 16, 32, 32)\n oshape = (1, 16, 32*scale, 32*scale)\n shape_dict = {\"x\": dshape}\n for target, ctx in ctx_list():\n graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)\n m = graph_runtime.create(graph, lib, ctx)\n a_np = np.random.uniform(size=dshape).astype(dtype)\n data = tvm.nd.array(a_np)\n m.run(x=data)\n out = m.get_output(0, tvm.nd.empty(oshape, dtype))\n b_np = topi.testing.upsampling_python(a_np, scale)\n np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)\n\n\nif __name__ == \"__main__\":\n test_conv2d()\n test_dilated_conv2d()\n test_grouped_conv2d()\n test_conv2d_transpose()\n test_max_pool2d()\n test_avg_pool2d()\n test_avg_pool2d_no_count_pad()\n test_global_max_pool2d()\n test_global_avg_pool2d()\n test_upsampling()\n"
] | [
[
"numpy.random.uniform",
"numpy.ix_",
"numpy.sum",
"numpy.zeros",
"numpy.maximum"
]
] |
weidan-wd/numpy | [
"b7c27bd2a3817f59c84b004b87bba5db57d9a9b0"
] | [
"numpy/testing/tests/test_utils.py"
] | [
"import warnings\nimport sys\nimport os\nimport itertools\nimport textwrap\nimport pytest\nimport weakref\n\nimport numpy as np\nfrom numpy.testing import (\n assert_equal, assert_array_equal, assert_almost_equal,\n assert_array_almost_equal, assert_array_less, build_err_msg, raises,\n assert_raises, assert_warns, assert_no_warnings, assert_allclose,\n assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,\n clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,\n tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT\n )\nfrom numpy.core.overrides import ARRAY_FUNCTION_ENABLED\n\n\nclass _GenericTest(object):\n\n def _test_equal(self, a, b):\n self._assert_func(a, b)\n\n def _test_not_equal(self, a, b):\n with assert_raises(AssertionError):\n self._assert_func(a, b)\n\n def test_array_rank1_eq(self):\n \"\"\"Test two equal array of rank 1 are found equal.\"\"\"\n a = np.array([1, 2])\n b = np.array([1, 2])\n\n self._test_equal(a, b)\n\n def test_array_rank1_noteq(self):\n \"\"\"Test two different array of rank 1 are found not equal.\"\"\"\n a = np.array([1, 2])\n b = np.array([2, 2])\n\n self._test_not_equal(a, b)\n\n def test_array_rank2_eq(self):\n \"\"\"Test two equal array of rank 2 are found equal.\"\"\"\n a = np.array([[1, 2], [3, 4]])\n b = np.array([[1, 2], [3, 4]])\n\n self._test_equal(a, b)\n\n def test_array_diffshape(self):\n \"\"\"Test two arrays with different shapes are found not equal.\"\"\"\n a = np.array([1, 2])\n b = np.array([[1, 2], [1, 2]])\n\n self._test_not_equal(a, b)\n\n def test_objarray(self):\n \"\"\"Test object arrays.\"\"\"\n a = np.array([1, 1], dtype=object)\n self._test_equal(a, 1)\n\n def test_array_likes(self):\n self._test_equal([1, 2, 3], (1, 2, 3))\n\n\nclass TestArrayEqual(_GenericTest):\n\n def setup(self):\n self._assert_func = assert_array_equal\n\n def test_generic_rank1(self):\n \"\"\"Test rank 1 array for all dtypes.\"\"\"\n def foo(t):\n a = np.empty(2, t)\n a.fill(1)\n b = a.copy()\n c = a.copy()\n c.fill(0)\n self._test_equal(a, b)\n self._test_not_equal(c, b)\n\n # Test numeric types and object\n for t in '?bhilqpBHILQPfdgFDG':\n foo(t)\n\n # Test strings\n for t in ['S1', 'U1']:\n foo(t)\n\n def test_0_ndim_array(self):\n x = np.array(473963742225900817127911193656584771)\n y = np.array(18535119325151578301457182298393896)\n assert_raises(AssertionError, self._assert_func, x, y)\n\n y = x\n self._assert_func(x, y)\n\n x = np.array(43)\n y = np.array(10)\n assert_raises(AssertionError, self._assert_func, x, y)\n\n y = x\n self._assert_func(x, y)\n\n def test_generic_rank3(self):\n \"\"\"Test rank 3 array for all dtypes.\"\"\"\n def foo(t):\n a = np.empty((4, 2, 3), t)\n a.fill(1)\n b = a.copy()\n c = a.copy()\n c.fill(0)\n self._test_equal(a, b)\n self._test_not_equal(c, b)\n\n # Test numeric types and object\n for t in '?bhilqpBHILQPfdgFDG':\n foo(t)\n\n # Test strings\n for t in ['S1', 'U1']:\n foo(t)\n\n def test_nan_array(self):\n \"\"\"Test arrays with nan values in them.\"\"\"\n a = np.array([1, 2, np.nan])\n b = np.array([1, 2, np.nan])\n\n self._test_equal(a, b)\n\n c = np.array([1, 2, 3])\n self._test_not_equal(c, b)\n\n def test_string_arrays(self):\n \"\"\"Test two arrays with different shapes are found not equal.\"\"\"\n a = np.array(['floupi', 'floupa'])\n b = np.array(['floupi', 'floupa'])\n\n self._test_equal(a, b)\n\n c = np.array(['floupipi', 'floupa'])\n\n self._test_not_equal(c, b)\n\n def test_recarrays(self):\n \"\"\"Test record arrays.\"\"\"\n a = np.empty(2, [('floupi', float), 
('floupa', float)])\n a['floupi'] = [1, 2]\n a['floupa'] = [1, 2]\n b = a.copy()\n\n self._test_equal(a, b)\n\n c = np.empty(2, [('floupipi', float), ('floupa', float)])\n c['floupipi'] = a['floupi'].copy()\n c['floupa'] = a['floupa'].copy()\n\n with suppress_warnings() as sup:\n l = sup.record(FutureWarning, message=\"elementwise == \")\n self._test_not_equal(c, b)\n assert_equal(len(l), 1)\n\n def test_masked_nan_inf(self):\n # Regression test for gh-11121\n a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])\n b = np.array([3., np.nan, 6.5])\n self._test_equal(a, b)\n self._test_equal(b, a)\n a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])\n b = np.array([np.inf, 4., 6.5])\n self._test_equal(a, b)\n self._test_equal(b, a)\n\n def test_subclass_that_overrides_eq(self):\n # While we cannot guarantee testing functions will always work for\n # subclasses, the tests should ideally rely only on subclasses having\n # comparison operators, not on them being able to store booleans\n # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.\n class MyArray(np.ndarray):\n def __eq__(self, other):\n return bool(np.equal(self, other).all())\n\n def __ne__(self, other):\n return not self == other\n\n a = np.array([1., 2.]).view(MyArray)\n b = np.array([2., 3.]).view(MyArray)\n assert_(type(a == a), bool)\n assert_(a == a)\n assert_(a != b)\n self._test_equal(a, a)\n self._test_not_equal(a, b)\n self._test_not_equal(b, a)\n\n @pytest.mark.skipif(\n not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__')\n def test_subclass_that_does_not_implement_npall(self):\n class MyArray(np.ndarray):\n def __array_function__(self, *args, **kwargs):\n return NotImplemented\n\n a = np.array([1., 2.]).view(MyArray)\n b = np.array([2., 3.]).view(MyArray)\n with assert_raises(TypeError):\n np.all(a)\n self._test_equal(a, a)\n self._test_not_equal(a, b)\n self._test_not_equal(b, a)\n\n\nclass TestBuildErrorMessage(object):\n\n def test_build_err_msg_defaults(self):\n x = np.array([1.00001, 2.00002, 3.00003])\n y = np.array([1.00002, 2.00003, 3.00004])\n err_msg = 'There is a mismatch'\n\n a = build_err_msg([x, y], err_msg)\n b = ('\\nItems are not equal: There is a mismatch\\n ACTUAL: array(['\n '1.00001, 2.00002, 3.00003])\\n DESIRED: array([1.00002, '\n '2.00003, 3.00004])')\n assert_equal(a, b)\n\n def test_build_err_msg_no_verbose(self):\n x = np.array([1.00001, 2.00002, 3.00003])\n y = np.array([1.00002, 2.00003, 3.00004])\n err_msg = 'There is a mismatch'\n\n a = build_err_msg([x, y], err_msg, verbose=False)\n b = '\\nItems are not equal: There is a mismatch'\n assert_equal(a, b)\n\n def test_build_err_msg_custom_names(self):\n x = np.array([1.00001, 2.00002, 3.00003])\n y = np.array([1.00002, 2.00003, 3.00004])\n err_msg = 'There is a mismatch'\n\n a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))\n b = ('\\nItems are not equal: There is a mismatch\\n FOO: array(['\n '1.00001, 2.00002, 3.00003])\\n BAR: array([1.00002, 2.00003, '\n '3.00004])')\n assert_equal(a, b)\n\n def test_build_err_msg_custom_precision(self):\n x = np.array([1.000000001, 2.00002, 3.00003])\n y = np.array([1.000000002, 2.00003, 3.00004])\n err_msg = 'There is a mismatch'\n\n a = build_err_msg([x, y], err_msg, precision=10)\n b = ('\\nItems are not equal: There is a mismatch\\n ACTUAL: array(['\n '1.000000001, 2.00002 , 3.00003 ])\\n DESIRED: array(['\n '1.000000002, 2.00003 , 3.00004 ])')\n assert_equal(a, b)\n\n\nclass TestEqual(TestArrayEqual):\n\n def setup(self):\n self._assert_func = 
assert_equal\n\n def test_nan_items(self):\n self._assert_func(np.nan, np.nan)\n self._assert_func([np.nan], [np.nan])\n self._test_not_equal(np.nan, [np.nan])\n self._test_not_equal(np.nan, 1)\n\n def test_inf_items(self):\n self._assert_func(np.inf, np.inf)\n self._assert_func([np.inf], [np.inf])\n self._test_not_equal(np.inf, [np.inf])\n\n def test_datetime(self):\n self._test_equal(\n np.datetime64(\"2017-01-01\", \"s\"),\n np.datetime64(\"2017-01-01\", \"s\")\n )\n self._test_equal(\n np.datetime64(\"2017-01-01\", \"s\"),\n np.datetime64(\"2017-01-01\", \"m\")\n )\n\n # gh-10081\n self._test_not_equal(\n np.datetime64(\"2017-01-01\", \"s\"),\n np.datetime64(\"2017-01-02\", \"s\")\n )\n self._test_not_equal(\n np.datetime64(\"2017-01-01\", \"s\"),\n np.datetime64(\"2017-01-02\", \"m\")\n )\n\n def test_nat_items(self):\n # not a datetime\n nadt_no_unit = np.datetime64(\"NaT\")\n nadt_s = np.datetime64(\"NaT\", \"s\")\n nadt_d = np.datetime64(\"NaT\", \"ns\")\n # not a timedelta\n natd_no_unit = np.timedelta64(\"NaT\")\n natd_s = np.timedelta64(\"NaT\", \"s\")\n natd_d = np.timedelta64(\"NaT\", \"ns\")\n\n dts = [nadt_no_unit, nadt_s, nadt_d]\n tds = [natd_no_unit, natd_s, natd_d]\n for a, b in itertools.product(dts, dts):\n self._assert_func(a, b)\n self._assert_func([a], [b])\n self._test_not_equal([a], b)\n\n for a, b in itertools.product(tds, tds):\n self._assert_func(a, b)\n self._assert_func([a], [b])\n self._test_not_equal([a], b)\n\n for a, b in itertools.product(tds, dts):\n self._test_not_equal(a, b)\n self._test_not_equal(a, [b])\n self._test_not_equal([a], [b])\n self._test_not_equal([a], np.datetime64(\"2017-01-01\", \"s\"))\n self._test_not_equal([b], np.datetime64(\"2017-01-01\", \"s\"))\n self._test_not_equal([a], np.timedelta64(123, \"s\"))\n self._test_not_equal([b], np.timedelta64(123, \"s\"))\n\n def test_non_numeric(self):\n self._assert_func('ab', 'ab')\n self._test_not_equal('ab', 'abb')\n\n def test_complex_item(self):\n self._assert_func(complex(1, 2), complex(1, 2))\n self._assert_func(complex(1, np.nan), complex(1, np.nan))\n self._test_not_equal(complex(1, np.nan), complex(1, 2))\n self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))\n self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))\n\n def test_negative_zero(self):\n self._test_not_equal(np.PZERO, np.NZERO)\n\n def test_complex(self):\n x = np.array([complex(1, 2), complex(1, np.nan)])\n y = np.array([complex(1, 2), complex(1, 2)])\n self._assert_func(x, x)\n self._test_not_equal(x, y)\n\n def test_error_message(self):\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(np.array([1, 2]), np.array([[1, 2]]))\n msg = str(exc_info.value)\n msg2 = msg.replace(\"shapes (2L,), (1L, 2L)\", \"shapes (2,), (1, 2)\")\n msg_reference = textwrap.dedent(\"\"\"\\\n\n Arrays are not equal\n\n (shapes (2,), (1, 2) mismatch)\n x: array([1, 2])\n y: array([[1, 2]])\"\"\")\n\n try:\n assert_equal(msg, msg_reference)\n except AssertionError:\n assert_equal(msg2, msg_reference)\n\n def test_object(self):\n #gh-12942\n import datetime\n a = np.array([datetime.datetime(2000, 1, 1),\n datetime.datetime(2000, 1, 2)])\n self._test_not_equal(a, a[::-1])\n\n\nclass TestArrayAlmostEqual(_GenericTest):\n\n def setup(self):\n self._assert_func = assert_array_almost_equal\n\n def test_closeness(self):\n # Note that in the course of time we ended up with\n # `abs(x - y) < 1.5 * 10**(-decimal)`\n # instead of the previously documented\n # `abs(x - y) < 0.5 * 10**(-decimal)`\n # so this check serves 
to preserve the wrongness.\n\n # test scalars\n self._assert_func(1.499999, 0.0, decimal=0)\n assert_raises(AssertionError,\n lambda: self._assert_func(1.5, 0.0, decimal=0))\n\n # test arrays\n self._assert_func([1.499999], [0.0], decimal=0)\n assert_raises(AssertionError,\n lambda: self._assert_func([1.5], [0.0], decimal=0))\n\n def test_simple(self):\n x = np.array([1234.2222])\n y = np.array([1234.2223])\n\n self._assert_func(x, y, decimal=3)\n self._assert_func(x, y, decimal=4)\n assert_raises(AssertionError,\n lambda: self._assert_func(x, y, decimal=5))\n\n def test_nan(self):\n anan = np.array([np.nan])\n aone = np.array([1])\n ainf = np.array([np.inf])\n self._assert_func(anan, anan)\n assert_raises(AssertionError,\n lambda: self._assert_func(anan, aone))\n assert_raises(AssertionError,\n lambda: self._assert_func(anan, ainf))\n assert_raises(AssertionError,\n lambda: self._assert_func(ainf, anan))\n\n def test_inf(self):\n a = np.array([[1., 2.], [3., 4.]])\n b = a.copy()\n a[0, 0] = np.inf\n assert_raises(AssertionError,\n lambda: self._assert_func(a, b))\n b[0, 0] = -np.inf\n assert_raises(AssertionError,\n lambda: self._assert_func(a, b))\n\n def test_subclass(self):\n a = np.array([[1., 2.], [3., 4.]])\n b = np.ma.masked_array([[1., 2.], [0., 4.]],\n [[False, False], [True, False]])\n self._assert_func(a, b)\n self._assert_func(b, a)\n self._assert_func(b, b)\n\n # Test fully masked as well (see gh-11123).\n a = np.ma.MaskedArray(3.5, mask=True)\n b = np.array([3., 4., 6.5])\n self._test_equal(a, b)\n self._test_equal(b, a)\n a = np.ma.masked\n b = np.array([3., 4., 6.5])\n self._test_equal(a, b)\n self._test_equal(b, a)\n a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])\n b = np.array([1., 2., 3.])\n self._test_equal(a, b)\n self._test_equal(b, a)\n a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])\n b = np.array(1.)\n self._test_equal(a, b)\n self._test_equal(b, a)\n\n def test_subclass_that_cannot_be_bool(self):\n # While we cannot guarantee testing functions will always work for\n # subclasses, the tests should ideally rely only on subclasses having\n # comparison operators, not on them being able to store booleans\n # (which, e.g., astropy Quantity cannot usefully do). 
See gh-8452.\n class MyArray(np.ndarray):\n def __eq__(self, other):\n return super(MyArray, self).__eq__(other).view(np.ndarray)\n\n def __lt__(self, other):\n return super(MyArray, self).__lt__(other).view(np.ndarray)\n\n def all(self, *args, **kwargs):\n raise NotImplementedError\n\n a = np.array([1., 2.]).view(MyArray)\n self._assert_func(a, a)\n\n\nclass TestAlmostEqual(_GenericTest):\n\n def setup(self):\n self._assert_func = assert_almost_equal\n\n def test_closeness(self):\n # Note that in the course of time we ended up with\n # `abs(x - y) < 1.5 * 10**(-decimal)`\n # instead of the previously documented\n # `abs(x - y) < 0.5 * 10**(-decimal)`\n # so this check serves to preserve the wrongness.\n\n # test scalars\n self._assert_func(1.499999, 0.0, decimal=0)\n assert_raises(AssertionError,\n lambda: self._assert_func(1.5, 0.0, decimal=0))\n\n # test arrays\n self._assert_func([1.499999], [0.0], decimal=0)\n assert_raises(AssertionError,\n lambda: self._assert_func([1.5], [0.0], decimal=0))\n\n def test_nan_item(self):\n self._assert_func(np.nan, np.nan)\n assert_raises(AssertionError,\n lambda: self._assert_func(np.nan, 1))\n assert_raises(AssertionError,\n lambda: self._assert_func(np.nan, np.inf))\n assert_raises(AssertionError,\n lambda: self._assert_func(np.inf, np.nan))\n\n def test_inf_item(self):\n self._assert_func(np.inf, np.inf)\n self._assert_func(-np.inf, -np.inf)\n assert_raises(AssertionError,\n lambda: self._assert_func(np.inf, 1))\n assert_raises(AssertionError,\n lambda: self._assert_func(-np.inf, np.inf))\n\n def test_simple_item(self):\n self._test_not_equal(1, 2)\n\n def test_complex_item(self):\n self._assert_func(complex(1, 2), complex(1, 2))\n self._assert_func(complex(1, np.nan), complex(1, np.nan))\n self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))\n self._test_not_equal(complex(1, np.nan), complex(1, 2))\n self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))\n self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))\n\n def test_complex(self):\n x = np.array([complex(1, 2), complex(1, np.nan)])\n z = np.array([complex(1, 2), complex(np.nan, 1)])\n y = np.array([complex(1, 2), complex(1, 2)])\n self._assert_func(x, x)\n self._test_not_equal(x, y)\n self._test_not_equal(x, z)\n\n def test_error_message(self):\n \"\"\"Check the message is formatted correctly for the decimal value.\n Also check the message when input includes inf or nan (gh12200)\"\"\"\n x = np.array([1.00000000001, 2.00000000002, 3.00003])\n y = np.array([1.00000000002, 2.00000000003, 3.00004])\n\n # Test with a different amount of decimal digits\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y, decimal=12)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.e-05')\n assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')\n assert_equal(\n msgs[6],\n ' x: array([1.00000000001, 2.00000000002, 3.00003 ])')\n assert_equal(\n msgs[7],\n ' y: array([1.00000000002, 2.00000000003, 3.00004 ])')\n\n # With the default value of decimal digits, only the 3rd element\n # differs. 
Note that we only check for the formatting of the arrays\n # themselves.\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.e-05')\n assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')\n assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')\n assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])')\n\n # Check the error message when input includes inf\n x = np.array([np.inf, 0])\n y = np.array([np.inf, 1])\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.')\n assert_equal(msgs[5], 'Max relative difference: 1.')\n assert_equal(msgs[6], ' x: array([inf, 0.])')\n assert_equal(msgs[7], ' y: array([inf, 1.])')\n\n # Check the error message when dividing by zero\n x = np.array([1, 2])\n y = np.array([0, 0])\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')\n assert_equal(msgs[4], 'Max absolute difference: 2')\n assert_equal(msgs[5], 'Max relative difference: inf')\n\n def test_error_message_2(self):\n \"\"\"Check the message is formatted correctly when either x or y is a scalar.\"\"\"\n x = 2\n y = np.ones(20)\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.')\n assert_equal(msgs[5], 'Max relative difference: 1.')\n\n y = 2\n x = np.ones(20)\n with pytest.raises(AssertionError) as exc_info:\n self._assert_func(x, y)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')\n assert_equal(msgs[4], 'Max absolute difference: 1.')\n assert_equal(msgs[5], 'Max relative difference: 0.5')\n\n def test_subclass_that_cannot_be_bool(self):\n # While we cannot guarantee testing functions will always work for\n # subclasses, the tests should ideally rely only on subclasses having\n # comparison operators, not on them being able to store booleans\n # (which, e.g., astropy Quantity cannot usefully do). 
See gh-8452.\n class MyArray(np.ndarray):\n def __eq__(self, other):\n return super(MyArray, self).__eq__(other).view(np.ndarray)\n\n def __lt__(self, other):\n return super(MyArray, self).__lt__(other).view(np.ndarray)\n\n def all(self, *args, **kwargs):\n raise NotImplementedError\n\n a = np.array([1., 2.]).view(MyArray)\n self._assert_func(a, a)\n\n\nclass TestApproxEqual(object):\n\n def setup(self):\n self._assert_func = assert_approx_equal\n\n def test_simple_0d_arrays(self):\n x = np.array(1234.22)\n y = np.array(1234.23)\n\n self._assert_func(x, y, significant=5)\n self._assert_func(x, y, significant=6)\n assert_raises(AssertionError,\n lambda: self._assert_func(x, y, significant=7))\n\n def test_simple_items(self):\n x = 1234.22\n y = 1234.23\n\n self._assert_func(x, y, significant=4)\n self._assert_func(x, y, significant=5)\n self._assert_func(x, y, significant=6)\n assert_raises(AssertionError,\n lambda: self._assert_func(x, y, significant=7))\n\n def test_nan_array(self):\n anan = np.array(np.nan)\n aone = np.array(1)\n ainf = np.array(np.inf)\n self._assert_func(anan, anan)\n assert_raises(AssertionError, lambda: self._assert_func(anan, aone))\n assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))\n\n def test_nan_items(self):\n anan = np.array(np.nan)\n aone = np.array(1)\n ainf = np.array(np.inf)\n self._assert_func(anan, anan)\n assert_raises(AssertionError, lambda: self._assert_func(anan, aone))\n assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))\n\n\nclass TestArrayAssertLess(object):\n\n def setup(self):\n self._assert_func = assert_array_less\n\n def test_simple_arrays(self):\n x = np.array([1.1, 2.2])\n y = np.array([1.2, 2.3])\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y = np.array([1.0, 2.3])\n\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n def test_rank2(self):\n x = np.array([[1.1, 2.2], [3.3, 4.4]])\n y = np.array([[1.2, 2.3], [3.4, 4.5]])\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y = np.array([[1.0, 2.3], [3.4, 4.5]])\n\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n def test_rank3(self):\n x = np.ones(shape=(2, 2, 2))\n y = np.ones(shape=(2, 2, 2))+1\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y[0, 0, 0] = 0\n\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n def test_simple_items(self):\n x = 1.1\n y = 2.2\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y = np.array([2.2, 3.3])\n\n self._assert_func(x, y)\n assert_raises(AssertionError, lambda: self._assert_func(y, x))\n\n y = np.array([1.0, 3.3])\n\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n\n def test_nan_noncompare(self):\n anan = np.array(np.nan)\n aone = np.array(1)\n ainf = np.array(np.inf)\n self._assert_func(anan, anan)\n assert_raises(AssertionError, lambda: self._assert_func(aone, anan))\n assert_raises(AssertionError, lambda: self._assert_func(anan, aone))\n assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))\n 
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))\n\n def test_nan_noncompare_array(self):\n x = np.array([1.1, 2.2, 3.3])\n anan = np.array(np.nan)\n\n assert_raises(AssertionError, lambda: self._assert_func(x, anan))\n assert_raises(AssertionError, lambda: self._assert_func(anan, x))\n\n x = np.array([1.1, 2.2, np.nan])\n\n assert_raises(AssertionError, lambda: self._assert_func(x, anan))\n assert_raises(AssertionError, lambda: self._assert_func(anan, x))\n\n y = np.array([1.0, 2.0, np.nan])\n\n self._assert_func(y, x)\n assert_raises(AssertionError, lambda: self._assert_func(x, y))\n\n def test_inf_compare(self):\n aone = np.array(1)\n ainf = np.array(np.inf)\n\n self._assert_func(aone, ainf)\n self._assert_func(-ainf, aone)\n self._assert_func(-ainf, ainf)\n assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))\n assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))\n assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))\n\n def test_inf_compare_array(self):\n x = np.array([1.1, 2.2, np.inf])\n ainf = np.array(np.inf)\n\n assert_raises(AssertionError, lambda: self._assert_func(x, ainf))\n assert_raises(AssertionError, lambda: self._assert_func(ainf, x))\n assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))\n assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))\n assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))\n self._assert_func(-ainf, x)\n\n\[email protected](reason=\"The raises decorator depends on Nose\")\nclass TestRaises(object):\n\n def setup(self):\n class MyException(Exception):\n pass\n\n self.e = MyException\n\n def raises_exception(self, e):\n raise e\n\n def does_not_raise_exception(self):\n pass\n\n def test_correct_catch(self):\n raises(self.e)(self.raises_exception)(self.e) # raises?\n\n def test_wrong_exception(self):\n try:\n raises(self.e)(self.raises_exception)(RuntimeError) # raises?\n except RuntimeError:\n return\n else:\n raise AssertionError(\"should have caught RuntimeError\")\n\n def test_catch_no_raise(self):\n try:\n raises(self.e)(self.does_not_raise_exception)() # raises?\n except AssertionError:\n return\n else:\n raise AssertionError(\"should have raised an AssertionError\")\n\n\nclass TestWarns(object):\n\n def test_warn(self):\n def f():\n warnings.warn(\"yo\")\n return 3\n\n before_filters = sys.modules['warnings'].filters[:]\n assert_equal(assert_warns(UserWarning, f), 3)\n after_filters = sys.modules['warnings'].filters\n\n assert_raises(AssertionError, assert_no_warnings, f)\n assert_equal(assert_no_warnings(lambda x: x, 1), 1)\n\n # Check that the warnings state is unchanged\n assert_equal(before_filters, after_filters,\n \"assert_warns does not preserver warnings state\")\n\n def test_context_manager(self):\n\n before_filters = sys.modules['warnings'].filters[:]\n with assert_warns(UserWarning):\n warnings.warn(\"yo\")\n after_filters = sys.modules['warnings'].filters\n\n def no_warnings():\n with assert_no_warnings():\n warnings.warn(\"yo\")\n\n assert_raises(AssertionError, no_warnings)\n assert_equal(before_filters, after_filters,\n \"assert_warns does not preserver warnings state\")\n\n def test_warn_wrong_warning(self):\n def f():\n warnings.warn(\"yo\", DeprecationWarning)\n\n failed = False\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", DeprecationWarning)\n 
try:\n # Should raise a DeprecationWarning\n assert_warns(UserWarning, f)\n failed = True\n except DeprecationWarning:\n pass\n\n if failed:\n raise AssertionError(\"wrong warning caught by assert_warn\")\n\n\nclass TestAssertAllclose(object):\n\n def test_simple(self):\n x = 1e-3\n y = 1e-9\n\n assert_allclose(x, y, atol=1)\n assert_raises(AssertionError, assert_allclose, x, y)\n\n a = np.array([x, y, x, y])\n b = np.array([x, y, x, x])\n\n assert_allclose(a, b, atol=1)\n assert_raises(AssertionError, assert_allclose, a, b)\n\n b[-1] = y * (1 + 1e-8)\n assert_allclose(a, b)\n assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)\n\n assert_allclose(6, 10, rtol=0.5)\n assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)\n\n def test_min_int(self):\n a = np.array([np.iinfo(np.int_).min], dtype=np.int_)\n # Should not raise:\n assert_allclose(a, a)\n\n def test_report_fail_percentage(self):\n a = np.array([1, 1, 1, 1])\n b = np.array([1, 1, 1, 2])\n\n with pytest.raises(AssertionError) as exc_info:\n assert_allclose(a, b)\n msg = str(exc_info.value)\n assert_('Mismatched elements: 1 / 4 (25%)\\n'\n 'Max absolute difference: 1\\n'\n 'Max relative difference: 0.5' in msg)\n\n def test_equal_nan(self):\n a = np.array([np.nan])\n b = np.array([np.nan])\n # Should not raise:\n assert_allclose(a, b, equal_nan=True)\n\n def test_not_equal_nan(self):\n a = np.array([np.nan])\n b = np.array([np.nan])\n assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)\n\n def test_equal_nan_default(self):\n # Make sure equal_nan default behavior remains unchanged. (All\n # of these functions use assert_array_compare under the hood.)\n # None of these should raise.\n a = np.array([np.nan])\n b = np.array([np.nan])\n assert_array_equal(a, b)\n assert_array_almost_equal(a, b)\n assert_array_less(a, b)\n assert_allclose(a, b)\n\n def test_report_max_relative_error(self):\n a = np.array([0, 1])\n b = np.array([0, 2])\n\n with pytest.raises(AssertionError) as exc_info:\n assert_allclose(a, b)\n msg = str(exc_info.value)\n assert_('Max relative difference: 0.5' in msg)\n\n\nclass TestArrayAlmostEqualNulp(object):\n\n def test_float64_pass(self):\n # The number of units of least precision\n # In this case, use a few places above the lowest level (ie nulp=1)\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float64)\n x = 10**x\n x = np.r_[-x, x]\n\n # Addition\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n # Subtraction\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n def test_float64_fail(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float64)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n def test_float32_pass(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float32)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n def test_float32_fail(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float32)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n 
assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n def test_float16_pass(self):\n nulp = 5\n x = np.linspace(-4, 4, 10, dtype=np.float16)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(x, y, nulp)\n\n def test_float16_fail(self):\n nulp = 5\n x = np.linspace(-4, 4, 10, dtype=np.float16)\n x = 10**x\n x = np.r_[-x, x]\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n x, y, nulp)\n\n def test_complex128_pass(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float64)\n x = 10**x\n x = np.r_[-x, x]\n xi = x + x*1j\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(xi, x + y*1j, nulp)\n assert_array_almost_equal_nulp(xi, y + x*1j, nulp)\n # The test condition needs to be at least a factor of sqrt(2) smaller\n # because the real and imaginary parts both change\n y = x + x*eps*nulp/4.\n assert_array_almost_equal_nulp(xi, y + y*1j, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(xi, x + y*1j, nulp)\n assert_array_almost_equal_nulp(xi, y + x*1j, nulp)\n y = x - x*epsneg*nulp/4.\n assert_array_almost_equal_nulp(xi, y + y*1j, nulp)\n\n def test_complex128_fail(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float64)\n x = 10**x\n x = np.r_[-x, x]\n xi = x + x*1j\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, x + y*1j, nulp)\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + x*1j, nulp)\n # The test condition needs to be at least a factor of sqrt(2) smaller\n # because the real and imaginary parts both change\n y = x + x*eps*nulp\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + y*1j, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, x + y*1j, nulp)\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + x*1j, nulp)\n y = x - x*epsneg*nulp\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + y*1j, nulp)\n\n def test_complex64_pass(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float32)\n x = 10**x\n x = np.r_[-x, x]\n xi = x + x*1j\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp/2.\n assert_array_almost_equal_nulp(xi, x + y*1j, nulp)\n assert_array_almost_equal_nulp(xi, y + x*1j, nulp)\n y = x + x*eps*nulp/4.\n assert_array_almost_equal_nulp(xi, y + y*1j, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp/2.\n assert_array_almost_equal_nulp(xi, x + y*1j, nulp)\n assert_array_almost_equal_nulp(xi, y + x*1j, nulp)\n y = x - x*epsneg*nulp/4.\n assert_array_almost_equal_nulp(xi, y + y*1j, nulp)\n\n def test_complex64_fail(self):\n nulp = 5\n x = np.linspace(-20, 20, 50, dtype=np.float32)\n x = 10**x\n x = np.r_[-x, x]\n xi = x + x*1j\n\n eps = np.finfo(x.dtype).eps\n y = x + x*eps*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n 
xi, x + y*1j, nulp)\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + x*1j, nulp)\n y = x + x*eps*nulp\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + y*1j, nulp)\n\n epsneg = np.finfo(x.dtype).epsneg\n y = x - x*epsneg*nulp*2.\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, x + y*1j, nulp)\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + x*1j, nulp)\n y = x - x*epsneg*nulp\n assert_raises(AssertionError, assert_array_almost_equal_nulp,\n xi, y + y*1j, nulp)\n\n\nclass TestULP(object):\n\n def test_equal(self):\n x = np.random.randn(10)\n assert_array_max_ulp(x, x, maxulp=0)\n\n def test_single(self):\n # Generate 1 + small deviation, check that adding eps gives a few UNL\n x = np.ones(10).astype(np.float32)\n x += 0.01 * np.random.randn(10).astype(np.float32)\n eps = np.finfo(np.float32).eps\n assert_array_max_ulp(x, x+eps, maxulp=20)\n\n def test_double(self):\n # Generate 1 + small deviation, check that adding eps gives a few UNL\n x = np.ones(10).astype(np.float64)\n x += 0.01 * np.random.randn(10).astype(np.float64)\n eps = np.finfo(np.float64).eps\n assert_array_max_ulp(x, x+eps, maxulp=200)\n\n def test_inf(self):\n for dt in [np.float32, np.float64]:\n inf = np.array([np.inf]).astype(dt)\n big = np.array([np.finfo(dt).max])\n assert_array_max_ulp(inf, big, maxulp=200)\n\n def test_nan(self):\n # Test that nan is 'far' from small, tiny, inf, max and min\n for dt in [np.float32, np.float64]:\n if dt == np.float32:\n maxulp = 1e6\n else:\n maxulp = 1e12\n inf = np.array([np.inf]).astype(dt)\n nan = np.array([np.nan]).astype(dt)\n big = np.array([np.finfo(dt).max])\n tiny = np.array([np.finfo(dt).tiny])\n zero = np.array([np.PZERO]).astype(dt)\n nzero = np.array([np.NZERO]).astype(dt)\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, inf,\n maxulp=maxulp))\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, big,\n maxulp=maxulp))\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, tiny,\n maxulp=maxulp))\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, zero,\n maxulp=maxulp))\n assert_raises(AssertionError,\n lambda: assert_array_max_ulp(nan, nzero,\n maxulp=maxulp))\n\n\nclass TestStringEqual(object):\n def test_simple(self):\n assert_string_equal(\"hello\", \"hello\")\n assert_string_equal(\"hello\\nmultiline\", \"hello\\nmultiline\")\n\n with pytest.raises(AssertionError) as exc_info:\n assert_string_equal(\"foo\\nbar\", \"hello\\nbar\")\n msg = str(exc_info.value)\n assert_equal(msg, \"Differences in strings:\\n- foo\\n+ hello\")\n\n assert_raises(AssertionError,\n lambda: assert_string_equal(\"foo\", \"hello\"))\n\n def test_regex(self):\n assert_string_equal(\"a+*b\", \"a+*b\")\n\n assert_raises(AssertionError,\n lambda: assert_string_equal(\"aaa\", \"a+b\"))\n\n\ndef assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):\n try:\n mod_warns = mod.__warningregistry__\n except AttributeError:\n # the lack of a __warningregistry__\n # attribute means that no warning has\n # occurred; this can be triggered in\n # a parallel test scenario, while in\n # a serial test scenario an initial\n # warning (and therefore the attribute)\n # are always created first\n mod_warns = {}\n\n num_warns = len(mod_warns)\n # Python 3.4 appears to clear any pre-existing warnings of the same type,\n # when raising warnings inside a catch_warnings block. 
So, there is a\n # warning generated by the tests within the context manager, but no\n # previous warnings.\n if 'version' in mod_warns:\n # Python 3 adds a 'version' entry to the registry,\n # do not count it.\n num_warns -= 1\n\n # Behavior of warnings is Python version dependent. Adjust the\n # expected result to compensate. In particular, Python 3.7 does\n # not make an entry for ignored warnings.\n if sys.version_info[:2] >= (3, 7):\n if py37 is not None:\n n_in_context = py37\n elif sys.version_info[:2] >= (3, 4):\n if py34 is not None:\n n_in_context = py34\n assert_equal(num_warns, n_in_context)\n\ndef test_warn_len_equal_call_scenarios():\n # assert_warn_len_equal is called under\n # varying circumstances depending on serial\n # vs. parallel test scenarios; this test\n # simply aims to probe both code paths and\n # check that no assertion is uncaught\n\n # parallel scenario -- no warning issued yet\n class mod(object):\n pass\n\n mod_inst = mod()\n\n assert_warn_len_equal(mod=mod_inst,\n n_in_context=0)\n\n # serial test scenario -- the __warningregistry__\n # attribute should be present\n class mod(object):\n def __init__(self):\n self.__warningregistry__ = {'warning1':1,\n 'warning2':2}\n\n mod_inst = mod()\n assert_warn_len_equal(mod=mod_inst,\n n_in_context=2)\n\n\ndef _get_fresh_mod():\n # Get this module, with warning registry empty\n my_mod = sys.modules[__name__]\n try:\n my_mod.__warningregistry__.clear()\n except AttributeError:\n # will not have a __warningregistry__ unless warning has been\n # raised in the module at some point\n pass\n return my_mod\n\n\ndef test_clear_and_catch_warnings():\n # Initial state of module, no warnings\n my_mod = _get_fresh_mod()\n assert_equal(getattr(my_mod, '__warningregistry__', {}), {})\n with clear_and_catch_warnings(modules=[my_mod]):\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_equal(my_mod.__warningregistry__, {})\n # Without specified modules, don't clear warnings during context\n # Python 3.7 catch_warnings doesn't make an entry for 'ignore'.\n with clear_and_catch_warnings():\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 1, py37=0)\n # Confirm that specifying module keeps old warning, does not add new\n with clear_and_catch_warnings(modules=[my_mod]):\n warnings.simplefilter('ignore')\n warnings.warn('Another warning')\n assert_warn_len_equal(my_mod, 1, py37=0)\n # Another warning, no module spec does add to warnings dict, except on\n # Python 3.4 (see comments in `assert_warn_len_equal`)\n # Python 3.7 catch_warnings doesn't make an entry for 'ignore'.\n with clear_and_catch_warnings():\n warnings.simplefilter('ignore')\n warnings.warn('Another warning')\n assert_warn_len_equal(my_mod, 2, py34=1, py37=0)\n\n\ndef test_suppress_warnings_module():\n # Initial state of module, no warnings\n my_mod = _get_fresh_mod()\n assert_equal(getattr(my_mod, '__warningregistry__', {}), {})\n\n def warn_other_module():\n # Apply along axis is implemented in python; stacklevel=2 means\n # we end up inside its module, not ours.\n def warn(arr):\n warnings.warn(\"Some warning 2\", stacklevel=2)\n return arr\n np.apply_along_axis(warn, 0, [0])\n\n # Test module based warning suppression:\n assert_warn_len_equal(my_mod, 0)\n with suppress_warnings() as sup:\n sup.record(UserWarning)\n # suppress warning from other module (may have .pyc ending),\n # if apply_along_axis is moved, had to be changed.\n sup.filter(module=np.lib.shape_base)\n warnings.warn(\"Some 
warning\")\n warn_other_module()\n # Check that the suppression did test the file correctly (this module\n # got filtered)\n assert_equal(len(sup.log), 1)\n assert_equal(sup.log[0].message.args[0], \"Some warning\")\n assert_warn_len_equal(my_mod, 0, py37=0)\n sup = suppress_warnings()\n # Will have to be changed if apply_along_axis is moved:\n sup.filter(module=my_mod)\n with sup:\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n # And test repeat works:\n sup.filter(module=my_mod)\n with sup:\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n\n # Without specified modules, don't clear warnings during context\n # Python 3.7 does not add ignored warnings.\n with suppress_warnings():\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 1, py37=0)\n\ndef test_suppress_warnings_type():\n # Initial state of module, no warnings\n my_mod = _get_fresh_mod()\n assert_equal(getattr(my_mod, '__warningregistry__', {}), {})\n\n # Test module based warning suppression:\n with suppress_warnings() as sup:\n sup.filter(UserWarning)\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n sup = suppress_warnings()\n sup.filter(UserWarning)\n with sup:\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n # And test repeat works:\n sup.filter(module=my_mod)\n with sup:\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 0)\n\n # Without specified modules, don't clear warnings during context\n # Python 3.7 does not add ignored warnings.\n with suppress_warnings():\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_warn_len_equal(my_mod, 1, py37=0)\n\n\ndef test_suppress_warnings_decorate_no_record():\n sup = suppress_warnings()\n sup.filter(UserWarning)\n\n @sup\n def warn(category):\n warnings.warn('Some warning', category)\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n warn(UserWarning) # should be supppressed\n warn(RuntimeWarning)\n assert_equal(len(w), 1)\n\n\ndef test_suppress_warnings_record():\n sup = suppress_warnings()\n log1 = sup.record()\n\n with sup:\n log2 = sup.record(message='Some other warning 2')\n sup.filter(message='Some warning')\n warnings.warn('Some warning')\n warnings.warn('Some other warning')\n warnings.warn('Some other warning 2')\n\n assert_equal(len(sup.log), 2)\n assert_equal(len(log1), 1)\n assert_equal(len(log2),1)\n assert_equal(log2[0].message.args[0], 'Some other warning 2')\n\n # Do it again, with the same context to see if some warnings survived:\n with sup:\n log2 = sup.record(message='Some other warning 2')\n sup.filter(message='Some warning')\n warnings.warn('Some warning')\n warnings.warn('Some other warning')\n warnings.warn('Some other warning 2')\n\n assert_equal(len(sup.log), 2)\n assert_equal(len(log1), 1)\n assert_equal(len(log2), 1)\n assert_equal(log2[0].message.args[0], 'Some other warning 2')\n\n # Test nested:\n with suppress_warnings() as sup:\n sup.record()\n with suppress_warnings() as sup2:\n sup2.record(message='Some warning')\n warnings.warn('Some warning')\n warnings.warn('Some other warning')\n assert_equal(len(sup2.log), 1)\n assert_equal(len(sup.log), 1)\n\n\ndef test_suppress_warnings_forwarding():\n def warn_other_module():\n # Apply along axis is implemented in python; stacklevel=2 means\n # we end up inside its module, not ours.\n def warn(arr):\n warnings.warn(\"Some warning\", stacklevel=2)\n return arr\n np.apply_along_axis(warn, 0, [0])\n\n with 
suppress_warnings() as sup:\n sup.record()\n with suppress_warnings(\"always\"):\n for i in range(2):\n warnings.warn(\"Some warning\")\n\n assert_equal(len(sup.log), 2)\n\n with suppress_warnings() as sup:\n sup.record()\n with suppress_warnings(\"location\"):\n for i in range(2):\n warnings.warn(\"Some warning\")\n warnings.warn(\"Some warning\")\n\n assert_equal(len(sup.log), 2)\n\n with suppress_warnings() as sup:\n sup.record()\n with suppress_warnings(\"module\"):\n for i in range(2):\n warnings.warn(\"Some warning\")\n warnings.warn(\"Some warning\")\n warn_other_module()\n\n assert_equal(len(sup.log), 2)\n\n with suppress_warnings() as sup:\n sup.record()\n with suppress_warnings(\"once\"):\n for i in range(2):\n warnings.warn(\"Some warning\")\n warnings.warn(\"Some other warning\")\n warn_other_module()\n\n assert_equal(len(sup.log), 2)\n\n\ndef test_tempdir():\n with tempdir() as tdir:\n fpath = os.path.join(tdir, 'tmp')\n with open(fpath, 'w'):\n pass\n assert_(not os.path.isdir(tdir))\n\n raised = False\n try:\n with tempdir() as tdir:\n raise ValueError()\n except ValueError:\n raised = True\n assert_(raised)\n assert_(not os.path.isdir(tdir))\n\n\ndef test_temppath():\n with temppath() as fpath:\n with open(fpath, 'w'):\n pass\n assert_(not os.path.isfile(fpath))\n\n raised = False\n try:\n with temppath() as fpath:\n raise ValueError()\n except ValueError:\n raised = True\n assert_(raised)\n assert_(not os.path.isfile(fpath))\n\n\nclass my_cacw(clear_and_catch_warnings):\n\n class_modules = (sys.modules[__name__],)\n\n\ndef test_clear_and_catch_warnings_inherit():\n # Test can subclass and add default modules\n my_mod = _get_fresh_mod()\n with my_cacw():\n warnings.simplefilter('ignore')\n warnings.warn('Some warning')\n assert_equal(my_mod.__warningregistry__, {})\n\n\[email protected](not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\nclass TestAssertNoGcCycles(object):\n \"\"\" Test assert_no_gc_cycles \"\"\"\n def test_passes(self):\n def no_cycle():\n b = []\n b.append([])\n return b\n\n with assert_no_gc_cycles():\n no_cycle()\n\n assert_no_gc_cycles(no_cycle)\n\n def test_asserts(self):\n def make_cycle():\n a = []\n a.append(a)\n a.append(a)\n return a\n\n with assert_raises(AssertionError):\n with assert_no_gc_cycles():\n make_cycle()\n\n with assert_raises(AssertionError):\n assert_no_gc_cycles(make_cycle)\n\n @pytest.mark.slow\n def test_fails(self):\n \"\"\"\n Test that in cases where the garbage cannot be collected, we raise an\n error, instead of hanging forever trying to clear it.\n \"\"\"\n\n class ReferenceCycleInDel(object):\n \"\"\"\n An object that not only contains a reference cycle, but creates new\n cycles whenever it's garbage-collected and its __del__ runs\n \"\"\"\n make_cycle = True\n\n def __init__(self):\n self.cycle = self\n\n def __del__(self):\n # break the current cycle so that `self` can be freed\n self.cycle = None\n\n if ReferenceCycleInDel.make_cycle:\n # but create a new one so that the garbage collector has more\n # work to do.\n ReferenceCycleInDel()\n\n try:\n w = weakref.ref(ReferenceCycleInDel())\n try:\n with assert_raises(RuntimeError):\n # this will be unable to get a baseline empty garbage\n assert_no_gc_cycles(lambda: None)\n except AssertionError:\n # the above test is only necessary if the GC actually tried to free\n # our object anyway, which python 2.7 does not.\n if w() is not None:\n pytest.skip(\"GC does not call __del__ on cyclic objects\")\n raise\n\n finally:\n # make sure that we stop creating reference cycles\n 
ReferenceCycleInDel.make_cycle = False\n"
] | [
[
"numpy.ones",
"numpy.testing.assert_equal",
"numpy.testing.assert_no_warnings",
"numpy.testing.assert_warns",
"numpy.datetime64",
"numpy.apply_along_axis",
"numpy.testing.tempdir",
"numpy.testing.clear_and_catch_warnings",
"numpy.testing.build_err_msg",
"numpy.timedelta64",
"numpy.ma.masked_array",
"numpy.testing.temppath",
"numpy.testing.assert_array_equal",
"numpy.testing.raises",
"numpy.ma.MaskedArray",
"numpy.linspace",
"numpy.testing.assert_array_max_ulp",
"numpy.testing.assert_array_less",
"numpy.equal",
"numpy.testing.assert_array_almost_equal_nulp",
"numpy.testing.assert_array_almost_equal",
"numpy.all",
"numpy.finfo",
"numpy.testing.assert_raises",
"numpy.empty",
"numpy.testing.assert_string_equal",
"numpy.random.randn",
"numpy.iinfo",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.testing.suppress_warnings",
"numpy.testing.assert_no_gc_cycles",
"numpy.testing.assert_"
]
] |
ISTE-NITK/istesleep | [
"caff596dfc02c67fc4fc11f4d386ba04b6e1a0d7"
] | [
"sleepAnalytics/Sleep Analysis.py"
] | [
"\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib.dates import date2num\nimport datetime\nimport matplotlib.dates as mdates\nimport time\nimport numpy as np\nfrom datetime import date\nimport matplotlib.lines as mlines\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.patches as mpatches\nimport category_encoders as ce\nimport glob\nfrom sklearn.cluster import KMeans\n\n\n# In[2]:\n\n#Getting all the data\ndef get_all_data(file_path):\n \n data = pd.DataFrame()\n quality = pd.DataFrame()\n \n avg_illuminance = pd.DataFrame()\n avg_airquaility = pd.DataFrame()\n avg_temp = pd.DataFrame()\n avg_humididty = pd.DataFrame()\n avg_quality = pd.DataFrame()\n \n sleep_quality_data = get_data('/home/prajwal/Desktop/istesleep/data/sleepdata.csv',delim=';')\n \n for filename in glob.glob(file_path): \n \n df = pd.read_csv(filename)\n \n avg_illuminance = avg_illuminance.append([np.mean(df[' illuminance'])])\n avg_airquaility = avg_airquaility.append([np.mean(df['airquality'])])\n avg_temp = avg_temp.append([np.mean(df[' temperature'])])\n avg_humididty = avg_humididty.append([np.mean(df[' humidity'])])\n \n date = df[' timestamp'].astype(str).str[:-22][0]\n date = date[1:11]\n \n sleep_quality = get_sleep_quality(sleep_quality_data,date)\n avg_quality = avg_quality.append(sleep_quality)\n sleep_quality = sleep_quality*df.shape[0]\n \n quality_date = pd.DataFrame(sleep_quality)\n quality = pd.concat([quality,quality_date],axis = 0,ignore_index=True)\n \n data = pd.concat([data,df],axis = 0,ignore_index=True)\n \n avg_data = pd.concat([avg_illuminance,avg_airquaility,avg_temp,avg_humididty,avg_quality],axis = 1, ignore_index=True)\n data = pd.concat([data,quality],axis = 1)\n \n return [data, avg_data]\n\n\n# In[53]:\n\ndef split_data(data):\n \n data_light = data[data['illuminance']>50]\n data_dark = data[data['illuminance']<50]\n #data_dark = data_dark[data_dark['illuminance']>5]\n \n return [data_light, data_dark]\n\n\n# In[ ]:\n\ndef get_sleep_quality(data,date):\n \n x = data['Sleep quality'][data['End'].astype(str)==date].tolist()\n \n return x\n\n\n# In[ ]:\n\ndef get_data(filepath,delim=','):\n \n data = pd.read_csv(filepath,sep=delim)\n \n return data\n\n\n# In[ ]:\n\ndef data_sample(data):\n\n data = data.iloc[::5, :]\n \n return data\n\n\n# In[ ]:\n\ndef data_to_csv(data,file_name):\n \n data.to_csv(file_name, sep=',')\n\n\n# In[ ]:\n\ndef convert_date(data):\n \n data[' timestamp'] = data[' timestamp'].astype(str).str[:-13]\n data[' timestamp'] = pd.to_datetime(data[' timestamp'])\n data['airquality'] = data['airquality'].astype(float)\n \n return data\n\n\n# In[ ]:\n\ndef plot_two(data, x,y):\n \n plt.scatter(data[x], data[y])\n plt.axis('tight')\n plt.ylabel(y)\n plt.xlabel(x)\n plt.show()\n\n\n# In[ ]:\n\ndef plot_simple(data,x,y,c='r',s = 40):\n \n plt.scatter(data[x], data[y], c = c,s=s, alpha=0.5)\n plt.axis('tight')\n plt.ylabel(y)\n plt.xlabel(x)\n plt.show()\n\n\n# In[ ]:\n\ndef plot(data,x,y,c='quality',s = 40):\n \n plt.scatter(data[x], data[y], c = data[c], s=s, alpha=0.5,cmap='viridis')\n plt.axis('tight')\n plt.colorbar()\n plt.ylabel(y)\n plt.xlabel(x)\n plt.show()\n\n\n# In[54]:\n\n#data, avg_data = get_all_data('/home/prajwal/Desktop/istesleep/data/Data/*.csv')\ndata = pd.read_csv('/home/prajwal/Desktop/Data/Data.csv')\navg_data = pd.read_csv('/home/prajwal/Desktop/Data/Avg_Data.csv')\n\n\n# In[55]:\n\n#Splitting data into two components, day and night data.\ndata_light,data_dark = 
split_data(data)\n\n\n# In[56]:\n\n#Plot - pass parameters, data and the column names you want to plot. Color indicates sleep quality\nplot(data_dark,'illuminance','airquality')\n\n\n# In[15]:\n\n#plot(avg_data,'avg_illuminance','avg_airquaility')\n\n\n# In[16]:\n\n#plot(avg_data,'avg_illuminance','avg_airquaility')\n\n\n# In[17]:\n\n#Plot - pass parameters, data and the column names you want to plot. \n#plot_two(data,'steps','quality')\n\n\n# In[18]:\n\n#x = np.mean(data['steps'][data['quality']==70])\n\n\n# In[19]:\n\n#y = np.mean(data['steps'][data['quality']==68])\n\n\n# In[20]:\n\n#z = np.mean(data['steps'][data['quality']==75])\n\n\n# In[21]:\n\n#v = np.mean(data['steps'][data['quality']==77])\n\n\n# In[22]:\n\n#t = x*70 + y*68 + z*75 + v*77\n\n\n# In[23]:\n\n#t/(70+71+75+77)\n\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.axis",
"pandas.DataFrame",
"pandas.to_datetime",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"pandas.concat",
"numpy.mean",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
]
] |
entrepreneur-interet-general/predisauvetage | [
"4d985ee79355652709da322db48daffb3e5a895a"
] | [
"collecte/postes_plage_snsm/convert.py"
] | [
"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n# Likely coming from\n# https://www.google.com/maps/d/viewer?mid=151Itl_57S7UlpC7P-TdfvT2Pz7Y\n\n\nclass KMLConverter(object):\n def __init__(self, filepath):\n self.filepath = filepath\n self.postes = []\n self.parse()\n\n def clean_key(self, key):\n return {\n u'DÉPARTEMENT': 'departement',\n 'NB DE SAUVETEURS SNSM': 'nb_sauveteurs',\n 'CP': 'code_postal',\n 'VILLE': 'ville',\n }[key]\n\n def parse_coordinates(self, value):\n if value is None:\n return None, None\n parts = map(float, value.text.split(','))\n latitude, longitude = parts[1], parts[0]\n return latitude, longitude\n\n def parse(self):\n with open(self.filepath, 'r') as f:\n soup = BeautifulSoup(f, 'xml')\n for placemark in soup.kml.Document.Folder.find_all('Placemark'):\n poste = {}\n poste['nom'] = placemark.find('name').text.strip()\n poste['latitude'], poste['longitude'] = self.parse_coordinates(\n placemark.find('coordinates')\n )\n for data in placemark.ExtendedData.find_all('Data'):\n key, value = data['name'], data.text.strip()\n if key != 'gx_media_links':\n cleaned_key = self.clean_key(key)\n if cleaned_key == 'nb_sauveteurs':\n poste[cleaned_key] = int(float(value))\n else:\n poste[cleaned_key] = value\n self.postes.append(poste)\n\n def to_csv(self, filepath):\n df = pd.DataFrame(self.postes)\n df = df.sort_values(by='code_postal').reset_index(drop=True)\n df.index += 1\n df.to_csv(filepath, encoding='utf-8', index=True, index_label='id')\n"
] | [
[
"pandas.DataFrame"
]
] |
padmec-reservoir/PRESTO | [
"71525a8dece2bcc4f16ff4a2120d7627e9ecd776"
] | [
"presto/Preprocessors/Upscale/Structured/StructuredUpscalingMethods.py"
] | [
"import numpy as np\nimport collections\nimport time\nfrom pymoab import types\nfrom pymoab import topo_util\nfrom PyTrilinos import Epetra, AztecOO, ML\n\n\nclass StructuredUpscalingMethods:\n \"\"\"Defines a structured upscaling mesh representation\n Parameters\n ----------\n coarse_ratio: List or array of integers\n List or array containing three values indicating the coarsening ratio\n of the mesh in x, y and z directions.\n mesh_size: List or array of integers\n List or array containing three values indicating the mesh size\n (number of fine elements) of the mesh in x, y and z.\n block_size List o array of floats\n List or array containing three values indicating the constant\n increments of vertex coordinates in x, y and z.\n \"\"\"\n def __init__(self, coarse_ratio, mesh_size, block_size, method, moab):\n\n self.coarse_ratio = coarse_ratio\n self.mesh_size = mesh_size\n self.block_size = block_size\n self.method = method\n\n self.verts = None # Array containing MOAB vertex entities\n self.elems = [] # List containing MOAB volume entities\n\n self.coarse_verts = None # Array containing MOAB vertex entities for\n # the coarse mesh\n self.coarse_elems = [] # List containig MOAB volume entities for the\n # coarse mesh\n\n self.primals = {} # Mapping from tuples (idx, dy, idz) to Coarse\n # volumes\n self.primal_ids = []\n\n self.primals_adj = []\n\n self.perm = []\n\n # MOAB boilerplate\n self.mb = moab\n self.root_set = self.mb.get_root_set()\n self.mesh_topo_util = topo_util.MeshTopoUtil(self.mb)\n\n # Pytrilinos boilerplate\n self.comm = Epetra.PyComm()\n self.mlList = {\"max levels\": 3,\n \"output\": 10,\n \"smoother: type\": \"symmetric Gauss-Seidel\",\n \"aggregation: type\": \"Uncoupled\"\n }\n\n def create_tags(self):\n # TODO: - Should go on Common (?)\n\n self.gid_tag = self.mb.tag_get_handle(\n \"GLOBAL_ID\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_DENSE, True)\n\n self.coarse_gid_tag = self.mb.tag_get_handle(\n \"GLOBAL_ID_COARSE\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_DENSE, True)\n\n # this will gide through the meshsets corresponding to coarse scale\n # volumes\n self.primal_id_tag = self.mb.tag_get_handle(\n \"PRIMAL_ID\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_SPARSE, True)\n\n self.phi_tag = self.mb.tag_get_handle(\n \"PHI\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n self.boundary_x_tag, self.boundary_y_tag, self.boundary_z_tag = (\n self.mb.tag_get_handle(\n \"LOCAL BOUNDARY CONDITIONS - X Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True),\n self.mb.tag_get_handle(\n \"LOCAL BOUNDARY CONDITIONS - y Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True),\n self.mb.tag_get_handle(\n \"LOCAL BOUNDARY CONDITIONS - z Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n )\n\n (self.primal_perm_x_tag,\n self.primal_perm_y_tag,\n self.primal_perm_z_tag) = (\n self.mb.tag_get_handle(\n \"COARSE PERMEABILITY - X Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True),\n self.mb.tag_get_handle(\n \"COARSE PERMEABILITY - y Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True),\n self.mb.tag_get_handle(\n \"COARSE PERMEABILITY - z Axis\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n )\n\n # tag handle for upscaling operation\n self.primal_phi_tag = self.mb.tag_get_handle(\n \"PRIMAL_PHI\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n self.perm_tag = self.mb.tag_get_handle(\n \"PERM\", 9, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n # tag handle for upscaling operation\n 
self.primal_perm_tag = self.mb.tag_get_handle(\n \"PRIMAL_PERM\", 9, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n # either shoud go or put other directions..., I...\n\n self.abs_perm_x_tag = self.mb.tag_get_handle(\n \"ABS_PERM_X\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n self.abs_perm_fine_x_tag = self.mb.tag_get_handle(\n \"ABS_PERM_X_FINE\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n\n self.fine_to_primal_tag = self.mb.tag_get_handle(\n \"FINE_TO_PRIMAL\", 1, types.MB_TYPE_HANDLE,\n types.MB_TAG_SPARSE, True)\n\n self.primal_adj_tag = self.mb.tag_get_handle(\n \"PRIMAL_ADJ\", 1, types.MB_TYPE_HANDLE,\n types.MB_TAG_SPARSE, True)\n\n self.coarse_injection_tag = self.mb.tag_get_handle(\n \"injection_well_coarse\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_SPARSE, True)\n\n self.coarse_production_tag = self.mb.tag_get_handle(\n \"production_well_coarse\", 1, types.MB_TYPE_INTEGER,\n types.MB_TAG_SPARSE, True)\n\n def get_block_size_coarse(self):\n block_size_coarse = []\n total_size = (np.asarray(self.mesh_size, dtype='int32')) * np.asarray(\n self.block_size, dtype='float64')\n\n for dim in range(0, 3):\n block_size_coarse.append([self.coarse_ratio[dim] * np.asarray(\n self.block_size[dim], dtype='float64') * coarse_dim\n for coarse_dim in np.arange(self._coarse_dims()[dim],\n dtype='int32')])\n block_size_coarse[dim].append(total_size[dim])\n return block_size_coarse\n\n def create_coarse_vertices(self):\n # TODO: - Should go on Common\n\n block_size_coarse = self.get_block_size_coarse()\n\n coarse_coords = np.array([\n (i, j, k)\n for k in (np.array(block_size_coarse[2], dtype='float64'))\n for j in (np.array(block_size_coarse[1], dtype='float64'))\n for i in (np.array(block_size_coarse[0], dtype='float64'))\n ])\n return self.mb.create_vertices(coarse_coords.flatten())\n\n def _coarse_dims(self,):\n # TODO: - Should go on Common\n\n mesh_size_coarse = np.asarray(\n self.mesh_size, dtype='int32') // np.asarray(\n self.coarse_ratio, dtype='int32')\n return mesh_size_coarse\n\n def calculate_primal_ids(self):\n # TODO: - Should go on Common\n for dim in range(0, 3):\n self.primal_ids.append(\n [i // (self.coarse_ratio[dim]) for i in xrange(\n self.mesh_size[dim])])\n\n new_primal = []\n for dim in range(0, 3):\n new_primal.append(\n self.primal_ids[dim][(\n self.mesh_size[dim] // self.coarse_ratio[dim]) *\n self.coarse_ratio[dim]:])\n\n if len(new_primal[dim]) < (self.mesh_size[dim] // 2):\n new_primal[dim] = np.repeat(\n max(self.primal_ids[dim]) - 1,\n len(new_primal[dim])).tolist()\n self.primal_ids[dim] = (self.primal_ids[dim][:self.mesh_size[\n dim] // self.coarse_ratio[dim] * self.coarse_ratio[dim]] +\n new_primal[dim])\n\n def create_fine_vertices(self):\n # TODO: - Should go on Common\n\n coords = np.array([\n (i, j, k) for k in (np.arange(\n self.mesh_size[2] + 1, dtype='float64') *\n self.block_size[2])\n for j in (np.arange(\n self.mesh_size[1] + 1, dtype='float64') *\n self.block_size[1])\n for i in (np.arange(\n self.mesh_size[0] + 1, dtype='float64') *\n self.block_size[0])\n ], dtype='float64')\n return self.mb.create_vertices(coords.flatten())\n\n def _create_hexa(self, i, j, k, verts, mesh):\n # TODO: - Should go on Common\n # - Refactor this (????????)\n # (i, j, k)\n hexa = [verts[i + (j * (mesh[0] + 1)) +\n (k * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i+1, j, k)\n verts[(i + 1) + (j * (mesh[0] + 1)) +\n (k * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i+1, j+1, k)\n verts[(i + 1) + (j + 1) * (mesh[0]) +\n (j + 1) + (k * ((mesh[0] + 
1)*(mesh[1] + 1)))],\n # (i, j+1, k)\n verts[i + (j + 1) * (mesh[0]) + (j + 1) +\n (k * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i, j, k+1)\n verts[i + (j * (mesh[0] + 1)) +\n ((k + 1) * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i+1, j, k+1)\n verts[(i + 1) + (j * (mesh[0] + 1)) +\n ((k + 1) * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i+1, j+1, k+1)\n verts[(i + 1) + (j + 1) * (mesh[0]) +\n (j + 1) + ((k + 1) * ((mesh[0] + 1) * (mesh[1] + 1)))],\n # (i, j+1, k+1)\n verts[i + (j + 1) * (mesh[0]) +\n (j + 1) + ((k + 1) * ((mesh[0] + 1) * (mesh[1] + 1)))]]\n\n return hexa\n\n def _coarsening_ratio(self, dim):\n coarsening = (collections.Counter(self.primal_ids[dim]))\n return coarsening.values()\n\n def create_fine_blocks_and_primal(self):\n # TODO: - Should go on Common\n fine_vertices = self.create_fine_vertices()\n cur_id = 0\n # Create fine grid\n for k, idz in zip(xrange(self.mesh_size[2]),\n self.primal_ids[2]):\n # Flake8 bug\n print(\"{0} / {1}\".format(k + 1, self.mesh_size[2]))\n for j, idy in zip(xrange(self.mesh_size[1]),\n self.primal_ids[1]):\n for i, idx in zip(xrange(self.mesh_size[0]),\n self.primal_ids[0]):\n\n hexa = self._create_hexa(i, j, k,\n fine_vertices,\n self.mesh_size)\n el = self.mb.create_element(types.MBHEX, hexa)\n\n self.mb.tag_set_data(self.gid_tag, el, cur_id)\n # Fine Global ID\n self.mb.tag_set_data(self.gid_tag, el, cur_id)\n # Fine Porosity\n self.mb.tag_set_data(self.phi_tag, el, self.phi_values[\n cur_id])\n # Fine Permeability tensor\n self.mb.tag_set_data(self.perm_tag, el, [\n self.perm_values[cur_id], 0, 0,\n 0, self.perm_values[cur_id + self.mesh_size[0] *\n self.mesh_size[1] *\n self.mesh_size[2]], 0,\n 0, 0, self.perm_values[cur_id + 2*self.mesh_size[0] *\n self.mesh_size[1] *\n self.mesh_size[2]]])\n self.mb.tag_set_data(self.abs_perm_fine_x_tag, el,\n self.perm_values[cur_id])\n self.elems.append(el)\n cur_id += 1\n\n # Create primal coarse grid\n try:\n primal = self.primals[(idx, idy, idz)]\n self.mb.add_entities(primal, [el])\n self.mb.tag_set_data(\n self.fine_to_primal_tag, el, primal)\n except KeyError:\n primal = self.mb.create_meshset()\n self.primals[(idx, idy, idz)] = primal\n self.mb.add_entities(primal, [el])\n self.mb.tag_set_data(\n self.fine_to_primal_tag, el, primal)\n\n # do a 'if flow based generate mesh bc over here'\n\n primal_id = 0\n for primal in self.primals.values():\n self.mb.tag_set_data(self.primal_id_tag, primal, primal_id)\n primal_id += 1\n\n def store_primal_adj(self):\n # TODO: - Should go on Common\n min_coarse_ids = np.array([0, 0, 0])\n max_coarse_ids = np.array([max(self.primal_ids[0]),\n max(self.primal_ids[1]),\n max(self.primal_ids[2])])\n\n for primal_id, primal in self.primals.iteritems():\n adj = self.mb.create_meshset()\n adj_ids = []\n\n for i in np.arange(-1, 2):\n for j in np.arange(-1, 2):\n for k in np.arange(-1, 2):\n coord_inc = np.array([i, j, k])\n adj_id = primal_id + coord_inc\n if any(adj_id != primal_id) and \\\n (sum(coord_inc == [0, 0, 0]) == 2) and \\\n all(adj_id >= min_coarse_ids) and \\\n all(adj_id <= max_coarse_ids):\n\n self.mb.add_entities(\n adj, [self.primals[tuple(adj_id)]])\n adj_ids.append(tuple(adj_id))\n\n self.mb.tag_set_data(self.primal_adj_tag, primal, adj)\n\n self.primal_adj[primal_id] = adj_ids\n\n def _get_block_by_ijk(self, i, j, k):\n # TODO: - Should go on Common\n # - Should reformulate to get self.mesh_size instead of input\n\n \"\"\"\n Track down the block from its (i,j,k) position.\n \"\"\"\n block = (k) * self.mesh_size[0] * self.mesh_size[1]+(\n (i)+(j) * 
self.mesh_size[0])\n return block\n\n def _get_elem_by_ijk(self, ijk):\n # TODO Should go on Common\n\n block_id = self._get_block_by_ijk(\n ijk[0], ijk[1], ijk[2])\n elem = self.elems[block_id]\n return elem # Why not \"return self.elems[block_id]\" ?????\n\n def read_phi(self):\n # TODO: - Should go on Common\n # - This should go on .cfg\n # - It should have a if option for reading or for generating\n phi_values = []\n with open('spe_phi.dat') as phi:\n for line in phi:\n phi_values.extend(line.rstrip().split(' \t'))\n self.phi_values = [float(val) for val in phi_values]\n\n def read_perm(self):\n # TODO: - Should go on Common\n # - This should go on .cfg\n # - It should have a if option for reading or for generating\n\n perm_values = []\n with open('spe_perm.dat') as perm:\n for line in perm:\n line_list = line.rstrip().split(' \t')\n if len(line_list) > 1:\n perm_values.extend(line_list)\n self.perm_values = [float(val) for val in perm_values]\n\n def upscale_phi(self):\n for _, primal in self.primals.iteritems():\n # Calculate mean phi on primal\n fine_elems_in_primal = self.mb.get_entities_by_type(\n primal, types.MBHEX)\n fine_elems_phi_values = self.mb.tag_get_data(self.phi_tag,\n fine_elems_in_primal)\n primal_mean_phi = fine_elems_phi_values.mean()\n # Store mean phi on the primal meshset and internal elements\n self.mb.tag_set_data(self.primal_phi_tag, primal, primal_mean_phi)\n\n def upscale_perm_mean(self, average_method):\n self.primal_perm = (self.primal_perm_x_tag,\n self.primal_perm_y_tag,\n self.primal_perm_z_tag)\n self.average_method = average_method\n basis = ((1, 0, 0), (0, 1, 0), (0, 0, 1))\n perm = []\n for primal_id, primal in self.primals.iteritems():\n\n fine_elems_in_primal = self.mb.get_entities_by_type(\n primal, types.MBHEX)\n fine_perm_values = self.mb.tag_get_data(self.perm_tag,\n fine_elems_in_primal)\n primal_perm = [tensor.reshape(3, 3) for tensor in fine_perm_values]\n for dim in range(0, 3):\n perm = [(np.dot(np.dot(tensor, basis[dim]), basis[dim]))\n for tensor in primal_perm]\n if average_method == 'Arithmetic':\n primal_perm[dim] = np.mean(perm)\n elif average_method == 'Geometric':\n primal_perm[dim] = np.prod(np.asarray(\n perm)) ** len(1 / np.asarray(perm))\n elif average_method == 'Harmonic':\n primal_perm[dim] = len(np.asarray(\n perm)) / sum(1/np.asarray(perm))\n else:\n print(\"Choose either Arithmetic, Geometric or Harmonic.\")\n exit()\n\n perm = primal_perm[dim]\n self.mb.tag_set_data(self.primal_perm[dim], primal, perm)\n\n self.mb.tag_set_data(self.primal_perm_tag, primal,\n [primal_perm[0], 0, 0,\n 0, primal_perm[1], 0,\n 0, 0, primal_perm[2]])\n\n def _primal_centroid(self, setid):\n coarse_sums = np.array(\n [[0, 0, 0],\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 1]]\n )\n primal_centroid = (\n (np.asarray(setid) + coarse_sums[0]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[1]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[2]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[3]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[4]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[5]) *\n np.array([self.coarse_ratio[0],\n 
self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[6]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]) +\n (np.asarray(setid) + coarse_sums[7]) *\n np.array([self.coarse_ratio[0],\n self.coarse_ratio[1],\n self.coarse_ratio[2]]))\n\n primal_centroid = primal_centroid // 8\n return primal_centroid\n\n def get_boundary_meshsets(self):\n\n self.boundary_dir = (self.boundary_x_tag,\n self.boundary_y_tag,\n self.boundary_z_tag\n )\n self.boundary_meshsets = {}\n for dim in range(0, 3):\n for k, idz in zip(xrange(self.mesh_size[2]),\n self.primal_ids[2]):\n for j, idy in zip(xrange(self.mesh_size[1]),\n self.primal_ids[1]):\n for i, idx in zip(xrange(self.mesh_size[0]),\n self.primal_ids[0]):\n el = self._get_elem_by_ijk((i, j, k))\n if (i, j, k)[dim] == (self.coarse_ratio[dim] *\n self.primal_ids[dim][(i, j,\n k)[dim]]):\n self.mb.tag_set_data(self.boundary_dir[dim],\n el, 1.0)\n try:\n boundary_meshset = self.boundary_meshsets[\n (idx, idy, idz), dim]\n self.mb.add_entities(boundary_meshset, [el])\n\n except KeyError:\n boundary_meshset = self.mb.create_meshset()\n self.boundary_meshsets[\n (idx, idy, idz), dim] = boundary_meshset\n self.mb.add_entities(boundary_meshset, [el])\n\n if (i, j, k)[dim] == (self.coarse_ratio[dim] *\n self.primal_ids[dim][\n (i, j, k)[dim]] +\n self._coarsening_ratio(dim)[\n self.primal_ids[dim][\n (i, j, k)[dim]]] - 1):\n self.mb.tag_set_data(\n self.boundary_dir[dim], el, 0.0)\n\n try:\n boundary_meshset = self.boundary_meshsets[\n (idx, idy, idz), dim]\n self.mb.add_entities(boundary_meshset, [el])\n\n except KeyError:\n boundary_meshset = self.mb.create_meshset()\n self.boundary_meshsets[\n (idx, idy, idz), dim] = boundary_meshset\n self.mb.add_entities(boundary_meshset, [el])\n\n def set_global_problem(self):\n pass\n\n def upscale_perm_flow_based(self, domain, dim, boundary_meshset):\n self.average_method = 'flow-based'\n area = (self.block_size[1] * self.block_size[2],\n self.block_size[0] * self.block_size[2],\n self.block_size[0] * self.block_size[1],\n )\n pres_tag = self.mb.tag_get_handle(\n \"Pressure\", 1, types.MB_TYPE_DOUBLE,\n types.MB_TAG_SPARSE, True)\n std_map = Epetra.Map(len(domain), 0, self.comm)\n linear_vals = np.arange(0, len(domain))\n id_map = dict(zip(domain, linear_vals))\n boundary_elms = set()\n\n b = Epetra.Vector(std_map)\n x = Epetra.Vector(std_map)\n\n A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)\n\n t0 = time.time()\n for elem in boundary_meshset:\n if elem in boundary_elms:\n continue\n boundary_elms.add(elem)\n idx = id_map[elem]\n A.InsertGlobalValues(idx, [1], [idx])\n b[idx] = self.mb.tag_get_data(self.boundary_dir[dim], elem,\n flat=True)\n\n self.mb.tag_set_data(pres_tag, domain, np.repeat(0.0, len(domain)))\n t1 = time.time()\n for elem in (set(domain) ^ boundary_elms):\n\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(\n np.asarray([elem]), 2, 3, 0)\n adj_volumes = [elems for elems in adj_volumes if elems in domain]\n adj_volumes_set = set(adj_volumes)\n\n elem_center = self.mesh_topo_util.get_average_position(\n np.asarray([elem]))\n K1 = self.mb.tag_get_data(self.perm_tag, [elem], flat=True)\n adj_perms = []\n for adjacencies in range(len(adj_volumes)):\n adj_perms.append(self.mb.tag_get_data(\n self.perm_tag, adj_volumes, flat=True)[\n adjacencies*9:(adjacencies+1)*9])\n values = []\n ids = []\n for K2, adj in zip(adj_perms, adj_volumes_set):\n adj_center = self.mesh_topo_util.get_average_position(\n np.asarray([adj]))\n N = elem_center - 
adj_center\n N = N / np.sqrt(N[0] ** 2 + N[1] ** 2 + N[2] ** 2)\n K1proj = np.dot(np.dot(N, K1.reshape([3, 3])), N)\n K2proj = np.dot(np.dot(N, K2.reshape([3, 3])), N)\n dl = np.linalg.norm((elem_center - adj_center)/2)\n K_eq = (2 * K1proj * K2proj) / (K1proj * dl + K2proj * dl)\n values.append(- K_eq)\n if adj in id_map:\n ids.append(id_map[adj])\n values.append(-sum(values))\n idx = id_map[elem]\n ids.append(idx)\n A.InsertGlobalValues(idx, values, ids)\n A.FillComplete()\n t2 = time.time()\n\n linearProblem = Epetra.LinearProblem(A, x, b)\n solver = AztecOO.AztecOO(linearProblem)\n solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_warnings)\n solver.Iterate(300, 1e-9)\n # \"\"\"\n self.mb.tag_set_data(pres_tag, domain, np.asarray(x))\n print(\"took {0} seconds to solve.\".format(time.time() - t2))\n # Get the flux - should break down in another part\n flow_rate = 0.0\n total_area = 0.0\n for elem in boundary_meshset:\n elem_center = self.mesh_topo_util.get_average_position(\n np.asarray([elem]))\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(\n np.asarray([elem]), 2, 3)\n adj_volumes_set = set(adj_volumes).intersection(set(domain))\n adj_to_boundary_volumes = set()\n for el in adj_volumes_set:\n if el in boundary_meshset:\n adj_to_boundary_volumes.add(el)\n adj_volumes_set = adj_volumes_set - adj_to_boundary_volumes\n for adj in adj_volumes_set:\n adj_center = self.mesh_topo_util.get_average_position(\n np.asarray([adj]))\n N = elem_center - adj_center\n N = N / np.sqrt(N[0] ** 2 + N[1] ** 2 + N[2] ** 2)\n adj_pres = self.mb.tag_get_data(pres_tag, adj)\n adj_perm = np.dot(N, np.dot(self.mb.tag_get_data(\n self.perm_tag, adj).reshape(\n [3, 3]), N))\n elem_perm = np.dot(N, np.dot(self.mb.tag_get_data(\n self.perm_tag, elem).reshape(\n [3, 3]), N))\n dl = np.linalg.norm((elem_center - adj_center)/2)\n K_equiv = (2 * adj_perm * elem_perm) / (adj_perm * dl +\n elem_perm * dl)\n\n flow_rate = flow_rate + area[dim] * K_equiv * adj_pres / dl\n total_area = total_area + area[dim]\n perm = flow_rate * dl / total_area\n return perm\n\n def flow_based_coarse_perm(self):\n\n self.primal_perm = (self.primal_perm_x_tag,\n self.primal_perm_y_tag,\n self.primal_perm_z_tag)\n self.get_boundary_meshsets()\n\n for primal_id, primal in self.primals.iteritems():\n print(\"iterating over meshset {0}\".format(primal_id))\n fine_elems_in_primal = self.mb.get_entities_by_type(\n primal, types.MBHEX)\n # The A matrix should be called here\n for dim in range(0, 3):\n self.mb.add_child_meshset(self.primals[(primal_id)],\n self.boundary_meshsets[\n primal_id, dim])\n boundary = self.mb.get_entities_by_handle(np.asarray(\n self.boundary_meshsets[primal_id, dim]))\n perm = self.upscale_perm_flow_based(fine_elems_in_primal, dim,\n boundary)\n self.mb.tag_set_data(self.primal_perm[dim], primal, perm)\n\n def coarse_grid(self):\n # We should include a switch for either printing coarse grid or fine\n # grid here that is fedy by the .cfg file.\n \"\"\"\n This will not delete primal grid information prevously calculated,\n since it is only looking for elements within the root_set that are\n MBHEX, whilst all props from primal grid are stored as meshsets\n \"\"\"\n fine_grid = self.mb.get_entities_by_type(self.root_set, types.MBHEX)\n self.mb.delete_entities(fine_grid)\n coarse_vertices = self.create_coarse_vertices()\n coarse_dims = self._coarse_dims()\n cur_id = 0\n for k in xrange(coarse_dims[2]):\n print(\"{0} / {1}\".format(k + 1, coarse_dims[2]))\n for j in xrange(coarse_dims[1]):\n for i in 
xrange(coarse_dims[0]):\n\n hexa = self._create_hexa(i, j, k,\n coarse_vertices,\n coarse_dims)\n el = self.mb.create_element(types.MBHEX, hexa)\n\n # Assign coarse scale properties previously calculated\n\n self.mb.tag_set_data(\n self.coarse_gid_tag, el, cur_id)\n self.mb.tag_set_data(self.primal_phi_tag, el,\n self.mb.tag_get_data(\n self.primal_phi_tag,\n self.primals[(i, j, k)]))\n self.mb.tag_set_data(self.primal_perm_tag, el, [\n self.mb.tag_get_data(self.primal_perm[0],\n self.primals[(i, j, k)]), 0, 0,\n 0, self.mb.tag_get_data(self.primal_perm[1],\n self.primals[(i, j, k)]), 0, 0,\n 0, self.mb.tag_get_data(self.primal_perm[2],\n self.primals[(i, j, k)])])\n self.mb.tag_set_data(self.abs_perm_x_tag, el,\n self.mb.tag_get_data(self.primal_perm[\n 0], self.primals[(i, j, k)]))\n self.coarse_elems.append(el)\n cur_id += 1\n\n def _get_block_by_ijk_coarse(self, i, j, k):\n # TODO: - Should go on Common\n # - Should reformulate to get self.mesh_size instead of input\n mesh_size_coarse = self._coarse_dims()\n \"\"\"\n Track down the block from its (i,j,k) position.\n \"\"\"\n block = (k) * mesh_size_coarse[0] * mesh_size_coarse[1]+(\n (i)+(j) * mesh_size_coarse[0])\n return block\n\n def _get_elem_by_ijk_coarse(self, ijk):\n # TODO Should go on Common\n\n block_id = self._get_block_by_ijk_coarse(\n ijk[0], ijk[1], ijk[2])\n elem = self.coarse_elems[block_id]\n return elem\n\n def create_wells(self):\n mesh_size_coarse = self._coarse_dims()\n \"\"\"(self.mesh_size[0],\n self.mesh_size[1],\n self.mesh_size[2]) \"\"\" # ,self._coarse_dims()\n self.injection_wells_coarse = {}\n self.production_wells_coarse = {}\n\n self.injection_wells_coarse[1] = self.mb.create_meshset()\n\n self.production_wells_coarse[1] = self.mb.create_meshset()\n self.production_wells_coarse[2] = self.mb.create_meshset()\n self.production_wells_coarse[3] = self.mb.create_meshset()\n self.production_wells_coarse[4] = self.mb.create_meshset()\n\n well = [self._get_elem_by_ijk_coarse((0, mesh_size_coarse[1] - 1, z))\n for z in range(0, mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.production_wells_coarse[1], [well_el])\n self.mb.tag_set_data(self.coarse_production_tag,\n self.production_wells_coarse[1], 1)\n\n well = [self._get_elem_by_ijk_coarse((0, 1, z))\n for z in range(0, mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.production_wells_coarse[2], [well_el])\n self.mb.tag_set_data(self.coarse_production_tag,\n self.production_wells_coarse[2], 1)\n\n well = [self._get_elem_by_ijk_coarse((mesh_size_coarse[0] - 1,\n mesh_size_coarse[1] - 1, z)) for z in range(0,\n mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.production_wells_coarse[3], [well_el])\n self.mb.tag_set_data(self.coarse_production_tag,\n self.production_wells_coarse[3], 1)\n\n well = [self._get_elem_by_ijk_coarse((mesh_size_coarse[0] - 1,\n mesh_size_coarse[1] - 1, z))\n for z in range(0, mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.production_wells_coarse[4], [well_el])\n self.mb.tag_set_data(self.coarse_production_tag,\n self.production_wells_coarse[4], 1)\n\n well = [self._get_elem_by_ijk_coarse((0, 0, z)) for z in range(0,\n mesh_size_coarse[2])]\n for well_el in well:\n self.mb.add_entities(self.injection_wells_coarse[1], [well_el])\n self.mb.tag_set_data(self.coarse_injection_tag,\n self.injection_wells_coarse[1], 1)\n # def solve_it():\n\n def export_data(self):\n writedir = ('I', 'J', 'K')\n mesh_size_coarse = self._coarse_dims()\n with 
open('coarse_phi{0}_{1}.dat'.format(\n self.coarse_ratio, self.average_method), 'w') as coarse_phi:\n coarse_phi.write('*POR *ALL')\n coarse_phi.write('\\n')\n for k in xrange(mesh_size_coarse[2]):\n # coarse_phi.write('-- LAYER {0}'.format(k+1))\n coarse_phi.write('\\n')\n for j in xrange(mesh_size_coarse[1]):\n\n # coarse_phi.write('-- ROW {0}'.format(j+1))\n coarse_phi.write('\\n')\n for i in xrange(mesh_size_coarse[0]):\n if i < mesh_size_coarse[0] - 1:\n coarse_phi.write('%f' % (self.mb.tag_get_data(\n self.primal_phi_tag,\n self.primals[(i, j, k)])\n )\n )\n coarse_phi.write(' \t')\n else:\n coarse_phi.write('%f' % (self.mb.tag_get_data(\n self.primal_phi_tag,\n self.primals[(i, j, k)])\n )\n )\n coarse_phi.write('\\n')\n coarse_phi.close()\n with open('coarse_perm{0}_{1}.dat'.format(\n self.coarse_ratio, self.average_method), 'w') as coarse_perm:\n for dim in range(0, 3):\n coarse_perm.write('*PERM{0} *ALL'.format(writedir[dim]))\n coarse_perm.write('\\n')\n for k in xrange(mesh_size_coarse[2]):\n # coarse_perm.write('-- LAYER {0}'.format(k+1))\n coarse_perm.write('\\n')\n for j in xrange(mesh_size_coarse[1]):\n # coarse_perm.write('-- ROW {0}'.format(j+1))\n coarse_perm.write('\\n')\n for i in xrange(mesh_size_coarse[0]):\n if i < mesh_size_coarse[0] - 1:\n\n coarse_perm.write(\n '%f' % (self.mb.tag_get_data(\n self.primal_perm[dim],\n self.primals[(i, j, k)])))\n coarse_perm.write(' \t')\n else:\n coarse_perm.write(\n '%f' % (self.mb.tag_get_data(\n self.primal_perm[dim],\n self.primals[(i, j, k)])))\n coarse_perm.write('\\n')\n coarse_perm.close()\n\n def export(self, outfile):\n self.mb.write_file(outfile)\n"
] | [
[
"numpy.sqrt",
"numpy.asarray",
"numpy.arange",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm",
"numpy.mean"
]
] |
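The `upscale_perm_mean` method in the record above chooses between arithmetic, geometric and harmonic averages of the fine-cell permeabilities projected on each axis; note that its geometric branch raises the product to `len(1 / np.asarray(perm))` rather than `1.0 / len(perm)`, which looks like a typo. Below is a minimal standalone sketch of the three averages with plain NumPy, using the conventional geometric-mean definition; the `fine_perm` values are illustrative and not taken from the SPE data read by `read_perm`.

```python
import numpy as np

def average_permeability(perm, method="Arithmetic"):
    """Collapse a 1-D array of fine-cell permeabilities into one coarse value.

    Mirrors the three branches of upscale_perm_mean as a standalone helper,
    with the conventional geometric mean.
    """
    perm = np.asarray(perm, dtype=float)
    if method == "Arithmetic":
        return perm.mean()
    elif method == "Geometric":
        return np.prod(perm) ** (1.0 / len(perm))
    elif method == "Harmonic":
        return len(perm) / np.sum(1.0 / perm)
    raise ValueError("Choose either Arithmetic, Geometric or Harmonic.")

# Illustrative fine-scale values (hypothetical, not from spe_perm.dat)
fine_perm = [100.0, 200.0, 50.0, 400.0]
for m in ("Arithmetic", "Geometric", "Harmonic"):
    print(m, average_permeability(fine_perm, m))
```

As a rule of thumb, the arithmetic mean suits flow parallel to layering and the harmonic mean suits flow across layers, which is why the class also offers the flow-based upscaling as an alternative.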
satsumas/tensorflow | [
"3fe3f2b1984aab6f159b89aa3ab0069988925689"
] | [
"tensorflow/python/module/module.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Modules encapsulate building stateful components.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"Module\")\nclass Module(tracking.AutoTrackable):\n \"\"\"Base neural network module class.\n\n A module is a named container for `tf.Variable`s, other `tf.Module`s and\n functions which apply to user input. For example a dense layer in a neural\n network might be implemented as a `tf.Module`:\n\n ```python\n >>> class Dense(tf.Module):\n ... def __init__(self, in_features, output_features, name=None):\n ... super(Dense, self).__init__(name=name)\n ... self.w = tf.Variable(\n ... tf.random_normal([input_features, output_features]), name='w')\n ... self.b = tf.Variable(tf.zeros([output_features]), name='b')\n ...\n ... def __call__(self, x):\n ... y = tf.matmul(x, self.w) + self.b\n ... return tf.nn.relu(y)\n ```\n\n You can use the Dense layer as you would expect:\n\n ```python\n >>> d = Dense(input_features=64, output_features=10)\n >>> d(tf.ones([100, 64]))\n <tf.Tensor: ...>\n ```\n\n By subclassing `tf.Module` instead of `object` any `tf.Variable` or\n `tf.Module` instances assigned to object properties can be collected using\n the `variables`, `trainable_variables` or `submodules` property:\n\n ```python\n >>> d.variables\n (<tf.Variable 'b:0' ...>, <tf.Variable 'w:0' ...>)\n ```\n\n Subclasses of `tf.Module` can also take advantage of the `_flatten` method\n which can be used to implement tracking of any other types.\n\n All `tf.Module` classes have an associated `tf.name_scope` which can be used\n to group operations in TensorBoard and create hierarchies for variable names\n which can help with debugging. We suggest using the name scope when creating\n nested submodules/parameters or for forward methods whose graph you might want\n to inspect in TensorBoard. You can enter the name scope explicitly using\n `with self.name_scope:` or you can annotate methods (apart from `__init__`)\n with `@tf.Module.with_name_scope`.\n\n ```python\n >>> class MLP(tf.Module):\n ... def __init__(self, input_size, sizes, name=None):\n ... super(MLP, self).__init__(name=name)\n ... self.layers = []\n ... with self.name_scope:\n ... for size in sizes:\n ... self.layers.append(Dense(input_size=input_size, output_size=size))\n ... input_size = size\n ...\n ... @tf.Module.with_name_scope\n ... def __call__(self, x):\n ... for layer in self.layers:\n ... 
x = layer(x)\n ... return x\n ```\n \"\"\"\n\n def __init__(self, name=None):\n if name is None:\n name = camel_to_snake(type(self).__name__)\n else:\n if not valid_identifier(name):\n raise ValueError(\n \"%r is not a valid module name. Module names must be valid Python \"\n \"identifiers (e.g. a valid class name).\" % name)\n\n self._name = name\n with ops.name_scope(name) as scope_name:\n self._scope_name = scope_name\n\n @property\n def name(self):\n \"\"\"Returns the name of this module as passed or determined in the ctor.\n\n NOTE: This is not the same as the `self.name_scope.name` which includes\n parent module names.\n \"\"\"\n return self._name\n\n @property\n def name_scope(self):\n \"\"\"Returns a `tf.name_scope` instance for this class.\"\"\"\n # TODO(tomhennigan) Memoize once name scopes are re-entrant.\n return ops.name_scope(self._scope_name)\n\n @property\n def variables(self):\n \"\"\"Sequence of variables owned by this module and it's submodules.\n\n Note: this method uses reflection to find variables on the current instance\n and submodules. For performance reasons you may wish to cache the result\n of calling this method if you don't expect the return value to change.\n\n Returns:\n A sequence of variables for the current module (sorted by attribute\n name) followed by variables from all submodules recursively (breadth\n first).\n \"\"\"\n return tuple(self._flatten(predicate=_is_variable_like))\n\n @property\n def trainable_variables(self):\n \"\"\"Sequence of variables owned by this module and it's submodules.\n\n Note: this method uses reflection to find variables on the current instance\n and submodules. For performance reasons you may wish to cache the result\n of calling this method if you don't expect the return value to change.\n\n Returns:\n A sequence of variables for the current module (sorted by attribute\n name) followed by variables from all submodules recursively (breadth\n first).\n \"\"\"\n return tuple(self._flatten(predicate=_is_trainable_variable))\n\n @property\n def submodules(self):\n \"\"\"Sequence of all sub-modules.\n\n Submodules are modules which are properties of this module, or found as\n properties of modules which are properties of this module (and so on).\n\n >>> a = tf.Module()\n >>> b = tf.Module()\n >>> c = tf.Module()\n >>> a.b = b\n >>> b.c = c\n >>> assert list(a.submodules) == [b, c]\n >>> assert list(b.submodules) == [c]\n >>> assert list(c.submodules) == []\n\n Returns:\n A sequence of all submodules.\n \"\"\"\n return tuple(self._flatten(predicate=_is_module))\n\n def _flatten(self,\n recursive=True,\n predicate=None,\n attribute_traversal_key=None,\n with_path=False):\n \"\"\"Flattened attribute values in sorted order by attribute name.\n\n Modules are flattened by first walking their attributes in name order.\n Each attribute value is then flattened to find leaf values. If flatten is\n to be applied `recursive`ly then if the leaf is a `Module` it will also be\n flattened to find leaves. Finally every leaf value is optionally tested\n against the given `predicate` and finally yielded.\n\n >>> class Foo(tf.Module):\n ... def __init__(self):\n ... super(Foo, self).__init__()\n ... self.x = [tf.constant('a'), tf.constant('b')]\n ... self.y = {'i': tf.constant('c'), 'j': tf.constant('d')}\n ... self.z = tf.constant('e')\n ...\n ... @property\n ... def tensors(self):\n ... 
return tuple(self._flatten(predicate=is_tensor, with_path=True))\n\n >>> foo = Foo()\n >>> foo.tensors\n ((('x', 0), <tf.Tensor: ...'a'>),\n (('x', 1), <tf.Tensor: ...'b'>),\n (('y', 'i'), <tf.Tensor: ...'c'>),\n (('y', 'j'), <tf.Tensor: ...'d'>),\n (('z',), <tf.Tensor: ...'e'>))\n\n `attribute_traversal_key` controls the order object properties are visited.\n If not set objects are visited in ascending order by name.\n\n Args:\n recursive: Whether to recurse into child modules or not.\n predicate: (Optional) If set then only values matching predicate are\n yielded. A value of `None` (the default) means no items will be\n filtered.\n attribute_traversal_key: (Optional) Method to rekey object attributes\n before they are sorted. Contract is the same as `key` argument to\n builtin `sorted` and only applies to object properties.\n with_path: (Optional) Whether to include the path to the object as well\n as the object itself. If `with_path` is `True` then leaves will not be\n de-duplicated (e.g. if the same leaf instance is reachable via multiple\n modules then it will be yielded multiple times with different paths).\n\n Returns:\n Flat generator for leaves of the current module and optionally all\n submodules.\n \"\"\"\n if predicate is None:\n predicate = lambda _: True\n\n return _flatten_module(\n self,\n recursive=recursive,\n predicate=predicate,\n attribute_traversal_key=attribute_traversal_key,\n with_path=with_path)\n\n @classmethod\n def with_name_scope(cls, method):\n \"\"\"Decorator to automatically enter the module name scope.\n\n >>> class MyModule(tf.Module):\n ... @tf.Module.with_name_scope\n ... def __call__(self, x):\n ... if not hasattr(self, 'w'):\n ... self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))\n ... return tf.matmul(x, self.w)\n\n Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose\n names included the module name:\n\n >>> mod = MyModule()\n >>> mod(tf.ones([8, 32]))\n <tf.Tensor: ...>\n >>> mod.w\n <tf.Variable ...'my_module/w:0'>\n\n Args:\n method: The method to wrap.\n\n Returns:\n The original method wrapped such that it enters the module's name scope.\n \"\"\"\n def method_with_name_scope(self, *args, **kwargs):\n with self.name_scope:\n return method(self, *args, **kwargs)\n\n return tf_decorator.make_decorator(method, method_with_name_scope)\n\n\ndef _is_variable_like(obj):\n return (isinstance(obj, variables.Variable) or\n resource_variable_ops.is_resource_variable(obj))\n\n\ndef _is_trainable_variable(obj):\n return _is_variable_like(obj) and getattr(obj, \"trainable\", False)\n\n\ndef _is_module(obj):\n return isinstance(obj, Module)\n\n_CAMEL_TO_SNAKE_R = re.compile(r\"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))\")\n_VALID_IDENTIFIER = re.compile(r\"^[a-zA-Z_]([a-zA-Z0-9_])*$\")\n\n\ndef valid_identifier(name):\n return bool(_VALID_IDENTIFIER.match(name))\n\n\ndef camel_to_snake(value):\n return _CAMEL_TO_SNAKE_R.sub(r\"_\\1\", value).lower()\n\n\n# AutoTrackable adds object attributes that users will not expect us to\n# include when flattening (these reference dependencies reachable via other\n# object attributes).\nAUTO_CHECKPOINTABLE_ATTRS = (\"_unconditional_checkpoint_dependencies\",\n \"_unconditional_dependency_names\")\n\n\ndef _flatten_module(module,\n recursive,\n predicate,\n attribute_traversal_key,\n with_path,\n module_path=(),\n seen=None):\n \"\"\"Implementation of `flatten`.\"\"\"\n if seen is None:\n seen = set([id(module)])\n\n module_dict = vars(module)\n submodules = []\n\n for key in sorted(module_dict, 
key=attribute_traversal_key):\n if key in AUTO_CHECKPOINTABLE_ATTRS:\n continue\n\n for leaf_path, leaf in nest.flatten_with_tuple_paths(module_dict[key]):\n leaf_path = (key,) + leaf_path\n\n # TODO(tomhennigan) Handle cycles for `with_path=True` (e.g. `a.a = a`).\n if not with_path:\n leaf_id = id(leaf)\n if leaf_id in seen:\n continue\n seen.add(leaf_id)\n\n if predicate(leaf):\n if with_path:\n yield module_path + leaf_path, leaf\n else:\n yield leaf\n\n if recursive and _is_module(leaf):\n # Walk direct properties first then recurse.\n submodules.append((module_path + leaf_path, leaf))\n\n for submodule_path, submodule in submodules:\n subvalues = _flatten_module(\n submodule,\n recursive=recursive,\n predicate=predicate,\n attribute_traversal_key=attribute_traversal_key,\n with_path=with_path,\n module_path=submodule_path,\n seen=seen)\n\n for subvalue in subvalues:\n # Predicate is already tested for these values.\n yield subvalue\n"
] | [
[
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.util.tf_decorator.make_decorator",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.util.nest.flatten_with_tuple_paths"
]
] |
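The `tf.Module` record above builds its `variables`, `trainable_variables` and `submodules` properties on the `_flatten` walk over object attributes. A short usage sketch follows, assuming a TensorFlow 2.x install; the `Dense` class mirrors the docstring example (with consistent argument names) and the layer sizes are arbitrary.

```python
import tensorflow as tf

class Dense(tf.Module):
    def __init__(self, in_features, out_features, name=None):
        super().__init__(name=name)
        self.w = tf.Variable(tf.random.normal([in_features, out_features]), name="w")
        self.b = tf.Variable(tf.zeros([out_features]), name="b")

    @tf.Module.with_name_scope        # ops created in __call__ get the module-name prefix
    def __call__(self, x):
        return tf.nn.relu(tf.matmul(x, self.w) + self.b)

d = Dense(in_features=8, out_features=4)
_ = d(tf.ones([2, 8]))

# Reflection-based properties defined in the record above:
print([v.name for v in d.variables])   # ['b:0', 'w:0'] -- sorted by attribute name
print(len(d.trainable_variables))      # 2
print(d.submodules)                    # () -- this Dense holds no nested tf.Module
```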
pikerbright/DeblurGAN | [
"39e8a4b408b90d0ef98608c5c4562eae3e184251"
] | [
"motion_blur/blur_image.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nfrom scipy import signal\nfrom scipy import misc\nfrom motion_blur.generate_PSF import PSF\nfrom motion_blur.generate_trajectory import Trajectory\n\n\nclass BlurImage(object):\n\n def __init__(self, image_path, PSFs=None, part=None, path__to_save=None):\n \"\"\"\n\n :param image_path: path to square, RGB image.\n :param PSFs: array of Kernels.\n :param part: int number of kernel to use.\n :param path__to_save: folder to save results.\n \"\"\"\n if os.path.isfile(image_path):\n self.image_path = image_path\n self.original = misc.imread(self.image_path)\n self.shape = self.original.shape\n if len(self.shape) < 3:\n raise Exception('We support only RGB images yet.')\n elif self.shape[0] != self.shape[1]:\n raise Exception('We support only square images yet.')\n else:\n raise Exception('Not correct path to image.')\n self.path_to_save = path__to_save\n if PSFs is None:\n if self.path_to_save is None:\n self.PSFs = PSF(canvas=self.shape[0]).fit()\n else:\n self.PSFs = PSF(canvas=self.shape[0], path_to_save=os.path.join(self.path_to_save,\n 'PSFs.png')).fit(save=True)\n else:\n self.PSFs = PSFs\n\n self.part = part\n self.result = []\n\n def blur_image(self, save=False, show=False):\n if self.part is None:\n psf = self.PSFs\n else:\n psf = [self.PSFs[self.part]]\n yN, xN, channel = self.shape\n key, kex = self.PSFs[0].shape\n delta = yN - key\n assert delta >= 0, 'resolution of image should be higher than kernel'\n result=[]\n if len(psf) > 1:\n for p in psf:\n tmp = np.pad(p, delta // 2, 'constant')\n cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n # blured = np.zeros(self.shape)\n blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_32F)\n blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))\n blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))\n blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))\n blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)\n result.append(np.abs(blured))\n else:\n psf = psf[0]\n tmp = np.pad(psf, delta // 2, 'constant')\n cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n blured = cv2.normalize(self.original, self.original, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_32F)\n blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))\n blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))\n blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))\n blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)\n result.append(np.abs(blured))\n self.result = result\n if show or save:\n self.__plot_canvas(show, save)\n\n def __plot_canvas(self, show, save):\n if len(self.result) == 0:\n raise Exception('Please run blur_image() method first.')\n else:\n plt.close()\n plt.axis('off')\n fig, axes = plt.subplots(1, len(self.result), figsize=(10, 10))\n if len(self.result) > 1:\n for i in range(len(self.result)):\n axes[i].imshow(self.result[i])\n else:\n plt.axis('off')\n\n plt.imshow(self.result[0])\n if show and save:\n if self.path_to_save is None:\n raise Exception('Please create Trajectory instance with path_to_save')\n 
cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)\n plt.show()\n elif save:\n if self.path_to_save is None:\n raise Exception('Please create Trajectory instance with path_to_save')\n cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]), self.result[0] * 255)\n elif show:\n plt.show()\n\n\nif __name__ == '__main__':\n folder = '/Users/mykolam/PycharmProjects/University/DeblurGAN2/results_sharp'\n folder_to_save = '/Users/mykolam/PycharmProjects/University/DeblurGAN2/blured'\n params = [0.01, 0.009, 0.008, 0.007, 0.005, 0.003]\n for path in os.listdir(folder):\n print(path)\n trajectory = Trajectory(canvas=64, max_len=60, expl=np.random.choice(params)).fit()\n psf = PSF(canvas=64, trajectory=trajectory).fit()\n BlurImage(os.path.join(folder, path), PSFs=psf,\n path__to_save=folder_to_save, part=np.random.choice([1, 2, 3])).\\\n blur_image(save=True)\n"
] | [
[
"scipy.signal.fftconvolve",
"matplotlib.pyplot.axis",
"numpy.abs",
"numpy.random.choice",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"numpy.pad",
"scipy.misc.imread"
]
] |
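`BlurImage.blur_image` in the record above pads each PSF kernel toward the image size, normalises it, and convolves every colour channel with `scipy.signal.fftconvolve(..., 'same')`. The sketch below isolates that per-channel step on synthetic data; the random image and box kernel are placeholders for the image file and the `PSF` kernels used in the repo.

```python
import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
image = rng.random((64, 64, 3))      # stand-in for the square RGB image read from disk
kernel = np.ones((5, 5)) / 25.0      # stand-in for a normalised PSF kernel

# Pad the kernel toward the image size, as blur_image does with np.pad
delta = image.shape[0] - kernel.shape[0]
kernel = np.pad(kernel, delta // 2, 'constant')

blurred = np.empty_like(image)
for c in range(3):                   # convolve each colour channel separately
    blurred[:, :, c] = signal.fftconvolve(image[:, :, c], kernel, mode='same')

print(blurred.shape)
```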
vibhatha/PSGDSVMPY | [
"69ed88f5db8d9a250ee944f44b88e54351f8696f"
] | [
"examples/SvmSgd.py"
] | [
"import numpy as np\n\nX = np.array([\n [-2,4,-1],\n [4,1,-1],\n [1, 6, -1],\n [2, 4, -1],\n [6, 2, -1],\n\n])\n\ny = np.array([-1,-1,1,1,1])\n\ndef svm_sgd(X, Y):\n\n w = np.zeros(len(X[0]))\n eta = 1\n epochs = 100000\n\n for epoch in range(1,epochs):\n for i, x in enumerate(X):\n if (Y[i]*np.dot(X[i], w)) < 1:\n w = w + eta * ( (X[i] * Y[i]) + (-2 *(1/epoch)* w) )\n else:\n w = w + eta * (-2 *(1/epoch)* w)\n\n return w\n\nw = svm_sgd(X,y)\nprint(w)\n"
] | [
[
"numpy.array",
"numpy.dot"
]
] |
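The `svm_sgd` record above is the subgradient step for a regularised hinge loss: a sample that violates the margin (`y_i * <x_i, w> < 1`) contributes both the data term and the shrinking `2/epoch` regulariser, otherwise only the regulariser is applied. The sketch below keeps the same toy data (bias folded in as a constant `-1` feature) and the same update, and adds a prediction step; the epoch count mirrors the record.

```python
import numpy as np

X = np.array([[-2, 4, -1], [4, 1, -1], [1, 6, -1], [2, 4, -1], [6, 2, -1]], dtype=float)
y = np.array([-1, -1, 1, 1, 1])

def svm_sgd(X, Y, eta=1.0, epochs=100000):
    w = np.zeros(X.shape[1])
    for epoch in range(1, epochs):
        lam = 2.0 / epoch                    # decaying regularisation strength
        for xi, yi in zip(X, Y):
            if yi * np.dot(xi, w) < 1:       # margin violated: hinge term + regulariser
                w += eta * (yi * xi - lam * w)
            else:                            # margin satisfied: regulariser only
                w += eta * (-lam * w)
    return w

w = svm_sgd(X, y)
print("weights:", w)
print("predictions:", np.sign(X @ w))        # compare against y on this separable toy set
```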
SudeepSarkar/equilibrium-propagation | [
"ba6d9ee5426445e9ad91c96c816fa5287ff97258"
] | [
"run_energy_model_mnist.py"
] | [
"# MIT License\n\n# Copyright (c) 2020 Simon Schug, João Sacramento\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport argparse\nimport json\nimport logging\nimport sys\n\nimport torch\n\nfrom lib import config, data, energy, train, utils\n\n\ndef load_default_config(energy):\n \"\"\"\n Load default parameter configuration from file.\n\n Args:\n tasks: String with the energy name\n\n Returns:\n Dictionary of default parameters for the given energy\n \"\"\"\n if energy == \"restr_hopfield\":\n default_config = \"etc/energy_restr_hopfield.json\"\n elif energy == \"cond_gaussian\":\n default_config = \"etc/energy_cond_gaussian.json\"\n else:\n raise ValueError(\"Energy based model \\\"{}\\\" not defined.\".format(energy))\n\n with open(default_config) as config_json_file:\n cfg = json.load(config_json_file)\n\n return cfg\n\n\ndef parse_shell_args(args):\n \"\"\"\n Parse shell arguments for this script.\n\n Args:\n args: List of shell arguments\n\n Returns:\n Dictionary of shell arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Train an energy-based model on MNIST using Equilibrium Propagation.\"\n )\n\n parser.add_argument(\"--batch_size\", type=int, default=argparse.SUPPRESS,\n help=\"Size of mini batches during training.\")\n parser.add_argument(\"--c_energy\", choices=[\"cross_entropy\", \"squared_error\"],\n default=argparse.SUPPRESS, help=\"Supervised learning cost function.\")\n parser.add_argument(\"--dimensions\", type=int, nargs=\"+\",\n default=argparse.SUPPRESS, help=\"Dimensions of the neural network.\")\n parser.add_argument(\"--energy\", choices=[\"cond_gaussian\", \"restr_hopfield\"],\n default=\"cond_gaussian\", help=\"Type of energy-based model.\")\n parser.add_argument(\"--epochs\", type=int, default=argparse.SUPPRESS,\n help=\"Number of epochs to train.\")\n parser.add_argument(\"--fast_ff_init\", action='store_true', default=argparse.SUPPRESS,\n help=\"Flag to enable fast feedforward initialization.\")\n parser.add_argument(\"--learning_rate\", type=float, default=argparse.SUPPRESS,\n help=\"Learning rate of the optimizer.\")\n parser.add_argument(\"--log_dir\", type=str, default=\"\",\n help=\"Subdirectory within ./log/ to store logs.\")\n parser.add_argument(\"--nonlinearity\", choices=[\"leaky_relu\", \"relu\", \"sigmoid\", \"tanh\"],\n default=argparse.SUPPRESS, help=\"Nonlinearity between network layers.\")\n parser.add_argument(\"--optimizer\", choices=[\"adam\", \"adagrad\", \"sgd\"],\n default=argparse.SUPPRESS, help=\"Optimizer 
used to train the model.\")\n parser.add_argument(\"--seed\", type=int, default=argparse.SUPPRESS,\n help=\"Random seed for pytorch\")\n\n return vars(parser.parse_args(args))\n\n\ndef run_energy_model_mnist(cfg):\n \"\"\"\n Main script.\n\n Args:\n cfg: Dictionary defining parameters of the run\n \"\"\"\n # Initialize seed if specified (might slow down the model)\n if cfg['seed'] is not None:\n torch.manual_seed(cfg['seed'])\n\n # Create the cost function to be optimized by the model\n c_energy = utils.create_cost(cfg['c_energy'], cfg['beta'])\n\n # Create activation functions for every layer as a list\n phi = utils.create_activations(cfg['nonlinearity'], len(cfg['dimensions']))\n\n # Initialize energy based model\n if cfg[\"energy\"] == \"restr_hopfield\":\n model = energy.RestrictedHopfield(\n cfg['dimensions'], c_energy, cfg['batch_size'], phi).to(config.device)\n elif cfg[\"energy\"] == \"cond_gaussian\":\n model = energy.ConditionalGaussian(\n cfg['dimensions'], c_energy, cfg['batch_size'], phi).to(config.device)\n else:\n raise ValueError(f'Energy based model \\\"{cfg[\"energy\"]}\\\" not defined.')\n\n # Define optimizer (may include l2 regularization via weight_decay)\n w_optimizer = utils.create_optimizer(model, cfg['optimizer'], lr=cfg['learning_rate'])\n\n # Create torch data loaders with the MNIST data set\n mnist_train, mnist_test = data.create_mnist_loaders(cfg['batch_size'])\n\n logging.info(\"Start training with parametrization:\\n{}\".format(\n json.dumps(cfg, indent=4, sort_keys=True)))\n\n for epoch in range(1, cfg['epochs'] + 1):\n # Training\n train.train(model, mnist_train, cfg['dynamics'], w_optimizer, cfg[\"fast_ff_init\"])\n\n # Testing\n test_acc, test_energy = train.test(model, mnist_test, cfg['dynamics'], cfg[\"fast_ff_init\"])\n\n # Logging\n logging.info(\n \"epoch: {} \\t test_acc: {:.4f} \\t mean_E: {:.4f}\".format(\n epoch, test_acc, test_energy)\n )\n\n\nif __name__ == '__main__':\n # Parse shell arguments as input configuration\n user_config = parse_shell_args(sys.argv[1:])\n\n # Load default parameter configuration from file for the specified energy-based model\n cfg = load_default_config(user_config[\"energy\"])\n\n # Overwrite default parameters with user configuration where applicable\n cfg.update(user_config)\n\n # Setup global logger and logging directory\n config.setup_logging(cfg[\"energy\"] + \"_\" + cfg[\"c_energy\"] + \"_\" + cfg[\"dataset\"],\n dir=cfg['log_dir'])\n\n # Run the script using the created paramter configuration\n run_energy_model_mnist(cfg)\n"
] | [
[
"torch.manual_seed"
]
] |
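The MNIST runner above relies on `argparse.SUPPRESS` defaults so that `cfg.update(user_config)` only overrides the JSON defaults for flags the user actually passed. Here is a minimal self-contained sketch of that pattern; the flag names and the `defaults` dict are illustrative, not the repo's `etc/*.json` configuration.

```python
import argparse

def parse_shell_args(args):
    parser = argparse.ArgumentParser(description="Override defaults only when a flag is given.")
    # With default=argparse.SUPPRESS the attribute is simply absent unless the user sets it.
    parser.add_argument("--batch_size", type=int, default=argparse.SUPPRESS)
    parser.add_argument("--learning_rate", type=float, default=argparse.SUPPRESS)
    return vars(parser.parse_args(args))

defaults = {"batch_size": 64, "learning_rate": 1e-3, "epochs": 10}

cfg = dict(defaults)
cfg.update(parse_shell_args(["--learning_rate", "0.01"]))
print(cfg)   # {'batch_size': 64, 'learning_rate': 0.01, 'epochs': 10}
```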
rgsl888prabhu/cugraph | [
"e030a2fe22ad308fba05d6146765a3c9aa865e5b"
] | [
"python/cugraph/tests/test_triangle_count.py"
] | [
"# Copyright (c) 2019, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\nfrom itertools import product\n\nimport pytest\nfrom scipy.io import mmread\n\nimport cudf\nimport cugraph\nfrom librmm_cffi import librmm as rmm\nfrom librmm_cffi import librmm_config as rmm_cfg\n\n# Temporarily suppress warnings till networkX fixes deprecation warnings\n# (Using or importing the ABCs from 'collections' instead of from\n# 'collections.abc' is deprecated, and in 3.8 it will stop working) for\n# python 3.7. Also, this import networkx needs to be relocated in the\n# third-party group once this gets fixed.\nimport warnings\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n import networkx as nx\n\n\ndef read_mtx_file(mm_file):\n print('Reading ' + str(mm_file) + '...')\n return mmread(mm_file).asfptype()\n\n\ndef cugraph_call(M, edgevals=False):\n M = M.tocoo()\n rows = cudf.Series(M.row)\n cols = cudf.Series(M.col)\n if edgevals is False:\n values = None\n else:\n values = cudf.Series(M.data)\n G = cugraph.Graph()\n G.add_edge_list(rows, cols, values)\n return cugraph.triangles(G)\n\n\ndef networkx_call(M):\n Gnx = nx.Graph(M)\n dic = nx.triangles(Gnx)\n count = 0\n for i in range(len(dic)):\n count += dic[i]\n return count\n\n\nDATASETS = ['../datasets/dolphins.mtx',\n '../datasets/karate.mtx',\n '../datasets/netscience.mtx']\n\n\n# Test all combinations of default/managed and pooled/non-pooled allocation\[email protected]('managed, pool',\n list(product([False, True], [False, True])))\[email protected]('graph_file', DATASETS)\ndef test_triangles(managed, pool, graph_file):\n gc.collect()\n\n rmm.finalize()\n rmm_cfg.use_managed_memory = managed\n rmm_cfg.use_pool_allocator = pool\n rmm.initialize()\n\n assert(rmm.is_initialized())\n\n M = read_mtx_file(graph_file)\n cu_count = cugraph_call(M)\n nx_count = networkx_call(M)\n assert cu_count == nx_count\n\n\n# Test all combinations of default/managed and pooled/non-pooled allocation\[email protected]('managed, pool',\n list(product([False, True], [False, True])))\[email protected]('graph_file', DATASETS)\ndef test_triangles_edge_vals(managed, pool, graph_file):\n gc.collect()\n\n rmm.finalize()\n rmm_cfg.use_managed_memory = managed\n rmm_cfg.use_pool_allocator = pool\n rmm.initialize()\n\n assert(rmm.is_initialized())\n\n M = read_mtx_file(graph_file)\n cu_count = cugraph_call(M, edgevals=True)\n nx_count = networkx_call(M)\n assert cu_count == nx_count\n"
] | [
[
"scipy.io.mmread"
]
] |
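The test above checks `cugraph.triangles` against a NetworkX reference that sums `nx.triangles` over all vertices, so each triangle is counted once per corner, i.e. three times. A small sketch of that reference computation, assuming only `networkx` is installed; the 4-clique is illustrative and not one of the `../datasets/*.mtx` files.

```python
import networkx as nx

G = nx.complete_graph(4)          # K4 contains exactly 4 distinct triangles

per_node = nx.triangles(G)        # {node: number of triangles through that node}
summed = sum(per_node.values())   # counts every triangle once per corner, i.e. 3x

print(per_node)                   # {0: 3, 1: 3, 2: 3, 3: 3}
print(summed, summed // 3)        # 12 corner-counted, 4 distinct triangles
```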
tobias-liaudat/wf-psf | [
"0ff1a12d06c46bd8599061d227785393fb528d76"
] | [
"wf_psf/SimPSFToolkit.py"
] | [
"import numpy as np\nimport scipy.signal as spsig\nimport scipy.interpolate as sinterp\nimport PIL\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\ntry:\n from cv2 import resize, INTER_AREA\nexcept:\n print('Problem importing opencv..')\n try:\n from skimage.transform import downscale_local_mean\n print('Falling back to skimage.')\n print('Only integer downsampling allowed with this method.')\n except:\n print('Problem importing skimage..')\n\n\nclass SimPSFToolkit(object):\n \"\"\"Simulate PSFs.\n\n In the future the zernike maps could be created with galsim or some other\n alternative.\n\n Parameters\n ----------\n zernike_maps: list of np.ndarray\n Each element of the list should contain a Zernike map of the order\n (OSA/ANSI index convention) corresponding to the position in the list.\n max_order: int\n Maximum Zernike polynomial order. Default is `45`.\n max_wfe_rms: float\n Maximum allowed WFE in RMS. Used forvnormalization. Units in [\\mu m].\n Default is ``0.1``.\n output_dim: int\n Output dimension of the square PSF stamp. Default is `64`.\n rand_seed: int\n Random seed to be used to generate random zernike values.\n Default is `None`.\n plot_opt: bool\n Option to plot some characteristics of the PSF generated.\n Default is `False`.\n oversampling_rate: float\n Oversampling rate for the wavefront PSF simulation. Default is `2.14`\n that is the minumum number required by Euclid so that there is no\n aliasing at any wavelength in the pass band [0.55um, 0.9um].\n output_Q: float\n Downsampling rate to match the specified telescope's sampling. The value\n of `output_Q` should be equal to `oversampling_rate` in order to have\n the right pixel sampling corresponding to the telescope characteristics\n `pix_sampling`, `tel_diameter`, `tel_focal_length`. The final\n oversampling obtained is `oversampling_rate/output_Q`.\n Default is `1`, so the output psf will be super-resolved by a factor of\n `oversampling_rate`.\n pix_sampling: float\n Pixel sampling in [um]. Default is `12`[um] (Euclid-like).\n tel_diameter: float\n Telescope's main mirror diameter in [m]. Default is `1.2`[m]\n (Euclid-like).\n tel_focal_length: float\n Telescope's focal length in [m]. Default is `24.5`[m] (Euclid-like).\n pupil_diameter: int\n Pupil diameter in pixels. Number of samples of the wavefront in the\n pupil plane. More specifically, the Optical Path Differences map.\n Default is `1024` [pix].\n euclid_obsc: bool\n Wheter to use Euclid-like obscurations. Defualt is `True`.\n LP_filter_length: int\n Length of one dimension of the Low-Pass (LP) filter to apply to the\n obscurations to avoid the aliasing effect. The filter is a simple\n top-hat filter. Default is `3`.\n verbose: int\n Self-explanatory variable. 
Default is `0`, use a value `>0` to activate.\n\n \"\"\"\n\n def __init__(\n self,\n zernike_maps,\n max_order=45,\n max_wfe_rms=0.1,\n output_dim=64,\n rand_seed=None,\n plot_opt=False,\n oversampling_rate=3.,\n output_Q=1,\n pix_sampling=12,\n tel_diameter=1.2,\n tel_focal_length=24.5,\n pupil_diameter=1024,\n euclid_obsc=True,\n LP_filter_length=3,\n verbose=0\n ):\n # Input attributes\n self.max_order = max_order\n self.rand_seed = rand_seed\n self.plot_opt = plot_opt\n self.zernike_maps = zernike_maps\n self.max_wfe_rms = max_wfe_rms # In [um]\n self.output_dim = output_dim # In pixels per dimension\n self.verbose = verbose\n\n # Telescope characteristics\n self.oversampling_rate = oversampling_rate # dimensionless\n self.output_Q = output_Q # dimensionless\n self.pix_sampling = pix_sampling # In [um]\n self.tel_diameter = tel_diameter # In [m]\n self.tel_focal_length = tel_focal_length # In [m]\n self.pupil_diameter = pupil_diameter # In [pix]\n\n # Class attributes\n self.z_coeffs = None\n self.psf = None\n self.opd = None\n self.phase = None\n self.lambda_obs = None\n self.poly_psf = None\n\n # Generate pupil mask\n self.pupil_mask = ~np.isnan(self.zernike_maps[0])\n\n # Generate obscurations\n if euclid_obsc:\n self.obscurations = self.generate_pupil_obscurations(\n N_pix=pupil_diameter, N_filter=LP_filter_length\n )\n else:\n self.obscurations = np.ones((pupil_diameter, pupil_diameter))\n\n @staticmethod\n def _OLD_fft_diffraction_op(wf, pupil_mask, pad_factor=2, match_shapes=True):\n \"\"\" Perform a fft-based diffraction.\n\n Parameters\n ----------\n wf: np.ndarray\n A complex 2D array that corresponds to the wavefront function.\n pupil_mask: np.ndarray of bools\n A 2D boolean mask that corresponds to the pupil function.\n\n\n Returns\n -------\n psf: np.ndarray\n A real 2D array corresponding to the PSF.\n\n \"\"\"\n start = (wf.shape[0] * pad_factor) // 2 - wf.shape[0] // 2\n stop = (wf.shape[0] * pad_factor) // 2 + wf.shape[0] // 2\n\n padded_wf = np.zeros((wf.shape[0] * pad_factor, wf.shape[1] * pad_factor),\n dtype=np.complex128)\n\n padded_wf[start:stop, start:stop][pupil_mask] = wf[pupil_mask]\n\n fft_wf = np.fft.fftshift(np.fft.fft2(padded_wf))\n # fft_wf = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(padded_wf)))\n\n psf = np.abs(fft_wf)**2\n\n if match_shapes:\n # Return the psf with its original shape without the padding factor\n x_dif = int((psf.shape[0] / pad_factor) // 2)\n y_dif = int((psf.shape[1] / pad_factor) // 2)\n\n return psf[x_dif:psf.shape[0] - x_dif, y_dif:psf.shape[1] - y_dif]\n else:\n return psf\n\n @staticmethod\n def fft_diffract(wf, output_Q, output_dim=64):\n # Perform the FFT-based diffraction operation\n fft_wf = np.fft.fftshift(np.fft.fft2(wf))\n psf = np.abs(fft_wf)**2\n\n # Calculate crop dimensions\n if output_dim * output_Q < psf.shape[0]:\n start = int(psf.shape[0] // 2 - (output_dim * output_Q) // 2)\n stop = int(psf.shape[0] // 2 + (output_dim * output_Q) // 2)\n else:\n start = int(0)\n stop = psf.shape[0]\n\n # Crop psf\n psf = psf[start:stop, start:stop]\n\n # Downsample the image depending on `self.output_Q`\n try:\n psf = resize(\n src=psf, dsize=(int(output_dim), int(output_dim)), interpolation=INTER_AREA\n )\n except:\n f_x = int(psf.shape[0] / output_dim)\n f_y = int(psf.shape[1] / output_dim)\n psf = downscale_local_mean(\n image=psf,\n factors=(f_x, f_y),\n )\n\n return psf\n\n @staticmethod\n def generate_pupil_obscurations(N_pix=1024, N_filter=3):\n \"\"\"Generate Euclid like pupil obscurations.\n\n Simple procedure 
considering only the 2D plane.\n No 3D projections wrt the angle of the FoV is done.\n\n Parameters\n ----------\n N_pix: int\n Total number of pixels\n N_filter: int\n Length of the low-pass filter [pixels]\n\n \"\"\"\n # Telescope parameters\n AS_diam = 1200 # Aperture stop diameter [mm]\n M1_diam = 395 # Mirror 1 cap stopper diameter [mm]\n\n sp_lenght = 700 # Spider length [mm]\n sp_width = 12 # Spider width [mm]\n\n AS_centre = [0, 0]\n M1_centre = [0, 51]\n\n sp1_angle = 106.78 - 90 # [degrees]\n sp2_angle = 50.11 - 90 # [degrees]\n sp3_angle = -10.76 - 90 # [degrees]\n\n sp1_x_pos = 260 # [mm]\n sp1_y_pos = 240 # [mm]\n sp2_x_pos = -330 # [mm]\n sp2_y_pos = 130 # [mm]\n sp3_x_pos = 70 # [mm]\n sp3_y_pos = -330 # [mm]\n\n # Build pupil plane\n pupil_plane = np.ones((N_pix, N_pix))\n\n # coordinates of map in [mm]\n W, H = np.meshgrid(\n np.linspace(-AS_diam // 2, AS_diam // 2, N_pix),\n np.linspace(-AS_diam // 2, AS_diam // 2, N_pix)\n )\n\n ### Calculate the Aperture stop and draw it ###\n aperture_stop_mask = np.sqrt((W - AS_centre[0])**2 + (H - AS_centre[1])**2) <= (AS_diam / 2)\n pupil_plane[~aperture_stop_mask] = 0\n\n ### Calculate the M1/M2 obscurations and draw them ###\n M1_mask = np.sqrt((W - M1_centre[0])**2 + (H - M1_centre[1])**2) <= (M1_diam / 2)\n pupil_plane[M1_mask] = 0\n\n ### Calculate the spiders and draw them ###\n\n # Spider 1\n sp1_a = np.tan(sp1_angle * (np.pi / 180))\n sp1_b = sp1_y_pos - sp1_a * sp1_x_pos\n\n sp1_mask_1 = sp1_a * W + sp1_b - sp_width / 2 * np.sqrt(1 + sp1_a**2) < H\n sp1_mask_2 = sp1_a * W + sp1_b + sp_width / 2 * np.sqrt(1 + sp1_a**2) > H\n sp1_mask = np.logical_and(sp1_mask_1, sp1_mask_2)\n\n sp1_length_mask = np.sqrt((W - sp1_x_pos)**2 + (H - sp1_y_pos)**2) <= (sp_lenght / 2)\n sp1_mask = np.logical_and(sp1_mask, sp1_length_mask)\n\n # Spider 2\n sp2_a = np.tan(sp2_angle * (np.pi / 180))\n sp2_b = sp2_y_pos - sp2_a * sp2_x_pos\n\n sp2_mask_1 = sp2_a * W + sp2_b - sp_width / 2 * np.sqrt(1 + sp2_a**2) < H\n sp2_mask_2 = sp2_a * W + sp2_b + sp_width / 2 * np.sqrt(1 + sp2_a**2) > H\n sp2_mask = np.logical_and(sp2_mask_1, sp2_mask_2)\n\n sp2_length_mask = np.sqrt((W - sp2_x_pos)**2 + (H - sp2_y_pos)**2) <= (sp_lenght / 2)\n sp2_mask = np.logical_and(sp2_mask, sp2_length_mask)\n\n # Spider 3\n sp3_a = np.tan(sp3_angle * (np.pi / 180))\n sp3_b = sp3_y_pos - sp3_a * sp3_x_pos\n\n sp3_mask_1 = sp3_a * W + sp3_b - sp_width / 2 * np.sqrt(1 + sp3_a**2) < H\n sp3_mask_2 = sp3_a * W + sp3_b + sp_width / 2 * np.sqrt(1 + sp3_a**2) > H\n sp3_mask = np.logical_and(sp3_mask_1, sp3_mask_2)\n\n sp3_length_mask = np.sqrt((W - sp3_x_pos)**2 + (H - sp3_y_pos)**2) <= (sp_lenght / 2)\n sp3_mask = np.logical_and(sp3_mask, sp3_length_mask)\n\n # Draw the three spider arms\n pupil_plane[sp1_mask] = 0\n pupil_plane[sp2_mask] = 0\n pupil_plane[sp3_mask] = 0\n\n ### Low-pass filter the image ###\n top_hat_filter = np.ones((N_filter, N_filter))\n\n pupil_plane = spsig.convolve2d(\n pupil_plane, top_hat_filter, boundary='fill', mode='same', fillvalue=0\n )\n\n pupil_plane /= np.sum(top_hat_filter)\n\n return pupil_plane\n\n @staticmethod\n def crop_img(to_crop_img, ref_im):\n cent_x = int(to_crop_img.shape[0] // 2)\n cent_y = int(to_crop_img.shape[1] // 2)\n\n delta_x = int(ref_im.shape[0] // 2)\n delta_y = int(ref_im.shape[1] // 2)\n\n return to_crop_img[cent_x - delta_x:cent_x + delta_x, cent_y - delta_y:cent_y + delta_y]\n\n @staticmethod\n def decimate_im(input_im, decim_f):\n \"\"\"Decimate image.\n\n Decimated by a factor of decim_f.\n Based on the PIL library using 
the default interpolator.\n\n \"\"\"\n\n pil_im = PIL.Image.fromarray(input_im)\n (width, height) = (pil_im.width // decim_f, pil_im.height // decim_f)\n im_resized = pil_im.resize((width, height))\n\n return np.array(im_resized)\n\n @staticmethod\n def get_radial_idx(max_order=45):\n it = 1\n radial_idxs = []\n\n while (len(radial_idxs) <= max_order):\n for _it in range(it):\n radial_idxs.append(it - 1)\n\n it += 1\n\n return np.array(radial_idxs)\n\n @staticmethod\n def psf_plotter(psf, lambda_obs=0.000, cmap='gist_stern', save_img=False):\n fig = plt.figure(figsize=(18, 10))\n\n ax1 = fig.add_subplot(131)\n im1 = ax1.imshow(psf, cmap=cmap, interpolation='None')\n divider = make_axes_locatable(ax1)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im1, cax=cax, orientation='vertical')\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.set_title('PSF (lambda=%.3f [um])' % (lambda_obs))\n\n ax2 = fig.add_subplot(132)\n im2 = ax2.imshow(np.sqrt(abs(psf)), cmap=cmap, interpolation='None')\n divider2 = make_axes_locatable(ax2)\n cax2 = divider2.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im2, cax=cax2, orientation='vertical')\n ax2.set_title('sqrt PSF (lambda=%.3f [um])' % (lambda_obs))\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n ax3 = fig.add_subplot(133)\n im3 = ax3.imshow(np.log(abs(psf)), cmap=cmap, interpolation='None')\n divider3 = make_axes_locatable(ax3)\n cax3 = divider3.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im3, cax=cax3, orientation='vertical')\n ax3.set_title('log PSF (lambda=%.3f [um])' % (lambda_obs))\n ax3.set_xticks([])\n ax3.set_yticks([])\n\n if save_img:\n plt.savefig('./PSF_lambda_%.3f.pdf' % lambda_obs, bbox_inches='tight')\n\n plt.show()\n\n @staticmethod\n def opd_phase_plotter(pupil_mask, opd, phase, lambda_obs, cmap='viridis', save_img=False):\n fig = plt.figure(figsize=(18, 10))\n\n ax1 = fig.add_subplot(131)\n im1 = ax1.imshow(pupil_mask, interpolation='None')\n divider = make_axes_locatable(ax1)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im1, cax=cax, orientation='vertical')\n ax1.set_title('Pupil mask')\n ax1.set_xticks([])\n ax1.set_yticks([])\n\n vmax = np.max(abs(opd))\n ax2 = fig.add_subplot(132)\n im2 = ax2.imshow(opd, cmap=cmap, interpolation='None', vmin=-vmax, vmax=vmax)\n divider2 = make_axes_locatable(ax2)\n cax2 = divider2.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im2, cax=cax2, orientation='vertical')\n ax2.set_title('OPD [um]')\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n vmax = np.max(abs(np.angle(phase)))\n ax3 = fig.add_subplot(133)\n im3 = ax3.imshow(np.angle(phase), cmap=cmap, interpolation='None', vmin=-vmax, vmax=vmax)\n divider3 = make_axes_locatable(ax3)\n cax3 = divider3.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(im3, cax=cax3, orientation='vertical')\n ax3.set_title('W phase [rad](wv=%.2f[um])' % (lambda_obs))\n ax3.set_xticks([])\n ax3.set_yticks([])\n\n if save_img:\n plt.savefig('./OPD_lambda_%.3f.pdf' % lambda_obs, bbox_inches='tight')\n\n plt.show()\n\n def get_psf(self):\n if self.psf is not None:\n return self.psf\n else:\n print('No PSF has been computed yet.')\n\n def plot_psf(self, cmap='gist_stern', save_img=False):\n if self.psf is not None:\n self.psf_plotter(self.psf, self.lambda_obs, cmap, save_img)\n else:\n print('No PSF has been computed yet.')\n\n def plot_opd_phase(self, cmap='viridis', save_img=False):\n if self.opd is not None:\n self.opd_phase_plotter(\n self.pupil_mask * self.obscurations, self.opd * 
self.obscurations, self.phase,\n self.lambda_obs, cmap, save_img\n )\n else:\n print('No WF has been computed yet.')\n\n def gen_random_Z_coeffs(self, max_order=45, rand_seed=None):\n \"\"\" Generate a random set of Zernike coefficients.\n\n The coefficients are generated following a uniform law U~[-1,1]\n divided by their radial zernike index.\n Ex: u_i / r(i) (u_i is a realization of U)\n\n Parameters\n ----------\n max_order: int\n Maximum order of Zernike polynomials.\n rand_seed: int\n Seed for the random initialization.\n\n Returns\n -------\n rand_coeffs: list of floats\n List containing the random coefficients.\n\n \"\"\"\n if rand_seed is not None:\n np.random.seed(rand_seed)\n\n rad_idx = self.get_radial_idx(max_order)\n rad_idx[0] = 1\n\n z_coeffs = []\n\n for it in range(max_order):\n z_coeffs.append((np.random.rand() - 0.5) * 2. / rad_idx[it])\n\n self.z_coeffs = z_coeffs\n\n def plot_z_coeffs(self, save_img=False):\n \"\"\"Plot random Zernike coefficients.\"\"\"\n if self.z_coeffs is not None:\n fig = plt.figure(figsize=(12, 6))\n ax1 = fig.add_subplot(111)\n im1 = ax1.bar(np.arange(len(self.z_coeffs)), np.array(self.z_coeffs))\n ax1.set_xlabel('Zernike coefficients')\n ax1.set_ylabel('Magnitude')\n\n if save_img:\n plt.savefig('./Z_coeffs.pdf', bbox_inches='tight')\n\n plt.show()\n else:\n print('Random coeffs not generated.')\n\n def get_z_coeffs(self):\n \"\"\"Get random coefficients\"\"\"\n if self.z_coeffs is not None:\n return self.z_coeffs\n else:\n print('Random coeffs not generated.')\n\n def set_z_coeffs(self, z_coeffs):\n \"\"\"Set zernike coefficients.\"\"\"\n if len(z_coeffs) == self.max_order:\n self.z_coeffs = z_coeffs\n else:\n print('Zernike coefficients should be of length %d' % (self.max_order))\n\n def normalize_zernikes(self, z_coeffs=None, max_wfe_rms=None):\n \"\"\"Normalize zernike coefficients.\"\"\"\n if max_wfe_rms is None:\n max_wfe_rms = self.max_wfe_rms\n\n # Calculate normalization factor\n wfe_rms = self.calculate_wfe_rms(z_coeffs=z_coeffs)\n mult_factor = max_wfe_rms / wfe_rms\n\n # Normalize Zernike coefficients and return them\n z_coeffs = [_z * mult_factor for _z in z_coeffs]\n\n return z_coeffs\n\n def calculate_wfe_rms(self, z_coeffs=None):\n \"\"\"Calculate WFE rms from a set of zernike coefficients.\"\"\"\n if z_coeffs is None:\n if self.z_coeffs is None:\n self.gen_random_Z_coeffs(self.max_order, self.rand_seed)\n z_coeffs = self.get_z_coeffs()\n else:\n z_coeffs = self.get_z_coeffs()\n\n # Create the phase with the Zernike basis\n opd = 0\n for it in range(self.max_order):\n opd += self.zernike_maps[it] * z_coeffs[it]\n\n # Proyect obscurations on to the OPD\n opd *= self.obscurations\n\n # Calculate normalization factor\n wfe_rms = np.sqrt(np.mean((opd[self.pupil_mask] - np.mean(opd[self.pupil_mask]))**2))\n\n return wfe_rms\n\n def check_wfe_rms(self, z_coeffs=None, max_wfe_rms=None):\n \"\"\"Check if Zernike coefficients are within the maximum admitted error.\"\"\"\n\n if max_wfe_rms is None:\n max_wfe_rms = self.max_wfe_rms\n\n # Calculate normalization factor\n wfe_rms = self.calculate_wfe_rms(z_coeffs=z_coeffs)\n\n return max_wfe_rms - wfe_rms\n\n def generate_mono_PSF(self, lambda_obs=0.725, regen_sample=False, get_psf=False):\n \"\"\"Generate monochromatic PSF.\"\"\"\n if lambda_obs < 0.55 * 0.9 or lambda_obs > 0.9 * 1.1:\n print(\n 'WARNING: requested wavelength %.4f um is not in VIS passband [0.55,0.9]um' %\n (lambda_obs)\n )\n self.lambda_obs = lambda_obs\n\n # Calculate the OPD from the Zernike coefficients\n 
self.calculate_opd(regen_sample)\n\n # Apply the diffraction operator using the opd (optical path differences)\n self.diffract_phase()\n\n if get_psf is True:\n return np.copy(self.psf)\n\n def calculate_opd(self, regen_sample=False):\n \"\"\"Calculate the OPD from the Zernike coefficients.\"\"\"\n if self.z_coeffs is None or regen_sample is True:\n # Generate a random sample of coefficients\n self.gen_random_Z_coeffs(self.max_order, self.rand_seed)\n # Normalize coefficients\n z_coeffs = self.normalize_zernikes(self.get_z_coeffs(), self.max_wfe_rms)\n # Save coefficients\n self.set_z_coeffs(z_coeffs)\n # Plot Zernike coefficients\n if self.plot_opt:\n self.plot_z_coeffs()\n\n else:\n # Get the stored Zernike coefficients\n z_coeffs = self.get_z_coeffs()\n\n # Create the phase with the Zernike basis\n opd = 0\n for it in range(self.max_order):\n opd += self.zernike_maps[it] * z_coeffs[it]\n\n # Save the wavefront\n self.opd = opd\n\n def diffract_phase(self, lambda_obs=None):\n \"\"\"Diffract the phase map.\"\"\"\n if lambda_obs is None:\n if self.lambda_obs is None:\n print('WARNING: No wavelength is defined. Using default value 0.8um.')\n lambda_obs = 0.8\n else:\n lambda_obs = self.lambda_obs\n elif lambda_obs < 0.55 * 0.99 or lambda_obs > 0.9 * 1.01:\n print('WARNING: wavelength %.4f is not in VIS passband [0.55,0.9]um' % (lambda_obs))\n\n # Calculate the feasible lambda closest to lambda_obs\n possible_lambda = self.feasible_wavelength(lambda_obs)\n\n # Save wavelength\n self.lambda_obs = possible_lambda\n\n # Calculate the required N for the input lambda_obs\n possible_N = self.feasible_N(self.lambda_obs)\n\n # Generate the full phase and\n # Add zeros to the phase to have the correct fourier sampling\n start = possible_N // 2 - self.opd.shape[0] // 2\n stop = possible_N // 2 + self.opd.shape[0] // 2\n\n self.phase = np.zeros((possible_N, possible_N), dtype=np.complex128)\n self.phase[start:stop,\n start:stop][self.pupil_mask\n ] = np.exp(2j * np.pi * self.opd[self.pupil_mask] / self.lambda_obs)\n\n # Project obscurations to the phase\n self.phase[start:stop, start:stop] *= self.obscurations\n\n # FFT-diffract the phase (wavefront) and then crop to desired dimension\n self.psf = self.fft_diffract(\n wf=self.phase, output_Q=self.output_Q, output_dim=self.output_dim\n )\n\n # Normalize psf\n self.psf /= np.sum(self.psf)\n\n def feasible_N(self, lambda_obs):\n \"\"\"Calculate the feasible N for a lambda_obs diffraction.\n\n Input wavelength must be in [um].\n \"\"\"\n # Calculate the required N for the input lambda_obs\n req_N = (self.oversampling_rate * self.pupil_diameter * lambda_obs *\n self.tel_focal_length) / (\n self.tel_diameter * self.pix_sampling\n )\n # Recalculate the req_N into a possible value (a pair integer)\n possible_N = int((req_N // 2) * 2)\n\n return possible_N\n\n def feasible_wavelength(self, lambda_obs):\n \"\"\"Calculate closest fesible wavelength to target wavelength.\n\n Input wavelength must be in [um].\n \"\"\"\n # Calculate a feasible N for the input lambda_obs\n possible_N = self.feasible_N(lambda_obs)\n\n # Recalculate the corresponding the wavelength\n possible_lambda = (possible_N * self.tel_diameter * self.pix_sampling) / (\n self.pupil_diameter * self.oversampling_rate * self.tel_focal_length\n )\n\n if self.verbose > 0:\n # print(\"Requested wavelength: %.5f \\nRequired N: %.2f\"%(lambda_obs, req_N))\n print(\"Possible wavelength: %.5f \\nPossible N: %.2f\" % (possible_lambda, possible_N))\n\n return possible_lambda\n\n @staticmethod\n def 
gen_SED_interp(SED, n_bins=35, interp_kind='cubic'):\n \"\"\"Generate SED interpolator.\n\n Returns the interpolator and the wavelengths in [nm].\n \"\"\"\n wv_max = 900\n wv_min = 550\n # wvlength = np.arange(wv_min, wv_max, int((wv_max-wv_min)/n_bins))\n wvlength = np.linspace(wv_min, wv_max, num=n_bins, endpoint=True)\n\n SED_interp = sinterp.interp1d(\n SED[:, 0], SED[:, 1], kind=interp_kind, bounds_error=False, fill_value=\"extrapolate\"\n )\n\n return wvlength, SED_interp\n\n def calc_SED_wave_values(self, SED, n_bins=35):\n \"\"\"Calculate feasible wavelength and SED values.\n\n Feasable so that the padding number N is integer.\n \"\"\"\n # Generate SED interpolator and wavelength array\n wvlength, SED_interp = self.gen_SED_interp(SED, n_bins)\n\n # Convert wavelength from [nm] to [um]\n wvlength_um = wvlength / 1e3\n\n # Calculate feasible wavelengths (in [um])\n verbose = self.verbose\n self.verbose = 0\n feasible_wv = np.array([self.feasible_wavelength(_wv) for _wv in wvlength_um])\n self.verbose = verbose\n\n # Interpolate and normalize SED\n SED_norm = SED_interp(feasible_wv * 1e3) # Interpolation is done in [nm]\n SED_norm /= np.sum(SED_norm)\n\n return feasible_wv, SED_norm\n\n def generate_poly_PSF(self, SED, n_bins=35):\n \"\"\"Generate polychromatic PSF with a specific SED.\n\n The wavelength space will be the Euclid VIS instrument band:\n [550,900]nm and will be sample in ``n_bins``.\n\n \"\"\"\n # Calculate the feasible values of wavelength and the corresponding\n # SED interpolated values\n feasible_wv, SED_norm = self.calc_SED_wave_values(SED, n_bins)\n\n if self.plot_opt:\n # Plot input SEDs and interpolated SEDs\n wvlength, SED_interp = self.gen_SED_interp(SED, n_bins)\n\n fig = plt.figure(figsize=(14, 8))\n ax1 = fig.add_subplot(111)\n ax1.plot(SED[:, 0], SED[:, 1], label='Input SED')\n ax1.scatter(\n feasible_wv * 1e3, SED_interp(feasible_wv * 1e3), label='Interpolated', c='red'\n )\n ax1.set_xlabel('wavelength [nm]')\n ax1.set_ylabel('SED(wavelength)')\n ax1.set_title('SED')\n ax1.legend()\n # plt.savefig(output_path+'SED_interp_nbin_%d.pdf'%n_bins, bbox_inches='tight')\n plt.show()\n\n stacked_psf = 0\n\n # Generate the required monochromatic PSFs\n for it in range(feasible_wv.shape[0]):\n self.generate_mono_PSF(lambda_obs=feasible_wv[it])\n stacked_psf += self.get_psf() * SED_norm[it]\n\n self.poly_psf = stacked_psf\n\n return stacked_psf\n\n\n# This pythonic version of the polychromatic calculation is not working\n# The parallelisation with the class with shared variables might not be working\n# It may work if we define a @staticmethod for the diffracvtion\n# psf_cube = np.array([_sed*self.generate_mono_PSF(_wv, get_psf=True)\n# for _wv, _sed in zip(feasible_wv, SED_norm)])\n# # Sum to obtain the polychromatic PSFs\n# self.poly_psf = np.sum(np_psf_cube, axis=0)\n# return np.copy(self.poly_psf)\n"
] | [
[
"numpy.ones",
"numpy.sum",
"scipy.interpolate.interp1d",
"numpy.random.seed",
"numpy.copy",
"matplotlib.pyplot.figure",
"numpy.logical_and",
"matplotlib.pyplot.savefig",
"numpy.abs",
"numpy.random.rand",
"numpy.isnan",
"numpy.linspace",
"numpy.mean",
"numpy.zeros",
"numpy.fft.fft2",
"numpy.tan",
"numpy.array",
"numpy.exp",
"matplotlib.pyplot.show",
"scipy.signal.convolve2d",
"numpy.angle",
"numpy.sqrt"
]
] |
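Editor's note: the row closed above embeds a monochromatic PSF simulator that rasterises a pupil with obscurations, turns a Zernike-based OPD into a complex phase exp(2j*pi*OPD/lambda), FFT-diffracts it and normalises the result. The snippet below is a minimal, self-contained sketch of that pipeline only; the circular pupil, the toy defocus OPD, the array size N and the plain |FFT|^2 diffraction step are illustrative assumptions, not the toolkit's own fft_diffract or obscuration code.

# Minimal sketch (assumptions: flat circular pupil, a single toy defocus term
# as the OPD, and a plain |FFT|^2 as the diffraction operator).
import numpy as np

N = 256
lambda_obs = 0.725                       # wavelength [um], as in generate_mono_PSF above
y, x = np.mgrid[-N // 2:N // 2, -N // 2:N // 2]
r = np.sqrt(x**2 + y**2) / (N // 4)      # normalised radius; pupil fills half the array
pupil_mask = r <= 1.0

opd = 0.05 * (2 * r**2 - 1) * pupil_mask  # toy defocus OPD [um] (assumption)

phase = np.zeros((N, N), dtype=np.complex128)
phase[pupil_mask] = np.exp(2j * np.pi * opd[pupil_mask] / lambda_obs)

psf = np.abs(np.fft.fftshift(np.fft.fft2(phase)))**2
psf /= np.sum(psf)                        # normalise the PSF, as the class does

The zero padding, feasible-N/feasible-wavelength bookkeeping and output cropping of the original class are deliberately omitted here.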
binfen/FBDD | [
"26c859a2dbe3d308711898ef1d149a5f8c49055f"
] | [
"utils/postprocess.py"
] | [
"import time\nimport numpy as np\nimport pandas as pd\n\nfrom molecules import mol_from_smiles\nfrom molecules import add_property\nfrom molecules import (\n add_atom_counts, add_bond_counts, add_ring_counts)\n\nfrom .config import get_dataset_info\nfrom .filesystem import load_dataset\n\nSCORES = [\"validity\", \"novelty\", \"uniqueness\"]\n\n\ndef dump_scores(config, scores, epoch):\n filename = config.path('performance') / \"scores.csv\"\n df = pd.DataFrame([scores], columns=SCORES)\n\n if not filename.exists():\n df.to_csv(filename)\n is_max = True\n else:\n ref = pd.read_csv(filename, index_col=0)\n is_max = scores[2] >= ref.uniqueness.max()\n ref = pd.concat([ref, df], axis=0, sort=False, ignore_index=True)\n ref.to_csv(filename)\n\n return is_max\n\n\ndef retrieve_samples(config):\n dfs = []\n filenames = config.path('samples').glob('*_*.csv')\n\n for filename in filenames:\n dfs.append(pd.read_csv(filename, index_col=0))\n\n samples = pd.concat(dfs, axis=0, ignore_index=True, sort=False)\n samples = samples.reset_index(drop=True)\n return samples.copy()\n\n\ndef mask_valid_molecules(smiles):\n valid_mask = []\n\n for smi in smiles:\n try:\n mol = mol_from_smiles(smi)\n valid_mask.append(mol is not None)\n except Exception:\n valid_mask.append(False)\n\n return np.array(valid_mask)\n\n\ndef mask_novel_molecules(smiles, data_smiles):\n novel_mask = []\n\n for smi in smiles:\n novel_mask.append(smi not in data_smiles)\n\n return np.array(novel_mask)\n\n\ndef mask_unique_molecules(smiles):\n uniques, unique_mask = set(), []\n\n for smi in smiles:\n unique_mask.append(smi not in uniques)\n uniques.add(smi)\n\n return np.array(unique_mask)\n\n\ndef score_samples(samples, dataset, calc=True):\n def ratio(mask):\n total = mask.shape[0]\n if total == 0:\n return 0.0\n return mask.sum() / total\n\n if isinstance(samples, pd.DataFrame):\n smiles = samples.smiles.tolist()\n elif isinstance(samples, list):\n smiles = [s[0] for s in samples]\n data_smiles = dataset.smiles.tolist()\n\n valid_mask = mask_valid_molecules(smiles)\n novel_mask = mask_novel_molecules(smiles, data_smiles)\n unique_mask = mask_unique_molecules(smiles)\n\n scores = []\n if calc:\n start = time.time()\n print(\"Start scoring...\")\n validity_score = ratio(valid_mask)\n novelty_score = ratio(novel_mask[valid_mask])\n uniqueness_score = ratio(unique_mask[valid_mask])\n\n print(f\"valid: {validity_score} - \"\n f\"novel: {novelty_score} - \"\n f\"unique: {uniqueness_score}\")\n\n scores = [validity_score, novelty_score, uniqueness_score]\n end = time.time() - start\n elapsed = time.strftime(\"%H:%M:%S\", time.gmtime(end))\n print(f'Done. 
Time elapsed: {elapsed}.')\n\n return valid_mask * novel_mask * unique_mask, scores\n\n\ndef postprocess_samples(config, use_train=False, n_jobs=-1):\n start = time.time()\n print(\"Start postprocessing...\", end=\" \")\n kind = 'train' if use_train else 'test'\n dataset = load_dataset(config, kind=kind)\n samples = retrieve_samples(config)\n\n mask, _ = score_samples(samples, dataset, calc=False)\n samples = samples.iloc[mask, :].reset_index(drop=True)\n\n info = get_dataset_info(config.get('dataset'))\n samples = add_atom_counts(samples, info, n_jobs)\n samples = add_bond_counts(samples, info, n_jobs)\n samples = add_ring_counts(samples, info, n_jobs)\n\n for prop in info['properties']:\n samples = add_property(samples, prop, n_jobs)\n\n samples = samples[info['column_order']]\n samples['who'] = 'OURS'\n dataset['who'] = info['name']\n\n data = [samples, dataset]\n aggregated = pd.concat(data, axis=0, ignore_index=True, sort=False)\n aggregated.to_csv(config.path('samples') / 'aggregated.csv')\n\n end = time.time() - start\n elapsed = time.strftime(\"%H:%M:%S\", time.gmtime(end))\n print(f'Done. Time elapsed: {elapsed}.')\n"
] | [
[
"numpy.array",
"pandas.DataFrame",
"pandas.concat",
"pandas.read_csv"
]
] |
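Editor's note: the utils/postprocess.py row above scores generated molecules by building boolean masks for validity, novelty and uniqueness and taking their ratios. The sketch below reproduces just the novelty/uniqueness masking on toy SMILES strings; the sample list and train_set are made up, and the RDKit-backed validity check (mol_from_smiles) is deliberately left out.

# Minimal sketch of the novelty/uniqueness masks (toy data; validity check omitted).
import numpy as np

samples = ["CCO", "CCO", "c1ccccc1", "CCN"]   # hypothetical generated SMILES
train_set = {"CCN", "CCC"}                    # hypothetical training SMILES

novel_mask = np.array([smi not in train_set for smi in samples])

seen, unique_flags = set(), []
for smi in samples:
    unique_flags.append(smi not in seen)      # True only on first occurrence
    seen.add(smi)
unique_mask = np.array(unique_flags)

novelty = novel_mask.mean()                   # same as mask.sum() / mask.shape[0]
uniqueness = unique_mask.mean()
print(novelty, uniqueness)                    # 0.75 0.75

Note that in the original score_samples the novelty and uniqueness ratios are taken over the valid subset (novel_mask[valid_mask]), not over all samples as in this toy version.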
Jappenn/CCL | [
"a37cad61f060f3928fa5d47b1e2670db3e9bce6f"
] | [
"pyccl/tests/test_power.py"
] | [
"import numpy as np\nimport pytest\n\nimport pyccl as ccl\nfrom pyccl import CCLError, CCLWarning\n\n\nCOSMO = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,\n transfer_function='bbks', matter_power_spectrum='halofit')\nCOSMO_HM = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,\n transfer_function='bbks', matter_power_spectrum='halo_model',\n mass_function='shethtormen')\n\n\ndef test_halomod_f2d_copy():\n from pyccl.pyutils import assert_warns\n mdef = ccl.halos.MassDef('vir', 'matter')\n hmf = ccl.halos.MassFuncSheth99(COSMO_HM, mdef,\n mass_def_strict=False,\n use_delta_c_fit=True)\n hbf = ccl.halos.HaloBiasSheth99(COSMO_HM, mass_def=mdef,\n mass_def_strict=False)\n cc = ccl.halos.ConcentrationDuffy08(mdef)\n prf = ccl.halos.HaloProfileNFW(cc)\n hmc = ccl.halos.HMCalculator(COSMO_HM, hmf, hbf, mdef)\n pk2d = ccl.halos.halomod_Pk2D(COSMO_HM, hmc, prf, normprof1=True)\n psp_new = pk2d.psp\n # This just triggers the internal calculation\n pk_old = assert_warns(\n ccl.CCLWarning,\n ccl.nonlin_matter_power, COSMO_HM, 1., 0.8)\n pk_new = pk2d.eval(1., 0.8, COSMO_HM)\n psp_old = COSMO_HM.get_nonlin_power().psp\n assert psp_new.lkmin == psp_old.lkmin\n assert psp_new.lkmax == psp_old.lkmax\n assert psp_new.amin == psp_old.amin\n assert psp_new.amax == psp_old.amax\n assert psp_new.is_factorizable == psp_old.is_factorizable\n assert psp_new.is_k_constant == psp_old.is_k_constant\n assert psp_new.is_a_constant == psp_old.is_a_constant\n assert psp_new.is_log == psp_old.is_log\n assert psp_new.growth_factor_0 == psp_old.growth_factor_0\n assert psp_new.growth_exponent == psp_old.growth_exponent\n assert psp_new.extrap_order_lok == psp_old.extrap_order_lok\n assert psp_new.extrap_order_hik == psp_old.extrap_order_hik\n assert pk_old == pk_new\n\n\[email protected]('k', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])\n])\ndef test_nonlin_matter_power_halomod(k):\n a = 0.8\n pk = ccl.nonlin_matter_power(COSMO_HM, k, a)\n\n # New implementation\n mdef = ccl.halos.MassDef('vir', 'matter')\n hmf = ccl.halos.MassFuncSheth99(COSMO_HM, mdef,\n mass_def_strict=False,\n use_delta_c_fit=True)\n hbf = ccl.halos.HaloBiasSheth99(COSMO_HM, mass_def=mdef,\n mass_def_strict=False)\n cc = ccl.halos.ConcentrationDuffy08(mdef)\n prf = ccl.halos.HaloProfileNFW(cc)\n hmc = ccl.halos.HMCalculator(COSMO_HM, hmf, hbf, mdef)\n pkb = ccl.halos.halomod_power_spectrum(COSMO_HM, hmc, k, a,\n prf, normprof1=True)\n\n assert np.allclose(pk, pkb)\n assert np.all(np.isfinite(pk))\n assert np.shape(pk) == np.shape(k)\n\n\[email protected]('k', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])])\ndef test_linear_matter_power_smoke(k):\n a = 0.8\n pk = ccl.linear_matter_power(COSMO, k, a)\n assert np.all(np.isfinite(pk))\n assert np.shape(pk) == np.shape(k)\n\n\ndef test_linear_matter_power_raises():\n cosmo = ccl.CosmologyVanillaLCDM(transfer_function=None)\n with pytest.raises(ccl.CCLError):\n ccl.linear_matter_power(cosmo, 1., 1.)\n\n\ndef test_nonlin_matter_power_raises():\n cosmo = ccl.CosmologyVanillaLCDM(matter_power_spectrum=None)\n with pytest.raises(ccl.CCLError):\n ccl.nonlin_matter_power(cosmo, 1., 1.)\n\n\ndef test_linear_power_raises():\n cosmo = ccl.CosmologyVanillaLCDM(transfer_function='bbks')\n with pytest.raises(KeyError):\n ccl.linear_power(cosmo, 1., 1., p_of_k_a='a:b')\n\n\ndef test_nonlin_power_raises():\n cosmo = ccl.CosmologyVanillaLCDM(transfer_function='bbks')\n with pytest.raises(KeyError):\n ccl.nonlin_power(cosmo, 1., 1., 
p_of_k_a='a:b')\n\n\[email protected]('k', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])])\ndef test_nonlin_matter_power_smoke(k):\n a = 0.8\n pk = ccl.nonlin_matter_power(COSMO, k, a)\n assert np.all(np.isfinite(pk))\n assert np.shape(pk) == np.shape(k)\n\n\[email protected]('r', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])])\ndef test_sigmaR_smoke(r):\n a = 0.8\n sig = ccl.sigmaR(COSMO, r, a)\n assert np.all(np.isfinite(sig))\n assert np.shape(sig) == np.shape(r)\n\n\[email protected]('r', [\n 1,\n 1.0,\n [0.3, 0.5, 10],\n np.array([0.3, 0.5, 10])])\ndef test_sigmaV_smoke(r):\n a = 0.8\n sig = ccl.sigmaV(COSMO, r, a)\n assert np.all(np.isfinite(sig))\n assert np.shape(sig) == np.shape(r)\n\n\ndef test_sigma8_consistent():\n assert np.allclose(ccl.sigma8(COSMO), COSMO['sigma8'])\n assert np.allclose(ccl.sigmaR(COSMO, 8 / COSMO['h'], 1), COSMO['sigma8'])\n\n\[email protected]('A', [\n 1,\n 1.0,\n [0.3, 0.5, 1],\n np.array([0.3, 0.5, 1])])\ndef test_kNL(A):\n knl = ccl.kNL(COSMO, A)\n assert np.all(np.isfinite(knl))\n assert np.shape(knl) == np.shape(A)\n\n\[email protected]('tf,pk,m_nu', [\n # ('boltzmann_class', 'emu', 0.06), - this case is slow and not needed\n (None, 'emu', 0.06),\n ('bbks', 'emu', 0.06),\n ('eisenstein_hu', 'emu', 0.06),\n])\ndef test_transfer_matter_power_nu_raises(tf, pk, m_nu):\n cosmo = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,\n transfer_function=tf, matter_power_spectrum=pk, m_nu=m_nu)\n\n if tf is not None:\n with pytest.warns(CCLWarning):\n ccl.linear_matter_power(cosmo, 1, 1)\n\n with pytest.raises(CCLError):\n ccl.nonlin_matter_power(cosmo, 1, 1)\n\n\[email protected]('tf', [\n 'boltzmann_class', 'boltzmann_camb', 'boltzmann_isitgr'])\ndef test_power_sigma8norm_norms_consistent(tf):\n # make a cosmo with A_s\n cosmo = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, A_s=2e-9, n_s=0.96,\n transfer_function=tf)\n sigma8 = ccl.sigma8(cosmo)\n\n # remake same but now give sigma8\n cosmo_s8 = ccl.Cosmology(\n Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=sigma8, n_s=0.96,\n transfer_function=tf)\n\n # make sure they come out the same-ish\n assert np.allclose(ccl.sigma8(cosmo), ccl.sigma8(cosmo_s8))\n\n # and that the power spectra look right\n a = 0.8\n gfac = (\n ccl.growth_factor(cosmo, a) / ccl.growth_factor(cosmo_s8, a))**2\n pk_rat = (\n ccl.linear_matter_power(cosmo, 1e-4, a) /\n ccl.linear_matter_power(cosmo_s8, 1e-4, a))\n assert np.allclose(pk_rat, gfac)\n\n\ndef test_input_lin_power_spectrum():\n # Setup\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7, n_s=0.965,\n A_s=2e-9)\n a_arr = np.linspace(0.1, 1.0, 50)\n chi_from_ccl = ccl.background.comoving_radial_distance(cosmo, a_arr)\n hoh0_from_ccl = ccl.background.h_over_h0(cosmo, a_arr)\n growth_from_ccl = ccl.background.growth_factor_unnorm(cosmo, a_arr)\n fgrowth_from_ccl = ccl.background.growth_rate(cosmo, a_arr)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pk_arr = np.empty(shape=(len(a_arr), len(k_arr)))\n for i, a in enumerate(a_arr):\n pk_arr[i] = ccl.power.linear_matter_power(cosmo, k_arr, a)\n\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr})\n\n pk_CCL_input = ccl.power.linear_matter_power(cosmo_input, k_arr, 
0.5)\n pk_CCL = ccl.power.linear_matter_power(cosmo, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Test again with negative power spectrum (so it's not logscaled)\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': -pk_arr})\n\n pk_CCL_input = -ccl.power.linear_matter_power(cosmo_input, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Via `linear_power`\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a:b': pk_arr})\n pk_CCL_input = ccl.power.linear_power(cosmo_input, k_arr, 0.5,\n p_of_k_a='a:b')\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n\ndef test_input_linpower_raises():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n transfer_function='bbks')\n a_arr = np.linspace(0.1, 1.0, 50)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pk_arr = np.array([ccl.power.linear_matter_power(cosmo, k_arr, a)\n for a in a_arr])\n\n # Not a dictionary\n with pytest.raises(TypeError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear=np.pi)\n\n # a not increasing\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr[::-1], 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr})\n\n # Dm x Dm not present\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter;delta_matter': pk_arr})\n\n # Non-parsable power spectrum\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a;b': pk_arr})\n\n # Wrong shape\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a:b': pk_arr[0]})\n\n # Check new power spectrum is stored\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a:b': pk_arr})\n assert 'a:b' in cosmo_input._pk_lin\n assert cosmo_input.has_linear_power\n\n\ndef test_input_nonlinear_model():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7, n_s=0.965,\n A_s=2e-9, transfer_function='boltzmann_class')\n a_arr = np.linspace(0.1, 1.0, 50)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pk_arr = np.empty(shape=(len(a_arr), len(k_arr)))\n for i, a in enumerate(a_arr):\n pk_arr[i] = ccl.power.nonlin_matter_power(cosmo, k_arr, a)\n\n pk_CCL = ccl.power.nonlin_matter_power(cosmo, k_arr, 0.5)\n\n # Test again passing only linear Pk, but letting HALOFIT do its thing\n kl_arr = np.logspace(-4, 1, 
1000)\n pkl_arr = np.array([ccl.power.linear_matter_power(cosmo, kl_arr, a)\n for a in a_arr])\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n pk_linear={'a': a_arr, 'k': kl_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model='halofit')\n\n pk_CCL_input = ccl.power.nonlin_matter_power(cosmo_input, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Test extra power spectrum\n kl_arr = np.logspace(-4, 1, 1000)\n pkl_arr = np.array([ccl.power.linear_matter_power(cosmo, kl_arr, a)\n for a in a_arr])\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n pk_linear={'a': a_arr, 'k': kl_arr,\n 'delta_matter:delta_matter': pkl_arr,\n 'a:b': pkl_arr},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': -pk_arr},\n nonlinear_model='halofit')\n\n pk_CCL_input = cosmo_input.get_nonlin_power('a:b').eval(k_arr,\n 0.5,\n cosmo_input)\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Via `nonlin_power`\n pk_CCL_input = ccl.power.nonlin_power(cosmo_input, k_arr, 0.5,\n p_of_k_a='a:b')\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Use dictionary\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n pk_linear={'a': a_arr, 'k': kl_arr,\n 'delta_matter:delta_matter': pkl_arr,\n 'a:b': pkl_arr, 'c:d': pkl_arr},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': -pk_arr},\n nonlinear_model={'a:b': 'halofit',\n 'c:d': None})\n pk_CCL_input = ccl.power.nonlin_power(cosmo_input, k_arr, 0.5,\n p_of_k_a='a:b')\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n assert 'c:d' not in cosmo_input._pk_nl\n\n\ndef test_input_nonlin_power_spectrum():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7, n_s=0.965,\n A_s=2e-9, transfer_function='boltzmann_class')\n a_arr = np.linspace(0.1, 1.0, 50)\n chi_from_ccl = ccl.background.comoving_radial_distance(cosmo, a_arr)\n hoh0_from_ccl = ccl.background.h_over_h0(cosmo, a_arr)\n growth_from_ccl = ccl.background.growth_factor_unnorm(cosmo, a_arr)\n fgrowth_from_ccl = ccl.background.growth_rate(cosmo, a_arr)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pk_arr = np.empty(shape=(len(a_arr), len(k_arr)))\n for i, a in enumerate(a_arr):\n pk_arr[i] = ccl.power.nonlin_matter_power(cosmo, k_arr, a)\n\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr})\n\n pk_CCL_input = ccl.power.nonlin_matter_power(cosmo_input, k_arr, 0.5)\n pk_CCL = ccl.power.nonlin_matter_power(cosmo, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., rtol=1e-5)\n\n # Test again with negative power spectrum (so it's not logscaled)\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, A_s=2e-9,\n background={'a': a_arr,\n 'chi': chi_from_ccl,\n 'h_over_h0': hoh0_from_ccl},\n growth={'a': a_arr,\n 'growth_factor': growth_from_ccl,\n 'growth_rate': fgrowth_from_ccl},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': -pk_arr})\n\n pk_CCL_input = -ccl.power.nonlin_matter_power(cosmo_input, k_arr, 0.5)\n\n assert np.allclose(pk_CCL_input, pk_CCL, atol=0., 
rtol=1e-5)\n\n\ndef test_input_nonlinear_model_raises():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n transfer_function='bbks')\n a_arr = np.linspace(0.1, 1.0, 50)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pkl_arr = np.array([ccl.power.linear_matter_power(cosmo, k_arr, a)\n for a in a_arr])\n\n # If no non-linear model provided, delta_matter:delta_matter\n # should be there.\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'a:b': pkl_arr})\n\n with pytest.raises(TypeError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model=np.pi)\n\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n nonlinear_model='halofit')\n\n with pytest.raises(KeyError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model={'y:z': 'halofit'})\n\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model={'delta_matter:delta_matter': None})\n\n with pytest.raises(KeyError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr},\n nonlinear_model={'delta_matter:delta_matter': 'hmcode'})\n\n\ndef test_input_nonlin_raises():\n cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n transfer_function='bbks')\n a_arr = np.linspace(0.1, 1.0, 50)\n k_arr = np.logspace(np.log10(2e-4), np.log10(1), 1000)\n pkl_arr = np.array([ccl.power.linear_matter_power(cosmo, k_arr, a)\n for a in a_arr])\n pk_arr = np.array([ccl.power.nonlin_matter_power(cosmo, k_arr, a)\n for a in a_arr])\n\n # Not a dictionary\n with pytest.raises(TypeError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin=np.pi)\n\n # k not present\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'kk': k_arr,\n 'delta_matter;delta_matter': pk_arr})\n\n # a not increasing\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr[::-1], 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr})\n\n # delta_matter:delta_matter not present\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter;delta_matter': pk_arr})\n\n # Non-parsable power spectrum\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a;b': pk_arr})\n\n # Wrong shape\n with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr,\n 'a:b': pk_arr[0]})\n\n # Linear Pk not set for halofit\n 
with pytest.raises(ValueError):\n ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n nonlinear_model='halofit')\n\n # Check new power spectrum is stored\n cosmo_input = ccl.CosmologyCalculator(\n Omega_c=0.27, Omega_b=0.05, h=0.7,\n n_s=0.965, sigma8=0.8,\n pk_linear={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pkl_arr,\n 'a:b': pkl_arr},\n pk_nonlin={'a': a_arr, 'k': k_arr,\n 'delta_matter:delta_matter': pk_arr},\n nonlinear_model='halofit')\n assert 'a:b' in cosmo_input._pk_nl\n assert cosmo_input.has_nonlin_power\n\n\ndef test_camb_de_model():\n \"\"\"Check that the dark energy model for CAMB has been properly defined.\"\"\"\n with pytest.raises(ValueError):\n cosmo = ccl.CosmologyVanillaLCDM(\n transfer_function='boltzmann_camb',\n extra_parameters={\"camb\": {\"dark_energy_model\": \"pf\"}})\n ccl.linear_matter_power(cosmo, 1, 1)\n\n \"\"\"Check that w is not less than -1, if the chosen dark energy model for\n CAMB is fluid.\"\"\"\n with pytest.raises(ValueError):\n cosmo = ccl.CosmologyVanillaLCDM(\n transfer_function='boltzmann_camb', w0=-1, wa=-1)\n ccl.linear_matter_power(cosmo, 1, 1)\n\n \"\"\"Check that ppf is running smoothly.\"\"\"\n cosmo = ccl.CosmologyVanillaLCDM(\n transfer_function='boltzmann_camb', w0=-1, wa=-1,\n extra_parameters={\"camb\": {\"dark_energy_model\": \"ppf\"}})\n assert np.isfinite(ccl.linear_matter_power(cosmo, 1, 1))\n"
] | [
[
"numpy.allclose",
"numpy.shape",
"numpy.log10",
"numpy.logspace",
"numpy.array",
"numpy.linspace",
"numpy.isfinite"
]
] |
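Editor's note: the pyccl test row above repeatedly builds a Cosmology and smoke-tests the power-spectrum calls for finiteness and shape. A minimal stand-alone version of that check might look like the following; it assumes pyccl is importable and simply reuses the COSMO fixture's parameters and the k sample points from the parametrised tests.

# Minimal sketch mirroring test_linear_matter_power_smoke (assumes pyccl is installed).
import numpy as np
import pyccl as ccl

cosmo = ccl.Cosmology(
    Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,
    transfer_function='bbks', matter_power_spectrum='halofit')

k = np.array([0.3, 0.5, 10.0])    # same sample points as the parametrised tests
a = 0.8
pk = ccl.linear_matter_power(cosmo, k, a)

assert np.all(np.isfinite(pk))
assert np.shape(pk) == np.shape(k)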
vinzdef/rorrim | [
"7a3fd4a212420b6fbe6590005300ea095938bf66"
] | [
"python/frame_convert2.py"
] | [
"import numpy as np\nimport matplotlib\n\ndef to_hsv(numb):\n hue = np.interp(numb, [0, 1024], [0, 1])\n rgb = matplotlib.colors.hsv_to_rgb(np.array([hue, 0.5, 1]))\n bgr = rgb[:, :, ::-1] # RGB -> BGR\n return np.array(bgr)\n\ndef hsv_depth(depth):\n depth = to_hsv(depth)\n return depth\n\ndef pretty_depth(depth):\n \"\"\"Converts depth into a 'nicer' format for display\n\n This is abstracted to allow for experimentation with normalization\n\n Args:\n depth: A numpy array with 2 bytes per pixel\n\n Returns:\n A numpy array that has been processed with unspecified datatype\n \"\"\"\n np.clip(depth, 0, 1024, depth)\n depth >>= 2\n depth = depth.astype(np.uint8)\n return depth\n\n\ndef pretty_depth_cv(depth):\n \"\"\"Converts depth into a 'nicer' format for display\n\n This is abstracted to allow for experimentation with normalization\n\n Args:\n depth: A numpy array with 2 bytes per pixel\n\n Returns:\n A numpy array with unspecified datatype\n \"\"\"\n return pretty_depth(depth)\n\n\ndef video_cv(video):\n \"\"\"Converts video into a BGR format for display\n\n This is abstracted out to allow for experimentation\n\n Args:\n video: A numpy array with 1 byte per pixel, 3 channels RGB\n\n Returns:\n A numpy array with with 1 byte per pixel, 3 channels BGR\n \"\"\"\n return video[:, :, ::-1] # RGB -> BGR"
] | [
[
"numpy.array",
"numpy.clip",
"numpy.interp"
]
] |
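Editor's note: the frame_convert2.py row above prepares 16-bit Kinect depth frames for display by clipping to 0..1024, shifting right by two bits and casting to uint8. The sketch below applies the same three steps to a random toy frame; the 480x640 shape and the random contents are assumptions, not part of the original file.

# Minimal sketch of the pretty_depth conversion (toy 2-byte depth frame).
import numpy as np

depth = np.random.randint(0, 2048, size=(480, 640)).astype(np.uint16)

np.clip(depth, 0, 1024, depth)    # clamp in place to the 0..1024 range
depth >>= 2                       # map 0..1024 down to 0..256
depth8 = depth.astype(np.uint8)   # 8-bit for display (256 wraps to 0, as in the original)

print(depth8.dtype, depth8.max() <= 255)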
guillefix/mt-lightning | [
"56e93a569d85a768c178b15461e5362c25fdc3e3"
] | [
"analysis/pymo/preprocessing.py"
] | [
"'''\nPreprocessing Tranformers Based on sci-kit's API\n\nBy Omid Alemi\nCreated on June 12, 2017\n'''\nimport copy\nimport pandas as pd\nimport numpy as np\nimport transforms3d as t3d\nimport scipy.ndimage.filters as filters\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nfrom analysis.pymo.rotation_tools import Rotation, euler2expmap, euler2expmap2, expmap2euler, euler_reorder, unroll\nfrom analysis.pymo.Quaternions import Quaternions\nfrom analysis.pymo.Pivots import Pivots\n\nclass MocapParameterizer(BaseEstimator, TransformerMixin):\n def __init__(self, param_type = 'euler'):\n '''\n\n param_type = {'euler', 'quat', 'expmap', 'position', 'expmap2pos'}\n '''\n self.param_type = param_type\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n print(\"MocapParameterizer: \" + self.param_type)\n if self.param_type == 'euler':\n return X\n elif self.param_type == 'expmap':\n return self._to_expmap(X)\n elif self.param_type == 'quat':\n return X\n elif self.param_type == 'position':\n return self._to_pos(X)\n elif self.param_type == 'expmap2pos':\n return self._expmap_to_pos(X)\n else:\n raise 'param types: euler, quat, expmap, position, expmap2pos'\n\n# return X\n\n def inverse_transform(self, X, copy=None):\n if self.param_type == 'euler':\n return X\n elif self.param_type == 'expmap':\n return self._expmap_to_euler(X)\n elif self.param_type == 'quat':\n raise 'quat2euler is not supported'\n elif self.param_type == 'position':\n # raise 'positions 2 eulers is not supported'\n print('positions 2 eulers is not supported')\n return X\n else:\n raise 'param types: euler, quat, expmap, position'\n\n def _to_pos(self, X):\n '''Converts joints rotations in Euler angles to joint positions'''\n\n Q = []\n for track in X:\n channels = []\n titles = []\n euler_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n pos_df = pd.DataFrame(index=euler_df.index)\n\n # Copy the root rotations into the new DataFrame\n # rxp = '%s_Xrotation'%track.root_name\n # ryp = '%s_Yrotation'%track.root_name\n # rzp = '%s_Zrotation'%track.root_name\n # pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)\n # pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)\n # pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)\n\n # List the columns that contain rotation channels\n rot_cols = [c for c in euler_df.columns if ('rotation' in c)]\n\n # List the columns that contain position channels\n pos_cols = [c for c in euler_df.columns if ('position' in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton)\n\n tree_data = {}\n\n for joint in track.traverse():\n parent = track.skeleton[joint]['parent']\n rot_order = track.skeleton[joint]['order']\n #print(\"rot_order:\" + joint + \" :\" + rot_order)\n\n # Get the rotation columns that belong to this joint\n rc = euler_df[[c for c in rot_cols if joint in c]]\n\n # Get the position columns that belong to this joint\n pc = euler_df[[c for c in pos_cols if joint in c]]\n\n # Make sure the columns are organized in xyz order\n if rc.shape[1] < 3:\n euler_values = np.zeros((euler_df.shape[0], 3))\n rot_order = \"XYZ\"\n else:\n euler_values = np.pi/180.0*np.transpose(np.array([track.values['%s_%srotation'%(joint, rot_order[0])], track.values['%s_%srotation'%(joint, rot_order[1])], track.values['%s_%srotation'%(joint, rot_order[2])]]))\n\n if pc.shape[1] < 3:\n pos_values = np.asarray([[0,0,0] for f in pc.iterrows()])\n 
else:\n pos_values =np.asarray([[f[1]['%s_Xposition'%joint],\n f[1]['%s_Yposition'%joint],\n f[1]['%s_Zposition'%joint]] for f in pc.iterrows()])\n\n quats = Quaternions.from_euler(np.asarray(euler_values), order=rot_order.lower(), world=False)\n\n tree_data[joint]=[\n [], # to store the rotation matrix\n [] # to store the calculated position\n ]\n if track.root_name == joint:\n tree_data[joint][0] = quats#rotmats\n # tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])\n tree_data[joint][1] = pos_values\n else:\n # for every frame i, multiply this joint's rotmat to the rotmat of its parent\n tree_data[joint][0] = tree_data[parent][0]*quats# np.matmul(rotmats, tree_data[parent][0])\n\n # add the position channel to the offset and store it in k, for every frame i\n k = pos_values + np.asarray(track.skeleton[joint]['offsets'])\n\n # multiply k to the rotmat of the parent for every frame i\n q = tree_data[parent][0]*k #np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])\n\n # add q to the position of the parent, for every frame i\n tree_data[joint][1] = tree_data[parent][1] + q #q.reshape(k.shape[0],3) + tree_data[parent][1]\n\n # Create the corresponding columns in the new DataFrame\n pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)\n pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)\n pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)\n\n\n new_track = track.clone()\n new_track.values = pos_df\n Q.append(new_track)\n return Q\n\n def _expmap2rot(self, expmap):\n\n theta = np.linalg.norm(expmap, axis=1, keepdims=True)\n nz = np.nonzero(theta)[0]\n\n expmap[nz,:] = expmap[nz,:]/theta[nz]\n\n nrows=expmap.shape[0]\n x = expmap[:,0]\n y = expmap[:,1]\n z = expmap[:,2]\n\n s = np.sin(theta*0.5).reshape(nrows)\n c = np.cos(theta*0.5).reshape(nrows)\n\n rotmats = np.zeros((nrows, 3, 3))\n\n rotmats[:,0,0] = 2*(x*x-1)*s*s+1\n rotmats[:,0,1] = 2*x*y*s*s-2*z*c*s\n rotmats[:,0,2] = 2*x*z*s*s+2*y*c*s\n rotmats[:,1,0] = 2*x*y*s*s+2*z*c*s\n rotmats[:,1,1] = 2*(y*y-1)*s*s+1\n rotmats[:,1,2] = 2*y*z*s*s-2*x*c*s\n rotmats[:,2,0] = 2*x*z*s*s-2*y*c*s\n rotmats[:,2,1] = 2*y*z*s*s+2*x*c*s\n rotmats[:,2,2] = 2*(z*z-1)*s*s+1\n\n return rotmats\n\n def _expmap_to_pos(self, X):\n '''Converts joints rotations in expmap notation to joint positions'''\n\n Q = []\n for track in X:\n channels = []\n titles = []\n exp_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n pos_df = pd.DataFrame(index=exp_df.index)\n\n # Copy the root rotations into the new DataFrame\n # rxp = '%s_Xrotation'%track.root_name\n # ryp = '%s_Yrotation'%track.root_name\n # rzp = '%s_Zrotation'%track.root_name\n # pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)\n # pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)\n # pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)\n\n # List the columns that contain rotation channels\n exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton)\n\n tree_data = {}\n\n for joint in track.traverse():\n parent = track.skeleton[joint]['parent']\n\n if 'Nub' not in joint:\n r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint\n expmap = r.values\n #expmap = 
[[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()]\n else:\n expmap = np.zeros((exp_df.shape[0], 3))\n\n # Convert the eulers to rotation matrices\n #rotmats = np.asarray([Rotation(f, 'expmap').rotmat for f in expmap])\n #angs = np.linalg.norm(expmap,axis=1, keepdims=True)\n rotmats = self._expmap2rot(expmap)\n\n tree_data[joint]=[\n [], # to store the rotation matrix\n [] # to store the calculated position\n ]\n pos_values = np.zeros((exp_df.shape[0], 3))\n\n if track.root_name == joint:\n tree_data[joint][0] = rotmats\n # tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])\n tree_data[joint][1] = pos_values\n else:\n # for every frame i, multiply this joint's rotmat to the rotmat of its parent\n tree_data[joint][0] = np.matmul(rotmats, tree_data[parent][0])\n\n # add the position channel to the offset and store it in k, for every frame i\n k = pos_values + track.skeleton[joint]['offsets']\n\n # multiply k to the rotmat of the parent for every frame i\n q = np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])\n\n # add q to the position of the parent, for every frame i\n tree_data[joint][1] = q.reshape(k.shape[0],3) + tree_data[parent][1]\n\n\n # Create the corresponding columns in the new DataFrame\n pos_df['%s_Xposition'%joint] = pd.Series(data=tree_data[joint][1][:,0], index=pos_df.index)\n pos_df['%s_Yposition'%joint] = pd.Series(data=tree_data[joint][1][:,1], index=pos_df.index)\n pos_df['%s_Zposition'%joint] = pd.Series(data=tree_data[joint][1][:,2], index=pos_df.index)\n\n new_track = track.clone()\n new_track.values = pos_df\n Q.append(new_track)\n return Q\n\n def _to_expmap(self, X):\n '''Converts Euler angles to Exponential Maps'''\n\n Q = []\n for track in X:\n channels = []\n titles = []\n euler_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n exp_df = euler_df.copy()# pd.DataFrame(index=euler_df.index)\n\n # Copy the root positions into the new DataFrame\n #rxp = '%s_Xposition'%track.root_name\n #ryp = '%s_Yposition'%track.root_name\n #rzp = '%s_Zposition'%track.root_name\n #exp_df[rxp] = pd.Series(data=euler_df[rxp], index=exp_df.index)\n #exp_df[ryp] = pd.Series(data=euler_df[ryp], index=exp_df.index)\n #exp_df[rzp] = pd.Series(data=euler_df[rzp], index=exp_df.index)\n\n # List the columns that contain rotation channels\n rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton if 'Nub' not in joint)\n\n for joint in joints:\n #print(joint)\n r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint\n rot_order = track.skeleton[joint]['order']\n r1_col = '%s_%srotation'%(joint, rot_order[0])\n r2_col = '%s_%srotation'%(joint, rot_order[1])\n r3_col = '%s_%srotation'%(joint, rot_order[2])\n\n exp_df.drop([r1_col, r2_col, r3_col], axis=1, inplace=True)\n euler = [[f[1][r1_col], f[1][r2_col], f[1][r3_col]] for f in r.iterrows()]\n #exps = [Rotation(f, 'euler', from_deg=True, order=rot_order).to_expmap() for f in euler] # Convert the eulers to exp maps\n exps = unroll(np.array([euler2expmap(f, rot_order, True) for f in euler])) # Convert the exp maps to eulers\n # exps = np.array([euler2expmap(f, rot_order, True) for f in euler]) # Convert the exp maps to eulers\n #exps = euler2expmap2(euler, rot_order, True) # Convert the eulers to exp maps\n\n # Create the corresponding columns in the new DataFrame\n\n 
exp_df.insert(loc=0, column='%s_gamma'%joint, value=pd.Series(data=[e[2] for e in exps], index=exp_df.index))\n exp_df.insert(loc=0, column='%s_beta'%joint, value=pd.Series(data=[e[1] for e in exps], index=exp_df.index))\n exp_df.insert(loc=0, column='%s_alpha'%joint, value=pd.Series(data=[e[0] for e in exps], index=exp_df.index))\n\n #print(exp_df.columns)\n new_track = track.clone()\n new_track.values = exp_df\n Q.append(new_track)\n\n return Q\n\n def _expmap_to_euler(self, X):\n Q = []\n for track in X:\n channels = []\n titles = []\n exp_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n #euler_df = pd.DataFrame(index=exp_df.index)\n euler_df = exp_df.copy()\n\n # Copy the root positions into the new DataFrame\n #rxp = '%s_Xposition'%track.root_name\n #ryp = '%s_Yposition'%track.root_name\n #rzp = '%s_Zposition'%track.root_name\n #euler_df[rxp] = pd.Series(data=exp_df[rxp], index=euler_df.index)\n #euler_df[ryp] = pd.Series(data=exp_df[ryp], index=euler_df.index)\n #euler_df[rzp] = pd.Series(data=exp_df[rzp], index=euler_df.index)\n\n # List the columns that contain rotation channels\n exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton if 'Nub' not in joint)\n\n for joint in joints:\n r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint\n\n euler_df.drop(['%s_alpha'%joint, '%s_beta'%joint, '%s_gamma'%joint], axis=1, inplace=True)\n expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()] # Make sure the columsn are organized in xyz order\n rot_order = track.skeleton[joint]['order']\n #euler_rots = [Rotation(f, 'expmap').to_euler(True, rot_order) for f in expmap] # Convert the exp maps to eulers\n euler_rots = [expmap2euler(f, rot_order, True) for f in expmap] # Convert the exp maps to eulers\n\n # Create the corresponding columns in the new DataFrame\n\n euler_df['%s_%srotation'%(joint, rot_order[0])] = pd.Series(data=[e[0] for e in euler_rots], index=euler_df.index)\n euler_df['%s_%srotation'%(joint, rot_order[1])] = pd.Series(data=[e[1] for e in euler_rots], index=euler_df.index)\n euler_df['%s_%srotation'%(joint, rot_order[2])] = pd.Series(data=[e[2] for e in euler_rots], index=euler_df.index)\n\n new_track = track.clone()\n new_track.values = euler_df\n Q.append(new_track)\n\n return Q\n\nclass Mirror(BaseEstimator, TransformerMixin):\n def __init__(self, axis=\"X\", append=True):\n \"\"\"\n Mirrors the data\n \"\"\"\n self.axis = axis\n self.append = append\n\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n print(\"Mirror: \" + self.axis)\n Q = []\n\n if self.append:\n for track in X:\n Q.append(track)\n\n for track in X:\n channels = []\n titles = []\n\n if self.axis == \"X\":\n signs = np.array([1,-1,-1])\n if self.axis == \"Y\":\n signs = np.array([-1,1,-1])\n if self.axis == \"Z\":\n signs = np.array([-1,-1,1])\n\n euler_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n new_df = pd.DataFrame(index=euler_df.index)\n\n # Copy the root positions into the new DataFrame\n rxp = '%s_Xposition'%track.root_name\n ryp = '%s_Yposition'%track.root_name\n rzp = '%s_Zposition'%track.root_name\n new_df[rxp] = pd.Series(data=-signs[0]*euler_df[rxp], index=new_df.index)\n new_df[ryp] = pd.Series(data=-signs[1]*euler_df[ryp], 
index=new_df.index)\n new_df[rzp] = pd.Series(data=-signs[2]*euler_df[rzp], index=new_df.index)\n\n # List the columns that contain rotation channels\n rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]\n #lft_rots = [c for c in euler_df.columns if ('Left' in c and 'rotation' in c and 'Nub' not in c)]\n #rgt_rots = [c for c in euler_df.columns if ('Right' in c and 'rotation' in c and 'Nub' not in c)]\n lft_joints = (joint for joint in track.skeleton if 'Left' in joint and 'Nub' not in joint)\n rgt_joints = (joint for joint in track.skeleton if 'Right' in joint and 'Nub' not in joint)\n\n new_track = track.clone()\n\n for lft_joint in lft_joints:\n #lr = euler_df[[c for c in rots if lft_joint + \"_\" in c]]\n #rot_order = track.skeleton[lft_joint]['order']\n #lft_eulers = [[f[1]['%s_Xrotation'%lft_joint], f[1]['%s_Yrotation'%lft_joint], f[1]['%s_Zrotation'%lft_joint]] for f in lr.iterrows()]\n\n rgt_joint = lft_joint.replace('Left', 'Right')\n #rr = euler_df[[c for c in rots if rgt_joint + \"_\" in c]]\n #rot_order = track.skeleton[rgt_joint]['order']\n# rgt_eulers = [[f[1]['%s_Xrotation'%rgt_joint], f[1]['%s_Yrotation'%rgt_joint], f[1]['%s_Zrotation'%rgt_joint]] for f in rr.iterrows()]\n\n # Create the corresponding columns in the new DataFrame\n\n new_df['%s_Xrotation'%lft_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%rgt_joint], index=new_df.index)\n new_df['%s_Yrotation'%lft_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%rgt_joint], index=new_df.index)\n new_df['%s_Zrotation'%lft_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%rgt_joint], index=new_df.index)\n\n new_df['%s_Xrotation'%rgt_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%lft_joint], index=new_df.index)\n new_df['%s_Yrotation'%rgt_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%lft_joint], index=new_df.index)\n new_df['%s_Zrotation'%rgt_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%lft_joint], index=new_df.index)\n\n # List the joints that are not left or right, i.e. 
are on the trunk\n joints = (joint for joint in track.skeleton if 'Nub' not in joint and 'Left' not in joint and 'Right' not in joint)\n\n for joint in joints:\n #r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint\n #rot_order = track.skeleton[joint]['order']\n\n #eulers = [[f[1]['%s_Xrotation'%joint], f[1]['%s_Yrotation'%joint], f[1]['%s_Zrotation'%joint]] for f in r.iterrows()]\n\n # Create the corresponding columns in the new DataFrame\n new_df['%s_Xrotation'%joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%joint], index=new_df.index)\n new_df['%s_Yrotation'%joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%joint], index=new_df.index)\n new_df['%s_Zrotation'%joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%joint], index=new_df.index)\n\n new_track.values = new_df\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None, start_pos=None):\n return X\n\nclass EulerReorder(BaseEstimator, TransformerMixin):\n def __init__(self, new_order):\n \"\"\"\n Add a\n \"\"\"\n self.new_order = new_order\n\n\n def fit(self, X, y=None):\n self.orig_skeleton = copy.deepcopy(X[0].skeleton)\n print(self.orig_skeleton)\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n for track in X:\n channels = []\n titles = []\n euler_df = track.values\n\n # Create a new DataFrame to store the exponential map rep\n new_df = pd.DataFrame(index=euler_df.index)\n\n # Copy the root positions into the new DataFrame\n rxp = '%s_Xposition'%track.root_name\n ryp = '%s_Yposition'%track.root_name\n rzp = '%s_Zposition'%track.root_name\n new_df[rxp] = pd.Series(data=euler_df[rxp], index=new_df.index)\n new_df[ryp] = pd.Series(data=euler_df[ryp], index=new_df.index)\n new_df[rzp] = pd.Series(data=euler_df[rzp], index=new_df.index)\n\n # List the columns that contain rotation channels\n rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]\n\n # List the joints that are not end sites, i.e., have channels\n joints = (joint for joint in track.skeleton if 'Nub' not in joint)\n\n new_track = track.clone()\n for joint in joints:\n r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint\n rot_order = track.skeleton[joint]['order']\n\n euler = [[f[1]['%s_Xrotation'%(joint)], f[1]['%s_Yrotation'%(joint)], f[1]['%s_Zrotation'%(joint)]] for f in r.iterrows()]\n new_euler = [euler_reorder(f, rot_order, self.new_order, True) for f in euler]\n #new_euler = euler_reorder2(np.array(euler), rot_order, self.new_order, True)\n\n # Create the corresponding columns in the new DataFrame\n new_df['%s_%srotation'%(joint, self.new_order[0])] = pd.Series(data=[e[0] for e in new_euler], index=new_df.index)\n new_df['%s_%srotation'%(joint, self.new_order[1])] = pd.Series(data=[e[1] for e in new_euler], index=new_df.index)\n new_df['%s_%srotation'%(joint, self.new_order[2])] = pd.Series(data=[e[2] for e in new_euler], index=new_df.index)\n\n new_track.skeleton[joint]['order'] = self.new_order\n\n new_track.values = new_df\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None, start_pos=None):\n return X\n# Q = []\n#\n# for track in X:\n# channels = []\n# titles = []\n# euler_df = track.values\n#\n# # Create a new DataFrame to store the exponential map rep\n# new_df = pd.DataFrame(index=euler_df.index)\n#\n# # Copy the root positions into the new DataFrame\n# rxp = '%s_Xposition'%track.root_name\n# ryp = '%s_Yposition'%track.root_name\n# rzp = 
'%s_Zposition'%track.root_name\n# new_df[rxp] = pd.Series(data=euler_df[rxp], index=new_df.index)\n# new_df[ryp] = pd.Series(data=euler_df[ryp], index=new_df.index)\n# new_df[rzp] = pd.Series(data=euler_df[rzp], index=new_df.index)\n#\n# # List the columns that contain rotation channels\n# rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]\n#\n# # List the joints that are not end sites, i.e., have channels\n# joints = (joint for joint in track.skeleton if 'Nub' not in joint)\n#\n# new_track = track.clone()\n# for joint in joints:\n# r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint\n# rot_order = track.skeleton[joint]['order']\n# new_order = self.orig_skeleton[joint]['order']\n# print(\"rot_order:\" + str(rot_order))\n# print(\"new_order:\" + str(new_order))\n#\n# euler = [[f[1]['%s_%srotation'%(joint, rot_order[0])], f[1]['%s_%srotation'%(joint, rot_order[1])], f[1]['%s_%srotation'%(joint, rot_order[2])]] for f in r.iterrows()]\n# #new_euler = [euler_reorder(f, rot_order, new_order, True) for f in euler]\n# new_euler = euler_reorder2(np.array(euler), rot_order, self.new_order, True)\n#\n# # Create the corresponding columns in the new DataFrame\n# new_df['%s_%srotation'%(joint, new_order[0])] = pd.Series(data=[e[0] for e in new_euler], index=new_df.index)\n# new_df['%s_%srotation'%(joint, new_order[1])] = pd.Series(data=[e[1] for e in new_euler], index=new_df.index)\n# new_df['%s_%srotation'%(joint, new_order[2])] = pd.Series(data=[e[2] for e in new_euler], index=new_df.index)\n#\n# new_track.skeleton[joint]['order'] = new_order\n#\n# new_track.values = new_df\n# Q.append(new_track)\n# return Q\n\nclass JointSelector(BaseEstimator, TransformerMixin):\n '''\n Allows for filtering the mocap data to include only the selected joints\n '''\n def __init__(self, joints, include_root=False):\n self.joints = joints\n self.include_root = include_root\n\n def fit(self, X, y=None):\n selected_joints = []\n selected_channels = []\n\n if self.include_root:\n selected_joints.append(X[0].root_name)\n\n selected_joints.extend(self.joints)\n\n for joint_name in selected_joints:\n selected_channels.extend([o for o in X[0].values.columns if (joint_name + \"_\") in o and 'Nub' not in o])\n\n self.selected_joints = selected_joints\n self.selected_channels = selected_channels\n self.not_selected = X[0].values.columns.difference(selected_channels)\n self.not_selected_values = {c:X[0].values[c].values[0] for c in self.not_selected}\n\n self.orig_skeleton = X[0].skeleton\n return self\n\n def transform(self, X, y=None):\n print(\"JointSelector\")\n Q = []\n for track in X:\n t2 = track.clone()\n for key in track.skeleton.keys():\n if key not in self.selected_joints:\n parent = t2.skeleton[key]['parent']\n if parent in t2.skeleton:\n t2.skeleton[parent]['children'].remove(key)\n t2.skeleton.pop(key)\n t2.values = track.values[self.selected_channels]\n\n Q.append(t2)\n\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n t2 = track.clone()\n t2.skeleton = self.orig_skeleton\n for d in self.not_selected:\n t2.values[d] = self.not_selected_values[d]\n Q.append(t2)\n\n return Q\n\n\nclass Numpyfier(BaseEstimator, TransformerMixin):\n '''\n Just converts the values in a MocapData object into a numpy array\n Useful for the final stage of a pipeline before training\n '''\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n self.org_mocap_ = X[0].clone()\n self.org_mocap_.values.drop(self.org_mocap_.values.index, 
inplace=True)\n\n return self\n\n def transform(self, X, y=None):\n print(\"Numpyfier\")\n Q = []\n\n for track in X:\n Q.append(track.values.values)\n #print(\"Numpyfier:\" + str(track.values.columns))\n\n return np.array(Q)\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n\n new_mocap = self.org_mocap_.clone()\n time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')\n\n # print(self.org_mocap_.values.columns)\n # import pdb;pdb.set_trace()\n new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)\n\n new_mocap.values = new_df\n\n\n Q.append(new_mocap)\n\n return Q\n\nclass Slicer(BaseEstimator, TransformerMixin):\n '''\n Slice the data into intervals of equal size\n '''\n def __init__(self, window_size, overlap=0.5):\n self.window_size = window_size\n self.overlap = overlap\n pass\n\n def fit(self, X, y=None):\n self.org_mocap_ = X[0].clone()\n self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)\n\n return self\n\n def transform(self, X, y=None):\n print(\"Slicer\")\n Q = []\n\n for track in X:\n vals = track.values.values\n nframes = vals.shape[0]\n overlap_frames = (int)(self.overlap*self.window_size)\n\n n_sequences = (nframes-overlap_frames)//(self.window_size-overlap_frames)\n\n if n_sequences>0:\n y = np.zeros((n_sequences, self.window_size, vals.shape[1]))\n\n # extract sequences from the input data\n for i in range(0,n_sequences):\n frameIdx = (self.window_size-overlap_frames) * i\n Q.append(vals[frameIdx:frameIdx+self.window_size,:])\n\n return np.array(Q)\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n\n new_mocap = self.org_mocap_.clone()\n time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')\n\n new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)\n\n new_mocap.values = new_df\n\n\n Q.append(new_mocap)\n\n return Q\n\nclass RootTransformer(BaseEstimator, TransformerMixin):\n def __init__(self, method, position_smoothing=0, rotation_smoothing=0):\n \"\"\"\n Accepted methods:\n abdolute_translation_deltas\n pos_rot_deltas\n \"\"\"\n self.method = method\n self.position_smoothing=position_smoothing\n self.rotation_smoothing=rotation_smoothing\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n print(\"RootTransformer\")\n Q = []\n\n for track in X:\n if self.method == 'abdolute_translation_deltas':\n new_df = track.values.copy()\n xpcol = '%s_Xposition'%track.root_name\n ypcol = '%s_Yposition'%track.root_name\n zpcol = '%s_Zposition'%track.root_name\n\n\n dxpcol = '%s_dXposition'%track.root_name\n dzpcol = '%s_dZposition'%track.root_name\n\n x=track.values[xpcol].copy()\n z=track.values[zpcol].copy()\n\n if self.position_smoothing>0:\n x_sm = filters.gaussian_filter1d(x, self.position_smoothing, axis=0, mode='nearest')\n z_sm = filters.gaussian_filter1d(z, self.position_smoothing, axis=0, mode='nearest')\n dx = pd.Series(data=x_sm, index=new_df.index).diff()\n dz = pd.Series(data=z_sm, index=new_df.index).diff()\n new_df[xpcol] = x-x_sm\n new_df[zpcol] = z-z_sm\n else:\n dx = x.diff()\n dz = z.diff()\n new_df.drop([xpcol, zpcol], axis=1, inplace=True)\n\n dx[0] = dx[1]\n dz[0] = dz[1]\n\n new_df[dxpcol] = dx\n new_df[dzpcol] = dz\n\n new_track = track.clone()\n new_track.values = new_df\n # end of abdolute_translation_deltas\n\n elif self.method == 'pos_rot_deltas':\n new_track = track.clone()\n\n # Absolute columns\n xp_col = 
'%s_Xposition'%track.root_name\n yp_col = '%s_Yposition'%track.root_name\n zp_col = '%s_Zposition'%track.root_name\n\n #rot_order = track.skeleton[track.root_name]['order']\n #%(joint, rot_order[0])\n\n rot_order = track.skeleton[track.root_name]['order']\n r1_col = '%s_%srotation'%(track.root_name, rot_order[0])\n r2_col = '%s_%srotation'%(track.root_name, rot_order[1])\n r3_col = '%s_%srotation'%(track.root_name, rot_order[2])\n\n # Delta columns\n dxp_col = '%s_dXposition'%track.root_name\n dzp_col = '%s_dZposition'%track.root_name\n\n dxr_col = '%s_dXrotation'%track.root_name\n dyr_col = '%s_dYrotation'%track.root_name\n dzr_col = '%s_dZrotation'%track.root_name\n\n positions = np.transpose(np.array([track.values[xp_col], track.values[yp_col], track.values[zp_col]]))\n rotations = np.pi/180.0*np.transpose(np.array([track.values[r1_col], track.values[r2_col], track.values[r3_col]]))\n\n \"\"\" Get Trajectory and smooth it\"\"\"\n trajectory_filterwidth = self.position_smoothing\n reference = positions.copy()*np.array([1,0,1])\n if trajectory_filterwidth>0:\n reference = filters.gaussian_filter1d(reference, trajectory_filterwidth, axis=0, mode='nearest')\n\n \"\"\" Get Root Velocity \"\"\"\n velocity = np.diff(reference, axis=0)\n velocity = np.vstack((velocity[0,:], velocity))\n\n \"\"\" Remove Root Translation \"\"\"\n positions = positions-reference\n\n \"\"\" Get Forward Direction along the x-z plane, assuming character is facig z-forward \"\"\"\n #forward = [Rotation(f, 'euler', from_deg=True, order=rot_order).rotmat[:,2] for f in rotations] # get the z-axis of the rotation matrix, assuming character is facig z-forward\n #print(\"order:\" + rot_order.lower())\n quats = Quaternions.from_euler(rotations, order=rot_order.lower(), world=False)\n forward = quats*np.array([[0,0,1]])\n forward[:,1] = 0\n\n \"\"\" Smooth Forward Direction \"\"\"\n direction_filterwidth = self.rotation_smoothing\n if direction_filterwidth>0:\n forward = filters.gaussian_filter1d(forward, direction_filterwidth, axis=0, mode='nearest')\n\n forward = forward / np.sqrt((forward**2).sum(axis=-1))[...,np.newaxis]\n\n \"\"\" Remove Y Rotation \"\"\"\n target = np.array([[0,0,1]]).repeat(len(forward), axis=0)\n rotation = Quaternions.between(target, forward)[:,np.newaxis]\n positions = (-rotation[:,0]) * positions\n new_rotations = (-rotation[:,0]) * quats\n velocity = (-rotation[:,0]) * velocity\n\n \"\"\" Get Root Rotation \"\"\"\n #print(rotation[:,0])\n rvelocity = Pivots.from_quaternions(rotation[1:] * -rotation[:-1]).ps\n rvelocity = np.vstack((rvelocity[0], rvelocity))\n\n eulers = np.array([t3d.euler.quat2euler(q, axes=('s'+rot_order.lower()[::-1]))[::-1] for q in new_rotations])*180.0/np.pi\n\n new_df = track.values.copy()\n\n root_pos_x = pd.Series(data=positions[:,0], index=new_df.index)\n root_pos_y = pd.Series(data=positions[:,1], index=new_df.index)\n root_pos_z = pd.Series(data=positions[:,2], index=new_df.index)\n root_pos_x_diff = pd.Series(data=velocity[:,0], index=new_df.index)\n root_pos_z_diff = pd.Series(data=velocity[:,2], index=new_df.index)\n\n root_rot_1 = pd.Series(data=eulers[:,0], index=new_df.index)\n root_rot_2 = pd.Series(data=eulers[:,1], index=new_df.index)\n root_rot_3 = pd.Series(data=eulers[:,2], index=new_df.index)\n root_rot_y_diff = pd.Series(data=rvelocity[:,0], index=new_df.index)\n\n #new_df.drop([xr_col, yr_col, zr_col, xp_col, zp_col], axis=1, inplace=True)\n\n new_df[xp_col] = root_pos_x\n new_df[yp_col] = root_pos_y\n new_df[zp_col] = root_pos_z\n new_df[dxp_col] = 
root_pos_x_diff\n new_df[dzp_col] = root_pos_z_diff\n\n new_df[r1_col] = root_rot_1\n new_df[r2_col] = root_rot_2\n new_df[r3_col] = root_rot_3\n #new_df[dxr_col] = root_rot_x_diff\n new_df[dyr_col] = root_rot_y_diff\n #new_df[dzr_col] = root_rot_z_diff\n\n new_track.values = new_df\n\n\n elif self.method == 'hip_centric':\n new_track = track.clone()\n\n # Absolute columns\n xp_col = '%s_Xposition'%track.root_name\n yp_col = '%s_Yposition'%track.root_name\n zp_col = '%s_Zposition'%track.root_name\n\n xr_col = '%s_Xrotation'%track.root_name\n yr_col = '%s_Yrotation'%track.root_name\n zr_col = '%s_Zrotation'%track.root_name\n\n new_df = track.values.copy()\n\n all_zeros = np.zeros(track.values[xp_col].values.shape)\n\n new_df[xp_col] = pd.Series(data=all_zeros, index=new_df.index)\n new_df[yp_col] = pd.Series(data=all_zeros, index=new_df.index)\n new_df[zp_col] = pd.Series(data=all_zeros, index=new_df.index)\n\n new_df[xr_col] = pd.Series(data=all_zeros, index=new_df.index)\n new_df[yr_col] = pd.Series(data=all_zeros, index=new_df.index)\n new_df[zr_col] = pd.Series(data=all_zeros, index=new_df.index)\n\n new_track.values = new_df\n\n #print(new_track.values.columns)\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None, start_pos=None):\n Q = []\n\n #TODO: simplify this implementation\n\n startx = 0\n startz = 0\n\n if start_pos is not None:\n startx, startz = start_pos\n\n for track in X:\n new_track = track.clone()\n if self.method == 'abdolute_translation_deltas':\n new_df = new_track.values\n xpcol = '%s_Xposition'%track.root_name\n ypcol = '%s_Yposition'%track.root_name\n zpcol = '%s_Zposition'%track.root_name\n\n\n dxpcol = '%s_dXposition'%track.root_name\n dzpcol = '%s_dZposition'%track.root_name\n\n dx = track.values[dxpcol].values\n dz = track.values[dzpcol].values\n\n recx = [startx]\n recz = [startz]\n\n for i in range(dx.shape[0]-1):\n recx.append(recx[i]+dx[i+1])\n recz.append(recz[i]+dz[i+1])\n\n # recx = [recx[i]+dx[i+1] for i in range(dx.shape[0]-1)]\n # recz = [recz[i]+dz[i+1] for i in range(dz.shape[0]-1)]\n # recx = dx[:-1] + dx[1:]\n # recz = dz[:-1] + dz[1:]\n if self.position_smoothing > 0:\n new_df[xpcol] = pd.Series(data=new_df[xpcol]+recx, index=new_df.index)\n new_df[zpcol] = pd.Series(data=new_df[zpcol]+recz, index=new_df.index)\n else:\n new_df[xpcol] = pd.Series(data=recx, index=new_df.index)\n new_df[zpcol] = pd.Series(data=recz, index=new_df.index)\n\n new_df.drop([dxpcol, dzpcol], axis=1, inplace=True)\n\n new_track.values = new_df\n # end of abdolute_translation_deltas\n\n elif self.method == 'pos_rot_deltas':\n # Absolute columns\n rot_order = track.skeleton[track.root_name]['order']\n xp_col = '%s_Xposition'%track.root_name\n yp_col = '%s_Yposition'%track.root_name\n zp_col = '%s_Zposition'%track.root_name\n\n xr_col = '%s_Xrotation'%track.root_name\n yr_col = '%s_Yrotation'%track.root_name\n zr_col = '%s_Zrotation'%track.root_name\n r1_col = '%s_%srotation'%(track.root_name, rot_order[0])\n r2_col = '%s_%srotation'%(track.root_name, rot_order[1])\n r3_col = '%s_%srotation'%(track.root_name, rot_order[2])\n\n # Delta columns\n dxp_col = '%s_dXposition'%track.root_name\n dzp_col = '%s_dZposition'%track.root_name\n\n dyr_col = '%s_dYrotation'%track.root_name\n\n positions = np.transpose(np.array([track.values[xp_col], track.values[yp_col], track.values[zp_col]]))\n rotations = np.pi/180.0*np.transpose(np.array([track.values[r1_col], track.values[r2_col], track.values[r3_col]]))\n quats = Quaternions.from_euler(rotations, 
order=rot_order.lower(), world=False)\n\n new_df = track.values.copy()\n\n dx = track.values[dxp_col].values\n dz = track.values[dzp_col].values\n\n dry = track.values[dyr_col].values\n\n #rec_p = np.array([startx, 0, startz])+positions[0,:]\n rec_ry = Quaternions.id(quats.shape[0])\n rec_xp = [0]\n rec_zp = [0]\n\n #rec_r = Quaternions.id(quats.shape[0])\n\n for i in range(dx.shape[0]-1):\n #print(dry[i])\n q_y = Quaternions.from_angle_axis(np.array(dry[i+1]), np.array([0,1,0]))\n rec_ry[i+1] = q_y*rec_ry[i]\n #print(\"dx: + \" + str(dx[i+1]))\n dp = rec_ry[i+1]*np.array([dx[i+1], 0, dz[i+1]])\n rec_xp.append(rec_xp[i]+dp[0,0])\n rec_zp.append(rec_zp[i]+dp[0,2])\n\n rec_r=rec_ry*quats\n pp=rec_ry*positions\n rec_xp = rec_xp + pp[:,0]\n rec_zp = rec_zp + pp[:,2]\n\n eulers = np.array([t3d.euler.quat2euler(q, axes=('s'+rot_order.lower()[::-1]))[::-1] for q in rec_r])*180.0/np.pi\n\n new_df = track.values.copy()\n\n root_rot_1 = pd.Series(data=eulers[:,0], index=new_df.index)\n root_rot_2 = pd.Series(data=eulers[:,1], index=new_df.index)\n root_rot_3 = pd.Series(data=eulers[:,2], index=new_df.index)\n\n new_df[xp_col] = pd.Series(data=rec_xp, index=new_df.index)\n new_df[zp_col] = pd.Series(data=rec_zp, index=new_df.index)\n\n new_df[r1_col] = pd.Series(data=root_rot_1, index=new_df.index)\n new_df[r2_col] = pd.Series(data=root_rot_2, index=new_df.index)\n new_df[r3_col] = pd.Series(data=root_rot_3, index=new_df.index)\n\n new_df.drop([dyr_col, dxp_col, dzp_col], axis=1, inplace=True)\n\n\n new_track.values = new_df\n\n #print(new_track.values.columns)\n Q.append(new_track)\n\n return Q\n\n\nclass RootCentricPositionNormalizer(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n for track in X:\n new_track = track.clone()\n\n rxp = '%s_Xposition'%track.root_name\n ryp = '%s_Yposition'%track.root_name\n rzp = '%s_Zposition'%track.root_name\n\n projected_root_pos = track.values[[rxp, ryp, rzp]]\n\n projected_root_pos.loc[:,ryp] = 0 # we want the root's projection on the floor plane as the ref\n\n new_df = pd.DataFrame(index=track.values.index)\n\n all_but_root = [joint for joint in track.skeleton if track.root_name not in joint]\n # all_but_root = [joint for joint in track.skeleton]\n for joint in all_but_root:\n new_df['%s_Xposition'%joint] = pd.Series(data=track.values['%s_Xposition'%joint]-projected_root_pos[rxp], index=new_df.index)\n new_df['%s_Yposition'%joint] = pd.Series(data=track.values['%s_Yposition'%joint]-projected_root_pos[ryp], index=new_df.index)\n new_df['%s_Zposition'%joint] = pd.Series(data=track.values['%s_Zposition'%joint]-projected_root_pos[rzp], index=new_df.index)\n\n\n # keep the root as it is now\n new_df[rxp] = track.values[rxp]\n new_df[ryp] = track.values[ryp]\n new_df[rzp] = track.values[rzp]\n\n new_track.values = new_df\n\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n new_track = track.clone()\n\n rxp = '%s_Xposition'%track.root_name\n ryp = '%s_Yposition'%track.root_name\n rzp = '%s_Zposition'%track.root_name\n\n projected_root_pos = track.values[[rxp, ryp, rzp]]\n\n projected_root_pos.loc[:,ryp] = 0 # we want the root's projection on the floor plane as the ref\n\n new_df = pd.DataFrame(index=track.values.index)\n\n for joint in track.skeleton:\n new_df['%s_Xposition'%joint] = pd.Series(data=track.values['%s_Xposition'%joint]+projected_root_pos[rxp], index=new_df.index)\n 
new_df['%s_Yposition'%joint] = pd.Series(data=track.values['%s_Yposition'%joint]+projected_root_pos[ryp], index=new_df.index)\n new_df['%s_Zposition'%joint] = pd.Series(data=track.values['%s_Zposition'%joint]+projected_root_pos[rzp], index=new_df.index)\n\n\n new_track.values = new_df\n\n Q.append(new_track)\n\n return Q\n\nclass Flattener(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n return np.concatenate(X, axis=0)\n\nclass ConstantsRemover(BaseEstimator, TransformerMixin):\n '''\n For now it just looks at the first track\n '''\n\n def __init__(self, eps = 1e-6, only_cols=None):\n self.eps = eps\n self.only_cols = only_cols\n\n\n def fit(self, X, y=None):\n stds = X[0].values.std()\n cols = X[0].values.columns.values\n if self.only_cols is not None:\n self.const_dims_ = [c for c in cols if ((stds[c] < self.eps).any()) and c in self.only_cols]\n else:\n self.const_dims_ = [c for c in cols if (stds[c] < self.eps).any()]\n # self.const_values_ = {c:X[0].values[c].values[0] for c in cols if (stds[c] < self.eps).any()}\n self.const_values_ = {c:X[0].values[c].values[0] for c in cols if self.const_dims_}\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n\n for track in X:\n t2 = track.clone()\n #for key in t2.skeleton.keys():\n # if key in self.ConstDims_:\n # t2.skeleton.pop(key)\n #print(track.values.columns.difference(self.const_dims_))\n t2.values.drop(self.const_dims_, axis=1, inplace=True)\n #t2.values = track.values[track.values.columns.difference(self.const_dims_)]\n Q.append(t2)\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n t2 = track.clone()\n for d in self.const_dims_:\n t2.values[d] = self.const_values_[d]\n# t2.values.assign(d=pd.Series(data=self.const_values_[d], index = t2.values.index))\n Q.append(t2)\n\n return Q\n\nclass ListStandardScaler(BaseEstimator, TransformerMixin):\n def __init__(self, is_DataFrame=False):\n self.is_DataFrame = is_DataFrame\n\n def fit(self, X, y=None):\n if self.is_DataFrame:\n X_train_flat = np.concatenate([m.values for m in X], axis=0)\n else:\n X_train_flat = np.concatenate([m for m in X], axis=0)\n\n self.data_mean_ = np.mean(X_train_flat, axis=0)\n self.data_std_ = np.std(X_train_flat, axis=0)\n\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n for track in X:\n if self.is_DataFrame:\n normalized_track = track.copy()\n normalized_track.values = (track.values - self.data_mean_) / self.data_std_\n else:\n normalized_track = (track - self.data_mean_) / self.data_std_\n\n Q.append(normalized_track)\n\n if self.is_DataFrame:\n return Q\n else:\n return np.array(Q)\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n\n if self.is_DataFrame:\n unnormalized_track = track.copy()\n unnormalized_track.values = (track.values * self.data_std_) + self.data_mean_\n else:\n unnormalized_track = (track * self.data_std_) + self.data_mean_\n\n Q.append(unnormalized_track)\n\n if self.is_DataFrame:\n return Q\n else:\n return np.array(Q)\n\nclass ListMinMaxScaler(BaseEstimator, TransformerMixin):\n def __init__(self, is_DataFrame=False):\n self.is_DataFrame = is_DataFrame\n\n def fit(self, X, y=None):\n if self.is_DataFrame:\n X_train_flat = np.concatenate([m.values for m in X], axis=0)\n else:\n X_train_flat = np.concatenate([m for m in X], axis=0)\n\n self.data_max_ = np.max(X_train_flat, axis=0)\n self.data_min_ = np.min(X_train_flat, axis=0)\n\n return self\n\n def 
transform(self, X, y=None):\n Q = []\n\n for track in X:\n if self.is_DataFrame:\n normalized_track = track.copy()\n normalized_track.values = (track.values - self.data_min_) / (self.data_max_ - self.data_min_)\n else:\n normalized_track = (track - self.data_min_) / (self.data_max_ - self.data_min_)\n\n Q.append(normalized_track)\n\n if self.is_DataFrame:\n return Q\n else:\n return np.array(Q)\n\n def inverse_transform(self, X, copy=None):\n Q = []\n\n for track in X:\n\n if self.is_DataFrame:\n unnormalized_track = track.copy()\n unnormalized_track.values = (track.values * (self.data_max_ - self.data_min_)) + self.data_min_\n else:\n unnormalized_track = (track * (self.data_max_ - self.data_min_)) + self.data_min_\n\n Q.append(unnormalized_track)\n\n if self.is_DataFrame:\n return Q\n else:\n return np.array(Q)\n\nclass DownSampler(BaseEstimator, TransformerMixin):\n def __init__(self, tgt_fps, keep_all=False):\n self.tgt_fps = tgt_fps\n self.keep_all = keep_all\n\n\n def fit(self, X, y=None):\n\n return self\n\n def transform(self, X, y=None):\n Q = []\n\n for track in X:\n orig_fps=round(1.0/track.framerate)\n rate = orig_fps//self.tgt_fps\n if orig_fps%self.tgt_fps!=0:\n print(\"error orig_fps (\" + str(orig_fps) + \") is not dividable with tgt_fps (\" + str(self.tgt_fps) + \")\")\n else:\n print(\"downsampling with rate: \" + str(rate))\n\n #print(track.values.size)\n for ii in range(0,rate):\n new_track = track.clone()\n new_track.values = track.values[ii:-1:rate].copy()\n #print(new_track.values.size)\n #new_track = track[0:-1:self.rate]\n new_track.framerate = 1.0/self.tgt_fps\n Q.append(new_track)\n if not self.keep_all:\n break\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n return X\n\nclass ReverseTime(BaseEstimator, TransformerMixin):\n def __init__(self, append=True):\n self.append = append\n\n\n def fit(self, X, y=None):\n\n return self\n\n def transform(self, X, y=None):\n Q = []\n if self.append:\n for track in X:\n Q.append(track)\n for track in X:\n new_track = track.clone()\n new_track.values = track.values[-1::-1]\n Q.append(new_track)\n\n return Q\n\n def inverse_transform(self, X, copy=None):\n return X\n\n#TODO: JointsSelector (x)\n#TODO: SegmentMaker\n#TODO: DynamicFeaturesAdder\n#TODO: ShapeFeaturesAdder\n#TODO: DataFrameNumpier (x)\n\nclass TemplateTransform(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n return X\n"
] | [
[
"numpy.vstack",
"numpy.matmul",
"pandas.Series",
"numpy.sin",
"numpy.zeros",
"numpy.diff",
"pandas.DataFrame",
"numpy.cos",
"numpy.asarray",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.max",
"numpy.min",
"numpy.nonzero",
"numpy.array",
"numpy.std",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.mean"
]
] |
techthiyanes/openspeech | [
"10307587f08615224df5a868fb5249c68c70b12d"
] | [
"openspeech/search/beam_search_rnn_transducer.py"
] | [
"# MIT License\n#\n# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport torch\n\nfrom openspeech.search.beam_search_base import OpenspeechBeamSearchBase\nfrom openspeech.decoders import RNNTransducerDecoder\n\n\nclass BeamSearchRNNTransducer(OpenspeechBeamSearchBase):\n r\"\"\"\n RNN Transducer Beam Search\n Reference: RNN-T FOR LATENCY CONTROLLED ASR WITH IMPROVED BEAM SEARCH (https://arxiv.org/pdf/1911.01629.pdf)\n\n Args: joint, decoder, beam_size, expand_beam, state_beam, blank_id\n joint: joint `encoder_outputs` and `decoder_outputs`\n decoder (TransformerTransducerDecoder): base decoder of transformer transducer model.\n beam_size (int): size of beam.\n expand_beam (int): The threshold coefficient to limit the number of expanded hypotheses.\n state_beam (int): The threshold coefficient to decide if hyps in A (process_hyps)\n is likely to compete with hyps in B (ongoing_beams)\n blank_id (int): blank id\n\n Inputs: encoder_output, max_length\n encoder_output (torch.FloatTensor): A output sequence of encoders. `FloatTensor` of size\n ``(seq_length, dimension)``\n max_length (int): max decoding time step\n\n Returns:\n * predictions (torch.LongTensor): model predictions.\n \"\"\"\n def __init__(\n self,\n joint,\n decoder: RNNTransducerDecoder,\n beam_size: int = 3,\n expand_beam: float = 2.3,\n state_beam: float = 4.6,\n blank_id: int = 3,\n ) -> None:\n super(BeamSearchRNNTransducer, self).__init__(decoder, beam_size)\n self.joint = joint\n self.expand_beam = expand_beam\n self.state_beam = state_beam\n self.blank_id = blank_id\n\n def forward(self, encoder_outputs: torch.Tensor, max_length: int):\n r\"\"\"\n Beam search decoding.\n\n Inputs: encoder_output, max_length\n encoder_outputs (torch.FloatTensor): A output sequence of encoders. 
`FloatTensor` of size\n ``(batch, seq_length, dimension)``\n max_length (int): max decoding time step\n\n Returns:\n * predictions (torch.LongTensor): model predictions.\n \"\"\"\n hypothesis = list()\n hypothesis_score = list()\n\n for batch_idx in range(encoder_outputs.size(0)):\n blank = (\n torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.blank_id\n )\n step_input = (\n torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.sos_id\n )\n hyp = {\n \"prediction\": [self.sos_id],\n \"logp_score\": 0.0,\n \"hidden_states\": None,\n }\n ongoing_beams = [hyp]\n\n for t_step in range(max_length):\n process_hyps = ongoing_beams\n ongoing_beams = list()\n\n while True:\n if len(ongoing_beams) >= self.beam_size:\n break\n\n a_best_hyp = max(process_hyps, key=lambda x: x[\"logp_score\"] / len(x[\"prediction\"]))\n\n if len(ongoing_beams) > 0:\n b_best_hyp = max(\n ongoing_beams,\n key=lambda x: x[\"logp_score\"] / len(x[\"prediction\"]),\n )\n\n a_best_prob = a_best_hyp[\"logp_score\"]\n b_best_prob = b_best_hyp[\"logp_score\"]\n\n if b_best_prob >= self.state_beam + a_best_prob:\n break\n\n process_hyps.remove(a_best_hyp)\n\n step_input[0, 0] = a_best_hyp[\"prediction\"][-1]\n\n step_outputs, hidden_states = self.decoder(step_input, a_best_hyp[\"hidden_states\"])\n log_probs = self.joint(encoder_outputs[batch_idx, t_step, :], step_outputs.view(-1))\n\n topk_targets, topk_idx = log_probs.topk(k=self.beam_size)\n\n if topk_idx[0] != blank:\n best_logp = topk_targets[0]\n else:\n best_logp = topk_targets[1]\n\n for j in range(topk_targets.size(0)):\n topk_hyp = {\n \"prediction\": a_best_hyp[\"prediction\"][:],\n \"logp_score\": a_best_hyp[\"logp_score\"] + topk_targets[j],\n \"hidden_states\": a_best_hyp[\"hidden_states\"],\n }\n\n if topk_idx[j] == self.blank_id:\n ongoing_beams.append(topk_hyp)\n continue\n\n if topk_targets[j] >= best_logp - self.expand_beam:\n topk_hyp[\"prediction\"].append(topk_idx[j].item())\n topk_hyp[\"hidden_states\"] = hidden_states\n process_hyps.append(topk_hyp)\n\n ongoing_beams = sorted(\n ongoing_beams,\n key=lambda x: x[\"logp_score\"] / len(x[\"prediction\"]),\n reverse=True,\n )[0]\n\n hypothesis.append(torch.LongTensor(ongoing_beams[\"prediction\"][1:]))\n hypothesis_score.append(ongoing_beams[\"logp_score\"] / len(ongoing_beams[\"prediction\"]))\n\n return self._fill_sequence(hypothesis)"
] | [
[
"torch.ones",
"torch.LongTensor"
]
] |